repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
kevinconway/confpy | confpy/core/namespace.py | Namespace.set | python | def set(self, name, value):
"""Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
AttributeError: If the name is not registered.
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
"""
if name not in self._options:
raise AttributeError("Option {0} does not exist.".format(name))
return self._options[name].__set__(self, value) | Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
AttributeError: If the name is not registered.
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/core/namespace.py#L63-L79 | null | class Namespace(object):
"""A collection of configuration options."""
def __init__(self, description=None, **options):
"""Initalize the Namespace with options
Args:
description (str, optional): A human readable description of what
the Namespace contains.
**options: Each keyword should be an Option object which will be
added to the Namespace.
Raises:
TypeError: If an entry is not an Option object.
"""
self.__doc__ = description
self._options = {}
for name, option in compat.iteritems(options):
self.register(name, option)
super(Namespace, self).__init__()
@property
def description(self):
"""Get the description of what the namespace contains."""
return self.__doc__
def get(self, name, default=None):
"""Fetch an option from the dictionary.
Args:
name (str): The name of the option.
default: The value to return if the name is missing.
Returns:
any: The value stored by the option.
This method resolves the option to its value rather than returning
the option object itself. Use the 'options()' method or this object's
iter to get the raw options.
"""
option = self._options.get(name, None)
if option is None:
return default
return option.__get__(self)
def set(self, name, value):
"""Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
AttributeError: If the name is not registered.
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
"""
if name not in self._options:
raise AttributeError("Option {0} does not exist.".format(name))
return self._options[name].__set__(self, value)
def register(self, name, option):
"""Register a new option with the namespace.
Args:
name (str): The name to register the option under.
option (option.Option): The option object to register.
Raises:
TypeError: If the option is not an option.Option object.
ValueError: If the name is already registered.
"""
if name in self._options:
raise ValueError("Option {0} already exists.".format(name))
if not isinstance(option, opt.Option):
raise TypeError("Options must be of type Option.")
self._options[name] = option
def options(self):
"""Get an iterable of two-tuples containing name and option.
The name in this case is the name given at registration time which is
used to identify an option and look it up on the object. The
option is the actual Option object.
"""
return compat.iteritems(self._options)
def __iter__(self):
"""Proxy iter attempts to the 'options' method."""
return iter(self.options())
def __setattr__(self, name, value):
"""Proxy attribute sets to the 'register' method if needed.
If the value is an option object this call gets proxied to 'register'.
If the value is anything else this method will follow the standard
setattr behaviour unless the target is an option in which case the
method is proxied to 'set'.
"""
if isinstance(value, opt.Option):
return self.register(name, value)
if not hasattr(self, name):
return object.__setattr__(self, name, value)
# The options dictionary may not be set yet if this is getting called
# from the init method. Check for the attribute before accessing it to
# avoid infinite recursion.
if hasattr(self, '_options') and name in self._options:
return self.set(name, value)
return object.__setattr__(self, name, value)
def __getattr__(self, name):
"""Lookup missing attributes in the options dictionary."""
# PY3 'hasattr' behaviour changed to utilize the 'getattr' which causes
# infinite recursion if it is used before the options dictionary is
# created. Lookup up the attribute directly in the instance dictionary
# here to avoid that scenario.
if '_options' not in self.__dict__ or name not in self._options:
raise AttributeError("Option {0} does not exist.".format(name))
return self.get(name)
|
kevinconway/confpy | confpy/core/namespace.py | Namespace.register | python | def register(self, name, option):
"""Register a new option with the namespace.
Args:
name (str): The name to register the option under.
option (option.Option): The option object to register.
Raises:
TypeError: If the option is not an option.Option object.
ValueError: If the name is already registered.
"""
if name in self._options:
raise ValueError("Option {0} already exists.".format(name))
if not isinstance(option, opt.Option):
raise TypeError("Options must be of type Option.")
self._options[name] = option | Register a new option with the namespace.
Args:
name (str): The name to register the option under.
option (option.Option): The option object to register.
Raises:
TypeError: If the option is not an option.Option object.
ValueError: If the name is already registered. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/core/namespace.py#L81-L100 | null | class Namespace(object):
"""A collection of configuration options."""
def __init__(self, description=None, **options):
"""Initalize the Namespace with options
Args:
description (str, optional): A human readable description of what
the Namespace contains.
**options: Each keyword should be an Option object which will be
added to the Namespace.
Raises:
TypeError: If an entry is not an Option object.
"""
self.__doc__ = description
self._options = {}
for name, option in compat.iteritems(options):
self.register(name, option)
super(Namespace, self).__init__()
@property
def description(self):
"""Get the description of what the namespace contains."""
return self.__doc__
def get(self, name, default=None):
"""Fetch an option from the dictionary.
Args:
name (str): The name of the option.
default: The value to return if the name is missing.
Returns:
any: The value stored by the option.
This method resolves the option to its value rather than returning
the option object itself. Use the 'options()' method or this object's
iter to get the raw options.
"""
option = self._options.get(name, None)
if option is None:
return default
return option.__get__(self)
def set(self, name, value):
"""Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
AttributeError: If the name is not registered.
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
"""
if name not in self._options:
raise AttributeError("Option {0} does not exist.".format(name))
return self._options[name].__set__(self, value)
def register(self, name, option):
"""Register a new option with the namespace.
Args:
name (str): The name to register the option under.
option (option.Option): The option object to register.
Raises:
TypeError: If the option is not an option.Option object.
ValueError: If the name is already registered.
"""
if name in self._options:
raise ValueError("Option {0} already exists.".format(name))
if not isinstance(option, opt.Option):
raise TypeError("Options must be of type Option.")
self._options[name] = option
def options(self):
"""Get an iterable of two-tuples containing name and option.
The name in this case is the name given at registration time which is
used to identify an option and look it up on the object. The
option is the actual Option object.
"""
return compat.iteritems(self._options)
def __iter__(self):
"""Proxy iter attempts to the 'options' method."""
return iter(self.options())
def __setattr__(self, name, value):
"""Proxy attribute sets to the 'register' method if needed.
If the value is an option object this call gets proxied to 'register'.
If the value is anything else this method will follow the standard
setattr behaviour unless the target is an option in which case the
method is proxied to 'set'.
"""
if isinstance(value, opt.Option):
return self.register(name, value)
if not hasattr(self, name):
return object.__setattr__(self, name, value)
# The options dictionary may not be set yet if this is getting called
# from the init method. Check for the attribute before accessing it to
# avoid infinite recursion.
if hasattr(self, '_options') and name in self._options:
return self.set(name, value)
return object.__setattr__(self, name, value)
def __getattr__(self, name):
"""Lookup missing attributes in the options dictionary."""
# PY3 'hasattr' behaviour changed to utilize the 'getattr' which causes
# infinite recursion if it is used before the options dictionary is
# created. Lookup up the attribute directly in the instance dictionary
# here to avoid that scenario.
if '_options' not in self.__dict__ or name not in self._options:
raise AttributeError("Option {0} does not exist.".format(name))
return self.get(name)
|
kevinconway/confpy | confpy/core/namespace.py | AutoNamespace.set | python | def set(self, name, value):
"""Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
If the name is not registered a new option will be created using the
option generator.
"""
if name not in self._options:
self.register(name, self._generator())
return self._options[name].__set__(self, value) | Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
If the name is not registered a new option will be created using the
option generator. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/core/namespace.py#L175-L193 | [
"def register(self, name, option):\n \"\"\"Register a new option with the namespace.\n\n Args:\n name (str): The name to register the option under.\n option (option.Option): The option object to register.\n\n Raises:\n TypeError: If the option is not an option.Option object.\n V... | class AutoNamespace(Namespace):
"""Namespace which automatically defined options of a given type."""
def __init__(self, description=None, type=None, **options):
"""Initialize the Namespace with a type generator.
Args:
description (str, optional): A human readable description of what
the Namespace contains.
type (Option): The Option class to use when generating dynamic
options.
**options: Each keyword should be an Option object which will be
added to the Namespace.
Raises:
ValueError: If type is not given or is not an Option class.
TypeError: If an entry is not an Option object.
"""
super(AutoNamespace, self).__init__(description=description, **options)
self._generator = type
def set(self, name, value):
"""Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
If the name is not registered a new option will be created using the
option generator.
"""
if name not in self._options:
self.register(name, self._generator())
return self._options[name].__set__(self, value)
def __setattr__(self, name, value):
"""Proxy attribute sets to the 'register' method if needed.
If the value is an option object this call gets proxied to 'register'.
If the value is anything else this method will follow the standard
setattr behaviour unless the target is an option in which case the
method is proxied to 'set'.
"""
if isinstance(value, opt.Option):
return self.register(name, value)
if not hasattr(self, name):
return object.__setattr__(self, name, value)
# The options dictionary may not be set yet if this is getting called
# from the init method. Check for the attribute before accessing it to
# avoid infinite recursion.
if hasattr(self, '_options') and hasattr(self, '_generator'):
return self.set(name, value)
return object.__setattr__(self, name, value)
def __getattr__(self, name):
"""Lookup missing attributes in the options dictionary."""
# PY3 'hasattr' behaviour changed to utilize the 'getattr' which causes
# infinite recursion if it is used before the options dictionary is
# created. Lookup up the attribute directly in the instance dictionary
# here to avoid that scenario.
if (
'_options' not in self.__dict__ or
'_generator' not in self.__dict__
):
raise AttributeError("Attribute {0} does not exist.".format(name))
if name not in self._options:
self._options[name] = self._generator()
return self.get(name)
|
kevinconway/confpy | confpy/loaders/json.py | JsonFile.parsed | python | def parsed(self):
if not self._parsed:
self._parsed = json.loads(self.content)
return self._parsed | Get the JSON dictionary object which represents the content.
This property is cached and only parses the content once. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/loaders/json.py#L22-L31 | null | class JsonFile(base.ConfigurationFile):
"""Configuration file parser for JSON style files."""
def __init__(self, *args, **kwargs):
super(JsonFile, self).__init__(*args, **kwargs)
self._parsed = None
@property
@property
def namespaces(self):
"""Get an iterable of str representing namespaces within the config."""
return self.parsed.keys()
def items(self, namespace):
"""Get a dictionary of entries under a given namespace."""
return self.parsed.copy().get(namespace, {})
|
kevinconway/confpy | confpy/options/numopt.py | IntegerOption.coerce | python | def coerce(self, value):
"""Convert text values into integer values.
Args:
value (str or int): The value to coerce.
Raises:
TypeError: If the value is not an int or string.
ValueError: If the value is not int or an acceptable value.
Returns:
int: The integer value represented.
"""
if isinstance(value, int) or isinstance(value, compat.long):
return value
return int(value) | Convert text values into integer values.
Args:
value (str or int): The value to coerce.
Raises:
TypeError: If the value is not an int or string.
ValueError: If the value is not int or an acceptable value.
Returns:
int: The integer value represented. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/options/numopt.py#L16-L33 | null | class IntegerOption(option.Option):
"""An option which represents an integer value."""
def coerce(self, value):
"""Convert text values into integer values.
Args:
value (str or int): The value to coerce.
Raises:
TypeError: If the value is not an int or string.
ValueError: If the value is not int or an acceptable value.
Returns:
int: The integer value represented.
"""
if isinstance(value, int) or isinstance(value, compat.long):
return value
return int(value)
|
kevinconway/confpy | confpy/cmd.py | generate_example | python | def generate_example():
cmd_args = sys.argv[1:]
parser = argparse.ArgumentParser(description='Confpy example generator.')
parser.add_argument(
'--module',
action='append',
help='A python module which should be imported.',
)
parser.add_argument(
'--file',
action='append',
help='A python file which should be evaled.',
)
parser.add_argument(
'--format',
default='JSON',
choices=('JSON', 'INI'),
help='The output format of the configuration file.',
)
args = parser.parse_args(cmd_args)
for module in args.module or ():
__import__(module)
for source_file in args.file or ():
cfg = pyfile.PythonFile(path=source_file).config
cfg = config.Configuration()
print(example.generate_example(cfg, ext=args.format)) | Generate a configuration file example.
This utility will load some number of Python modules which are assumed
to register options with confpy and generate an example configuration file
based on those options. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/cmd.py#L16-L54 | [
"def generate_example(config, ext='json'):\n \"\"\"Generate an example file based on the given Configuration object.\n\n Args:\n config (confpy.core.configuration.Configuration): The configuration\n object on which to base the example.\n ext (str): The file extension to render. Choice... | """Command line applications for confpy."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import sys
from .core import config
from .loaders import pyfile
from . import example
|
kevinconway/confpy | confpy/loaders/base.py | ConfigurationFile.content | python | def content(self):
if not self._content:
self._content = self._read()
return self._content | Get the file contents.
This property is cached. The file is only read once. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/loaders/base.py#L35-L44 | [
"def _read(self):\n \"\"\"Open the file and return its contents.\"\"\"\n with open(self.path, 'r') as file_handle:\n\n content = file_handle.read()\n\n # Py27 INI config parser chokes if the content provided is not unicode.\n # All other versions seems to work appropriately. Forcing the value to\... | class ConfigurationFile(object):
"""Base class for configuration file parsers."""
def __init__(self, path, strict=True):
self._path = path
self._content = None
self._strict = strict
@property
def path(self):
"""Get the file path given at initialization."""
return self._path
@property
def abspath(self):
"""Get the absolute path to the file."""
return os.path.abspath(self._path)
@property
@property
def config(self):
"""Get a Configuration object from the file contents."""
conf = config.Configuration()
for namespace in self.namespaces:
if not hasattr(conf, namespace):
if not self._strict:
continue
raise exc.NamespaceNotRegistered(
"The namespace {0} is not registered.".format(namespace)
)
name = getattr(conf, namespace)
for item, value in compat.iteritems(self.items(namespace)):
if not hasattr(name, item):
if not self._strict:
continue
raise exc.OptionNotRegistered(
"The option {0} is not registered.".format(item)
)
setattr(name, item, value)
return conf
@property
def namespaces(self):
"""Get an iterable of str representing namespaces within the config."""
raise NotImplementedError()
def items(self, namespace):
"""Get a dictionary of entries under a given namespace."""
raise NotImplementedError()
def _read(self):
"""Open the file and return its contents."""
with open(self.path, 'r') as file_handle:
content = file_handle.read()
# Py27 INI config parser chokes if the content provided is not unicode.
# All other versions seems to work appropriately. Forcing the value to
# unicode here in order to resolve this issue.
return compat.unicode(content)
|
kevinconway/confpy | confpy/loaders/base.py | ConfigurationFile.config | python | def config(self):
conf = config.Configuration()
for namespace in self.namespaces:
if not hasattr(conf, namespace):
if not self._strict:
continue
raise exc.NamespaceNotRegistered(
"The namespace {0} is not registered.".format(namespace)
)
name = getattr(conf, namespace)
for item, value in compat.iteritems(self.items(namespace)):
if not hasattr(name, item):
if not self._strict:
continue
raise exc.OptionNotRegistered(
"The option {0} is not registered.".format(item)
)
setattr(name, item, value)
return conf | Get a Configuration object from the file contents. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/loaders/base.py#L47-L78 | [
"def iteritems(dictionary):\n \"\"\"Replacement to account for iteritems/items switch in Py3.\"\"\"\n if hasattr(dictionary, 'iteritems'):\n\n return dictionary.iteritems()\n\n return dictionary.items()\n",
"def items(self, namespace):\n \"\"\"Get a dictionary of entries under a given namespace... | class ConfigurationFile(object):
"""Base class for configuration file parsers."""
def __init__(self, path, strict=True):
self._path = path
self._content = None
self._strict = strict
@property
def path(self):
"""Get the file path given at initialization."""
return self._path
@property
def abspath(self):
"""Get the absolute path to the file."""
return os.path.abspath(self._path)
@property
def content(self):
"""Get the file contents.
This property is cached. The file is only read once.
"""
if not self._content:
self._content = self._read()
return self._content
@property
@property
def namespaces(self):
"""Get an iterable of str representing namespaces within the config."""
raise NotImplementedError()
def items(self, namespace):
"""Get a dictionary of entries under a given namespace."""
raise NotImplementedError()
def _read(self):
"""Open the file and return its contents."""
with open(self.path, 'r') as file_handle:
content = file_handle.read()
# Py27 INI config parser chokes if the content provided is not unicode.
# All other versions seems to work appropriately. Forcing the value to
# unicode here in order to resolve this issue.
return compat.unicode(content)
|
kevinconway/confpy | confpy/loaders/base.py | ConfigurationFile._read | python | def _read(self):
with open(self.path, 'r') as file_handle:
content = file_handle.read()
# Py27 INI config parser chokes if the content provided is not unicode.
# All other versions seems to work appropriately. Forcing the value to
# unicode here in order to resolve this issue.
return compat.unicode(content) | Open the file and return its contents. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/loaders/base.py#L89-L98 | null | class ConfigurationFile(object):
"""Base class for configuration file parsers."""
def __init__(self, path, strict=True):
self._path = path
self._content = None
self._strict = strict
@property
def path(self):
"""Get the file path given at initialization."""
return self._path
@property
def abspath(self):
"""Get the absolute path to the file."""
return os.path.abspath(self._path)
@property
def content(self):
"""Get the file contents.
This property is cached. The file is only read once.
"""
if not self._content:
self._content = self._read()
return self._content
@property
def config(self):
"""Get a Configuration object from the file contents."""
conf = config.Configuration()
for namespace in self.namespaces:
if not hasattr(conf, namespace):
if not self._strict:
continue
raise exc.NamespaceNotRegistered(
"The namespace {0} is not registered.".format(namespace)
)
name = getattr(conf, namespace)
for item, value in compat.iteritems(self.items(namespace)):
if not hasattr(name, item):
if not self._strict:
continue
raise exc.OptionNotRegistered(
"The option {0} is not registered.".format(item)
)
setattr(name, item, value)
return conf
@property
def namespaces(self):
"""Get an iterable of str representing namespaces within the config."""
raise NotImplementedError()
def items(self, namespace):
"""Get a dictionary of entries under a given namespace."""
raise NotImplementedError()
|
kevinconway/confpy | confpy/core/config.py | Configuration.register | python | def register(self, name, namespace):
if name in self._NAMESPACES:
raise ValueError("Namespace {0} already exists.".format(name))
if not isinstance(namespace, ns.Namespace):
raise TypeError("Namespaces must be of type Namespace.")
self._NAMESPACES[name] = namespace | Register a new namespace with the Configuration object.
Args:
name (str): The name of the section/namespace.
namespace (namespace.Namespace): The Namespace object to store.
Raises:
TypeError: If the namespace is not a Namespace object.
ValueError: If the namespace is already registered. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/core/config.py#L53-L72 | null | class Configuration(object):
"""A configuration file.
Instances of this class act as a global singleton by sharing a dictionary
of values which is attached to the class. All subclasses will also express
this behaviour. However, if a subclass wishes to maintain a dictionary
separate from this parent it should overwrite the '_NAMESPACES' attribute
with a new class dictionary.
"""
_NAMESPACES = {}
def __init__(self, **namespaces):
"""Initialize a configuration with a series of namespaces.
Args:
**namespaces: Each keyword should be a Namespace object which will
be added to the configuration file.
Raises:
TypeError: If an entry is not a Namespace object.
ValueError: If the namespace is already registered.
"""
super(Configuration, self).__init__()
for key, entry in compat.iteritems(namespaces):
self.register(key, entry)
def get(self, name, default=None):
"""Fetch a namespace from the dictionary.
Args:
name (str): The name of the section/namespace.
default: The value to return if the name is missing.
Returns:
namespace.Namespace: The namespace registered under the given name.
"""
return self._NAMESPACES.get(name, default)
def namespaces(self):
"""Get an iterable of two-tuples containing name and namespace.
The name in this case is the name given at registration time which is
used to identify a namespace and look it up on the object. The
namespace is the actual Namespace object.
"""
return iter(compat.iteritems(self._NAMESPACES))
def __iter__(self):
"""Proxy iter attempts to the 'namespaces' method."""
return self.namespaces()
def __setattr__(self, name, value):
"""Proxy all attribute sets to the 'register' method."""
self.register(name, value)
def __getattr__(self, name):
"""Lookup missing attributes in the _NAMESPACES dictionary."""
attr = self.get(name)
if not attr:
raise AttributeError("Namespace {0} does not exist.".format(name))
return attr
|
kevinconway/confpy | confpy/loaders/pyfile.py | PythonFile.parsed | python | def parsed(self):
if not self._parsed:
self._parsed = compile(self.content, self.path, 'exec')
return self._parsed | Get the code object which represents the compiled Python file.
This property is cached and only parses the content once. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/loaders/pyfile.py#L26-L35 | null | class PythonFile(base.ConfigurationFile):
"""Configuration file parser for Python files.
Unlike static format configuration files, Python files are expected to
generate side-effects by interacting in some way with the Configuration
singleton.
"""
def __init__(self, *args, **kwargs):
super(PythonFile, self).__init__(*args, **kwargs)
self._parsed = None
@property
@property
def config(self):
"""Get a Configuration object from the file contents."""
exec(self.parsed, {}, None)
return config.Configuration()
@property
def namespaces(self):
"""Get an empty iterable.
An iterable of namespaces cannot be generated from Python files.
"""
return ()
def items(self, namespace):
"""Get an empty iterable.
An iterable of items cannot be generated from Python files.
"""
return ()
|
kevinconway/confpy | confpy/example.py | generate_example | python | def generate_example(config, ext='json'):
template_name = 'example.{0}'.format(ext.lower())
template = ENV.get_template(template_name)
return template.render(config=config) | Generate an example file based on the given Configuration object.
Args:
config (confpy.core.configuration.Configuration): The configuration
object on which to base the example.
ext (str): The file extension to render. Choices: JSON and INI.
Returns:
str: The text of the example file. | train | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/example.py#L60-L73 | null | """Configuration file example generator."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
try:
import jinja2
except ImportError as exc:
raise exc
except Exception as exc:
if sys.version_info[0] == 3 and sys.version_info[1] == 2:
raise ImportError(
"Example generator cannot be imported in Python 3.2.X."
)
raise exc
ENV = jinja2.Environment(
loader=jinja2.PackageLoader('confpy', 'templates'),
)
def generate_example_ini(config):
"""Generate an INI file based on the given Configuration object.
Args:
config (confpy.core.configuration.Configuration): The configuration
object on which to base the example.
Returns:
str: The text of the example INI file.
"""
return generate_example(config, ext='INI')
def generate_example_json(config):
"""Generate an JSON file based on the given Configuration object.
Args:
config (confpy.core.configuration.Configuration): The configuration
object on which to base the example.
Returns:
str: The text of the example JSON file.
"""
return generate_example(config, ext='JSON')
|
mardix/Yass | yass/cli.py | copy_resource | python | def copy_resource(src, dest):
package_name = "yass"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource(src + "/" + res, dest)
else:
if not os.path.isfile(dest) \
and os.path.splitext(src)[1] not in [".pyc"]:
with open(dest, "wb") as f:
f.write(pkg_resources.resource_string(__name__, src))
else:
print("File exists: %s " % dest) | To copy package data to destination | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/cli.py#L61-L78 | [
"def copy_resource(src, dest):\n \"\"\"\n To copy package data to destination\n \"\"\"\n package_name = \"yass\"\n dest = (dest + \"/\" + os.path.basename(src)).rstrip(\"/\")\n if pkg_resources.resource_isdir(package_name, src):\n if not os.path.isdir(dest):\n os.makedirs(dest)\n... |
import os
import sys
import time
import click
import pkg_resources
from livereload import Server, shell
from . import Yass, publisher
from .yass import PAGE_FORMAT
from .__about__ import *
CWD = os.getcwd()
TPL_HEADER = """
---
title: Page Title
description: Page Description
meta:
key: value
---
"""
TPL_BODY = {
# JADE
"jade": """
.row
.col-md-12.text-center
h1
strong.
{{ page.title }}
h3.
Ok Yass!
.row
.col-md-12
""",
# HTML
"html": """
<div class=\"row\">
<div class=\"col-md-12\">
This is Yass!
</div>
</div>
""",
# MD
"md": """
# My markdown Yass!
"""
}
def stamp_yass_current_version(dir):
f = os.path.join(dir, "yass.yml")
if os.path.isfile(f):
with open(f, "r+") as file:
content = file.read()
content = content.replace("##VERSION##", __version__)
file.seek(0)
file.write(content)
file.truncate()
def footer():
print("-" * 80)
def alert(message):
print("::ALERT::")
print(message)
def error(message):
print("::ERROR::")
print(message)
@click.group()
def cli():
"""
Yass: Yet Another Static Site (generator)
"""
pass
@cli.command("version")
def version():
"""Return the vesion of Yass"""
print(__version__)
footer()
@cli.command("build")
def build():
"""Build everything"""
print("Building pages...")
Yass(CWD).build()
print("Done!")
footer()
@cli.command("publish")
@click.argument("endpoint", default="s3")
@click.option("--purge-files", is_flag=True)
@click.option("--rebuild-manifest", is_flag=True)
@click.option("--skip-upload", is_flag=True)
def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
"""Publish the site"""
print("Publishing site to %s ..." % endpoint.upper())
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError("%s endpoint is missing in the config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
print(">>>")
print("Setting S3 site...")
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
print("New bucket created: %s" % p.sitename)
if rebuild_manifest:
print(">>>")
print("Rebuilding site's manifest...")
p.create_manifest_from_s3_files()
if purge_files is True or endpoint.get("purge_files") is True:
print(">>>")
print("Purging files...")
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
if not skip_upload:
print(">>>")
print("Uploading your site...")
p.upload(yass.build_dir)
else:
print(">>>")
print("WARNING: files upload was skipped because of the use of --skip-upload")
print("")
print("Yass! Your site has been successfully published to: ")
print(p.website_endpoint_url)
footer()
@cli.command("setup-dns")
@click.argument("endpoint", default="s3")
def setup_dns(endpoint):
"""Setup site domain to route to static site"""
print("Setting up DNS...")
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError(
"%s endpoint is missing in the hosting config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
print("Setting AWS Route53 for: %s ..." % p.sitename)
p.setup_dns()
print("")
print("Yass! Route53 setup successfully!")
print("You can now visit the site at :")
print(p.sitename_endpoint)
footer()
@cli.command("create-site")
@click.argument("sitename")
def create_site(sitename):
"""Create a new site directory and init Yass"""
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
print("Site directory '%s' exists already!" % sitename)
else:
print("Creating site: %s..." % sitename)
os.makedirs(sitepath)
copy_resource("skel/", sitepath)
stamp_yass_current_version(sitepath)
print("Site created successfully!")
print("CD into '%s' and run 'yass serve' to view the site" % sitename)
footer()
@cli.command("init")
def init():
"""Initialize Yass in the current directory """
yass_conf = os.path.join(CWD, "yass.yml")
if os.path.isfile(yass_conf):
print("::ALERT::")
print("It seems like Yass is already initialized here.")
print("If it's a mistake, delete 'yass.yml' in this directory")
else:
print("Init Yass in %s ..." % CWD)
copy_resource("skel/", CWD)
stamp_yass_current_version(CWD)
print("Yass init successfully!")
print("Run 'yass serve' to view the site")
footer()
@cli.command("create-page")
@click.argument("pagename")
def create_page(pagename):
""" Create a new page Omit the extension, it will create it as .jade file """
page = pagename.lstrip("/").rstrip("/")
_, _ext = os.path.splitext(pagename)
# If the file doesn't have an extension, we'll just create one
if not _ext or _ext == "":
page += ".jade"
if not page.endswith(PAGE_FORMAT):
error("Can't create '%s'" % page)
print("Invalid filename format")
print("Filename must be in: '%s'" % " | ".join(PAGE_FORMAT))
else:
engine = Yass(CWD)
markup = "jade"
if page.endswith(".md"):
markup = "md"
if page.endswith(".html"):
markup = "html"
dest_file = os.path.join(engine.pages_dir, page)
dest_dir = os.path.dirname(dest_file)
content = TPL_HEADER
content += TPL_BODY[markup]
if os.path.isfile(dest_file):
error("File exists already")
print("Location: %s" % dest_file)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with open(dest_file, "w") as f:
f.write(content)
print("New page created: '%s'" % page)
print("Location: %s" % dest_file)
footer()
@cli.command()
@click.option("-p", "--port", default=None)
@click.option("--no-livereload", default=None)
@click.option("--open-url", default=None)
def serve(port, no_livereload, open_url):
"""Serve the site """
engine = Yass(CWD)
if not port:
port = engine.config.get("local_server.port", 8000)
if no_livereload is None:
no_livereload = True if engine.config.get("local_server.livereload") is False else False
if open_url is None:
open_url = False if engine.config.get("local_server.open_url") is False else True
print("Serving at %s" % port)
print("Livereload is %s" % ("OFF" if no_livereload else "ON"))
def build_static():
engine.build_static()
def build_pages():
engine.build_pages()
engine.build()
server = Server()
if no_livereload is False:
server.watch(engine.static_dir + "/", build_static)
server.watch(engine.pages_dir + "/", build_pages)
server.watch(engine.templates_dir + "/", build_pages)
server.watch(engine.data_dir + "/", build_pages)
server.serve(open_url_delay=open_url, port=port, root=engine.build_dir)
@cli.command("clean")
def clean():
"""Clean the build dir """
print("Cleaning build dir...")
Yass(CWD).clean_build_dir()
print("Done!")
footer()
def cmd():
try:
print("*" * 80)
print("=" * 80)
print("Yass %s!" % __version__)
print("-" * 80)
yass_conf = os.path.join(CWD, "yass.yml")
yass_init = os.path.isfile(yass_conf)
sys_argv = sys.argv
exempt_argv = ["init", "create-site", "version"]
if len(sys_argv) > 1:
if not yass_init and sys_argv[1] not in exempt_argv:
error("Yass is not initialized yet in this directory: %s" % CWD)
print("Run 'yass init' to initialize Yass in the current directory")
footer()
else:
cli()
else:
cli()
except Exception as e:
print("Ohhh noooooo! Something bad happens")
print(">> %s " % e.__repr__())
|
mardix/Yass | yass/cli.py | publish | python | def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
print("Publishing site to %s ..." % endpoint.upper())
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError("%s endpoint is missing in the config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
print(">>>")
print("Setting S3 site...")
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
print("New bucket created: %s" % p.sitename)
if rebuild_manifest:
print(">>>")
print("Rebuilding site's manifest...")
p.create_manifest_from_s3_files()
if purge_files is True or endpoint.get("purge_files") is True:
print(">>>")
print("Purging files...")
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
if not skip_upload:
print(">>>")
print("Uploading your site...")
p.upload(yass.build_dir)
else:
print(">>>")
print("WARNING: files upload was skipped because of the use of --skip-upload")
print("")
print("Yass! Your site has been successfully published to: ")
print(p.website_endpoint_url)
footer() | Publish the site | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/cli.py#L134-L188 | [
"def footer():\n print(\"-\" * 80)\n"
] |
import os
import sys
import time
import click
import pkg_resources
from livereload import Server, shell
from . import Yass, publisher
from .yass import PAGE_FORMAT
from .__about__ import *
CWD = os.getcwd()
TPL_HEADER = """
---
title: Page Title
description: Page Description
meta:
key: value
---
"""
TPL_BODY = {
# JADE
"jade": """
.row
.col-md-12.text-center
h1
strong.
{{ page.title }}
h3.
Ok Yass!
.row
.col-md-12
""",
# HTML
"html": """
<div class=\"row\">
<div class=\"col-md-12\">
This is Yass!
</div>
</div>
""",
# MD
"md": """
# My markdown Yass!
"""
}
def copy_resource(src, dest):
"""
To copy package data to destination
"""
package_name = "yass"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource(src + "/" + res, dest)
else:
if not os.path.isfile(dest) \
and os.path.splitext(src)[1] not in [".pyc"]:
with open(dest, "wb") as f:
f.write(pkg_resources.resource_string(__name__, src))
else:
print("File exists: %s " % dest)
def stamp_yass_current_version(dir):
f = os.path.join(dir, "yass.yml")
if os.path.isfile(f):
with open(f, "r+") as file:
content = file.read()
content = content.replace("##VERSION##", __version__)
file.seek(0)
file.write(content)
file.truncate()
def footer():
print("-" * 80)
def alert(message):
print("::ALERT::")
print(message)
def error(message):
print("::ERROR::")
print(message)
@click.group()
def cli():
"""
Yass: Yet Another Static Site (generator)
"""
pass
@cli.command("version")
def version():
"""Return the vesion of Yass"""
print(__version__)
footer()
@cli.command("build")
def build():
"""Build everything"""
print("Building pages...")
Yass(CWD).build()
print("Done!")
footer()
@cli.command("publish")
@click.argument("endpoint", default="s3")
@click.option("--purge-files", is_flag=True)
@click.option("--rebuild-manifest", is_flag=True)
@click.option("--skip-upload", is_flag=True)
@cli.command("setup-dns")
@click.argument("endpoint", default="s3")
def setup_dns(endpoint):
"""Setup site domain to route to static site"""
print("Setting up DNS...")
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError(
"%s endpoint is missing in the hosting config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
print("Setting AWS Route53 for: %s ..." % p.sitename)
p.setup_dns()
print("")
print("Yass! Route53 setup successfully!")
print("You can now visit the site at :")
print(p.sitename_endpoint)
footer()
@cli.command("create-site")
@click.argument("sitename")
def create_site(sitename):
"""Create a new site directory and init Yass"""
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
print("Site directory '%s' exists already!" % sitename)
else:
print("Creating site: %s..." % sitename)
os.makedirs(sitepath)
copy_resource("skel/", sitepath)
stamp_yass_current_version(sitepath)
print("Site created successfully!")
print("CD into '%s' and run 'yass serve' to view the site" % sitename)
footer()
@cli.command("init")
def init():
"""Initialize Yass in the current directory """
yass_conf = os.path.join(CWD, "yass.yml")
if os.path.isfile(yass_conf):
print("::ALERT::")
print("It seems like Yass is already initialized here.")
print("If it's a mistake, delete 'yass.yml' in this directory")
else:
print("Init Yass in %s ..." % CWD)
copy_resource("skel/", CWD)
stamp_yass_current_version(CWD)
print("Yass init successfully!")
print("Run 'yass serve' to view the site")
footer()
@cli.command("create-page")
@click.argument("pagename")
def create_page(pagename):
""" Create a new page Omit the extension, it will create it as .jade file """
page = pagename.lstrip("/").rstrip("/")
_, _ext = os.path.splitext(pagename)
# If the file doesn't have an extension, we'll just create one
if not _ext or _ext == "":
page += ".jade"
if not page.endswith(PAGE_FORMAT):
error("Can't create '%s'" % page)
print("Invalid filename format")
print("Filename must be in: '%s'" % " | ".join(PAGE_FORMAT))
else:
engine = Yass(CWD)
markup = "jade"
if page.endswith(".md"):
markup = "md"
if page.endswith(".html"):
markup = "html"
dest_file = os.path.join(engine.pages_dir, page)
dest_dir = os.path.dirname(dest_file)
content = TPL_HEADER
content += TPL_BODY[markup]
if os.path.isfile(dest_file):
error("File exists already")
print("Location: %s" % dest_file)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with open(dest_file, "w") as f:
f.write(content)
print("New page created: '%s'" % page)
print("Location: %s" % dest_file)
footer()
@cli.command()
@click.option("-p", "--port", default=None)
@click.option("--no-livereload", default=None)
@click.option("--open-url", default=None)
def serve(port, no_livereload, open_url):
"""Serve the site """
engine = Yass(CWD)
if not port:
port = engine.config.get("local_server.port", 8000)
if no_livereload is None:
no_livereload = True if engine.config.get("local_server.livereload") is False else False
if open_url is None:
open_url = False if engine.config.get("local_server.open_url") is False else True
print("Serving at %s" % port)
print("Livereload is %s" % ("OFF" if no_livereload else "ON"))
def build_static():
engine.build_static()
def build_pages():
engine.build_pages()
engine.build()
server = Server()
if no_livereload is False:
server.watch(engine.static_dir + "/", build_static)
server.watch(engine.pages_dir + "/", build_pages)
server.watch(engine.templates_dir + "/", build_pages)
server.watch(engine.data_dir + "/", build_pages)
server.serve(open_url_delay=open_url, port=port, root=engine.build_dir)
@cli.command("clean")
def clean():
"""Clean the build dir """
print("Cleaning build dir...")
Yass(CWD).clean_build_dir()
print("Done!")
footer()
def cmd():
try:
print("*" * 80)
print("=" * 80)
print("Yass %s!" % __version__)
print("-" * 80)
yass_conf = os.path.join(CWD, "yass.yml")
yass_init = os.path.isfile(yass_conf)
sys_argv = sys.argv
exempt_argv = ["init", "create-site", "version"]
if len(sys_argv) > 1:
if not yass_init and sys_argv[1] not in exempt_argv:
error("Yass is not initialized yet in this directory: %s" % CWD)
print("Run 'yass init' to initialize Yass in the current directory")
footer()
else:
cli()
else:
cli()
except Exception as e:
print("Ohhh noooooo! Something bad happens")
print(">> %s " % e.__repr__())
|
mardix/Yass | yass/cli.py | setup_dns | python | def setup_dns(endpoint):
print("Setting up DNS...")
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError(
"%s endpoint is missing in the hosting config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
print("Setting AWS Route53 for: %s ..." % p.sitename)
p.setup_dns()
print("")
print("Yass! Route53 setup successfully!")
print("You can now visit the site at :")
print(p.sitename_endpoint)
footer() | Setup site domain to route to static site | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/cli.py#L194-L222 | [
"def footer():\n print(\"-\" * 80)\n"
] |
import os
import sys
import time
import click
import pkg_resources
from livereload import Server, shell
from . import Yass, publisher
from .yass import PAGE_FORMAT
from .__about__ import *
CWD = os.getcwd()
TPL_HEADER = """
---
title: Page Title
description: Page Description
meta:
key: value
---
"""
TPL_BODY = {
# JADE
"jade": """
.row
.col-md-12.text-center
h1
strong.
{{ page.title }}
h3.
Ok Yass!
.row
.col-md-12
""",
# HTML
"html": """
<div class=\"row\">
<div class=\"col-md-12\">
This is Yass!
</div>
</div>
""",
# MD
"md": """
# My markdown Yass!
"""
}
def copy_resource(src, dest):
"""
To copy package data to destination
"""
package_name = "yass"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource(src + "/" + res, dest)
else:
if not os.path.isfile(dest) \
and os.path.splitext(src)[1] not in [".pyc"]:
with open(dest, "wb") as f:
f.write(pkg_resources.resource_string(__name__, src))
else:
print("File exists: %s " % dest)
def stamp_yass_current_version(dir):
f = os.path.join(dir, "yass.yml")
if os.path.isfile(f):
with open(f, "r+") as file:
content = file.read()
content = content.replace("##VERSION##", __version__)
file.seek(0)
file.write(content)
file.truncate()
def footer():
print("-" * 80)
def alert(message):
print("::ALERT::")
print(message)
def error(message):
print("::ERROR::")
print(message)
@click.group()
def cli():
"""
Yass: Yet Another Static Site (generator)
"""
pass
@cli.command("version")
def version():
"""Return the vesion of Yass"""
print(__version__)
footer()
@cli.command("build")
def build():
"""Build everything"""
print("Building pages...")
Yass(CWD).build()
print("Done!")
footer()
@cli.command("publish")
@click.argument("endpoint", default="s3")
@click.option("--purge-files", is_flag=True)
@click.option("--rebuild-manifest", is_flag=True)
@click.option("--skip-upload", is_flag=True)
def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
"""Publish the site"""
print("Publishing site to %s ..." % endpoint.upper())
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError("%s endpoint is missing in the config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
print(">>>")
print("Setting S3 site...")
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
print("New bucket created: %s" % p.sitename)
if rebuild_manifest:
print(">>>")
print("Rebuilding site's manifest...")
p.create_manifest_from_s3_files()
if purge_files is True or endpoint.get("purge_files") is True:
print(">>>")
print("Purging files...")
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
if not skip_upload:
print(">>>")
print("Uploading your site...")
p.upload(yass.build_dir)
else:
print(">>>")
print("WARNING: files upload was skipped because of the use of --skip-upload")
print("")
print("Yass! Your site has been successfully published to: ")
print(p.website_endpoint_url)
footer()
@cli.command("setup-dns")
@click.argument("endpoint", default="s3")
@cli.command("create-site")
@click.argument("sitename")
def create_site(sitename):
"""Create a new site directory and init Yass"""
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
print("Site directory '%s' exists already!" % sitename)
else:
print("Creating site: %s..." % sitename)
os.makedirs(sitepath)
copy_resource("skel/", sitepath)
stamp_yass_current_version(sitepath)
print("Site created successfully!")
print("CD into '%s' and run 'yass serve' to view the site" % sitename)
footer()
@cli.command("init")
def init():
"""Initialize Yass in the current directory """
yass_conf = os.path.join(CWD, "yass.yml")
if os.path.isfile(yass_conf):
print("::ALERT::")
print("It seems like Yass is already initialized here.")
print("If it's a mistake, delete 'yass.yml' in this directory")
else:
print("Init Yass in %s ..." % CWD)
copy_resource("skel/", CWD)
stamp_yass_current_version(CWD)
print("Yass init successfully!")
print("Run 'yass serve' to view the site")
footer()
@cli.command("create-page")
@click.argument("pagename")
def create_page(pagename):
""" Create a new page Omit the extension, it will create it as .jade file """
page = pagename.lstrip("/").rstrip("/")
_, _ext = os.path.splitext(pagename)
# If the file doesn't have an extension, we'll just create one
if not _ext or _ext == "":
page += ".jade"
if not page.endswith(PAGE_FORMAT):
error("Can't create '%s'" % page)
print("Invalid filename format")
print("Filename must be in: '%s'" % " | ".join(PAGE_FORMAT))
else:
engine = Yass(CWD)
markup = "jade"
if page.endswith(".md"):
markup = "md"
if page.endswith(".html"):
markup = "html"
dest_file = os.path.join(engine.pages_dir, page)
dest_dir = os.path.dirname(dest_file)
content = TPL_HEADER
content += TPL_BODY[markup]
if os.path.isfile(dest_file):
error("File exists already")
print("Location: %s" % dest_file)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with open(dest_file, "w") as f:
f.write(content)
print("New page created: '%s'" % page)
print("Location: %s" % dest_file)
footer()
@cli.command()
@click.option("-p", "--port", default=None)
@click.option("--no-livereload", default=None)
@click.option("--open-url", default=None)
def serve(port, no_livereload, open_url):
"""Serve the site """
engine = Yass(CWD)
if not port:
port = engine.config.get("local_server.port", 8000)
if no_livereload is None:
no_livereload = True if engine.config.get("local_server.livereload") is False else False
if open_url is None:
open_url = False if engine.config.get("local_server.open_url") is False else True
print("Serving at %s" % port)
print("Livereload is %s" % ("OFF" if no_livereload else "ON"))
def build_static():
engine.build_static()
def build_pages():
engine.build_pages()
engine.build()
server = Server()
if no_livereload is False:
server.watch(engine.static_dir + "/", build_static)
server.watch(engine.pages_dir + "/", build_pages)
server.watch(engine.templates_dir + "/", build_pages)
server.watch(engine.data_dir + "/", build_pages)
server.serve(open_url_delay=open_url, port=port, root=engine.build_dir)
@cli.command("clean")
def clean():
"""Clean the build dir """
print("Cleaning build dir...")
Yass(CWD).clean_build_dir()
print("Done!")
footer()
def cmd():
try:
print("*" * 80)
print("=" * 80)
print("Yass %s!" % __version__)
print("-" * 80)
yass_conf = os.path.join(CWD, "yass.yml")
yass_init = os.path.isfile(yass_conf)
sys_argv = sys.argv
exempt_argv = ["init", "create-site", "version"]
if len(sys_argv) > 1:
if not yass_init and sys_argv[1] not in exempt_argv:
error("Yass is not initialized yet in this directory: %s" % CWD)
print("Run 'yass init' to initialize Yass in the current directory")
footer()
else:
cli()
else:
cli()
except Exception as e:
print("Ohhh noooooo! Something bad happens")
print(">> %s " % e.__repr__())
|
mardix/Yass | yass/cli.py | create_site | python | def create_site(sitename):
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
print("Site directory '%s' exists already!" % sitename)
else:
print("Creating site: %s..." % sitename)
os.makedirs(sitepath)
copy_resource("skel/", sitepath)
stamp_yass_current_version(sitepath)
print("Site created successfully!")
print("CD into '%s' and run 'yass serve' to view the site" % sitename)
footer() | Create a new site directory and init Yass | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/cli.py#L227-L240 | [
"def copy_resource(src, dest):\n \"\"\"\n To copy package data to destination\n \"\"\"\n package_name = \"yass\"\n dest = (dest + \"/\" + os.path.basename(src)).rstrip(\"/\")\n if pkg_resources.resource_isdir(package_name, src):\n if not os.path.isdir(dest):\n os.makedirs(dest)\n... |
import os
import sys
import time
import click
import pkg_resources
from livereload import Server, shell
from . import Yass, publisher
from .yass import PAGE_FORMAT
from .__about__ import *
CWD = os.getcwd()
TPL_HEADER = """
---
title: Page Title
description: Page Description
meta:
key: value
---
"""
TPL_BODY = {
# JADE
"jade": """
.row
.col-md-12.text-center
h1
strong.
{{ page.title }}
h3.
Ok Yass!
.row
.col-md-12
""",
# HTML
"html": """
<div class=\"row\">
<div class=\"col-md-12\">
This is Yass!
</div>
</div>
""",
# MD
"md": """
# My markdown Yass!
"""
}
def copy_resource(src, dest):
"""
To copy package data to destination
"""
package_name = "yass"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource(src + "/" + res, dest)
else:
if not os.path.isfile(dest) \
and os.path.splitext(src)[1] not in [".pyc"]:
with open(dest, "wb") as f:
f.write(pkg_resources.resource_string(__name__, src))
else:
print("File exists: %s " % dest)
def stamp_yass_current_version(dir):
f = os.path.join(dir, "yass.yml")
if os.path.isfile(f):
with open(f, "r+") as file:
content = file.read()
content = content.replace("##VERSION##", __version__)
file.seek(0)
file.write(content)
file.truncate()
def footer():
print("-" * 80)
def alert(message):
print("::ALERT::")
print(message)
def error(message):
print("::ERROR::")
print(message)
@click.group()
def cli():
"""
Yass: Yet Another Static Site (generator)
"""
pass
@cli.command("version")
def version():
"""Return the vesion of Yass"""
print(__version__)
footer()
@cli.command("build")
def build():
"""Build everything"""
print("Building pages...")
Yass(CWD).build()
print("Done!")
footer()
@cli.command("publish")
@click.argument("endpoint", default="s3")
@click.option("--purge-files", is_flag=True)
@click.option("--rebuild-manifest", is_flag=True)
@click.option("--skip-upload", is_flag=True)
def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
"""Publish the site"""
print("Publishing site to %s ..." % endpoint.upper())
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError("%s endpoint is missing in the config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
print(">>>")
print("Setting S3 site...")
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
print("New bucket created: %s" % p.sitename)
if rebuild_manifest:
print(">>>")
print("Rebuilding site's manifest...")
p.create_manifest_from_s3_files()
if purge_files is True or endpoint.get("purge_files") is True:
print(">>>")
print("Purging files...")
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
if not skip_upload:
print(">>>")
print("Uploading your site...")
p.upload(yass.build_dir)
else:
print(">>>")
print("WARNING: files upload was skipped because of the use of --skip-upload")
print("")
print("Yass! Your site has been successfully published to: ")
print(p.website_endpoint_url)
footer()
@cli.command("setup-dns")
@click.argument("endpoint", default="s3")
def setup_dns(endpoint):
"""Setup site domain to route to static site"""
print("Setting up DNS...")
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError(
"%s endpoint is missing in the hosting config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
print("Setting AWS Route53 for: %s ..." % p.sitename)
p.setup_dns()
print("")
print("Yass! Route53 setup successfully!")
print("You can now visit the site at :")
print(p.sitename_endpoint)
footer()
@cli.command("create-site")
@click.argument("sitename")
@cli.command("init")
def init():
"""Initialize Yass in the current directory """
yass_conf = os.path.join(CWD, "yass.yml")
if os.path.isfile(yass_conf):
print("::ALERT::")
print("It seems like Yass is already initialized here.")
print("If it's a mistake, delete 'yass.yml' in this directory")
else:
print("Init Yass in %s ..." % CWD)
copy_resource("skel/", CWD)
stamp_yass_current_version(CWD)
print("Yass init successfully!")
print("Run 'yass serve' to view the site")
footer()
@cli.command("create-page")
@click.argument("pagename")
def create_page(pagename):
""" Create a new page Omit the extension, it will create it as .jade file """
page = pagename.lstrip("/").rstrip("/")
_, _ext = os.path.splitext(pagename)
# If the file doesn't have an extension, we'll just create one
if not _ext or _ext == "":
page += ".jade"
if not page.endswith(PAGE_FORMAT):
error("Can't create '%s'" % page)
print("Invalid filename format")
print("Filename must be in: '%s'" % " | ".join(PAGE_FORMAT))
else:
engine = Yass(CWD)
markup = "jade"
if page.endswith(".md"):
markup = "md"
if page.endswith(".html"):
markup = "html"
dest_file = os.path.join(engine.pages_dir, page)
dest_dir = os.path.dirname(dest_file)
content = TPL_HEADER
content += TPL_BODY[markup]
if os.path.isfile(dest_file):
error("File exists already")
print("Location: %s" % dest_file)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with open(dest_file, "w") as f:
f.write(content)
print("New page created: '%s'" % page)
print("Location: %s" % dest_file)
footer()
@cli.command()
@click.option("-p", "--port", default=None)
@click.option("--no-livereload", default=None)
@click.option("--open-url", default=None)
def serve(port, no_livereload, open_url):
"""Serve the site """
engine = Yass(CWD)
if not port:
port = engine.config.get("local_server.port", 8000)
if no_livereload is None:
no_livereload = True if engine.config.get("local_server.livereload") is False else False
if open_url is None:
open_url = False if engine.config.get("local_server.open_url") is False else True
print("Serving at %s" % port)
print("Livereload is %s" % ("OFF" if no_livereload else "ON"))
def build_static():
engine.build_static()
def build_pages():
engine.build_pages()
engine.build()
server = Server()
if no_livereload is False:
server.watch(engine.static_dir + "/", build_static)
server.watch(engine.pages_dir + "/", build_pages)
server.watch(engine.templates_dir + "/", build_pages)
server.watch(engine.data_dir + "/", build_pages)
server.serve(open_url_delay=open_url, port=port, root=engine.build_dir)
@cli.command("clean")
def clean():
"""Clean the build dir """
print("Cleaning build dir...")
Yass(CWD).clean_build_dir()
print("Done!")
footer()
def cmd():
try:
print("*" * 80)
print("=" * 80)
print("Yass %s!" % __version__)
print("-" * 80)
yass_conf = os.path.join(CWD, "yass.yml")
yass_init = os.path.isfile(yass_conf)
sys_argv = sys.argv
exempt_argv = ["init", "create-site", "version"]
if len(sys_argv) > 1:
if not yass_init and sys_argv[1] not in exempt_argv:
error("Yass is not initialized yet in this directory: %s" % CWD)
print("Run 'yass init' to initialize Yass in the current directory")
footer()
else:
cli()
else:
cli()
except Exception as e:
print("Ohhh noooooo! Something bad happens")
print(">> %s " % e.__repr__())
|
mardix/Yass | yass/cli.py | init | python | def init():
yass_conf = os.path.join(CWD, "yass.yml")
if os.path.isfile(yass_conf):
print("::ALERT::")
print("It seems like Yass is already initialized here.")
print("If it's a mistake, delete 'yass.yml' in this directory")
else:
print("Init Yass in %s ..." % CWD)
copy_resource("skel/", CWD)
stamp_yass_current_version(CWD)
print("Yass init successfully!")
print("Run 'yass serve' to view the site")
footer() | Initialize Yass in the current directory | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/cli.py#L244-L258 | [
"def copy_resource(src, dest):\n \"\"\"\n To copy package data to destination\n \"\"\"\n package_name = \"yass\"\n dest = (dest + \"/\" + os.path.basename(src)).rstrip(\"/\")\n if pkg_resources.resource_isdir(package_name, src):\n if not os.path.isdir(dest):\n os.makedirs(dest)\n... |
import os
import sys
import time
import click
import pkg_resources
from livereload import Server, shell
from . import Yass, publisher
from .yass import PAGE_FORMAT
from .__about__ import *
CWD = os.getcwd()
TPL_HEADER = """
---
title: Page Title
description: Page Description
meta:
key: value
---
"""
TPL_BODY = {
# JADE
"jade": """
.row
.col-md-12.text-center
h1
strong.
{{ page.title }}
h3.
Ok Yass!
.row
.col-md-12
""",
# HTML
"html": """
<div class=\"row\">
<div class=\"col-md-12\">
This is Yass!
</div>
</div>
""",
# MD
"md": """
# My markdown Yass!
"""
}
def copy_resource(src, dest):
    """
    To copy package data to destination

    Recursively copies a bundled package resource (file or directory tree)
    from the installed "yass" package onto the local filesystem.

    :param src: resource path relative to the package, e.g. "skel/"
    :param dest: destination directory on disk
    """
    package_name = "yass"
    # Mirror the resource's basename under dest; rstrip avoids a trailing
    # slash when src itself ends with "/".
    dest = (dest + "/" + os.path.basename(src)).rstrip("/")
    if pkg_resources.resource_isdir(package_name, src):
        if not os.path.isdir(dest):
            os.makedirs(dest)
        # NOTE(review): the directory check uses "yass" but the listing uses
        # __name__ — both resolve to the same package here, though the two
        # should probably be consistent; confirm before changing.
        for res in pkg_resources.resource_listdir(__name__, src):
            copy_resource(src + "/" + res, dest)
    else:
        # Never overwrite an existing file, and skip compiled artifacts.
        if not os.path.isfile(dest) \
                and os.path.splitext(src)[1] not in [".pyc"]:
            with open(dest, "wb") as f:
                f.write(pkg_resources.resource_string(__name__, src))
        else:
            print("File exists: %s " % dest)
def stamp_yass_current_version(dir):
    """Stamp the current Yass version into <dir>/yass.yml.

    Replaces the ##VERSION## placeholder in place; does nothing when the
    config file is absent.
    """
    conf_path = os.path.join(dir, "yass.yml")
    if not os.path.isfile(conf_path):
        return
    with open(conf_path, "r+") as fh:
        stamped = fh.read().replace("##VERSION##", __version__)
        fh.seek(0)
        fh.write(stamped)
        fh.truncate()
def footer():
    """Print the standard 80-dash separator line."""
    separator = "-" * 80
    print(separator)
def alert(message):
    """Print *message* beneath an ::ALERT:: banner."""
    for line in ("::ALERT::", message):
        print(line)
def error(message):
    """Print *message* beneath an ::ERROR:: banner."""
    for line in ("::ERROR::", message):
        print(line)
@click.group()
def cli():
    """
    Yass: Yet Another Static Site (generator)
    """
    # Root click group; subcommands attach themselves via @cli.command(...).
    pass
@cli.command("version")
def version():
"""Return the vesion of Yass"""
print(__version__)
footer()
@cli.command("build")
def build():
"""Build everything"""
print("Building pages...")
Yass(CWD).build()
print("Done!")
footer()
@cli.command("publish")
@click.argument("endpoint", default="s3")
@click.option("--purge-files", is_flag=True)
@click.option("--rebuild-manifest", is_flag=True)
@click.option("--skip-upload", is_flag=True)
def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
"""Publish the site"""
print("Publishing site to %s ..." % endpoint.upper())
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError("%s endpoint is missing in the config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
print(">>>")
print("Setting S3 site...")
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
print("New bucket created: %s" % p.sitename)
if rebuild_manifest:
print(">>>")
print("Rebuilding site's manifest...")
p.create_manifest_from_s3_files()
if purge_files is True or endpoint.get("purge_files") is True:
print(">>>")
print("Purging files...")
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
if not skip_upload:
print(">>>")
print("Uploading your site...")
p.upload(yass.build_dir)
else:
print(">>>")
print("WARNING: files upload was skipped because of the use of --skip-upload")
print("")
print("Yass! Your site has been successfully published to: ")
print(p.website_endpoint_url)
footer()
@cli.command("setup-dns")
@click.argument("endpoint", default="s3")
def setup_dns(endpoint):
"""Setup site domain to route to static site"""
print("Setting up DNS...")
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError(
"%s endpoint is missing in the hosting config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
print("Setting AWS Route53 for: %s ..." % p.sitename)
p.setup_dns()
print("")
print("Yass! Route53 setup successfully!")
print("You can now visit the site at :")
print(p.sitename_endpoint)
footer()
@cli.command("create-site")
@click.argument("sitename")
def create_site(sitename):
"""Create a new site directory and init Yass"""
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
print("Site directory '%s' exists already!" % sitename)
else:
print("Creating site: %s..." % sitename)
os.makedirs(sitepath)
copy_resource("skel/", sitepath)
stamp_yass_current_version(sitepath)
print("Site created successfully!")
print("CD into '%s' and run 'yass serve' to view the site" % sitename)
footer()
@cli.command("init")
@cli.command("create-page")
@click.argument("pagename")
def create_page(pagename):
""" Create a new page Omit the extension, it will create it as .jade file """
page = pagename.lstrip("/").rstrip("/")
_, _ext = os.path.splitext(pagename)
# If the file doesn't have an extension, we'll just create one
if not _ext or _ext == "":
page += ".jade"
if not page.endswith(PAGE_FORMAT):
error("Can't create '%s'" % page)
print("Invalid filename format")
print("Filename must be in: '%s'" % " | ".join(PAGE_FORMAT))
else:
engine = Yass(CWD)
markup = "jade"
if page.endswith(".md"):
markup = "md"
if page.endswith(".html"):
markup = "html"
dest_file = os.path.join(engine.pages_dir, page)
dest_dir = os.path.dirname(dest_file)
content = TPL_HEADER
content += TPL_BODY[markup]
if os.path.isfile(dest_file):
error("File exists already")
print("Location: %s" % dest_file)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with open(dest_file, "w") as f:
f.write(content)
print("New page created: '%s'" % page)
print("Location: %s" % dest_file)
footer()
@cli.command()
@click.option("-p", "--port", default=None)
@click.option("--no-livereload", default=None)
@click.option("--open-url", default=None)
def serve(port, no_livereload, open_url):
    """Serve the site """
    # Build the site, then serve it locally with optional livereload.
    engine = Yass(CWD)
    # CLI flags win; otherwise fall back to local_server.* config values.
    if not port:
        port = engine.config.get("local_server.port", 8000)
    if no_livereload is None:
        # Livereload is ON unless the config explicitly sets it to False.
        no_livereload = True if engine.config.get("local_server.livereload") is False else False
    if open_url is None:
        # Open the browser unless the config explicitly disables it.
        open_url = False if engine.config.get("local_server.open_url") is False else True
    print("Serving at %s" % port)
    print("Livereload is %s" % ("OFF" if no_livereload else "ON"))

    # Rebuild callbacks for the livereload watcher (close over 'engine').
    def build_static():
        engine.build_static()

    def build_pages():
        engine.build_pages()

    engine.build()
    server = Server()
    if no_livereload is False:
        server.watch(engine.static_dir + "/", build_static)
        server.watch(engine.pages_dir + "/", build_pages)
        server.watch(engine.templates_dir + "/", build_pages)
        server.watch(engine.data_dir + "/", build_pages)
    # NOTE(review): open_url (a bool) is passed as open_url_delay, which
    # livereload interprets as a delay in seconds — confirm this is intended.
    server.serve(open_url_delay=open_url, port=port, root=engine.build_dir)
@cli.command("clean")
def clean():
"""Clean the build dir """
print("Cleaning build dir...")
Yass(CWD).clean_build_dir()
print("Done!")
footer()
def cmd():
    """Console entry point.

    Prints the banner, verifies that Yass is initialized in the current
    directory (except for bootstrap commands that may run before
    'yass init'), then dispatches to the click CLI.
    """
    try:
        print("*" * 80)
        print("=" * 80)
        print("Yass %s!" % __version__)
        print("-" * 80)
        yass_conf = os.path.join(CWD, "yass.yml")
        yass_init = os.path.isfile(yass_conf)
        sys_argv = sys.argv
        # Commands allowed to run before yass.yml exists.
        exempt_argv = ["init", "create-site", "version"]
        # Flattened from the original nested if/else: every path other than
        # the "uninitialized, non-exempt command" case falls through to cli().
        if len(sys_argv) > 1 and not yass_init and sys_argv[1] not in exempt_argv:
            error("Yass is not initialized yet in this directory: %s" % CWD)
            print("Run 'yass init' to initialize Yass in the current directory")
            footer()
        else:
            cli()
    except Exception as e:
        # Broad catch keeps the console script from dumping a raw traceback.
        print("Ohhh noooooo! Something bad happens")
        print(">> %s " % repr(e))
|
mardix/Yass | yass/cli.py | create_page | python | def create_page(pagename):
page = pagename.lstrip("/").rstrip("/")
_, _ext = os.path.splitext(pagename)
# If the file doesn't have an extension, we'll just create one
if not _ext or _ext == "":
page += ".jade"
if not page.endswith(PAGE_FORMAT):
error("Can't create '%s'" % page)
print("Invalid filename format")
print("Filename must be in: '%s'" % " | ".join(PAGE_FORMAT))
else:
engine = Yass(CWD)
markup = "jade"
if page.endswith(".md"):
markup = "md"
if page.endswith(".html"):
markup = "html"
dest_file = os.path.join(engine.pages_dir, page)
dest_dir = os.path.dirname(dest_file)
content = TPL_HEADER
content += TPL_BODY[markup]
if os.path.isfile(dest_file):
error("File exists already")
print("Location: %s" % dest_file)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with open(dest_file, "w") as f:
f.write(content)
print("New page created: '%s'" % page)
print("Location: %s" % dest_file)
footer() | Create a new page Omit the extension, it will create it as .jade file | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/cli.py#L263-L304 | [
"def error(message):\n print(\"::ERROR::\")\n print(message)\n",
"def footer():\n print(\"-\" * 80)\n"
] |
import os
import sys
import time
import click
import pkg_resources
from livereload import Server, shell
from . import Yass, publisher
from .yass import PAGE_FORMAT
from .__about__ import *
CWD = os.getcwd()
TPL_HEADER = """
---
title: Page Title
description: Page Description
meta:
key: value
---
"""
TPL_BODY = {
# JADE
"jade": """
.row
.col-md-12.text-center
h1
strong.
{{ page.title }}
h3.
Ok Yass!
.row
.col-md-12
""",
# HTML
"html": """
<div class=\"row\">
<div class=\"col-md-12\">
This is Yass!
</div>
</div>
""",
# MD
"md": """
# My markdown Yass!
"""
}
def copy_resource(src, dest):
"""
To copy package data to destination
"""
package_name = "yass"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource(src + "/" + res, dest)
else:
if not os.path.isfile(dest) \
and os.path.splitext(src)[1] not in [".pyc"]:
with open(dest, "wb") as f:
f.write(pkg_resources.resource_string(__name__, src))
else:
print("File exists: %s " % dest)
def stamp_yass_current_version(dir):
f = os.path.join(dir, "yass.yml")
if os.path.isfile(f):
with open(f, "r+") as file:
content = file.read()
content = content.replace("##VERSION##", __version__)
file.seek(0)
file.write(content)
file.truncate()
def footer():
print("-" * 80)
def alert(message):
print("::ALERT::")
print(message)
def error(message):
print("::ERROR::")
print(message)
@click.group()
def cli():
"""
Yass: Yet Another Static Site (generator)
"""
pass
@cli.command("version")
def version():
"""Return the vesion of Yass"""
print(__version__)
footer()
@cli.command("build")
def build():
"""Build everything"""
print("Building pages...")
Yass(CWD).build()
print("Done!")
footer()
@cli.command("publish")
@click.argument("endpoint", default="s3")
@click.option("--purge-files", is_flag=True)
@click.option("--rebuild-manifest", is_flag=True)
@click.option("--skip-upload", is_flag=True)
def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
"""Publish the site"""
print("Publishing site to %s ..." % endpoint.upper())
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError("%s endpoint is missing in the config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
print(">>>")
print("Setting S3 site...")
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
print("New bucket created: %s" % p.sitename)
if rebuild_manifest:
print(">>>")
print("Rebuilding site's manifest...")
p.create_manifest_from_s3_files()
if purge_files is True or endpoint.get("purge_files") is True:
print(">>>")
print("Purging files...")
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
if not skip_upload:
print(">>>")
print("Uploading your site...")
p.upload(yass.build_dir)
else:
print(">>>")
print("WARNING: files upload was skipped because of the use of --skip-upload")
print("")
print("Yass! Your site has been successfully published to: ")
print(p.website_endpoint_url)
footer()
@cli.command("setup-dns")
@click.argument("endpoint", default="s3")
def setup_dns(endpoint):
"""Setup site domain to route to static site"""
print("Setting up DNS...")
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError(
"%s endpoint is missing in the hosting config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
print("Setting AWS Route53 for: %s ..." % p.sitename)
p.setup_dns()
print("")
print("Yass! Route53 setup successfully!")
print("You can now visit the site at :")
print(p.sitename_endpoint)
footer()
@cli.command("create-site")
@click.argument("sitename")
def create_site(sitename):
"""Create a new site directory and init Yass"""
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
print("Site directory '%s' exists already!" % sitename)
else:
print("Creating site: %s..." % sitename)
os.makedirs(sitepath)
copy_resource("skel/", sitepath)
stamp_yass_current_version(sitepath)
print("Site created successfully!")
print("CD into '%s' and run 'yass serve' to view the site" % sitename)
footer()
@cli.command("init")
def init():
"""Initialize Yass in the current directory """
yass_conf = os.path.join(CWD, "yass.yml")
if os.path.isfile(yass_conf):
print("::ALERT::")
print("It seems like Yass is already initialized here.")
print("If it's a mistake, delete 'yass.yml' in this directory")
else:
print("Init Yass in %s ..." % CWD)
copy_resource("skel/", CWD)
stamp_yass_current_version(CWD)
print("Yass init successfully!")
print("Run 'yass serve' to view the site")
footer()
@cli.command("create-page")
@click.argument("pagename")
@cli.command()
@click.option("-p", "--port", default=None)
@click.option("--no-livereload", default=None)
@click.option("--open-url", default=None)
def serve(port, no_livereload, open_url):
"""Serve the site """
engine = Yass(CWD)
if not port:
port = engine.config.get("local_server.port", 8000)
if no_livereload is None:
no_livereload = True if engine.config.get("local_server.livereload") is False else False
if open_url is None:
open_url = False if engine.config.get("local_server.open_url") is False else True
print("Serving at %s" % port)
print("Livereload is %s" % ("OFF" if no_livereload else "ON"))
def build_static():
engine.build_static()
def build_pages():
engine.build_pages()
engine.build()
server = Server()
if no_livereload is False:
server.watch(engine.static_dir + "/", build_static)
server.watch(engine.pages_dir + "/", build_pages)
server.watch(engine.templates_dir + "/", build_pages)
server.watch(engine.data_dir + "/", build_pages)
server.serve(open_url_delay=open_url, port=port, root=engine.build_dir)
@cli.command("clean")
def clean():
"""Clean the build dir """
print("Cleaning build dir...")
Yass(CWD).clean_build_dir()
print("Done!")
footer()
def cmd():
try:
print("*" * 80)
print("=" * 80)
print("Yass %s!" % __version__)
print("-" * 80)
yass_conf = os.path.join(CWD, "yass.yml")
yass_init = os.path.isfile(yass_conf)
sys_argv = sys.argv
exempt_argv = ["init", "create-site", "version"]
if len(sys_argv) > 1:
if not yass_init and sys_argv[1] not in exempt_argv:
error("Yass is not initialized yet in this directory: %s" % CWD)
print("Run 'yass init' to initialize Yass in the current directory")
footer()
else:
cli()
else:
cli()
except Exception as e:
print("Ohhh noooooo! Something bad happens")
print(">> %s " % e.__repr__())
|
mardix/Yass | yass/cli.py | serve | python | def serve(port, no_livereload, open_url):
engine = Yass(CWD)
if not port:
port = engine.config.get("local_server.port", 8000)
if no_livereload is None:
no_livereload = True if engine.config.get("local_server.livereload") is False else False
if open_url is None:
open_url = False if engine.config.get("local_server.open_url") is False else True
print("Serving at %s" % port)
print("Livereload is %s" % ("OFF" if no_livereload else "ON"))
def build_static():
engine.build_static()
def build_pages():
engine.build_pages()
engine.build()
server = Server()
if no_livereload is False:
server.watch(engine.static_dir + "/", build_static)
server.watch(engine.pages_dir + "/", build_pages)
server.watch(engine.templates_dir + "/", build_pages)
server.watch(engine.data_dir + "/", build_pages)
server.serve(open_url_delay=open_url, port=port, root=engine.build_dir) | Serve the site | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/cli.py#L310-L339 | [
"def build(self):\n self.clean_build_dir()\n if not os.path.isdir(self.build_dir):\n os.makedirs(self.build_dir)\n self.build_static()\n self.build_pages()\n"
] |
import os
import sys
import time
import click
import pkg_resources
from livereload import Server, shell
from . import Yass, publisher
from .yass import PAGE_FORMAT
from .__about__ import *
CWD = os.getcwd()
TPL_HEADER = """
---
title: Page Title
description: Page Description
meta:
key: value
---
"""
TPL_BODY = {
# JADE
"jade": """
.row
.col-md-12.text-center
h1
strong.
{{ page.title }}
h3.
Ok Yass!
.row
.col-md-12
""",
# HTML
"html": """
<div class=\"row\">
<div class=\"col-md-12\">
This is Yass!
</div>
</div>
""",
# MD
"md": """
# My markdown Yass!
"""
}
def copy_resource(src, dest):
"""
To copy package data to destination
"""
package_name = "yass"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource(src + "/" + res, dest)
else:
if not os.path.isfile(dest) \
and os.path.splitext(src)[1] not in [".pyc"]:
with open(dest, "wb") as f:
f.write(pkg_resources.resource_string(__name__, src))
else:
print("File exists: %s " % dest)
def stamp_yass_current_version(dir):
f = os.path.join(dir, "yass.yml")
if os.path.isfile(f):
with open(f, "r+") as file:
content = file.read()
content = content.replace("##VERSION##", __version__)
file.seek(0)
file.write(content)
file.truncate()
def footer():
print("-" * 80)
def alert(message):
print("::ALERT::")
print(message)
def error(message):
print("::ERROR::")
print(message)
@click.group()
def cli():
"""
Yass: Yet Another Static Site (generator)
"""
pass
@cli.command("version")
def version():
"""Return the vesion of Yass"""
print(__version__)
footer()
@cli.command("build")
def build():
"""Build everything"""
print("Building pages...")
Yass(CWD).build()
print("Done!")
footer()
@cli.command("publish")
@click.argument("endpoint", default="s3")
@click.option("--purge-files", is_flag=True)
@click.option("--rebuild-manifest", is_flag=True)
@click.option("--skip-upload", is_flag=True)
def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
"""Publish the site"""
print("Publishing site to %s ..." % endpoint.upper())
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError("%s endpoint is missing in the config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
print(">>>")
print("Setting S3 site...")
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
print("New bucket created: %s" % p.sitename)
if rebuild_manifest:
print(">>>")
print("Rebuilding site's manifest...")
p.create_manifest_from_s3_files()
if purge_files is True or endpoint.get("purge_files") is True:
print(">>>")
print("Purging files...")
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
if not skip_upload:
print(">>>")
print("Uploading your site...")
p.upload(yass.build_dir)
else:
print(">>>")
print("WARNING: files upload was skipped because of the use of --skip-upload")
print("")
print("Yass! Your site has been successfully published to: ")
print(p.website_endpoint_url)
footer()
@cli.command("setup-dns")
@click.argument("endpoint", default="s3")
def setup_dns(endpoint):
"""Setup site domain to route to static site"""
print("Setting up DNS...")
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError(
"%s endpoint is missing in the hosting config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
print("Setting AWS Route53 for: %s ..." % p.sitename)
p.setup_dns()
print("")
print("Yass! Route53 setup successfully!")
print("You can now visit the site at :")
print(p.sitename_endpoint)
footer()
@cli.command("create-site")
@click.argument("sitename")
def create_site(sitename):
"""Create a new site directory and init Yass"""
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
print("Site directory '%s' exists already!" % sitename)
else:
print("Creating site: %s..." % sitename)
os.makedirs(sitepath)
copy_resource("skel/", sitepath)
stamp_yass_current_version(sitepath)
print("Site created successfully!")
print("CD into '%s' and run 'yass serve' to view the site" % sitename)
footer()
@cli.command("init")
def init():
"""Initialize Yass in the current directory """
yass_conf = os.path.join(CWD, "yass.yml")
if os.path.isfile(yass_conf):
print("::ALERT::")
print("It seems like Yass is already initialized here.")
print("If it's a mistake, delete 'yass.yml' in this directory")
else:
print("Init Yass in %s ..." % CWD)
copy_resource("skel/", CWD)
stamp_yass_current_version(CWD)
print("Yass init successfully!")
print("Run 'yass serve' to view the site")
footer()
@cli.command("create-page")
@click.argument("pagename")
def create_page(pagename):
""" Create a new page Omit the extension, it will create it as .jade file """
page = pagename.lstrip("/").rstrip("/")
_, _ext = os.path.splitext(pagename)
# If the file doesn't have an extension, we'll just create one
if not _ext or _ext == "":
page += ".jade"
if not page.endswith(PAGE_FORMAT):
error("Can't create '%s'" % page)
print("Invalid filename format")
print("Filename must be in: '%s'" % " | ".join(PAGE_FORMAT))
else:
engine = Yass(CWD)
markup = "jade"
if page.endswith(".md"):
markup = "md"
if page.endswith(".html"):
markup = "html"
dest_file = os.path.join(engine.pages_dir, page)
dest_dir = os.path.dirname(dest_file)
content = TPL_HEADER
content += TPL_BODY[markup]
if os.path.isfile(dest_file):
error("File exists already")
print("Location: %s" % dest_file)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with open(dest_file, "w") as f:
f.write(content)
print("New page created: '%s'" % page)
print("Location: %s" % dest_file)
footer()
@cli.command()
@click.option("-p", "--port", default=None)
@click.option("--no-livereload", default=None)
@click.option("--open-url", default=None)
@cli.command("clean")
def clean():
"""Clean the build dir """
print("Cleaning build dir...")
Yass(CWD).clean_build_dir()
print("Done!")
footer()
def cmd():
try:
print("*" * 80)
print("=" * 80)
print("Yass %s!" % __version__)
print("-" * 80)
yass_conf = os.path.join(CWD, "yass.yml")
yass_init = os.path.isfile(yass_conf)
sys_argv = sys.argv
exempt_argv = ["init", "create-site", "version"]
if len(sys_argv) > 1:
if not yass_init and sys_argv[1] not in exempt_argv:
error("Yass is not initialized yet in this directory: %s" % CWD)
print("Run 'yass init' to initialize Yass in the current directory")
footer()
else:
cli()
else:
cli()
except Exception as e:
print("Ohhh noooooo! Something bad happens")
print(">> %s " % e.__repr__())
|
def load_conf(yml_file, conf=None):
    """
    To load the config
    :param yml_file: the config file path
    :param conf: dict, to override global config
    :return: dict
    """
    # conf defaults to None (not {}) to avoid the mutable-default pitfall;
    # falsy either way, so the 'if conf' check below behaves identically.
    with open(yml_file) as f:
        # safe_load: never construct arbitrary Python objects from YAML
        # (yaml.load without an explicit Loader is unsafe and deprecated).
        data = yaml.safe_load(f)
    if conf:
        data.update(conf)
    return dictdot(data)
import os
import re
import yaml
import mimetypes
MIMETYPE_MAP = {
'.js': 'application/javascript',
'.mov': 'video/quicktime',
'.mp4': 'video/mp4',
'.m4v': 'video/x-m4v',
'.3gp': 'video/3gpp',
'.woff': 'application/font-woff',
'.woff2': 'font/woff2',
'.eot': 'application/vnd.ms-fontobject',
'.ttf': 'application/x-font-truetype',
'.otf': 'application/x-font-opentype',
'.svg': 'image/svg+xml',
}
MIMETYPE_DEFAULT = 'application/octet-stream'
def get_mimetype(filename):
    """Return the MIME type for *filename*.

    Tries the stdlib mimetypes registry first, then falls back to the
    module-level MIMETYPE_MAP for extensions the stdlib doesn't know,
    and finally to MIMETYPE_DEFAULT.

    :param filename: path or file name whose extension is inspected
    :return: MIME type string
    """
    mimetype, _ = mimetypes.guess_type(filename)
    if mimetype:
        return mimetype
    # dict.get replaces the explicit membership test + lookup; the unused
    # 'base' variable from splitext is discarded.
    _, ext = os.path.splitext(filename)
    return MIMETYPE_MAP.get(ext.lower(), MIMETYPE_DEFAULT)
class dictdot(dict):
    """
    A dict extension that allows dot notation to access the data.
    ie: dict.get('key.key2.0.keyx'). Still can use dict[key1][k2]
    To create: dictdot(my)
    """
    def get(self, key, default=None):
        """ access data via dot notation """
        try:
            if "." not in key:
                return self[key]
            node = self
            # Walk the dotted path; numeric segments index into sequences.
            for part in key.split("."):
                node = node[int(part) if part.isdigit() else part]
            return node
        except (TypeError, KeyError, IndexError):
            # Any broken step in the path falls back to the default.
            return default
def extract_sitename(s):
    """Strip the scheme and any 'www.' prefix from a URL, leaving the bare
    site name."""
    without_scheme = re.sub(r"https?://(www\.)?", '', s)
    return without_scheme.replace("www.", "")
def chunk_list(items, size):
    """
    Return a list of chunks
    :param items: List
    :param size: int The number of items per chunk
    :return: List
    """
    # Clamp to at least 1 so a zero/negative size still makes progress.
    step = max(1, size)
    chunks = []
    for start in range(0, len(items), step):
        chunks.append(items[start:start + step])
    return chunks
#---
|
mardix/Yass | yass/publisher.py | S3Website.head_bucket | python | def head_bucket(self, name):
try:
self.s3.head_bucket(Bucket=name)
info = self.s3.get_bucket_website(Bucket=self.sitename)
if not info:
return False, 404, "Configure improrperly"
return True, None, None
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ["403", "404"]:
return False, e.response["Error"]["Code"], e.response["Error"]["Message"]
else:
raise e | Check if a bucket exists
:param name:
:return: | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/publisher.py#L123-L139 | null | class S3Website(object):
"""
To manage S3 website and domain on Route53
"""
S3_HOSTED_ZONE_IDS = {
'us-east-1': 'Z3AQBSTGFYJSTF',
'us-west-1': 'Z2F56UZL2M1ACD',
'us-west-2': 'Z3BJ6K6RIION7M',
'ap-south-1': 'Z11RGJOFQNVJUP',
'ap-northeast-1': 'Z2M4EHUR26P7ZW',
'ap-northeast-2': 'Z3W03O7B5YMIYP',
'ap-southeast-1': 'Z3O0J2DXBE1FTB',
'ap-southeast-2': 'Z1WCIGYICN2BYD',
'eu-central-1': 'Z21DNDUVLTQW6Q',
'eu-west-1': 'Z1BKCTXD74EZPE',
'sa-east-1': 'Z7KQH4QJS55SO',
'us-gov-west-1': 'Z31GFT0UA1I2HV',
}
manifest_file = ".yass-manifest"
def __init__(self,
sitename,
region="us-east-1",
aws_access_key_id=None,
aws_secret_access_key=None):
"""
:param sitename: the website name to create, without WWW.
:param region: the region of the site
:param access_key_id: AWS
:param secret_access_key: AWS
:param setup_dns: bool - If True it will create route53
:param allow_www: Bool - If true, it will create a second bucket with www.
"""
# This will be used to pass to concurrent upload
self.aws_params = {
"aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key,
"region_name": region
}
self.region = region
self.s3 = boto3.client('s3', **self.aws_params)
self.sitename = sitename
self.www_sitename = "www." + self.sitename
self.website_endpoint = "%s.s3-website-%s.amazonaws.com" % (self.sitename, region)
self.website_endpoint_url = "http://" + self.website_endpoint
self.sitename_endpoint = "http://" + self.sitename
exists, error_code, error_message = self.head_bucket(self.sitename)
self.website_exists = exists
def setup_dns(self):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone_id = self._get_route53_hosted_zone_by_domain(self.sitename)
if not hosted_zone_id:
caller_reference_uuid = "%s" % (uuid.uuid4())
response = route53.create_hosted_zone(
Name=self.sitename,
CallerReference=caller_reference_uuid,
HostedZoneConfig={'Comment': "HostedZone created by YASS!", 'PrivateZone': False})
hosted_zone_id = response['HostedZone']['Id']
website_dns_name = "s3-website-%s.amazonaws.com" % self.region
redirect_dns_name = "s3-website-%s.amazonaws.com" % self.region
change_batch_payload = {
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': website_dns_name,
'EvaluateTargetHealth': False
}
}
},
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.www_sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': redirect_dns_name,
'EvaluateTargetHealth': False
}
}
}
]
}
response = route53.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch_payload)
return True if response and "ChangeInfo" in response else False
def _get_route53_hosted_zone_by_domain(self, domain):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone = route53.list_hosted_zones()
if hosted_zone or "HostedZones" in hosted_zone:
for hz in hosted_zone["HostedZones"]:
if hz["Name"].rstrip(".") == domain:
return hz["Id"]
return None
def create_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
# Allow read access
policy_payload = {
"Version": "2012-10-17",
"Statement": [{
"Sid": "Allow Public Access to All Objects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*" % (self.sitename)
}
]
}
# Make bucket website and add index.html and error.html
website_payload = {
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
}
self.s3.create_bucket(Bucket=self.sitename)
self.s3.put_bucket_policy(Bucket=self.sitename,
Policy=json.dumps(policy_payload))
self.s3.put_bucket_website(Bucket=self.sitename,
WebsiteConfiguration=website_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.sitename, error_message))
return False
def create_www_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
self.s3.create_bucket(Bucket=self.www_sitename)
redirect_payload = {
'RedirectAllRequestsTo': {
'HostName': self.sitename,
'Protocol': 'http'
}
}
bucket_website_redirect = self.s3.BucketWebsite(self.www_sitename)
bucket_website_redirect.put(WebsiteConfiguration=redirect_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.www_sitename, error_message))
return False
def upload(self, build_dir):
"""
:param build_dir: The directory to upload
:param save_manifest: bool: To save manifest file
:param purge: bool : To delete previously uploaded files
:return:
"""
files_list = []
for root, dirs, files in os.walk(build_dir):
for filename in files:
local_path = os.path.join(root, filename)
s3_path = os.path.relpath(local_path, build_dir)
mimetype = utils.get_mimetype(local_path)
kwargs = dict(aws_params=self.aws_params,
bucket_name=self.sitename,
local_path=local_path,
s3_path=s3_path,
mimetype=mimetype)
files_list.append(s3_path)
threading.Thread(target=self._upload_file, kwargs=kwargs)\
.start()
# Save the files that have been uploaded
self._set_manifest_data(files_list)
def purge_files(self, exclude_files=["index.html", "error.html"]):
"""
To delete files that are in the manifest
:param excludes_files: list : files to not delete
:return:
"""
for chunk in utils.chunk_list(self._get_manifest_data(), 1000):
try:
self.s3.delete_objects(
Bucket=self.sitename,
Delete={
'Objects': [{"Key": f} for f in chunk
if f not in exclude_files]
}
)
except Exception as ex:
pass
def create_manifest_from_s3_files(self):
"""
To create a manifest db for the current
:return:
"""
for k in self.s3.list_objects(Bucket=self.sitename)['Contents']:
key = k["Key"]
files = []
if key not in [self.manifest_file]:
files.append(key)
self._set_manifest_data(files)
def _set_manifest_data(self, files_list):
"""
Write manifest files
:param files_list: list
:return:
"""
if files_list:
data = ",".join(files_list)
self.s3.put_object(Bucket=self.sitename,
Key=self.manifest_file,
Body=data,
ACL='private')
def _get_manifest_data(self):
"""
Return the list of items in the manifest
:return: list
"""
with tempfile.NamedTemporaryFile(delete=True) as tmp:
try:
self.s3.download_fileobj(self.sitename, self.manifest_file, tmp)
tmp.seek(0)
data = tmp.read()
if data is not None:
return data.split(",")
except Exception as ex:
pass
return []
@staticmethod
def _upload_file(aws_params, bucket_name, local_path, s3_path, mimetype):
s3 = boto3.client("s3", **aws_params)
s3.upload_file(local_path,
Bucket=bucket_name,
Key=s3_path,
ExtraArgs={"ContentType": mimetype})
|
mardix/Yass | yass/publisher.py | S3Website.purge_files | python | def purge_files(self, exclude_files=["index.html", "error.html"]):
for chunk in utils.chunk_list(self._get_manifest_data(), 1000):
try:
self.s3.delete_objects(
Bucket=self.sitename,
Delete={
'Objects': [{"Key": f} for f in chunk
if f not in exclude_files]
}
)
except Exception as ex:
pass | To delete files that are in the manifest
:param excludes_files: list : files to not delete
:return: | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/publisher.py#L224-L240 | [
"def chunk_list(items, size):\n \"\"\"\n Return a list of chunks\n :param items: List\n :param size: int The number of items per chunk\n :return: List\n \"\"\"\n size = max(1, size)\n return [items[i:i + size] for i in range(0, len(items), size)]\n",
"def _get_manifest_data(self):\n \"\... | class S3Website(object):
"""
To manage S3 website and domain on Route53
"""
S3_HOSTED_ZONE_IDS = {
'us-east-1': 'Z3AQBSTGFYJSTF',
'us-west-1': 'Z2F56UZL2M1ACD',
'us-west-2': 'Z3BJ6K6RIION7M',
'ap-south-1': 'Z11RGJOFQNVJUP',
'ap-northeast-1': 'Z2M4EHUR26P7ZW',
'ap-northeast-2': 'Z3W03O7B5YMIYP',
'ap-southeast-1': 'Z3O0J2DXBE1FTB',
'ap-southeast-2': 'Z1WCIGYICN2BYD',
'eu-central-1': 'Z21DNDUVLTQW6Q',
'eu-west-1': 'Z1BKCTXD74EZPE',
'sa-east-1': 'Z7KQH4QJS55SO',
'us-gov-west-1': 'Z31GFT0UA1I2HV',
}
manifest_file = ".yass-manifest"
def __init__(self,
sitename,
region="us-east-1",
aws_access_key_id=None,
aws_secret_access_key=None):
"""
:param sitename: the website name to create, without WWW.
:param region: the region of the site
:param access_key_id: AWS
:param secret_access_key: AWS
:param setup_dns: bool - If True it will create route53
:param allow_www: Bool - If true, it will create a second bucket with www.
"""
# This will be used to pass to concurrent upload
self.aws_params = {
"aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key,
"region_name": region
}
self.region = region
self.s3 = boto3.client('s3', **self.aws_params)
self.sitename = sitename
self.www_sitename = "www." + self.sitename
self.website_endpoint = "%s.s3-website-%s.amazonaws.com" % (self.sitename, region)
self.website_endpoint_url = "http://" + self.website_endpoint
self.sitename_endpoint = "http://" + self.sitename
exists, error_code, error_message = self.head_bucket(self.sitename)
self.website_exists = exists
def setup_dns(self):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone_id = self._get_route53_hosted_zone_by_domain(self.sitename)
if not hosted_zone_id:
caller_reference_uuid = "%s" % (uuid.uuid4())
response = route53.create_hosted_zone(
Name=self.sitename,
CallerReference=caller_reference_uuid,
HostedZoneConfig={'Comment': "HostedZone created by YASS!", 'PrivateZone': False})
hosted_zone_id = response['HostedZone']['Id']
website_dns_name = "s3-website-%s.amazonaws.com" % self.region
redirect_dns_name = "s3-website-%s.amazonaws.com" % self.region
change_batch_payload = {
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': website_dns_name,
'EvaluateTargetHealth': False
}
}
},
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.www_sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': redirect_dns_name,
'EvaluateTargetHealth': False
}
}
}
]
}
response = route53.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch_payload)
return True if response and "ChangeInfo" in response else False
def _get_route53_hosted_zone_by_domain(self, domain):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone = route53.list_hosted_zones()
if hosted_zone or "HostedZones" in hosted_zone:
for hz in hosted_zone["HostedZones"]:
if hz["Name"].rstrip(".") == domain:
return hz["Id"]
return None
def head_bucket(self, name):
"""
Check if a bucket exists
:param name:
:return:
"""
try:
self.s3.head_bucket(Bucket=name)
info = self.s3.get_bucket_website(Bucket=self.sitename)
if not info:
return False, 404, "Configure improrperly"
return True, None, None
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ["403", "404"]:
return False, e.response["Error"]["Code"], e.response["Error"]["Message"]
else:
raise e
def create_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
# Allow read access
policy_payload = {
"Version": "2012-10-17",
"Statement": [{
"Sid": "Allow Public Access to All Objects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*" % (self.sitename)
}
]
}
# Make bucket website and add index.html and error.html
website_payload = {
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
}
self.s3.create_bucket(Bucket=self.sitename)
self.s3.put_bucket_policy(Bucket=self.sitename,
Policy=json.dumps(policy_payload))
self.s3.put_bucket_website(Bucket=self.sitename,
WebsiteConfiguration=website_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.sitename, error_message))
return False
def create_www_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
self.s3.create_bucket(Bucket=self.www_sitename)
redirect_payload = {
'RedirectAllRequestsTo': {
'HostName': self.sitename,
'Protocol': 'http'
}
}
bucket_website_redirect = self.s3.BucketWebsite(self.www_sitename)
bucket_website_redirect.put(WebsiteConfiguration=redirect_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.www_sitename, error_message))
return False
def upload(self, build_dir):
"""
:param build_dir: The directory to upload
:param save_manifest: bool: To save manifest file
:param purge: bool : To delete previously uploaded files
:return:
"""
files_list = []
for root, dirs, files in os.walk(build_dir):
for filename in files:
local_path = os.path.join(root, filename)
s3_path = os.path.relpath(local_path, build_dir)
mimetype = utils.get_mimetype(local_path)
kwargs = dict(aws_params=self.aws_params,
bucket_name=self.sitename,
local_path=local_path,
s3_path=s3_path,
mimetype=mimetype)
files_list.append(s3_path)
threading.Thread(target=self._upload_file, kwargs=kwargs)\
.start()
# Save the files that have been uploaded
self._set_manifest_data(files_list)
def create_manifest_from_s3_files(self):
"""
To create a manifest db for the current
:return:
"""
for k in self.s3.list_objects(Bucket=self.sitename)['Contents']:
key = k["Key"]
files = []
if key not in [self.manifest_file]:
files.append(key)
self._set_manifest_data(files)
def _set_manifest_data(self, files_list):
"""
Write manifest files
:param files_list: list
:return:
"""
if files_list:
data = ",".join(files_list)
self.s3.put_object(Bucket=self.sitename,
Key=self.manifest_file,
Body=data,
ACL='private')
def _get_manifest_data(self):
"""
Return the list of items in the manifest
:return: list
"""
with tempfile.NamedTemporaryFile(delete=True) as tmp:
try:
self.s3.download_fileobj(self.sitename, self.manifest_file, tmp)
tmp.seek(0)
data = tmp.read()
if data is not None:
return data.split(",")
except Exception as ex:
pass
return []
@staticmethod
def _upload_file(aws_params, bucket_name, local_path, s3_path, mimetype):
s3 = boto3.client("s3", **aws_params)
s3.upload_file(local_path,
Bucket=bucket_name,
Key=s3_path,
ExtraArgs={"ContentType": mimetype})
|
mardix/Yass | yass/publisher.py | S3Website.create_manifest_from_s3_files | python | def create_manifest_from_s3_files(self):
for k in self.s3.list_objects(Bucket=self.sitename)['Contents']:
key = k["Key"]
files = []
if key not in [self.manifest_file]:
files.append(key)
self._set_manifest_data(files) | To create a manifest db for the current
:return: | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/publisher.py#L242-L252 | [
"def _set_manifest_data(self, files_list):\n \"\"\"\n Write manifest files\n :param files_list: list\n :return:\n \"\"\"\n if files_list:\n data = \",\".join(files_list)\n self.s3.put_object(Bucket=self.sitename,\n Key=self.manifest_file,\n ... | class S3Website(object):
"""
To manage S3 website and domain on Route53
"""
S3_HOSTED_ZONE_IDS = {
'us-east-1': 'Z3AQBSTGFYJSTF',
'us-west-1': 'Z2F56UZL2M1ACD',
'us-west-2': 'Z3BJ6K6RIION7M',
'ap-south-1': 'Z11RGJOFQNVJUP',
'ap-northeast-1': 'Z2M4EHUR26P7ZW',
'ap-northeast-2': 'Z3W03O7B5YMIYP',
'ap-southeast-1': 'Z3O0J2DXBE1FTB',
'ap-southeast-2': 'Z1WCIGYICN2BYD',
'eu-central-1': 'Z21DNDUVLTQW6Q',
'eu-west-1': 'Z1BKCTXD74EZPE',
'sa-east-1': 'Z7KQH4QJS55SO',
'us-gov-west-1': 'Z31GFT0UA1I2HV',
}
manifest_file = ".yass-manifest"
def __init__(self,
sitename,
region="us-east-1",
aws_access_key_id=None,
aws_secret_access_key=None):
"""
:param sitename: the website name to create, without WWW.
:param region: the region of the site
:param access_key_id: AWS
:param secret_access_key: AWS
:param setup_dns: bool - If True it will create route53
:param allow_www: Bool - If true, it will create a second bucket with www.
"""
# This will be used to pass to concurrent upload
self.aws_params = {
"aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key,
"region_name": region
}
self.region = region
self.s3 = boto3.client('s3', **self.aws_params)
self.sitename = sitename
self.www_sitename = "www." + self.sitename
self.website_endpoint = "%s.s3-website-%s.amazonaws.com" % (self.sitename, region)
self.website_endpoint_url = "http://" + self.website_endpoint
self.sitename_endpoint = "http://" + self.sitename
exists, error_code, error_message = self.head_bucket(self.sitename)
self.website_exists = exists
def setup_dns(self):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone_id = self._get_route53_hosted_zone_by_domain(self.sitename)
if not hosted_zone_id:
caller_reference_uuid = "%s" % (uuid.uuid4())
response = route53.create_hosted_zone(
Name=self.sitename,
CallerReference=caller_reference_uuid,
HostedZoneConfig={'Comment': "HostedZone created by YASS!", 'PrivateZone': False})
hosted_zone_id = response['HostedZone']['Id']
website_dns_name = "s3-website-%s.amazonaws.com" % self.region
redirect_dns_name = "s3-website-%s.amazonaws.com" % self.region
change_batch_payload = {
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': website_dns_name,
'EvaluateTargetHealth': False
}
}
},
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.www_sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': redirect_dns_name,
'EvaluateTargetHealth': False
}
}
}
]
}
response = route53.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch_payload)
return True if response and "ChangeInfo" in response else False
def _get_route53_hosted_zone_by_domain(self, domain):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone = route53.list_hosted_zones()
if hosted_zone or "HostedZones" in hosted_zone:
for hz in hosted_zone["HostedZones"]:
if hz["Name"].rstrip(".") == domain:
return hz["Id"]
return None
def head_bucket(self, name):
"""
Check if a bucket exists
:param name:
:return:
"""
try:
self.s3.head_bucket(Bucket=name)
info = self.s3.get_bucket_website(Bucket=self.sitename)
if not info:
return False, 404, "Configure improrperly"
return True, None, None
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ["403", "404"]:
return False, e.response["Error"]["Code"], e.response["Error"]["Message"]
else:
raise e
def create_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
# Allow read access
policy_payload = {
"Version": "2012-10-17",
"Statement": [{
"Sid": "Allow Public Access to All Objects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*" % (self.sitename)
}
]
}
# Make bucket website and add index.html and error.html
website_payload = {
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
}
self.s3.create_bucket(Bucket=self.sitename)
self.s3.put_bucket_policy(Bucket=self.sitename,
Policy=json.dumps(policy_payload))
self.s3.put_bucket_website(Bucket=self.sitename,
WebsiteConfiguration=website_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.sitename, error_message))
return False
def create_www_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
self.s3.create_bucket(Bucket=self.www_sitename)
redirect_payload = {
'RedirectAllRequestsTo': {
'HostName': self.sitename,
'Protocol': 'http'
}
}
bucket_website_redirect = self.s3.BucketWebsite(self.www_sitename)
bucket_website_redirect.put(WebsiteConfiguration=redirect_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.www_sitename, error_message))
return False
def upload(self, build_dir):
"""
:param build_dir: The directory to upload
:param save_manifest: bool: To save manifest file
:param purge: bool : To delete previously uploaded files
:return:
"""
files_list = []
for root, dirs, files in os.walk(build_dir):
for filename in files:
local_path = os.path.join(root, filename)
s3_path = os.path.relpath(local_path, build_dir)
mimetype = utils.get_mimetype(local_path)
kwargs = dict(aws_params=self.aws_params,
bucket_name=self.sitename,
local_path=local_path,
s3_path=s3_path,
mimetype=mimetype)
files_list.append(s3_path)
threading.Thread(target=self._upload_file, kwargs=kwargs)\
.start()
# Save the files that have been uploaded
self._set_manifest_data(files_list)
def purge_files(self, exclude_files=["index.html", "error.html"]):
"""
To delete files that are in the manifest
:param excludes_files: list : files to not delete
:return:
"""
for chunk in utils.chunk_list(self._get_manifest_data(), 1000):
try:
self.s3.delete_objects(
Bucket=self.sitename,
Delete={
'Objects': [{"Key": f} for f in chunk
if f not in exclude_files]
}
)
except Exception as ex:
pass
def _set_manifest_data(self, files_list):
"""
Write manifest files
:param files_list: list
:return:
"""
if files_list:
data = ",".join(files_list)
self.s3.put_object(Bucket=self.sitename,
Key=self.manifest_file,
Body=data,
ACL='private')
def _get_manifest_data(self):
"""
Return the list of items in the manifest
:return: list
"""
with tempfile.NamedTemporaryFile(delete=True) as tmp:
try:
self.s3.download_fileobj(self.sitename, self.manifest_file, tmp)
tmp.seek(0)
data = tmp.read()
if data is not None:
return data.split(",")
except Exception as ex:
pass
return []
@staticmethod
def _upload_file(aws_params, bucket_name, local_path, s3_path, mimetype):
s3 = boto3.client("s3", **aws_params)
s3.upload_file(local_path,
Bucket=bucket_name,
Key=s3_path,
ExtraArgs={"ContentType": mimetype})
|
mardix/Yass | yass/publisher.py | S3Website._set_manifest_data | python | def _set_manifest_data(self, files_list):
if files_list:
data = ",".join(files_list)
self.s3.put_object(Bucket=self.sitename,
Key=self.manifest_file,
Body=data,
ACL='private') | Write manifest files
:param files_list: list
:return: | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/publisher.py#L254-L265 | null | class S3Website(object):
"""
To manage S3 website and domain on Route53
"""
S3_HOSTED_ZONE_IDS = {
'us-east-1': 'Z3AQBSTGFYJSTF',
'us-west-1': 'Z2F56UZL2M1ACD',
'us-west-2': 'Z3BJ6K6RIION7M',
'ap-south-1': 'Z11RGJOFQNVJUP',
'ap-northeast-1': 'Z2M4EHUR26P7ZW',
'ap-northeast-2': 'Z3W03O7B5YMIYP',
'ap-southeast-1': 'Z3O0J2DXBE1FTB',
'ap-southeast-2': 'Z1WCIGYICN2BYD',
'eu-central-1': 'Z21DNDUVLTQW6Q',
'eu-west-1': 'Z1BKCTXD74EZPE',
'sa-east-1': 'Z7KQH4QJS55SO',
'us-gov-west-1': 'Z31GFT0UA1I2HV',
}
manifest_file = ".yass-manifest"
def __init__(self,
sitename,
region="us-east-1",
aws_access_key_id=None,
aws_secret_access_key=None):
"""
:param sitename: the website name to create, without WWW.
:param region: the region of the site
:param access_key_id: AWS
:param secret_access_key: AWS
:param setup_dns: bool - If True it will create route53
:param allow_www: Bool - If true, it will create a second bucket with www.
"""
# This will be used to pass to concurrent upload
self.aws_params = {
"aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key,
"region_name": region
}
self.region = region
self.s3 = boto3.client('s3', **self.aws_params)
self.sitename = sitename
self.www_sitename = "www." + self.sitename
self.website_endpoint = "%s.s3-website-%s.amazonaws.com" % (self.sitename, region)
self.website_endpoint_url = "http://" + self.website_endpoint
self.sitename_endpoint = "http://" + self.sitename
exists, error_code, error_message = self.head_bucket(self.sitename)
self.website_exists = exists
def setup_dns(self):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone_id = self._get_route53_hosted_zone_by_domain(self.sitename)
if not hosted_zone_id:
caller_reference_uuid = "%s" % (uuid.uuid4())
response = route53.create_hosted_zone(
Name=self.sitename,
CallerReference=caller_reference_uuid,
HostedZoneConfig={'Comment': "HostedZone created by YASS!", 'PrivateZone': False})
hosted_zone_id = response['HostedZone']['Id']
website_dns_name = "s3-website-%s.amazonaws.com" % self.region
redirect_dns_name = "s3-website-%s.amazonaws.com" % self.region
change_batch_payload = {
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': website_dns_name,
'EvaluateTargetHealth': False
}
}
},
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.www_sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': redirect_dns_name,
'EvaluateTargetHealth': False
}
}
}
]
}
response = route53.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch_payload)
return True if response and "ChangeInfo" in response else False
def _get_route53_hosted_zone_by_domain(self, domain):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone = route53.list_hosted_zones()
if hosted_zone or "HostedZones" in hosted_zone:
for hz in hosted_zone["HostedZones"]:
if hz["Name"].rstrip(".") == domain:
return hz["Id"]
return None
def head_bucket(self, name):
"""
Check if a bucket exists
:param name:
:return:
"""
try:
self.s3.head_bucket(Bucket=name)
info = self.s3.get_bucket_website(Bucket=self.sitename)
if not info:
return False, 404, "Configure improrperly"
return True, None, None
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ["403", "404"]:
return False, e.response["Error"]["Code"], e.response["Error"]["Message"]
else:
raise e
def create_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
# Allow read access
policy_payload = {
"Version": "2012-10-17",
"Statement": [{
"Sid": "Allow Public Access to All Objects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*" % (self.sitename)
}
]
}
# Make bucket website and add index.html and error.html
website_payload = {
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
}
self.s3.create_bucket(Bucket=self.sitename)
self.s3.put_bucket_policy(Bucket=self.sitename,
Policy=json.dumps(policy_payload))
self.s3.put_bucket_website(Bucket=self.sitename,
WebsiteConfiguration=website_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.sitename, error_message))
return False
def create_www_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
self.s3.create_bucket(Bucket=self.www_sitename)
redirect_payload = {
'RedirectAllRequestsTo': {
'HostName': self.sitename,
'Protocol': 'http'
}
}
bucket_website_redirect = self.s3.BucketWebsite(self.www_sitename)
bucket_website_redirect.put(WebsiteConfiguration=redirect_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.www_sitename, error_message))
return False
def upload(self, build_dir):
"""
:param build_dir: The directory to upload
:param save_manifest: bool: To save manifest file
:param purge: bool : To delete previously uploaded files
:return:
"""
files_list = []
for root, dirs, files in os.walk(build_dir):
for filename in files:
local_path = os.path.join(root, filename)
s3_path = os.path.relpath(local_path, build_dir)
mimetype = utils.get_mimetype(local_path)
kwargs = dict(aws_params=self.aws_params,
bucket_name=self.sitename,
local_path=local_path,
s3_path=s3_path,
mimetype=mimetype)
files_list.append(s3_path)
threading.Thread(target=self._upload_file, kwargs=kwargs)\
.start()
# Save the files that have been uploaded
self._set_manifest_data(files_list)
def purge_files(self, exclude_files=["index.html", "error.html"]):
"""
To delete files that are in the manifest
:param excludes_files: list : files to not delete
:return:
"""
for chunk in utils.chunk_list(self._get_manifest_data(), 1000):
try:
self.s3.delete_objects(
Bucket=self.sitename,
Delete={
'Objects': [{"Key": f} for f in chunk
if f not in exclude_files]
}
)
except Exception as ex:
pass
def create_manifest_from_s3_files(self):
"""
To create a manifest db for the current
:return:
"""
for k in self.s3.list_objects(Bucket=self.sitename)['Contents']:
key = k["Key"]
files = []
if key not in [self.manifest_file]:
files.append(key)
self._set_manifest_data(files)
def _get_manifest_data(self):
"""
Return the list of items in the manifest
:return: list
"""
with tempfile.NamedTemporaryFile(delete=True) as tmp:
try:
self.s3.download_fileobj(self.sitename, self.manifest_file, tmp)
tmp.seek(0)
data = tmp.read()
if data is not None:
return data.split(",")
except Exception as ex:
pass
return []
@staticmethod
def _upload_file(aws_params, bucket_name, local_path, s3_path, mimetype):
s3 = boto3.client("s3", **aws_params)
s3.upload_file(local_path,
Bucket=bucket_name,
Key=s3_path,
ExtraArgs={"ContentType": mimetype})
|
mardix/Yass | yass/publisher.py | S3Website._get_manifest_data | python | def _get_manifest_data(self):
with tempfile.NamedTemporaryFile(delete=True) as tmp:
try:
self.s3.download_fileobj(self.sitename, self.manifest_file, tmp)
tmp.seek(0)
data = tmp.read()
if data is not None:
return data.split(",")
except Exception as ex:
pass
return [] | Return the list of items in the manifest
:return: list | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/publisher.py#L267-L281 | null | class S3Website(object):
"""
To manage S3 website and domain on Route53
"""
S3_HOSTED_ZONE_IDS = {
'us-east-1': 'Z3AQBSTGFYJSTF',
'us-west-1': 'Z2F56UZL2M1ACD',
'us-west-2': 'Z3BJ6K6RIION7M',
'ap-south-1': 'Z11RGJOFQNVJUP',
'ap-northeast-1': 'Z2M4EHUR26P7ZW',
'ap-northeast-2': 'Z3W03O7B5YMIYP',
'ap-southeast-1': 'Z3O0J2DXBE1FTB',
'ap-southeast-2': 'Z1WCIGYICN2BYD',
'eu-central-1': 'Z21DNDUVLTQW6Q',
'eu-west-1': 'Z1BKCTXD74EZPE',
'sa-east-1': 'Z7KQH4QJS55SO',
'us-gov-west-1': 'Z31GFT0UA1I2HV',
}
manifest_file = ".yass-manifest"
def __init__(self,
sitename,
region="us-east-1",
aws_access_key_id=None,
aws_secret_access_key=None):
"""
:param sitename: the website name to create, without WWW.
:param region: the region of the site
:param access_key_id: AWS
:param secret_access_key: AWS
:param setup_dns: bool - If True it will create route53
:param allow_www: Bool - If true, it will create a second bucket with www.
"""
# This will be used to pass to concurrent upload
self.aws_params = {
"aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key,
"region_name": region
}
self.region = region
self.s3 = boto3.client('s3', **self.aws_params)
self.sitename = sitename
self.www_sitename = "www." + self.sitename
self.website_endpoint = "%s.s3-website-%s.amazonaws.com" % (self.sitename, region)
self.website_endpoint_url = "http://" + self.website_endpoint
self.sitename_endpoint = "http://" + self.sitename
exists, error_code, error_message = self.head_bucket(self.sitename)
self.website_exists = exists
def setup_dns(self):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone_id = self._get_route53_hosted_zone_by_domain(self.sitename)
if not hosted_zone_id:
caller_reference_uuid = "%s" % (uuid.uuid4())
response = route53.create_hosted_zone(
Name=self.sitename,
CallerReference=caller_reference_uuid,
HostedZoneConfig={'Comment': "HostedZone created by YASS!", 'PrivateZone': False})
hosted_zone_id = response['HostedZone']['Id']
website_dns_name = "s3-website-%s.amazonaws.com" % self.region
redirect_dns_name = "s3-website-%s.amazonaws.com" % self.region
change_batch_payload = {
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': website_dns_name,
'EvaluateTargetHealth': False
}
}
},
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': self.www_sitename,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': self.S3_HOSTED_ZONE_IDS[self.region],
'DNSName': redirect_dns_name,
'EvaluateTargetHealth': False
}
}
}
]
}
response = route53.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch_payload)
return True if response and "ChangeInfo" in response else False
def _get_route53_hosted_zone_by_domain(self, domain):
route53 = boto3.client('route53', **self.aws_params)
hosted_zone = route53.list_hosted_zones()
if hosted_zone or "HostedZones" in hosted_zone:
for hz in hosted_zone["HostedZones"]:
if hz["Name"].rstrip(".") == domain:
return hz["Id"]
return None
def head_bucket(self, name):
"""
Check if a bucket exists
:param name:
:return:
"""
try:
self.s3.head_bucket(Bucket=name)
info = self.s3.get_bucket_website(Bucket=self.sitename)
if not info:
return False, 404, "Configure improrperly"
return True, None, None
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ["403", "404"]:
return False, e.response["Error"]["Code"], e.response["Error"]["Message"]
else:
raise e
def create_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
# Allow read access
policy_payload = {
"Version": "2012-10-17",
"Statement": [{
"Sid": "Allow Public Access to All Objects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*" % (self.sitename)
}
]
}
# Make bucket website and add index.html and error.html
website_payload = {
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
}
self.s3.create_bucket(Bucket=self.sitename)
self.s3.put_bucket_policy(Bucket=self.sitename,
Policy=json.dumps(policy_payload))
self.s3.put_bucket_website(Bucket=self.sitename,
WebsiteConfiguration=website_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.sitename, error_message))
return False
def create_www_website(self):
exists, error_code, error_message = self.head_bucket(self.sitename)
if not exists:
if error_code == "404":
self.s3.create_bucket(Bucket=self.www_sitename)
redirect_payload = {
'RedirectAllRequestsTo': {
'HostName': self.sitename,
'Protocol': 'http'
}
}
bucket_website_redirect = self.s3.BucketWebsite(self.www_sitename)
bucket_website_redirect.put(WebsiteConfiguration=redirect_payload)
return True
else:
raise Exception("Can't create website's bucket '%s' on AWS S3. "
"Error: %s" % (self.www_sitename, error_message))
return False
def upload(self, build_dir):
"""
:param build_dir: The directory to upload
:param save_manifest: bool: To save manifest file
:param purge: bool : To delete previously uploaded files
:return:
"""
files_list = []
for root, dirs, files in os.walk(build_dir):
for filename in files:
local_path = os.path.join(root, filename)
s3_path = os.path.relpath(local_path, build_dir)
mimetype = utils.get_mimetype(local_path)
kwargs = dict(aws_params=self.aws_params,
bucket_name=self.sitename,
local_path=local_path,
s3_path=s3_path,
mimetype=mimetype)
files_list.append(s3_path)
threading.Thread(target=self._upload_file, kwargs=kwargs)\
.start()
# Save the files that have been uploaded
self._set_manifest_data(files_list)
def purge_files(self, exclude_files=["index.html", "error.html"]):
"""
To delete files that are in the manifest
:param excludes_files: list : files to not delete
:return:
"""
for chunk in utils.chunk_list(self._get_manifest_data(), 1000):
try:
self.s3.delete_objects(
Bucket=self.sitename,
Delete={
'Objects': [{"Key": f} for f in chunk
if f not in exclude_files]
}
)
except Exception as ex:
pass
def create_manifest_from_s3_files(self):
"""
To create a manifest db for the current
:return:
"""
for k in self.s3.list_objects(Bucket=self.sitename)['Contents']:
key = k["Key"]
files = []
if key not in [self.manifest_file]:
files.append(key)
self._set_manifest_data(files)
def _set_manifest_data(self, files_list):
"""
Write manifest files
:param files_list: list
:return:
"""
if files_list:
data = ",".join(files_list)
self.s3.put_object(Bucket=self.sitename,
Key=self.manifest_file,
Body=data,
ACL='private')
@staticmethod
def _upload_file(aws_params, bucket_name, local_path, s3_path, mimetype):
s3 = boto3.client("s3", **aws_params)
s3.upload_file(local_path,
Bucket=bucket_name,
Key=s3_path,
ExtraArgs={"ContentType": mimetype})
|
mardix/Yass | yass/yass.py | Yass._yass_vars | python | def _yass_vars(self):
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
} | Global variables | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L99-L108 | null | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
    def __init__(self, root_dir, config=None):
        """
        :param root_dir: The application root dir
        :param config: (dict), Dict configuration, will override previously set data
        """
        # Conventional project layout, all paths derived from root_dir.
        self.root_dir = root_dir
        self.build_dir = os.path.join(self.root_dir, "build")
        self.static_dir = os.path.join(self.root_dir, "static")
        self.content_dir = os.path.join(self.root_dir, "content")
        self.pages_dir = os.path.join(self.root_dir, "pages")
        self.templates_dir = os.path.join(self.root_dir, "templates")
        self.data_dir = os.path.join(self.root_dir, "data")
        self.build_static_dir = os.path.join(self.build_dir, "static")
        # Config is read from <root>/yass.yml; `config` overrides it.
        config_file = os.path.join(self.root_dir, "yass.yml")
        self.config = utils.load_conf(config_file, config)
        self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
        self.site_config = utils.dictdot(self.config.get("site", {}))
        self.site_config.setdefault("base_url", "/")
        self.base_url = self.site_config.get("base_url")
        self.sitename = utils.extract_sitename(self.config.get("sitename"))
        # Data is loaded before the jinja env so it can be injected as a
        # template global.
        self._data = self._load_data()
        self._init_jinja({
            "site": self.site_config,
            "data": self._data,
            "__YASS__": self._yass_vars()
        })
        # webassets attaches to tpl_env, so it must run after _init_jinja().
        self._init_webassets()
    def _init_jinja(self, global_context={}):
        """
        Build the jinja2 environment used to render every page.

        Template lookup order: the bundled ``yass.macros`` dict loader
        first, then the project's ./templates directory.

        :param global_context: dict merged into the environment globals
            (site config, data, __YASS__ vars). Read-only here, so the
            mutable default is safe.
        """
        loader = jinja2.ChoiceLoader([
            # global macros shipped with yass itself
            jinja2.DictLoader({
                "yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
            }),
            jinja2.FileSystemLoader(self.templates_dir)
        ])
        self.tpl_env = jinja2.Environment(loader=loader,
                                          extensions=[
                                              'pyjade.ext.jinja.PyJadeExtension',
                                              'yass.extras.htmlcompress.HTMLCompress',
                                              'yass.extras.jade.JadeTagExtension',
                                              'yass.extras.md.MarkdownExtension',
                                              'yass.extras.md.MarkdownTagExtension',
                                              AssetsExtension
                                          ])
        self.tpl_env.globals.update(global_context)
        self.tpl_env.filters.update({
            "format_datetime": lambda dt, format: arrow.get(dt).format(format),
            "yass_link_to": self._link_to,  # render an <a> tag for a page
            "yass_url_to": self._url_to  # resolve a page's pretty url
        })
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
    def _load_data(self):
        """
        Load site data from ./data/*.json files and optional remote JSON
        endpoints, keyed by file name (without extension) or by the
        configured endpoint name.

        :return: dict-dot wrapper over {name: parsed JSON}
        :raises Exception: when a remote endpoint is unreachable or
            returns a non-200 status.
        """
        data = {}
        # Load data from the data directory
        for root, _, files in os.walk(self.data_dir):
            for fname in files:
                if fname.endswith((".json",)):
                    name = fname.replace(".json", "")
                    # rebinds fname from bare name to full path
                    fname = os.path.join(root, fname)
                    if os.path.isfile(fname):
                        with open(fname) as f:
                            _ = json.load(f)
                            # dicts get dot-access; lists pass through as-is
                            if isinstance(_, dict):
                                _ = utils.dictdot(_)
                            data[name] = _
        # data_api_urls
        # Doing API call to retrieve the data and assign it to its key
        # Data must be JSON
        data_api_urls = self.site_config.get("data_api_urls")
        if data_api_urls:
            for name, url in data_api_urls.items():
                try:
                    r = requests.get(url)
                    if r.status_code == 200:
                        _ = r.json()
                        if isinstance(_, dict):
                            _ = utils.dictdot(_)
                        data[name] = _
                    else:
                        raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
                except Exception as e:
                    # any endpoint failure aborts the whole build, rewrapped
                    # with the API-url context
                    raise Exception("Data API URLS Error: %s" % e)
        return utils.dictdot(data)
    def _init_webassets(self):
        """
        Configure webassets: register the bundles from config, attach the
        assets environment to the jinja env, and—when bundles exist—prepare
        the CLI environment used by build_static() to compile them.
        """
        assets_env = WAEnv(directory="./static",
                           url=self.config.get("static_url", "/static"))
        bundles = self.config.get("assets_bundles", {})
        assets_env.register(bundles)
        self.tpl_env.assets_environment = assets_env
        self.webassets_cmd = None
        if bundles:
            # webassets logs are only surfaced when debug is on
            handler = logging.StreamHandler if self.config.get("debug", False) \
                else logging.NullHandler
            log = logging.getLogger('webassets')
            log.addHandler(handler())
            log.setLevel(logging.DEBUG)
            self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
    def build_static(self):
        """
        Copy the static directory into the build output and, when asset
        bundles are configured, compile them via webassets.
        """
        if not os.path.isdir(self.build_static_dir):
            os.makedirs(self.build_static_dir)
        copy_tree(self.static_dir, self.build_static_dir)
        # webassets_cmd is only set by _init_webassets() when bundles exist
        if self.webassets_cmd:
            self.webassets_cmd.build()
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
    def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
        """
        To dynamically create a page and save it in the build_dir
        :param build_dir: (path) The base directory that will hold the created page
        :param filepath: (string) the name of the file to create. May contain slash to indicate directory
                        It will also create the url based on that name
                        If the filename doesn't end with .html, it will create a subdirectory
                        and create `index.html`
                        If file contains `.html` it will stays as is
                        ie:
                            post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
                            another/music/new-rap-song.html -> another/music/new-rap-song.html
                            post/page/5 -> post/page/5/index.html
        :param context: (dict) context data
        :param content: (text) The content of the file to be created. Will be overriden by template
        :param template: (path) if source is not provided, template can be used to create the page.
                        Along with context it allows to create dynamic pages.
                        The file is relative to `/templates/`
                        file can be in html|jade|md
        :param markup: (string: html|jade|md), when using content. To indicate which markup to use.
                        based on the markup it will parse the data
                        html: will render as is
                        jade and md: convert to the appropriate format
        :param layout: (string) when using content. The layout to use.
                       The file location is relative to `/templates/`
                       file can be in html|jade|md
        :return:
        """
        # normalize: pretty-url paths become <path>/index.html
        build_dir = build_dir.rstrip("/")
        filepath = filepath.lstrip("/").rstrip("/")
        if not filepath.endswith(".html"):
            filepath += "/index.html"
        dest_file = os.path.join(build_dir, filepath)
        dest_dir = os.path.dirname(dest_file)
        if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        # NOTE(review): `_context` aliases the caller's dict — the "page"
        # key added below mutates the caller's `context`; confirm intended.
        _context = context
        if "page" not in _context:
            _context["page"] = self.default_page_meta.copy()
        if "url" not in _context["page"]:
            _context["page"]["url"] = "/" + filepath.lstrip("/").replace(
                "index.html", "")
        if template:
            # named templates are compiled once and cached class-wide
            if template not in self._templates:
                self._templates[template] = self.tpl_env.get_template(template)
            tpl = self._templates[template]
        else:
            if markup == "md":
                _context["page"]["__toc__"] = md.get_toc(content)
                content = md.convert(content)
            elif markup == "jade":
                content = jade.convert(content)
            # Page must be extended by a layout and have a block 'body'
            # These tags will be included if they are missing.
            # str.replace("{}", ...) is used instead of str.format because
            # the template string itself contains jinja "{%" braces.
            if re.search(self.RE_EXTENDS, content) is None:
                layout = layout or self.default_layout
                content = "\n{% extends '{}' %} \n\n".replace("{}",
                                                              layout) + content
            if re.search(self.RE_BLOCK_BODY, content) is None:
                # move the extends tag to the top and wrap everything else
                # in {% block body %} ... {% endblock %}
                _layout_block = re.search(self.RE_EXTENDS, content).group(0)
                content = content.replace(_layout_block, "")
                content = "\n" + _layout_block + "\n" + \
                          "{% block body %} \n" + content.strip() + "\n{% endblock %}"
            tpl = self.tpl_env.from_string(content)
        with open(dest_file, "w") as fw:
            fw.write(tpl.render(**_context))
    def build(self):
        """
        Run a complete site build: wipe the build dir, then copy/compile
        static assets, then render all pages into the build dir.
        """
        self.clean_build_dir()
        # clean_build_dir() already recreates the directory; this guard is
        # defensive in case that behavior ever changes.
        if not os.path.isdir(self.build_dir):
            os.makedirs(self.build_dir)
        self.build_static()
        self.build_pages()
    def publish(self, target="S3", sitename=None, purge_files=True):
        """
        To publish programmatically.
        :param target: Where to publish at; only "S3" is supported
        :param sitename: The site name (falls back to config "sitename")
        :param purge_files: if True, it will delete old files first
        :return: the website endpoint url (S3 website hosting)
        """
        self.build()
        # hosting credentials/settings for the chosen target
        endpoint = self.config.get("hosting.%s" % target)
        if target.upper() == "S3":
            p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
                                    aws_access_key_id=endpoint.get("aws_access_key_id"),
                                    aws_secret_access_key=endpoint.get("aws_secret_access_key"),
                                    region=endpoint.get("aws_region"))
            if not p.website_exists:
                if p.create_website() is True:
                    # Need to give it enough time to create it
                    # Should be a one time thing
                    time.sleep(10)
                    p.create_www_website()
            # snapshot the remote file list so purge/upload can diff it
            p.create_manifest_from_s3_files()
            if purge_files:
                exclude_files = endpoint.get("purge_exclude_files", [])
                p.purge_files(exclude_files=exclude_files)
            p.upload(self.build_dir)
            return p.website_endpoint_url
|
mardix/Yass | yass/yass.py | Yass._get_page_meta | python | def _get_page_meta(self, page):
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta | Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L136-L158 | null | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
def __init__(self, root_dir, config=None):
"""
:param root_dir: The application root dir
:param config: (dict), Dict configuration, will override previously set data
"""
self.root_dir = root_dir
self.build_dir = os.path.join(self.root_dir, "build")
self.static_dir = os.path.join(self.root_dir, "static")
self.content_dir = os.path.join(self.root_dir, "content")
self.pages_dir = os.path.join(self.root_dir, "pages")
self.templates_dir = os.path.join(self.root_dir, "templates")
self.data_dir = os.path.join(self.root_dir, "data")
self.build_static_dir = os.path.join(self.build_dir, "static")
config_file = os.path.join(self.root_dir, "yass.yml")
self.config = utils.load_conf(config_file, config)
self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
self.site_config = utils.dictdot(self.config.get("site", {}))
self.site_config.setdefault("base_url", "/")
self.base_url = self.site_config.get("base_url")
self.sitename = utils.extract_sitename(self.config.get("sitename"))
self._data = self._load_data()
self._init_jinja({
"site": self.site_config,
"data": self._data,
"__YASS__": self._yass_vars()
})
self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
def _init_jinja(self, global_context={}):
loader = jinja2.ChoiceLoader([
# global macros
jinja2.DictLoader({
"yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
}),
jinja2.FileSystemLoader(self.templates_dir)
])
self.tpl_env = jinja2.Environment(loader=loader,
extensions=[
'pyjade.ext.jinja.PyJadeExtension',
'yass.extras.htmlcompress.HTMLCompress',
'yass.extras.jade.JadeTagExtension',
'yass.extras.md.MarkdownExtension',
'yass.extras.md.MarkdownTagExtension',
AssetsExtension
])
self.tpl_env.globals.update(global_context)
self.tpl_env.filters.update({
"format_datetime": lambda dt, format: arrow.get(dt).format(format),
"yass_link_to": self._link_to, # link for a
"yass_url_to": self._url_to # url for a page
})
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
def _load_data(self):
data = {}
# Load data from the data directory
for root, _, files in os.walk(self.data_dir):
for fname in files:
if fname.endswith((".json",)):
name = fname.replace(".json", "")
fname = os.path.join(root, fname)
if os.path.isfile(fname):
with open(fname) as f:
_ = json.load(f)
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
# data_api_urls
# Doing API call to retrieve the data and assign it to its key
# Data must be JSON
data_api_urls = self.site_config.get("data_api_urls")
if data_api_urls:
for name, url in data_api_urls.items():
try:
r = requests.get(url)
if r.status_code == 200:
_ = r.json()
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
else:
raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
except Exception as e:
raise Exception("Data API URLS Error: %s" % e)
return utils.dictdot(data)
def _init_webassets(self):
assets_env = WAEnv(directory="./static",
url=self.config.get("static_url", "/static"))
bundles = self.config.get("assets_bundles", {})
assets_env.register(bundles)
self.tpl_env.assets_environment = assets_env
self.webassets_cmd = None
if bundles:
handler = logging.StreamHandler if self.config.get("debug", False) \
else logging.NullHandler
log = logging.getLogger('webassets')
log.addHandler(handler())
log.setLevel(logging.DEBUG)
self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir)
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build()
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
"""
To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return:
"""
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context))
def build(self):
self.clean_build_dir()
if not os.path.isdir(self.build_dir):
os.makedirs(self.build_dir)
self.build_static()
self.build_pages()
def publish(self, target="S3", sitename=None, purge_files=True):
"""
To publish programatically
:param target: Where to pusblish at, S3
:param sitename: The site name
:param purge_files: if True, it will delete old files
:return:
"""
self.build()
endpoint = self.config.get("hosting.%s" % target)
if target.upper() == "S3":
p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
p.create_manifest_from_s3_files()
if purge_files:
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
p.upload(self.build_dir)
return p.website_endpoint_url
|
mardix/Yass | yass/yass.py | Yass._get_page_content | python | def _get_page_content(self, page):
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content | Get the page content without the frontmatter | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L160-L165 | null | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
def __init__(self, root_dir, config=None):
"""
:param root_dir: The application root dir
:param config: (dict), Dict configuration, will override previously set data
"""
self.root_dir = root_dir
self.build_dir = os.path.join(self.root_dir, "build")
self.static_dir = os.path.join(self.root_dir, "static")
self.content_dir = os.path.join(self.root_dir, "content")
self.pages_dir = os.path.join(self.root_dir, "pages")
self.templates_dir = os.path.join(self.root_dir, "templates")
self.data_dir = os.path.join(self.root_dir, "data")
self.build_static_dir = os.path.join(self.build_dir, "static")
config_file = os.path.join(self.root_dir, "yass.yml")
self.config = utils.load_conf(config_file, config)
self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
self.site_config = utils.dictdot(self.config.get("site", {}))
self.site_config.setdefault("base_url", "/")
self.base_url = self.site_config.get("base_url")
self.sitename = utils.extract_sitename(self.config.get("sitename"))
self._data = self._load_data()
self._init_jinja({
"site": self.site_config,
"data": self._data,
"__YASS__": self._yass_vars()
})
self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
def _init_jinja(self, global_context={}):
loader = jinja2.ChoiceLoader([
# global macros
jinja2.DictLoader({
"yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
}),
jinja2.FileSystemLoader(self.templates_dir)
])
self.tpl_env = jinja2.Environment(loader=loader,
extensions=[
'pyjade.ext.jinja.PyJadeExtension',
'yass.extras.htmlcompress.HTMLCompress',
'yass.extras.jade.JadeTagExtension',
'yass.extras.md.MarkdownExtension',
'yass.extras.md.MarkdownTagExtension',
AssetsExtension
])
self.tpl_env.globals.update(global_context)
self.tpl_env.filters.update({
"format_datetime": lambda dt, format: arrow.get(dt).format(format),
"yass_link_to": self._link_to, # link for a
"yass_url_to": self._url_to # url for a page
})
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
    def _load_data(self):
        """Load all site data into one dict.

        Reads every ``*.json`` file under the data directory (keyed by the
        file's basename without extension), then fetches any
        ``data_api_urls`` declared in the site config (keyed by their config
        name; responses must be JSON).

        :return: a dot-accessible dict (`utils.dictdot`) of all loaded data
        :raises Exception: if an API call fails or returns a non-200 status
        """
        data = {}
        # Load data from the data directory
        for root, _, files in os.walk(self.data_dir):
            for fname in files:
                if fname.endswith((".json",)):
                    name = fname.replace(".json", "")
                    fname = os.path.join(root, fname)
                    if os.path.isfile(fname):
                        with open(fname) as f:
                            _ = json.load(f)
                            # wrap dicts so keys are reachable as a.b.c
                            if isinstance(_, dict):
                                _ = utils.dictdot(_)
                            data[name] = _
        # data_api_urls
        # Doing API call to retrieve the data and assign it to its key
        # Data must be JSON
        data_api_urls = self.site_config.get("data_api_urls")
        if data_api_urls:
            for name, url in data_api_urls.items():
                try:
                    r = requests.get(url)
                    if r.status_code == 200:
                        _ = r.json()
                        if isinstance(_, dict):
                            _ = utils.dictdot(_)
                        data[name] = _
                    else:
                        raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
                except Exception as e:
                    # re-raise with context so the failing endpoint is identifiable
                    raise Exception("Data API URLS Error: %s" % e)
        return utils.dictdot(data)
def _init_webassets(self):
assets_env = WAEnv(directory="./static",
url=self.config.get("static_url", "/static"))
bundles = self.config.get("assets_bundles", {})
assets_env.register(bundles)
self.tpl_env.assets_environment = assets_env
self.webassets_cmd = None
if bundles:
handler = logging.StreamHandler if self.config.get("debug", False) \
else logging.NullHandler
log = logging.getLogger('webassets')
log.addHandler(handler())
log.setLevel(logging.DEBUG)
self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir)
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build()
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
"""
To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return:
"""
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context))
    def build(self):
        """Run a full site build: clean output, copy static files, render pages."""
        self.clean_build_dir()
        # Safety net: clean_build_dir() should already have recreated build_dir
        if not os.path.isdir(self.build_dir):
            os.makedirs(self.build_dir)
        self.build_static()
        self.build_pages()
    def publish(self, target="S3", sitename=None, purge_files=True):
        """
        To publish programmatically.

        Runs a full build, then uploads the build output to the hosting
        target configured under `hosting.<target>`.

        :param target: Where to publish at; only "S3" is currently handled
        :param sitename: The site name (falls back to config `sitename`)
        :param purge_files: if True, it will delete old remote files first
        :return: the website endpoint url (S3 target)
        """
        self.build()
        endpoint = self.config.get("hosting.%s" % target)
        if target.upper() == "S3":
            p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
                                    aws_access_key_id=endpoint.get("aws_access_key_id"),
                                    aws_secret_access_key=endpoint.get("aws_secret_access_key"),
                                    region=endpoint.get("aws_region"))
            if not p.website_exists:
                if p.create_website() is True:
                    # Need to give it enough time to create it
                    # Should be a one time thing
                    time.sleep(10)
                    p.create_www_website()
            p.create_manifest_from_s3_files()
            if purge_files:
                # remove stale remote files, keeping any configured exclusions
                exclude_files = endpoint.get("purge_exclude_files", [])
                p.purge_files(exclude_files=exclude_files)
            p.upload(self.build_dir)
            return p.website_endpoint_url
|
mardix/Yass | yass/yass.py | Yass._link_to | python | def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
) | Build the A HREF LINK To a page. | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L167-L180 | null | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
def __init__(self, root_dir, config=None):
"""
:param root_dir: The application root dir
:param config: (dict), Dict configuration, will override previously set data
"""
self.root_dir = root_dir
self.build_dir = os.path.join(self.root_dir, "build")
self.static_dir = os.path.join(self.root_dir, "static")
self.content_dir = os.path.join(self.root_dir, "content")
self.pages_dir = os.path.join(self.root_dir, "pages")
self.templates_dir = os.path.join(self.root_dir, "templates")
self.data_dir = os.path.join(self.root_dir, "data")
self.build_static_dir = os.path.join(self.build_dir, "static")
config_file = os.path.join(self.root_dir, "yass.yml")
self.config = utils.load_conf(config_file, config)
self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
self.site_config = utils.dictdot(self.config.get("site", {}))
self.site_config.setdefault("base_url", "/")
self.base_url = self.site_config.get("base_url")
self.sitename = utils.extract_sitename(self.config.get("sitename"))
self._data = self._load_data()
self._init_jinja({
"site": self.site_config,
"data": self._data,
"__YASS__": self._yass_vars()
})
self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
def _init_jinja(self, global_context={}):
loader = jinja2.ChoiceLoader([
# global macros
jinja2.DictLoader({
"yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
}),
jinja2.FileSystemLoader(self.templates_dir)
])
self.tpl_env = jinja2.Environment(loader=loader,
extensions=[
'pyjade.ext.jinja.PyJadeExtension',
'yass.extras.htmlcompress.HTMLCompress',
'yass.extras.jade.JadeTagExtension',
'yass.extras.md.MarkdownExtension',
'yass.extras.md.MarkdownTagExtension',
AssetsExtension
])
self.tpl_env.globals.update(global_context)
self.tpl_env.filters.update({
"format_datetime": lambda dt, format: arrow.get(dt).format(format),
"yass_link_to": self._link_to, # link for a
"yass_url_to": self._url_to # url for a page
})
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
def _load_data(self):
data = {}
# Load data from the data directory
for root, _, files in os.walk(self.data_dir):
for fname in files:
if fname.endswith((".json",)):
name = fname.replace(".json", "")
fname = os.path.join(root, fname)
if os.path.isfile(fname):
with open(fname) as f:
_ = json.load(f)
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
# data_api_urls
# Doing API call to retrieve the data and assign it to its key
# Data must be JSON
data_api_urls = self.site_config.get("data_api_urls")
if data_api_urls:
for name, url in data_api_urls.items():
try:
r = requests.get(url)
if r.status_code == 200:
_ = r.json()
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
else:
raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
except Exception as e:
raise Exception("Data API URLS Error: %s" % e)
return utils.dictdot(data)
def _init_webassets(self):
assets_env = WAEnv(directory="./static",
url=self.config.get("static_url", "/static"))
bundles = self.config.get("assets_bundles", {})
assets_env.register(bundles)
self.tpl_env.assets_environment = assets_env
self.webassets_cmd = None
if bundles:
handler = logging.StreamHandler if self.config.get("debug", False) \
else logging.NullHandler
log = logging.getLogger('webassets')
log.addHandler(handler())
log.setLevel(logging.DEBUG)
self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir)
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build()
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
"""
To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return:
"""
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context))
def build(self):
self.clean_build_dir()
if not os.path.isdir(self.build_dir):
os.makedirs(self.build_dir)
self.build_static()
self.build_pages()
def publish(self, target="S3", sitename=None, purge_files=True):
"""
To publish programatically
:param target: Where to pusblish at, S3
:param sitename: The site name
:param purge_files: if True, it will delete old files
:return:
"""
self.build()
endpoint = self.config.get("hosting.%s" % target)
if target.upper() == "S3":
p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
p.create_manifest_from_s3_files()
if purge_files:
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
p.upload(self.build_dir)
return p.website_endpoint_url
|
mardix/Yass | yass/yass.py | Yass._url_to | python | def _url_to(self, page):
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url") | Get the url of a page | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L182-L189 | null | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
def __init__(self, root_dir, config=None):
"""
:param root_dir: The application root dir
:param config: (dict), Dict configuration, will override previously set data
"""
self.root_dir = root_dir
self.build_dir = os.path.join(self.root_dir, "build")
self.static_dir = os.path.join(self.root_dir, "static")
self.content_dir = os.path.join(self.root_dir, "content")
self.pages_dir = os.path.join(self.root_dir, "pages")
self.templates_dir = os.path.join(self.root_dir, "templates")
self.data_dir = os.path.join(self.root_dir, "data")
self.build_static_dir = os.path.join(self.build_dir, "static")
config_file = os.path.join(self.root_dir, "yass.yml")
self.config = utils.load_conf(config_file, config)
self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
self.site_config = utils.dictdot(self.config.get("site", {}))
self.site_config.setdefault("base_url", "/")
self.base_url = self.site_config.get("base_url")
self.sitename = utils.extract_sitename(self.config.get("sitename"))
self._data = self._load_data()
self._init_jinja({
"site": self.site_config,
"data": self._data,
"__YASS__": self._yass_vars()
})
self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
def _init_jinja(self, global_context={}):
loader = jinja2.ChoiceLoader([
# global macros
jinja2.DictLoader({
"yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
}),
jinja2.FileSystemLoader(self.templates_dir)
])
self.tpl_env = jinja2.Environment(loader=loader,
extensions=[
'pyjade.ext.jinja.PyJadeExtension',
'yass.extras.htmlcompress.HTMLCompress',
'yass.extras.jade.JadeTagExtension',
'yass.extras.md.MarkdownExtension',
'yass.extras.md.MarkdownTagExtension',
AssetsExtension
])
self.tpl_env.globals.update(global_context)
self.tpl_env.filters.update({
"format_datetime": lambda dt, format: arrow.get(dt).format(format),
"yass_link_to": self._link_to, # link for a
"yass_url_to": self._url_to # url for a page
})
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
def _load_data(self):
data = {}
# Load data from the data directory
for root, _, files in os.walk(self.data_dir):
for fname in files:
if fname.endswith((".json",)):
name = fname.replace(".json", "")
fname = os.path.join(root, fname)
if os.path.isfile(fname):
with open(fname) as f:
_ = json.load(f)
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
# data_api_urls
# Doing API call to retrieve the data and assign it to its key
# Data must be JSON
data_api_urls = self.site_config.get("data_api_urls")
if data_api_urls:
for name, url in data_api_urls.items():
try:
r = requests.get(url)
if r.status_code == 200:
_ = r.json()
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
else:
raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
except Exception as e:
raise Exception("Data API URLS Error: %s" % e)
return utils.dictdot(data)
def _init_webassets(self):
assets_env = WAEnv(directory="./static",
url=self.config.get("static_url", "/static"))
bundles = self.config.get("assets_bundles", {})
assets_env.register(bundles)
self.tpl_env.assets_environment = assets_env
self.webassets_cmd = None
if bundles:
handler = logging.StreamHandler if self.config.get("debug", False) \
else logging.NullHandler
log = logging.getLogger('webassets')
log.addHandler(handler())
log.setLevel(logging.DEBUG)
self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir)
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build()
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
"""
To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return:
"""
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context))
def build(self):
self.clean_build_dir()
if not os.path.isdir(self.build_dir):
os.makedirs(self.build_dir)
self.build_static()
self.build_pages()
def publish(self, target="S3", sitename=None, purge_files=True):
"""
To publish programatically
:param target: Where to pusblish at, S3
:param sitename: The site name
:param purge_files: if True, it will delete old files
:return:
"""
self.build()
endpoint = self.config.get("hosting.%s" % target)
if target.upper() == "S3":
p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
p.create_manifest_from_s3_files()
if purge_files:
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
p.upload(self.build_dir)
return p.website_endpoint_url
|
mardix/Yass | yass/yass.py | Yass._get_dest_file_and_url | python | def _get_dest_file_and_url(self, filepath, page_meta={}):
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url | Return tuple of the file destination and url | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L191-L211 | null | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
    def __init__(self, root_dir, config=None):
        """Initialize the site generator rooted at *root_dir*.

        :param root_dir: The application root dir
        :param config: (dict), Dict configuration, will override previously set data
        """
        self.root_dir = root_dir
        # Conventional project layout, all relative to root_dir.
        self.build_dir = os.path.join(self.root_dir, "build")
        self.static_dir = os.path.join(self.root_dir, "static")
        self.content_dir = os.path.join(self.root_dir, "content")
        self.pages_dir = os.path.join(self.root_dir, "pages")
        self.templates_dir = os.path.join(self.root_dir, "templates")
        self.data_dir = os.path.join(self.root_dir, "data")
        self.build_static_dir = os.path.join(self.build_dir, "static")
        # yass.yml provides the base config; `config` overrides it.
        config_file = os.path.join(self.root_dir, "yass.yml")
        self.config = utils.load_conf(config_file, config)
        self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
        self.site_config = utils.dictdot(self.config.get("site", {}))
        self.site_config.setdefault("base_url", "/")
        self.base_url = self.site_config.get("base_url")
        self.sitename = utils.extract_sitename(self.config.get("sitename"))
        # Order matters: data must be loaded before the Jinja globals
        # are built from it.
        self._data = self._load_data()
        self._init_jinja({
            "site": self.site_config,
            "data": self._data,
            "__YASS__": self._yass_vars()
        })
        self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
    def _init_jinja(self, global_context={}):
        """Create the Jinja2 environment used to render every page.

        A DictLoader with the bundled global macros is consulted before
        the FileSystemLoader over ``templates/``. Entries of
        ``global_context`` become Jinja globals in all templates.

        NOTE(review): the mutable ``{}`` default is shared across calls;
        harmless here since it is only read, never mutated.
        """
        loader = jinja2.ChoiceLoader([
            # global macros
            jinja2.DictLoader({
                "yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
            }),
            jinja2.FileSystemLoader(self.templates_dir)
        ])
        self.tpl_env = jinja2.Environment(loader=loader,
                                          extensions=[
                                              'pyjade.ext.jinja.PyJadeExtension',
                                              'yass.extras.htmlcompress.HTMLCompress',
                                              'yass.extras.jade.JadeTagExtension',
                                              'yass.extras.md.MarkdownExtension',
                                              'yass.extras.md.MarkdownTagExtension',
                                              AssetsExtension
                                          ])
        self.tpl_env.globals.update(global_context)
        self.tpl_env.filters.update({
            "format_datetime": lambda dt, format: arrow.get(dt).format(format),
            "yass_link_to": self._link_to,  # renders an <a> tag for a page
            "yass_url_to": self._url_to  # url for a page
        })
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
    def _load_data(self):
        """Load template data from ``data/*.json`` files and remote APIs.

        Returns a ``dictdot`` mapping each source name to its parsed
        JSON payload: file entries keyed by file name (sans extension),
        API entries keyed by their name under ``site.data_api_urls``.
        """
        data = {}
        # Load data from the data directory
        for root, _, files in os.walk(self.data_dir):
            for fname in files:
                if fname.endswith((".json",)):
                    name = fname.replace(".json", "")
                    fname = os.path.join(root, fname)
                    if os.path.isfile(fname):
                        with open(fname) as f:
                            _ = json.load(f)
                            if isinstance(_, dict):
                                # Wrap dicts for dotted-key access.
                                _ = utils.dictdot(_)
                            data[name] = _
        # data_api_urls
        # Doing API call to retrieve the data and assign it to its key
        # Data must be JSON
        data_api_urls = self.site_config.get("data_api_urls")
        if data_api_urls:
            for name, url in data_api_urls.items():
                try:
                    r = requests.get(url)
                    if r.status_code == 200:
                        _ = r.json()
                        if isinstance(_, dict):
                            _ = utils.dictdot(_)
                        data[name] = _
                    else:
                        # NOTE(review): this raise is caught by the
                        # except below and re-wrapped, so the message
                        # ends up prefixed twice.
                        raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
                except Exception as e:
                    raise Exception("Data API URLS Error: %s" % e)
        return utils.dictdot(data)
def _init_webassets(self):
assets_env = WAEnv(directory="./static",
url=self.config.get("static_url", "/static"))
bundles = self.config.get("assets_bundles", {})
assets_env.register(bundles)
self.tpl_env.assets_environment = assets_env
self.webassets_cmd = None
if bundles:
handler = logging.StreamHandler if self.config.get("debug", False) \
else logging.NullHandler
log = logging.getLogger('webassets')
log.addHandler(handler())
log.setLevel(logging.DEBUG)
self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir)
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build()
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
"""
To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return:
"""
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context))
def build(self):
self.clean_build_dir()
if not os.path.isdir(self.build_dir):
os.makedirs(self.build_dir)
self.build_static()
self.build_pages()
def publish(self, target="S3", sitename=None, purge_files=True):
"""
To publish programatically
:param target: Where to pusblish at, S3
:param sitename: The site name
:param purge_files: if True, it will delete old files
:return:
"""
self.build()
endpoint = self.config.get("hosting.%s" % target)
if target.upper() == "S3":
p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
p.create_manifest_from_s3_files()
if purge_files:
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
p.upload(self.build_dir)
return p.website_endpoint_url
|
def build_static(self):
    """Build static files: mirror ``static/`` into the build output and
    compile webassets bundles when configured."""
    out_dir = self.build_static_dir
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    copy_tree(self.static_dir, out_dir)
    builder = self.webassets_cmd
    if builder:
        builder.build()
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
def __init__(self, root_dir, config=None):
"""
:param root_dir: The application root dir
:param config: (dict), Dict configuration, will override previously set data
"""
self.root_dir = root_dir
self.build_dir = os.path.join(self.root_dir, "build")
self.static_dir = os.path.join(self.root_dir, "static")
self.content_dir = os.path.join(self.root_dir, "content")
self.pages_dir = os.path.join(self.root_dir, "pages")
self.templates_dir = os.path.join(self.root_dir, "templates")
self.data_dir = os.path.join(self.root_dir, "data")
self.build_static_dir = os.path.join(self.build_dir, "static")
config_file = os.path.join(self.root_dir, "yass.yml")
self.config = utils.load_conf(config_file, config)
self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
self.site_config = utils.dictdot(self.config.get("site", {}))
self.site_config.setdefault("base_url", "/")
self.base_url = self.site_config.get("base_url")
self.sitename = utils.extract_sitename(self.config.get("sitename"))
self._data = self._load_data()
self._init_jinja({
"site": self.site_config,
"data": self._data,
"__YASS__": self._yass_vars()
})
self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
def _init_jinja(self, global_context={}):
loader = jinja2.ChoiceLoader([
# global macros
jinja2.DictLoader({
"yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
}),
jinja2.FileSystemLoader(self.templates_dir)
])
self.tpl_env = jinja2.Environment(loader=loader,
extensions=[
'pyjade.ext.jinja.PyJadeExtension',
'yass.extras.htmlcompress.HTMLCompress',
'yass.extras.jade.JadeTagExtension',
'yass.extras.md.MarkdownExtension',
'yass.extras.md.MarkdownTagExtension',
AssetsExtension
])
self.tpl_env.globals.update(global_context)
self.tpl_env.filters.update({
"format_datetime": lambda dt, format: arrow.get(dt).format(format),
"yass_link_to": self._link_to, # link for a
"yass_url_to": self._url_to # url for a page
})
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
def _load_data(self):
data = {}
# Load data from the data directory
for root, _, files in os.walk(self.data_dir):
for fname in files:
if fname.endswith((".json",)):
name = fname.replace(".json", "")
fname = os.path.join(root, fname)
if os.path.isfile(fname):
with open(fname) as f:
_ = json.load(f)
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
# data_api_urls
# Doing API call to retrieve the data and assign it to its key
# Data must be JSON
data_api_urls = self.site_config.get("data_api_urls")
if data_api_urls:
for name, url in data_api_urls.items():
try:
r = requests.get(url)
if r.status_code == 200:
_ = r.json()
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
else:
raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
except Exception as e:
raise Exception("Data API URLS Error: %s" % e)
return utils.dictdot(data)
def _init_webassets(self):
assets_env = WAEnv(directory="./static",
url=self.config.get("static_url", "/static"))
bundles = self.config.get("assets_bundles", {})
assets_env.register(bundles)
self.tpl_env.assets_environment = assets_env
self.webassets_cmd = None
if bundles:
handler = logging.StreamHandler if self.config.get("debug", False) \
else logging.NullHandler
log = logging.getLogger('webassets')
log.addHandler(handler())
log.setLevel(logging.DEBUG)
self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
"""
To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return:
"""
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context))
    def build(self):
        """Run a full site build.

        Wipes and recreates the build directory, copies static assets,
        then renders every page under pages/.
        """
        self.clean_build_dir()
        # clean_build_dir only recreates the dir when it pre-existed;
        # this guard covers a first-ever build.
        if not os.path.isdir(self.build_dir):
            os.makedirs(self.build_dir)
        self.build_static()
        self.build_pages()
def publish(self, target="S3", sitename=None, purge_files=True):
"""
To publish programatically
:param target: Where to pusblish at, S3
:param sitename: The site name
:param purge_files: if True, it will delete old files
:return:
"""
self.build()
endpoint = self.config.get("hosting.%s" % target)
if target.upper() == "S3":
p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
p.create_manifest_from_s3_files()
if purge_files:
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
p.upload(self.build_dir)
return p.website_endpoint_url
|
mardix/Yass | yass/yass.py | Yass.build_pages | python | def build_pages(self):
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file) | Iterate over the pages_dir and build the pages | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L280-L287 | [
"def _build_page(self, filepath):\n \"\"\" To build from filepath, relative to pages_dir \"\"\"\n filename = filepath.split(\"/\")[-1]\n # If filename starts with _ (underscore) or . (dot) do not build\n if not filename.startswith((\"_\", \".\")) and (filename.endswith(PAGE_FORMAT)):\n meta = sel... | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
    def __init__(self, root_dir, config=None):
        """Wire up a Yass site rooted at *root_dir*.

        Resolves the conventional directory layout (build/, static/,
        content/, pages/, templates/, data/), loads ``yass.yml`` merged
        with *config*, loads data files/APIs, then initializes the Jinja
        and webassets environments.

        :param root_dir: The application root dir
        :param config: (dict), Dict configuration, will override previously set data
        """
        self.root_dir = root_dir
        # Conventional project layout, all relative to the root dir.
        self.build_dir = os.path.join(self.root_dir, "build")
        self.static_dir = os.path.join(self.root_dir, "static")
        self.content_dir = os.path.join(self.root_dir, "content")
        self.pages_dir = os.path.join(self.root_dir, "pages")
        self.templates_dir = os.path.join(self.root_dir, "templates")
        self.data_dir = os.path.join(self.root_dir, "data")
        self.build_static_dir = os.path.join(self.build_dir, "static")
        config_file = os.path.join(self.root_dir, "yass.yml")
        # File config first; the `config` dict overrides it.
        self.config = utils.load_conf(config_file, config)
        self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
        # Dot-accessible view of the `site:` section; base_url defaults to "/".
        self.site_config = utils.dictdot(self.config.get("site", {}))
        self.site_config.setdefault("base_url", "/")
        self.base_url = self.site_config.get("base_url")
        self.sitename = utils.extract_sitename(self.config.get("sitename"))
        # Data must be loaded before Jinja init: it is injected as a global.
        self._data = self._load_data()
        self._init_jinja({
            "site": self.site_config,
            "data": self._data,
            "__YASS__": self._yass_vars()
        })
        self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
    def _init_jinja(self, global_context={}):
        """Create the Jinja2 environment used to render all pages.

        Templates resolve first against a built-in ``yass.macros``
        dictionary loader, then against the project templates/ dir. Jade
        and Markdown extensions plus the webassets tag are enabled, and
        *global_context* is merged into the template globals.

        NOTE(review): the mutable ``{}`` default is only read here (via
        ``globals.update``), never mutated, so it is harmless.
        """
        loader = jinja2.ChoiceLoader([
            # global macros, shipped inside the package itself
            jinja2.DictLoader({
                "yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
            }),
            jinja2.FileSystemLoader(self.templates_dir)
        ])
        self.tpl_env = jinja2.Environment(loader=loader,
                                          extensions=[
                                              'pyjade.ext.jinja.PyJadeExtension',
                                              'yass.extras.htmlcompress.HTMLCompress',
                                              'yass.extras.jade.JadeTagExtension',
                                              'yass.extras.md.MarkdownExtension',
                                              'yass.extras.md.MarkdownTagExtension',
                                              AssetsExtension
                                          ])
        self.tpl_env.globals.update(global_context)
        self.tpl_env.filters.update({
            "format_datetime": lambda dt, format: arrow.get(dt).format(format),
            "yass_link_to": self._link_to,  # renders an <a> tag for a page
            "yass_url_to": self._url_to  # returns the url of a page
        })
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
    def _load_data(self):
        """Load site data exposed to templates as the ``data`` global.

        Sources, in order:
        1. every ``*.json`` file under data/, keyed by file name (minus
           the extension);
        2. each URL in ``site.data_api_urls``, fetched over HTTP and keyed
           by its config name; any non-200 response raises.

        :return: a dot-accessible dict of all loaded data.
        """
        data = {}
        # Load data from the data directory
        for root, _, files in os.walk(self.data_dir):
            for fname in files:
                if fname.endswith((".json",)):
                    name = fname.replace(".json", "")
                    fname = os.path.join(root, fname)
                    if os.path.isfile(fname):
                        with open(fname) as f:
                            _ = json.load(f)
                            if isinstance(_, dict):
                                # dict payloads get dot-access (data.key.subkey)
                                _ = utils.dictdot(_)
                            data[name] = _
        # data_api_urls
        # Doing API call to retrieve the data and assign it to its key
        # Data must be JSON
        data_api_urls = self.site_config.get("data_api_urls")
        if data_api_urls:
            for name, url in data_api_urls.items():
                try:
                    r = requests.get(url)
                    if r.status_code == 200:
                        _ = r.json()
                        if isinstance(_, dict):
                            _ = utils.dictdot(_)
                        data[name] = _
                    else:
                        raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
                except Exception as e:
                    # Any failure (network, bad JSON, non-200) aborts the build.
                    raise Exception("Data API URLS Error: %s" % e)
        return utils.dictdot(data)
    def _init_webassets(self):
        """Set up the webassets environment behind the ``{% assets %}`` tag.

        Bundles come from the ``assets_bundles`` config key. When any
        exist, a CLI environment is kept in ``self.webassets_cmd`` so
        ``build_static`` can compile them (verbose logging in debug mode).
        """
        assets_env = WAEnv(directory="./static",
                           url=self.config.get("static_url", "/static"))
        bundles = self.config.get("assets_bundles", {})
        assets_env.register(bundles)
        self.tpl_env.assets_environment = assets_env
        self.webassets_cmd = None
        if bundles:
            # Log to stderr when debugging, otherwise stay silent.
            handler = logging.StreamHandler if self.config.get("debug", False) \
                else logging.NullHandler
            log = logging.getLogger('webassets')
            log.addHandler(handler())
            log.setLevel(logging.DEBUG)
            self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir)
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build()
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
"""
To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return:
"""
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context))
def build(self):
self.clean_build_dir()
if not os.path.isdir(self.build_dir):
os.makedirs(self.build_dir)
self.build_static()
self.build_pages()
def publish(self, target="S3", sitename=None, purge_files=True):
"""
To publish programatically
:param target: Where to pusblish at, S3
:param sitename: The site name
:param purge_files: if True, it will delete old files
:return:
"""
self.build()
endpoint = self.config.get("hosting.%s" % target)
if target.upper() == "S3":
p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
p.create_manifest_from_s3_files()
if purge_files:
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
p.upload(self.build_dir)
return p.website_endpoint_url
|
mardix/Yass | yass/yass.py | Yass._build_page | python | def _build_page(self, filepath):
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page) | To build from filepath, relative to pages_dir | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L289-L396 | null | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
def __init__(self, root_dir, config=None):
"""
:param root_dir: The application root dir
:param config: (dict), Dict configuration, will override previously set data
"""
self.root_dir = root_dir
self.build_dir = os.path.join(self.root_dir, "build")
self.static_dir = os.path.join(self.root_dir, "static")
self.content_dir = os.path.join(self.root_dir, "content")
self.pages_dir = os.path.join(self.root_dir, "pages")
self.templates_dir = os.path.join(self.root_dir, "templates")
self.data_dir = os.path.join(self.root_dir, "data")
self.build_static_dir = os.path.join(self.build_dir, "static")
config_file = os.path.join(self.root_dir, "yass.yml")
self.config = utils.load_conf(config_file, config)
self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
self.site_config = utils.dictdot(self.config.get("site", {}))
self.site_config.setdefault("base_url", "/")
self.base_url = self.site_config.get("base_url")
self.sitename = utils.extract_sitename(self.config.get("sitename"))
self._data = self._load_data()
self._init_jinja({
"site": self.site_config,
"data": self._data,
"__YASS__": self._yass_vars()
})
self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
def _init_jinja(self, global_context={}):
loader = jinja2.ChoiceLoader([
# global macros
jinja2.DictLoader({
"yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
}),
jinja2.FileSystemLoader(self.templates_dir)
])
self.tpl_env = jinja2.Environment(loader=loader,
extensions=[
'pyjade.ext.jinja.PyJadeExtension',
'yass.extras.htmlcompress.HTMLCompress',
'yass.extras.jade.JadeTagExtension',
'yass.extras.md.MarkdownExtension',
'yass.extras.md.MarkdownTagExtension',
AssetsExtension
])
self.tpl_env.globals.update(global_context)
self.tpl_env.filters.update({
"format_datetime": lambda dt, format: arrow.get(dt).format(format),
"yass_link_to": self._link_to, # link for a
"yass_url_to": self._url_to # url for a page
})
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
def _load_data(self):
data = {}
# Load data from the data directory
for root, _, files in os.walk(self.data_dir):
for fname in files:
if fname.endswith((".json",)):
name = fname.replace(".json", "")
fname = os.path.join(root, fname)
if os.path.isfile(fname):
with open(fname) as f:
_ = json.load(f)
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
# data_api_urls
# Doing API call to retrieve the data and assign it to its key
# Data must be JSON
data_api_urls = self.site_config.get("data_api_urls")
if data_api_urls:
for name, url in data_api_urls.items():
try:
r = requests.get(url)
if r.status_code == 200:
_ = r.json()
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
else:
raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
except Exception as e:
raise Exception("Data API URLS Error: %s" % e)
return utils.dictdot(data)
def _init_webassets(self):
assets_env = WAEnv(directory="./static",
url=self.config.get("static_url", "/static"))
bundles = self.config.get("assets_bundles", {})
assets_env.register(bundles)
self.tpl_env.assets_environment = assets_env
self.webassets_cmd = None
if bundles:
handler = logging.StreamHandler if self.config.get("debug", False) \
else logging.NullHandler
log = logging.getLogger('webassets')
log.addHandler(handler())
log.setLevel(logging.DEBUG)
self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir)
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build()
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
"""
To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return:
"""
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context))
def build(self):
self.clean_build_dir()
if not os.path.isdir(self.build_dir):
os.makedirs(self.build_dir)
self.build_static()
self.build_pages()
def publish(self, target="S3", sitename=None, purge_files=True):
"""
To publish programatically
:param target: Where to pusblish at, S3
:param sitename: The site name
:param purge_files: if True, it will delete old files
:return:
"""
self.build()
endpoint = self.config.get("hosting.%s" % target)
if target.upper() == "S3":
p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
p.create_manifest_from_s3_files()
if purge_files:
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
p.upload(self.build_dir)
return p.website_endpoint_url
|
mardix/Yass | yass/yass.py | Yass.create_page | python | def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context)) | To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return: | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L398-L471 | null | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
def __init__(self, root_dir, config=None):
"""
:param root_dir: The application root dir
:param config: (dict), Dict configuration, will override previously set data
"""
self.root_dir = root_dir
self.build_dir = os.path.join(self.root_dir, "build")
self.static_dir = os.path.join(self.root_dir, "static")
self.content_dir = os.path.join(self.root_dir, "content")
self.pages_dir = os.path.join(self.root_dir, "pages")
self.templates_dir = os.path.join(self.root_dir, "templates")
self.data_dir = os.path.join(self.root_dir, "data")
self.build_static_dir = os.path.join(self.build_dir, "static")
config_file = os.path.join(self.root_dir, "yass.yml")
self.config = utils.load_conf(config_file, config)
self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
self.site_config = utils.dictdot(self.config.get("site", {}))
self.site_config.setdefault("base_url", "/")
self.base_url = self.site_config.get("base_url")
self.sitename = utils.extract_sitename(self.config.get("sitename"))
self._data = self._load_data()
self._init_jinja({
"site": self.site_config,
"data": self._data,
"__YASS__": self._yass_vars()
})
self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
def _init_jinja(self, global_context={}):
loader = jinja2.ChoiceLoader([
# global macros
jinja2.DictLoader({
"yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
}),
jinja2.FileSystemLoader(self.templates_dir)
])
self.tpl_env = jinja2.Environment(loader=loader,
extensions=[
'pyjade.ext.jinja.PyJadeExtension',
'yass.extras.htmlcompress.HTMLCompress',
'yass.extras.jade.JadeTagExtension',
'yass.extras.md.MarkdownExtension',
'yass.extras.md.MarkdownTagExtension',
AssetsExtension
])
self.tpl_env.globals.update(global_context)
self.tpl_env.filters.update({
"format_datetime": lambda dt, format: arrow.get(dt).format(format),
"yass_link_to": self._link_to, # link for a
"yass_url_to": self._url_to # url for a page
})
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
def _load_data(self):
data = {}
# Load data from the data directory
for root, _, files in os.walk(self.data_dir):
for fname in files:
if fname.endswith((".json",)):
name = fname.replace(".json", "")
fname = os.path.join(root, fname)
if os.path.isfile(fname):
with open(fname) as f:
_ = json.load(f)
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
# data_api_urls
# Doing API call to retrieve the data and assign it to its key
# Data must be JSON
data_api_urls = self.site_config.get("data_api_urls")
if data_api_urls:
for name, url in data_api_urls.items():
try:
r = requests.get(url)
if r.status_code == 200:
_ = r.json()
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
else:
raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
except Exception as e:
raise Exception("Data API URLS Error: %s" % e)
return utils.dictdot(data)
def _init_webassets(self):
assets_env = WAEnv(directory="./static",
url=self.config.get("static_url", "/static"))
bundles = self.config.get("assets_bundles", {})
assets_env.register(bundles)
self.tpl_env.assets_environment = assets_env
self.webassets_cmd = None
if bundles:
handler = logging.StreamHandler if self.config.get("debug", False) \
else logging.NullHandler
log = logging.getLogger('webassets')
log.addHandler(handler())
log.setLevel(logging.DEBUG)
self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir)
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build()
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
def build(self):
self.clean_build_dir()
if not os.path.isdir(self.build_dir):
os.makedirs(self.build_dir)
self.build_static()
self.build_pages()
def publish(self, target="S3", sitename=None, purge_files=True):
"""
To publish programatically
:param target: Where to pusblish at, S3
:param sitename: The site name
:param purge_files: if True, it will delete old files
:return:
"""
self.build()
endpoint = self.config.get("hosting.%s" % target)
if target.upper() == "S3":
p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
p.create_manifest_from_s3_files()
if purge_files:
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
p.upload(self.build_dir)
return p.website_endpoint_url
|
mardix/Yass | yass/yass.py | Yass.publish | python | def publish(self, target="S3", sitename=None, purge_files=True):
self.build()
endpoint = self.config.get("hosting.%s" % target)
if target.upper() == "S3":
p = publisher.S3Website(sitename=sitename or self.config.get("sitename"),
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
p.create_manifest_from_s3_files()
if purge_files:
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
p.upload(self.build_dir)
return p.website_endpoint_url | To publish programatically
:param target: Where to pusblish at, S3
:param sitename: The site name
:param purge_files: if True, it will delete old files
:return: | train | https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/yass.py#L480-L511 | [
"def build(self):\n self.clean_build_dir()\n if not os.path.isdir(self.build_dir):\n os.makedirs(self.build_dir)\n self.build_static()\n self.build_pages()\n"
] | class Yass(object):
RE_BLOCK_BODY = re.compile(r'{%\s*block\s+body\s*%}')
RE_BLOCK_BODY_PARSED = re.compile(r'{%\s*block\s+body\s*%}(.*?){%\s*endblock\s*%}')
RE_EXTENDS = re.compile(r'{%\s*extends\s+(.*?)\s*%}')
default_page_meta = {
"title": "", # The title of the page
"markup": None, # The markup to use. ie: md | jade | html (default)
"slug": None, # The pretty url new name of the file. A file with the same name will be created
"url": "", # This will be added when processed. Should never be modified
"description": "", # Page description
"pretty_url": True, # By default, all url will be pretty (search engine friendly) Set to False to keep the .html
"meta": {},
"layout": None, # The layout for the page
"template": None # The page template.
}
tpl_env = None
_templates = {}
_pages_meta = {}
def __init__(self, root_dir, config=None):
"""
:param root_dir: The application root dir
:param config: (dict), Dict configuration, will override previously set data
"""
self.root_dir = root_dir
self.build_dir = os.path.join(self.root_dir, "build")
self.static_dir = os.path.join(self.root_dir, "static")
self.content_dir = os.path.join(self.root_dir, "content")
self.pages_dir = os.path.join(self.root_dir, "pages")
self.templates_dir = os.path.join(self.root_dir, "templates")
self.data_dir = os.path.join(self.root_dir, "data")
self.build_static_dir = os.path.join(self.build_dir, "static")
config_file = os.path.join(self.root_dir, "yass.yml")
self.config = utils.load_conf(config_file, config)
self.default_layout = self.config.get("default_layout", DEFAULT_LAYOUT)
self.site_config = utils.dictdot(self.config.get("site", {}))
self.site_config.setdefault("base_url", "/")
self.base_url = self.site_config.get("base_url")
self.sitename = utils.extract_sitename(self.config.get("sitename"))
self._data = self._load_data()
self._init_jinja({
"site": self.site_config,
"data": self._data,
"__YASS__": self._yass_vars()
})
self._init_webassets()
def _yass_vars(self):
""" Global variables """
utc = arrow.utcnow()
return {
"NAME": __title__,
"VERSION": __version__,
"URL": __uri__,
"GENERATOR": "%s %s" % (__title__, __version__),
"YEAR": utc.year
}
def _init_jinja(self, global_context={}):
loader = jinja2.ChoiceLoader([
# global macros
jinja2.DictLoader({
"yass.macros": pkg_resources.resource_string(__name__, "extras/macros.html"),
}),
jinja2.FileSystemLoader(self.templates_dir)
])
self.tpl_env = jinja2.Environment(loader=loader,
extensions=[
'pyjade.ext.jinja.PyJadeExtension',
'yass.extras.htmlcompress.HTMLCompress',
'yass.extras.jade.JadeTagExtension',
'yass.extras.md.MarkdownExtension',
'yass.extras.md.MarkdownTagExtension',
AssetsExtension
])
self.tpl_env.globals.update(global_context)
self.tpl_env.filters.update({
"format_datetime": lambda dt, format: arrow.get(dt).format(format),
"yass_link_to": self._link_to, # link for a
"yass_url_to": self._url_to # url for a page
})
def _get_page_meta(self, page):
"""
Cache the page meta from the frontmatter and assign new keys
The cache data will be used to build links or other properties
"""
meta = self._pages_meta.get(page)
if not meta:
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_, _ext = os.path.splitext(src_file)
markup = _ext.replace(".", "")
_meta, _ = frontmatter.parse(f.read())
meta = self.default_page_meta.copy()
meta["meta"].update(self.config.get("site.meta", {}))
meta.update(_meta)
dest_file, url = self._get_dest_file_and_url(page, meta)
meta["url"] = url
meta["filepath"] = dest_file
if meta.get("markup") is None:
meta["markup"] = markup
self._pages_meta[page] = meta
return meta
def _get_page_content(self, page):
""" Get the page content without the frontmatter """
src_file = os.path.join(self.pages_dir, page)
with open(src_file) as f:
_meta, content = frontmatter.parse(f.read())
return content
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs):
""" Build the A HREF LINK To a page."""
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format(
url=meta.get("url", "/") + anchor,
text=text or meta.get("title") or title,
title=title or "",
_class=_class,
id=id
)
def _url_to(self, page):
""" Get the url of a page """
anchor = ""
if "#" in page:
page, anchor = page.split("#")
anchor = "#" + anchor
meta = self._get_page_meta(page)
return meta.get("url")
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url
def _load_data(self):
data = {}
# Load data from the data directory
for root, _, files in os.walk(self.data_dir):
for fname in files:
if fname.endswith((".json",)):
name = fname.replace(".json", "")
fname = os.path.join(root, fname)
if os.path.isfile(fname):
with open(fname) as f:
_ = json.load(f)
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
# data_api_urls
# Doing API call to retrieve the data and assign it to its key
# Data must be JSON
data_api_urls = self.site_config.get("data_api_urls")
if data_api_urls:
for name, url in data_api_urls.items():
try:
r = requests.get(url)
if r.status_code == 200:
_ = r.json()
if isinstance(_, dict):
_ = utils.dictdot(_)
data[name] = _
else:
raise Exception("`%s -> %s` returns status code %s" % (name, url, r.status_code))
except Exception as e:
raise Exception("Data API URLS Error: %s" % e)
return utils.dictdot(data)
def _init_webassets(self):
assets_env = WAEnv(directory="./static",
url=self.config.get("static_url", "/static"))
bundles = self.config.get("assets_bundles", {})
assets_env.register(bundles)
self.tpl_env.assets_environment = assets_env
self.webassets_cmd = None
if bundles:
handler = logging.StreamHandler if self.config.get("debug", False) \
else logging.NullHandler
log = logging.getLogger('webassets')
log.addHandler(handler())
log.setLevel(logging.DEBUG)
self.webassets_cmd = CommandLineEnvironment(assets_env, log)
def clean_build_dir(self):
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
def build_static(self):
""" Build static files """
if not os.path.isdir(self.build_static_dir):
os.makedirs(self.build_static_dir)
copy_tree(self.static_dir, self.build_static_dir)
if self.webassets_cmd:
self.webassets_cmd.build()
def build_pages(self):
"""Iterate over the pages_dir and build the pages """
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
def _build_page(self, filepath):
""" To build from filepath, relative to pages_dir """
filename = filepath.split("/")[-1]
# If filename starts with _ (underscore) or . (dot) do not build
if not filename.startswith(("_", ".")) and (filename.endswith(PAGE_FORMAT)):
meta = self._get_page_meta(filepath)
content = self._get_page_content(filepath)
# The default context for the page
_default_page = {
"build_dir": self.build_dir,
"filepath": meta["filepath"],
"context": {"page": meta},
"content": content,
"markup": meta.get("markup"),
"template": meta.get("template"),
"layout": meta.get("layout") or self.default_layout
}
# GENERATOR
# Allows to generate
_generator = meta.get("_generator")
if _generator:
data = self._data.get(_generator.get("data_source"))
# We want these back in meta in they exists in the data
special_meta = ["title", "slug", "description"]
# SINGLE
if _generator.get("type") == "single":
for d in data:
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
for _ in special_meta:
if _ in d:
dmeta[_] = d.get(_)
# If generator has the slug, it will substitute if
# Slug in the generator must have token from the data
# to generate the slug
if "slug" in _generator:
dmeta["slug"] = _generator.get("slug").format(**d)
# Slug is required
if "slug" not in dmeta:
print("WARNING: Skipping page because it's missing `slug`")
continue
slug = dmeta.get("slug")
dmeta["url"] = slug
dmeta["context"] = d
page.update({
"filepath": slug,
"context": {"page": dmeta}
})
self.create_page(**page)
if _generator.get("type") == "pagination":
per_page = int(_generator.get("per_page", self.site_config.get("pagination.per_page", 10)))
left_edge = int(_generator.get("left_edge", self.site_config.get("pagination.left_edge", 2)))
left_current = int(_generator.get("left_edge", self.site_config.get("pagination.left_current", 3)))
right_current = int(_generator.get("right_current", self.site_config.get("pagination.right_current", 4)))
right_edge = int(_generator.get("right_edge", self.site_config.get("pagination.right_edge", 2)))
padding = _generator.get("padding")
slug = _generator.get("slug")
limit = _generator.get("limit")
if "limit" in _generator:
data = data[:int(limit)]
data_chunks = utils.chunk_list(data, per_page)
len_data = len(data)
for i, d in enumerate(data_chunks):
dmeta = copy.deepcopy(meta)
page = copy.deepcopy(_default_page)
page_num = i + 1
_paginator = Paginator([],
total=len_data,
page=page_num,
per_page=per_page,
padding=padding,
left_edge=left_edge,
right_edge=right_edge,
left_current=left_current,
right_current=right_current)
_paginator.slug = slug
_paginator.index_slug = _generator.get("index_slug")
_slug = slug.format(**{"page_num": page_num})
dmeta["url"] = _slug
dmeta["context"] = d
dmeta["paginator"] = _paginator
page.update({
"filepath": _slug,
"context": {"page": dmeta}
})
self.create_page(**page)
# First page need to generate the index
if i == 0 and _generator.get("index_slug"):
page["filepath"] = _generator.get("index_slug")
self.create_page(**page)
# NORMAL PAGE
else:
self.create_page(**_default_page)
def create_page(self, build_dir, filepath, context={}, content=None, template=None, markup=None, layout=None):
"""
To dynamically create a page and save it in the build_dir
:param build_dir: (path) The base directory that will hold the created page
:param filepath: (string) the name of the file to create. May contain slash to indicate directory
It will also create the url based on that name
If the filename doesn't end with .html, it will create a subdirectory
and create `index.html`
If file contains `.html` it will stays as is
ie:
post/waldo/where-is-waldo/ -> post/waldo/where-is-waldo/index.html
another/music/new-rap-song.html -> another/music/new-rap-song.html
post/page/5 -> post/page/5/index.html
:param context: (dict) context data
:param content: (text) The content of the file to be created. Will be overriden by template
:param template: (path) if source is not provided, template can be used to create the page.
Along with context it allows to create dynamic pages.
The file is relative to `/templates/`
file can be in html|jade|md
:param markup: (string: html|jade|md), when using content. To indicate which markup to use.
based on the markup it will parse the data
html: will render as is
jade and md: convert to the appropriate format
:param layout: (string) when using content. The layout to use.
The file location is relative to `/templates/`
file can be in html|jade|md
:return:
"""
build_dir = build_dir.rstrip("/")
filepath = filepath.lstrip("/").rstrip("/")
if not filepath.endswith(".html"):
filepath += "/index.html"
dest_file = os.path.join(build_dir, filepath)
dest_dir = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
_context = context
if "page" not in _context:
_context["page"] = self.default_page_meta.copy()
if "url" not in _context["page"]:
_context["page"]["url"] = "/" + filepath.lstrip("/").replace(
"index.html", "")
if template:
if template not in self._templates:
self._templates[template] = self.tpl_env.get_template(template)
tpl = self._templates[template]
else:
if markup == "md":
_context["page"]["__toc__"] = md.get_toc(content)
content = md.convert(content)
elif markup == "jade":
content = jade.convert(content)
# Page must be extended by a layout and have a block 'body'
# These tags will be included if they are missing
if re.search(self.RE_EXTENDS, content) is None:
layout = layout or self.default_layout
content = "\n{% extends '{}' %} \n\n".replace("{}",
layout) + content
if re.search(self.RE_BLOCK_BODY, content) is None:
_layout_block = re.search(self.RE_EXTENDS, content).group(0)
content = content.replace(_layout_block, "")
content = "\n" + _layout_block + "\n" + \
"{% block body %} \n" + content.strip() + "\n{% endblock %}"
tpl = self.tpl_env.from_string(content)
with open(dest_file, "w") as fw:
fw.write(tpl.render(**_context))
def build(self):
self.clean_build_dir()
if not os.path.isdir(self.build_dir):
os.makedirs(self.build_dir)
self.build_static()
self.build_pages()
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_db_driver | python | def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust) | :param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L117-L135 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
    """Process-wide holder for a lazily created Neo4j session manager."""

    _instance = None
    _manager = None

    @classmethod
    def get_instance(cls):
        """Return the shared singleton, creating it on first access."""
        instance = cls._instance
        if instance is None:
            instance = cls._instance = cls()
        return instance

    def __init__(self):
        # Touch the property once so connection setup is attempted eagerly.
        self._manager = self.manager

    @property
    def manager(self):
        """Lazily initialised session manager; None when init_db() fails."""
        if self._manager is not None:
            return self._manager
        try:
            self._manager = init_db()
        except Exception as e:
            logger.error('Could not create manager: {}'.format(e))
            self._manager = None
        return self._manager

    @manager.setter
    def manager(self, manager):
        self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
            max_pool_size=MAX_POOL_SIZE):
    """Create a session manager and bootstrap the schema.

    Connects to Neo4j, ensures the uniqueness constraint on
    ``Node.handle_id`` and the ``name`` index exist, and returns the
    manager. Falls through (returns None) when *uri* is falsy, i.e. no
    connection is configured.

    :param uri: Bolt uri (defaults to the Django-provided setting)
    :param username: Neo4j username
    :param password: Neo4j password
    :param encrypted: Use TLS
    :param max_pool_size: Maximum number of idle sessions
    :raises ProtocolError: If the database cannot be reached.
    :rtype: norduniclient.contextmanager.Neo4jDBSessionManager or None
    """
    if uri:
        try:
            # Local import — presumably avoids a circular import at module
            # load time; confirm before moving it to the top of the file.
            from norduniclient.contextmanager import Neo4jDBSessionManager
            manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
                                            max_pool_size=max_pool_size)
            # Schema bootstrap; a failure is logged and re-raised unchanged.
            try:
                with manager.session as s:
                    s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
            except Exception as e:
                logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
                raise e
            try:
                create_index(manager, 'name')
            except Exception as e:
                logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
                raise e
            return manager
        except ProtocolError as e:
            # Connection-level failure; schema errors above propagate as-is.
            logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
            raise e
def query_to_dict(manager, query, **kwargs):
    """Run *query* and merge every record's key/value pairs into one dict.

    Later records overwrite earlier ones on key collision. ``kwargs`` are
    passed to the session as query parameters.
    """
    merged = {}
    with manager.session as session:
        for record in session.run(query, kwargs):
            merged.update(record.items())
    return merged
def query_to_list(manager, query, **kwargs):
    """Run *query* and return its records as a list of plain dicts.

    :param manager: Object exposing a ``session`` context manager
        (Neo4jDBSessionManager).
    :param query: Cypher query string.
    :param kwargs: Query parameters.
    :return: One dict per result record.
    :rtype: list
    """
    with manager.session as s:
        result = s.run(query, kwargs)
        # dict(record.items()) replaces the old per-key copy loop; this
        # also removes the ambiguous local name ``l`` (PEP 8 E741).
        return [dict(record.items()) for record in result]
def query_to_iterator(manager, query, **kwargs):
    """Run *query* and lazily yield each record as a plain dict."""
    with manager.session as session:
        for record in session.run(query, kwargs):
            yield dict(record.items())
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
    """
    Creates a node with the mandatory attributes name and handle_id also sets type label.

    :param manager: Manager to handle sessions and transactions
    :param name: Node name
    :param meta_type_label: Node meta type
    :param type_label: Node label
    :param handle_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type name: str|unicode
    :type meta_type_label: str|unicode
    :type type_label: str|unicode
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    :raises exceptions.MetaLabelNamingError: If meta_type_label is unknown.
    :raises ValueError: If type_label is not a plain identifier.
    """
    if meta_type_label not in META_TYPES:
        raise exceptions.MetaLabelNamingError(meta_type_label)
    # Labels cannot be sent as query parameters, so they are interpolated
    # into the Cypher text. Reject anything that is not a plain identifier
    # to close the label-injection hole (META_TYPES already bounds the
    # meta label above).
    if not re.match(r'^[A-Za-z_][A-Za-z0-9_]*$', type_label):
        raise ValueError('Invalid type label: {!r}'.format(type_label))
    q = """
        CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
        RETURN n
        """ % (meta_type_label, type_label)
    with manager.session as s:
        if legacy:
            return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
        return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
    """Fetch the node identified by *handle_id*.

    Returns the node's property dict when ``legacy`` is true, otherwise the
    raw neo4j node object.

    :raises exceptions.NodeNotFound: If no node matches.
    """
    q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
    with manager.session as session:
        record = session.run(q, {'handle_id': handle_id}).single()
        if not record:
            raise exceptions.NodeNotFound(manager, handle_id)
        node = record['n']
        return node.properties if legacy else node
def get_node_bundle(manager, handle_id=None, node=None):
    """
    Build the standard node bundle: property data, meta type and labels.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id (used when *node* is not supplied)
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict with 'data', 'labels' and (when present) 'meta_type'
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    d = {
        'data': node.properties
    }
    labels = list(node.labels)
    labels.remove('Node')  # All nodes have this label for indexing
    # The old code removed entries from ``labels`` while iterating over it,
    # which silently skipped the element right after every removed meta
    # label. Iterate over a copy so every label is inspected.
    for label in list(labels):
        if label in META_TYPES:
            d['meta_type'] = label
            labels.remove(label)
    d['labels'] = labels
    return d
def delete_node(manager, handle_id):
    """Remove the node identified by *handle_id* and all its relationships.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :rtype: bool
    """
    q = """
        MATCH (n:Node {handle_id: {handle_id}})
        OPTIONAL MATCH (n)-[r]-()
        DELETE n,r
        """
    with manager.session as session:
        session.run(q, {'handle_id': handle_id})
    return True
def get_relationship(manager, relationship_id, legacy=True):
    """Fetch a relationship by its internal Neo4j id.

    Returns the relationship's property dict when ``legacy`` is true,
    otherwise the raw relationship object.

    :raises exceptions.RelationshipNotFound: If no relationship matches.
    """
    q = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        RETURN r
        """
    with manager.session as session:
        record = session.run(q, {'relationship_id': int(relationship_id)}).single()
    if not record:
        raise exceptions.RelationshipNotFound(manager, int(relationship_id))
    rel = record['r']
    return rel.properties if legacy else rel
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
    """
    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j id
    :param legacy: Backwards compatibility
    :type relationship_id: int
    :type legacy: bool
    :rtype: dictionary
    """
    q = """
        MATCH (start)-[r]->(end)
        WHERE ID(r) = {relationship_id}
        RETURN start, r, end
        """
    rel_id = int(relationship_id)
    with manager.session as s:
        record = s.run(q, {'relationship_id': rel_id}).single()
    if record is None:
        raise exceptions.RelationshipNotFound(manager, rel_id)
    # Legacy mode exposes handle ids for the endpoints; otherwise the raw
    # node objects are returned.
    start, end = record['start'], record['end']
    if legacy:
        start = start.properties['handle_id']
        end = end.properties['handle_id']
    return {
        'type': record['r'].type,
        'id': rel_id,
        'data': record['r'].properties,
        'start': start,
        'end': end,
    }
def delete_relationship(manager, relationship_id):
    """Delete the relationship with the given internal Neo4j id.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool
    """
    q = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        DELETE r
        """
    with manager.session as session:
        session.run(q, {'relationship_id': int(relationship_id)})
    return True
def get_node_meta_type(manager, handle_id):
    """Return the meta type label ('Physical', 'Logical', 'Relation' or
    'Location') of the node identified by *handle_id*.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :raises exceptions.NoMetaLabelFound: If the node carries no meta label.
    :return: string
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    meta = next((label for label in node.labels if label in META_TYPES), None)
    if meta is None:
        raise exceptions.NoMetaLabelFound(handle_id)
    return meta
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type:
    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    if prop:
        # Exact equality match on one property, done in the database.
        # NOTE(review): ``prop`` and ``node_type`` are interpolated straight
        # into the Cypher text (labels/property names cannot be parameters);
        # only pass trusted, identifier-like strings here.
        q = """
            MATCH (n:{label})
            USING SCAN n:{label}
            WHERE n.{prop} = {{value}}
            RETURN distinct n
            """.format(label=node_type, prop=prop)
        with manager.session as s:
            for result in s.run(q, {'value': value}):
                yield result['n']
    else:
        # No property given: fetch every node of the label and regex-match
        # ``value`` (case-insensitively) against each property value in
        # Python; a node is yielded at most once thanks to the break.
        q = """
            MATCH (n:{label})
            RETURN n
            """.format(label=node_type)
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as s:
            for result in s.run(q):
                for v in result['n'].properties.values():
                    if pattern.search(text_type(v)):
                        yield result['n']
                        break
def get_node_by_type(manager, node_type, legacy=True):
    """Yield every distinct node carrying the *node_type* label.

    Yields property dicts when ``legacy`` is true, raw nodes otherwise.
    """
    q = """
        MATCH (n:{label})
        RETURN distinct n
        """.format(label=node_type)
    with manager.session as session:
        for record in session.run(q):
            node = record['n']
            yield node.properties if legacy else node
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type:
    :type value: str
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    # Build the case-insensitive regex in Python and hand it to Neo4j as a
    # query parameter. The old code formatted ``value`` straight into the
    # Cypher text, which broke on quotes and allowed query injection.
    # ``prop``/``node_type`` are label/property names and cannot be
    # parameterized; they must come from trusted callers.
    regex = u'(?i).*{0}.*'.format(value)
    if prop:
        q = """
            MATCH (n:{label})
            WHERE n.{prop} =~ {{search}} OR any(x IN n.{prop} WHERE x =~ {{search}})
            RETURN distinct n
            """.format(label=node_type, prop=prop)
    else:
        q = """
            MATCH (n:{label})
            WITH n, keys(n) as props
            WHERE any(prop in props WHERE n[prop] =~ {{search}}) OR
                  any(prop in props WHERE any(x IN n[prop] WHERE x =~ {{search}}))
            RETURN distinct n
            """.format(label=node_type)
    with manager.session as s:
        for result in s.run(q, {'search': regex}):
            yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """Yield every node carrying the *node_type* label.

    Yields property dicts when ``legacy`` is true, raw nodes otherwise.
    """
    q = """
        MATCH (n:{label})
        RETURN n
        """.format(label=node_type)
    with manager.session as session:
        for record in session.run(q):
            node = record['n']
            yield node.properties if legacy else node
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """Yield every node whose ``name`` property equals *name*.

    Yields property dicts when ``legacy`` is true, raw nodes otherwise.
    """
    q = """
        MATCH (n:Node {name: {name}})
        RETURN n
        """
    with manager.session as session:
        for record in session.run(q, {'name': name}):
            node = record['n']
            yield node.properties if legacy else node
def legacy_node_index_search(manager, lucene_query):
    """Unsupported: the legacy node index was removed in Neo4j 3.0.

    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :raises NotImplementedError: always
    """
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
    """
    Create a Neo4j index on ``node_type(prop)``.

    :param manager: Neo4jDBSessionManager
    :param prop: Property to index
    :param node_type: Label to create index on
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type node_type: str
    """
    # NOTE(review): ``node_type`` and ``prop`` are formatted into the
    # statement (index targets cannot be query parameters); only pass
    # trusted, identifier-like strings here.
    with manager.session as s:
        s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """Yield nodes whose indexed *prop* matches *value* case-insensitively.

    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: Yield property dicts when true, raw nodes otherwise.
    """
    q = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as session:
        for record in session.run(q, {'value': value}):
            node = record['n']
            yield node.properties if legacy else node
def get_unique_node_by_name(manager, node_name, node_type):
    """Return the single node matching *node_name* and *node_type*, or None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :raises exceptions.MultipleNodesReturned: If more than one node matches.
    :return: norduniclient node model or None
    """
    q = """
        MATCH (n:Node { name: {name} })
        WHERE {label} IN labels(n)
        RETURN n.handle_id as handle_id
        """
    with manager.session as session:
        hits = list(session.run(q, {'name': node_name, 'label': node_type}))
    if not hits:
        return None
    if len(hits) > 1:
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return get_node_model(manager, hits[0]['handle_id'])
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    Create a ``rel_type`` relationship from one node to another.

    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id
    :param other_handle_id: Other node handle id
    :param rel_type: Relationship type
    :param legacy: Backwards compatibility
    :type manager: Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type other_handle_id: str|unicode
    :type rel_type: str|unicode
    :type legacy: Boolean
    :rtype: int|neo4j.v1.types.Relationship
    """
    # NOTE(review): rel_type is %-interpolated into the Cypher text because
    # relationship types cannot be query parameters. The public create_*
    # wrappers only pass a fixed set of types; never call this with an
    # untrusted string.
    q = """
        MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
        CREATE (a)-[r:%s]->(b)
        RETURN r
        """ % rel_type
    with manager.session as s:
        if legacy:
            # Legacy callers get the internal relationship id only.
            return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
        return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """Relate a Location node to another node.

    Only a ``Has`` relationship towards another Location node is allowed;
    any other combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Has' and other_meta_type == 'Location':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """Relate a Logical node to another node.

    Allowed combinations: ``Depends_on`` towards Logical or Physical nodes,
    ``Part_of`` towards Physical nodes. Anything else raises
    NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_targets = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    if other_meta_type in allowed_targets.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """Relate a Relation node to another node.

    Allowed combinations: ``Uses``/``Provides`` towards Logical nodes,
    ``Responsible_for`` towards Location nodes, ``Owns``/``Provides``
    towards Physical nodes. Anything else raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """Relate a Physical node to another node.

    Allowed combinations: ``Has``/``Connected_to`` towards Physical nodes,
    ``Located_in`` towards Location nodes. Anything else raises
    NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """Dispatch relationship creation on the source node's meta type.

    Returns the created relationship, or raises NoRelationshipPossible when
    the combination of meta types and *rel_type* is not allowed.
    """
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    meta_type = get_node_meta_type(manager, handle_id)
    handler = dispatch.get(meta_type)
    if handler is not None:
        return handler(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """Return the relationships between two nodes.

    Optionally restricted to *rel_type*. Returns internal relationship ids
    in legacy mode, raw relationship objects otherwise; an empty list when
    no relationship exists.
    """
    if rel_type:
        q = """
            MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
            RETURN collect(r) as relationships
            """.format(rel_type=rel_type)
    else:
        q = """
            MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
            RETURN collect(r) as relationships
            """
    params = {'handle_id1': handle_id1, 'handle_id2': handle_id2}
    with manager.session as session:
        found = session.run(q, params).single()['relationships']
    if legacy:
        return [rel.id for rel in found]
    return found
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """Replace all properties of node *handle_id* with *new_properties*.

    The ``handle_id`` property is always forced back to *handle_id* so it
    cannot be changed. Unlike the previous version, the caller's dict is
    copied instead of being mutated as a side effect.

    :return: The updated node's property dict (``legacy``) or the raw node.
    """
    props = dict(new_properties)
    props['handle_id'] = handle_id  # Make sure the handle_id can't be changed
    q = """
        MATCH (n:Node {handle_id: {props}.handle_id})
        SET n = {props}
        RETURN n
        """
    with manager.session as s:
        if legacy:
            return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n'].properties
        return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
    """Replace all properties of the relationship with *new_properties*."""
    q = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        SET r = {props}
        RETURN r
        """
    params = {'relationship_id': int(relationship_id), 'props': new_properties}
    with manager.session as session:
        return session.run(q, params).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    Load a node as the most specific model class available in ``models``.

    Resolution order for the class name (first match wins, underscores
    stripped from labels):
    1. '<meta_type><label>Model' for each label
    2. '<label>Model' for each label
    3. '<meta_type>Model'
    4. models.BaseNodeModel as the final fallback.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    for label in bundle.get('labels'):
        try:
            classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            # models has no class of this name; try the next candidate.
            # NOTE(review): an AttributeError raised *inside* load() is
            # swallowed here too — confirm that is intended.
            pass
    for label in bundle.get('labels'):
        try:
            classname = '{base}Model'.format(base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    try:
        classname = '{base}Model'.format(base=bundle.get('meta_type'))
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """Load the relationship as a models.BaseRelationshipModel.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :rtype: models.BaseRelationshipModel
    """
    return models.BaseRelationshipModel(manager).load(
        get_relationship_bundle(manager, relationship_id))
|
NORDUnet/python-norduniclient | norduniclient/core.py | create_node | python | def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'] | Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L170-L199 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
    """
    :param uri: Bolt uri
    :type uri: str
    :param username: Neo4j username
    :type username: str
    :param password: Neo4j password
    :type password: str
    :param encrypted: Use TLS
    :type encrypted: Boolean
    :param max_pool_size: Maximum number of idle sessions
    :type max_pool_size: Integer
    :param trust: Trust cert on first use (0) or do not accept unknown cert (1)
    :type trust: Integer
    :return: Neo4j driver
    :rtype: neo4j.v1.session.Driver
    """
    credentials = basic_auth(username, password)
    return GraphDatabase.driver(
        uri,
        auth=credentials,
        encrypted=encrypted,
        max_pool_size=max_pool_size,
        trust=trust,
    )
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_node | python | def get_node(manager, handle_id, legacy=True):
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id) | :param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L202-L222 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_node_bundle | python | def get_node_bundle(manager, handle_id=None, node=None):
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d | :param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L225-L246 | [
"def get_node(manager, handle_id, legacy=True):\n \"\"\"\n :param manager: Manager to handle sessions and transactions\n :param handle_id: Unique id\n :param legacy: Backwards compatibility\n\n :type manager: norduniclient.contextmanager.Neo4jDBSessionManager\n :type handle_id: str|unicode\n :t... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | delete_node | python | def delete_node(manager, handle_id):
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True | Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L249-L265 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_relationship | python | def get_relationship(manager, relationship_id, legacy=True):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id)) | :param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L268-L291 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """
    Load a relationship as a model object.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    relationship_bundle = get_relationship_bundle(manager, relationship_id)
    model = models.BaseRelationshipModel(manager)
    return model.load(relationship_bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_relationship_bundle | python | def get_relationship_bundle(manager, relationship_id=None, legacy=True):
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle | :param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L294-L332 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
# Connection defaults; overridden below when a configured Django settings
# module is available, otherwise init_db() must be called with explicit
# credentials.
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
    from django.conf import settings as django_settings
    try:
        # Mandatory Django settings for quick init
        NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
        NEO4J_USERNAME = django_settings.NEO4J_USERNAME
        NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
    except AttributeError:
        # Django is present but not configured for Neo4j; keep the defaults.
        pass
    # Optional Django settings for quick init
    try:
        MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
    except AttributeError:
        pass
    try:
        ENCRYPTED = django_settings.NEO4J_ENCRYPTED
    except AttributeError:
        pass
except ImportError:
    # Running outside Django: no connection is opened at import time.
    logger.info('Starting up without a Django environment.')
    logger.info('Initial: norduniclient.neo4jdb == None.')
    logger.info('Use norduniclient.init_db to open a database connection.')
# Meta type labels recognised by the NORDUnet network inventory data model.
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
    """Process-wide singleton holder for a lazily created Neo4j session manager."""
    _instance = None  # singleton instance
    _manager = None   # cached session manager
    @classmethod
    def get_instance(cls):
        """Return the singleton instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance
    def __init__(self):
        # Touch the property once so the manager is created eagerly on
        # instantiation; a connection failure is logged there, not raised.
        self._manager = self.manager
    @property
    def manager(self):
        """Return the cached manager, attempting init_db() if none exists yet."""
        if self._manager is None:
            try:
                self._manager = init_db()
            except Exception as e:
                # Best effort: log and leave the manager unset; callers get None.
                logger.error('Could not create manager: {}'.format(e))
                self._manager = None
        return self._manager
    @manager.setter
    def manager(self, manager):
        self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
            max_pool_size=MAX_POOL_SIZE):
    """
    Create a session manager and ensure the handle_id uniqueness constraint
    and the name index exist.

    Returns None (implicitly) when no uri is supplied.

    :param uri: Bolt uri, e.g. bolt://localhost:7687
    :param username: Neo4j username
    :param password: Neo4j password
    :param encrypted: Use TLS
    :param max_pool_size: Maximum number of idle sessions
    :rtype: norduniclient.contextmanager.Neo4jDBSessionManager|None
    :raises neo4j.bolt.ProtocolError: when the database cannot be reached
    """
    if uri:
        try:
            # Imported here rather than at module top — presumably to avoid a
            # circular import with norduniclient.contextmanager; confirm.
            from norduniclient.contextmanager import Neo4jDBSessionManager
            manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
                                            max_pool_size=max_pool_size)
            try:
                with manager.session as s:
                    s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
            except Exception as e:
                logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
                raise e
            try:
                create_index(manager, 'name')
            except Exception as e:
                logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
                raise e
            return manager
        except ProtocolError as e:
            logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
            raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
    """
    Create a raw Neo4j bolt driver.

    :param uri: Bolt uri
    :type uri: str
    :param username: Neo4j username
    :type username: str
    :param password: Neo4j password
    :type password: str
    :param encrypted: Use TLS
    :type encrypted: Boolean
    :param max_pool_size: Maximum number of idle sessions
    :type max_pool_size: Integer
    :param trust: Trust cert on first use (0) or do not accept unknown cert (1)
    :type trust: Integer
    :return: Neo4j driver
    :rtype: neo4j.v1.session.Driver
    """
    auth_token = basic_auth(username, password)
    return GraphDatabase.driver(uri, auth=auth_token, encrypted=encrypted,
                                max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
    """Run *query* and flatten every returned record into a single dict.

    Later records overwrite earlier ones on key collision.
    """
    flattened = {}
    with manager.session as s:
        for record in s.run(query, kwargs):
            flattened.update(record.items())
    return flattened
def query_to_list(manager, query, **kwargs):
    """Run *query* and return one dict per returned record, in result order."""
    with manager.session as s:
        return [dict(record.items()) for record in s.run(query, kwargs)]
def query_to_iterator(manager, query, **kwargs):
    """Run *query* and lazily yield one dict per returned record."""
    with manager.session as s:
        for record in s.run(query, kwargs):
            yield dict(record.items())
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
    """
    Creates a node with the mandatory attributes name and handle_id also sets type label.

    :param manager: Manager to handle sessions and transactions
    :param name: Node name
    :param meta_type_label: Node meta type
    :param type_label: Node label
    :param handle_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type name: str|unicode
    :type meta_type_label: str|unicode
    :type type_label: str|unicode
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    :raises exceptions.MetaLabelNamingError: if meta_type_label is not in META_TYPES
    """
    if meta_type_label not in META_TYPES:
        raise exceptions.MetaLabelNamingError(meta_type_label)
    # Labels cannot be Cypher parameters, hence the %-interpolation below.
    # NOTE(review): type_label is not validated here — pass trusted label names
    # only, otherwise the query text can be malformed (injection risk).
    q = """
        CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
        RETURN n
        """ % (meta_type_label, type_label)
    with manager.session as s:
        if legacy:
            return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
        return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
    """
    Fetch a single node by its handle_id.

    :param manager: Manager to handle sessions and transactions
    :param handle_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    :raises exceptions.NodeNotFound: if no node carries the supplied handle_id
    """
    q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
    with manager.session as s:
        record = s.run(q, {'handle_id': handle_id}).single()
    if not record:
        raise exceptions.NodeNotFound(manager, handle_id)
    node = record['n']
    return node.properties if legacy else node
def get_node_bundle(manager, handle_id=None, node=None):
    """
    Create a dict describing a node: its properties, meta type and labels.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id, used to look the node up when *node* is None
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict with keys 'data', 'labels' and (when found) 'meta_type'
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    labels = list(node.labels)
    labels.remove('Node')  # All nodes have this label for indexing
    # Separate the meta type label from the ordinary labels. The previous code
    # removed items from `labels` while iterating over it, which skips the
    # element right after each removal; build a new list instead.
    d = {
        'data': node.properties
    }
    remaining = []
    for label in labels:
        if label in META_TYPES:
            d['meta_type'] = label
        else:
            remaining.append(label)
    d['labels'] = remaining
    return d
def delete_node(manager, handle_id):
    """
    Remove the node with the given handle_id together with every relationship
    attached to it.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :rtype: bool
    """
    cypher = """
    MATCH (n:Node {handle_id: {handle_id}})
    OPTIONAL MATCH (n)-[r]-()
    DELETE n,r
    """
    with manager.session as session:
        session.run(cypher, {'handle_id': handle_id})
    return True
def get_relationship(manager, relationship_id, legacy=True):
    """
    Fetch a single relationship by its internal Neo4j id.

    :param manager: Manager to handle sessions and transactions
    :param relationship_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type relationship_id: int
    :type legacy: Boolean
    :rtype int|neo4j.v1.types.Relationship
    :raises exceptions.RelationshipNotFound: if the id does not exist
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    RETURN r
    """
    with manager.session as s:
        record = s.run(q, {'relationship_id': int(relationship_id)}).single()
    if not record:
        raise exceptions.RelationshipNotFound(manager, int(relationship_id))
    rel = record['r']
    return rel.properties if legacy else rel
def delete_relationship(manager, relationship_id):
    """
    Remove a single relationship, identified by its internal Neo4j id.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool
    """
    cypher = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    DELETE r
    """
    with manager.session as session:
        session.run(cypher, {'relationship_id': int(relationship_id)})
    return True
def get_node_meta_type(manager, handle_id):
    """
    Return the meta type label ('Physical', 'Logical', 'Relation' or
    'Location') of the node with the given handle_id.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :return: string
    :raises exceptions.NoMetaLabelFound: if the node has no meta type label
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    meta_labels = [label for label in node.labels if label in META_TYPES]
    if meta_labels:
        return meta_labels[0]
    raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    if prop:
        # Exact (typed) comparison on a single property, evaluated in Neo4j.
        q = """
            MATCH (n:{label})
            USING SCAN n:{label}
            WHERE n.{prop} = {{value}}
            RETURN distinct n
            """.format(label=node_type, prop=prop)
        with manager.session as s:
            for result in s.run(q, {'value': value}):
                yield result['n']
    else:
        # No property given: stream every node with the label and regex-match
        # the value against each property value client side.
        q = """
            MATCH (n:{label})
            RETURN n
            """.format(label=node_type)
        # NOTE(review): value is compiled as a regex without re.escape(), so
        # regex metacharacters in it change matching semantics or raise —
        # confirm whether that is intended.
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as s:
            for result in s.run(q):
                for v in result['n'].properties.values():
                    if pattern.search(text_type(v)):
                        yield result['n']
                        break  # yield each matching node only once
def get_node_by_type(manager, node_type, legacy=True):
    """
    Yield every node carrying the given label (DISTINCT).

    NOTE(review): near duplicate of get_nodes_by_type below — this variant
    adds DISTINCT to the query. Candidates for consolidation.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match
    :param legacy: Yield plain property dicts instead of Node objects
    """
    q = """
    MATCH (n:{label})
    RETURN distinct n
    """.format(label=node_type)
    with manager.session as s:
        for result in s.run(q):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    # NOTE(review): `value` is interpolated straight into the Cypher text as
    # part of a regex literal — quotes or regex metacharacters in user input
    # will break or alter the query (injection risk). Confirm inputs are
    # trusted before exposing this to end users.
    if prop:
        q = """
            MATCH (n:{label})
            WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
            RETURN distinct n
            """.format(label=node_type, prop=prop, value=value)
    else:
        q = """
            MATCH (n:{label})
            WITH n, keys(n) as props
            WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
                  any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
            RETURN distinct n
            """.format(label=node_type, value=value)
    with manager.session as s:
        for result in s.run(q):
            yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """Yield every node with the given label, as dicts (legacy) or Node objects."""
    q = """
    MATCH (n:{label})
    RETURN n
    """.format(label=node_type)
    with manager.session as s:
        for record in s.run(q):
            node = record['n']
            yield node.properties if legacy else node
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """Yield every node whose name property equals *name*."""
    q = """
    MATCH (n:Node {name: {name}})
    RETURN n
    """
    with manager.session as s:
        for record in s.run(q, {'name': name}):
            node = record['n']
            yield node.properties if legacy else node
def legacy_node_index_search(manager, lucene_query):
    """
    Former Lucene index search. Legacy indexes were removed in Neo4j 3.0,
    so this always raises NotImplementedError.

    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :return: dict
    """
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
    """
    Create a schema index on the given label/property combination.

    :param manager: Neo4jDBSessionManager
    :param prop: Property to index
    :param node_type: Label to create index on
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type node_type: str
    """
    statement = 'CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop)
    with manager.session as session:
        session.run(statement)
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    Yield nodes whose indexed property matches *value*, case-insensitively.

    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: Backwards compatibility
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool
    :return: Dict or Node object
    :rtype: dict|Node
    """
    # node_type, prop and lookup_func cannot be Cypher parameters, so they are
    # interpolated into the query text; only the value itself is parameterized.
    # NOTE(review): pass only trusted values for those three arguments.
    q = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for result in s.run(q, {'value': value}):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
    """
    Returns the node if the node is unique for name and type or None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :return: norduniclient node model or None
    :raises exceptions.MultipleNodesReturned: if more than one node matches
    """
    # The label check uses `{label} IN labels(n)` so the label can be passed
    # as a query parameter instead of being interpolated into the query text.
    q = """
        MATCH (n:Node { name: {name} })
        WHERE {label} IN labels(n)
        RETURN n.handle_id as handle_id
        """
    with manager.session as s:
        result = list(s.run(q, {'name': node_name, 'label': node_type}))
    if result:
        if len(result) == 1:
            return get_node_model(manager, result[0]['handle_id'])
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    Create a relationship of *rel_type* from one node to another.

    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id
    :param other_handle_id: Other node handle id
    :param rel_type: Relationship type
    :param legacy: Backwards compatibility, return the internal id only
    :type manager: Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type other_handle_id: str|unicode
    :type rel_type: str|unicode
    :type legacy: Boolean
    :rtype: int|neo4j.v1.types.Relationship
    """
    # Relationship types cannot be Cypher parameters; rel_type is interpolated
    # into the query text. The public create_*_relationship wrappers restrict
    # it to a fixed set of known type names.
    q = """
        MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
        CREATE (a)-[r:%s]->(b)
        RETURN r
        """ % rel_type
    with manager.session as s:
        if legacy:
            return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
        return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Location node, enforcing the data model:
    only Has -> Location is allowed. Raises NoRelationshipPossible otherwise.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Has' and other_meta_type == 'Location':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Logical node, enforcing the data model:
    Depends_on -> Logical|Physical, Part_of -> Physical.
    Raises NoRelationshipPossible for any other combination.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_targets = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    if other_meta_type in allowed_targets.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Relation node, enforcing the data model:
    Uses|Provides -> Logical, Responsible_for -> Location,
    Owns|Provides -> Physical. Raises NoRelationshipPossible otherwise.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Physical node, enforcing the data model:
    Has|Connected_to -> Physical, Located_in -> Location.
    Raises NoRelationshipPossible otherwise.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Makes a relationship from node to other_node depending on which
    meta_type the nodes are. Returns the relationship or raises
    NoRelationshipPossible exception.
    """
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    meta_type = get_node_meta_type(manager, handle_id)
    creator = dispatch.get(meta_type)
    if creator is not None:
        return creator(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Takes a start and an end node with an optional relationship
    type.
    Returns the relationships between the nodes or an empty list.

    :param manager: Neo4jDBSessionManager
    :param handle_id1: Handle id of the first node
    :param handle_id2: Handle id of the second node
    :param rel_type: Optional relationship type to filter on
    :param legacy: When True, return internal relationship ids instead of
        Relationship objects
    """
    if rel_type:
        # Relationship types cannot be Cypher parameters, hence the
        # interpolation; callers should pass known type names only.
        q = """
            MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
            RETURN collect(r) as relationships
            """.format(rel_type=rel_type)
    else:
        q = """
            MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
            RETURN collect(r) as relationships
            """
    with manager.session as s:
        if legacy:
            relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
            return [relationship.id for relationship in relationships]
        return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """
    Replaces all properties of a node with the supplied dictionary.

    :param manager: Manager to handle sessions and transactions
    :param handle_id: Unique id of the node
    :param new_properties: Properties that should replace the current ones
    :param legacy: Backwards compatibility, return a plain dict instead of a Node
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type new_properties: dict
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    """
    # Work on a copy so the caller's dictionary is not mutated, then pin
    # handle_id so the node's unique id can never be overwritten.
    props = dict(new_properties)
    props['handle_id'] = handle_id
    q = """
        MATCH (n:Node {handle_id: {props}.handle_id})
        SET n = {props}
        RETURN n
        """
    with manager.session as s:
        if legacy:
            return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n'].properties
        return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
    """
    Replaces all properties of a relationship with the supplied dictionary.

    :param manager: Manager to handle sessions and transactions
    :param relationship_id: Internal Neo4j relationship id
    :param new_properties: Properties that should replace the current ones
    :return: The single result record, or None if the id does not exist
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    SET r = {props}
    RETURN r
    """
    with manager.session as s:
        # NOTE(review): unlike set_node_properties this returns the raw result
        # record, not r.properties — callers must unpack 'r' themselves.
        return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    # Model resolution, most specific class name first:
    # 1. "<MetaType><Label>Model" (e.g. PhysicalRouterModel)
    for label in bundle.get('labels'):
        try:
            classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            # No such model class; fall through to the next candidate.
            pass
    # 2. "<Label>Model" (e.g. RouterModel)
    for label in bundle.get('labels'):
        try:
            classname = '{base}Model'.format(base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    # 3. "<MetaType>Model", finally falling back to the generic BaseNodeModel.
    try:
        classname = '{base}Model'.format(base=bundle.get('meta_type'))
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """
    Load a relationship as a model object.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    relationship_bundle = get_relationship_bundle(manager, relationship_id)
    model = models.BaseRelationshipModel(manager)
    return model.load(relationship_bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | delete_relationship | python | def delete_relationship(manager, relationship_id):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True | Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L335-L350 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
# Connection defaults; overridden below when a configured Django settings
# module is available, otherwise init_db() must be called with explicit
# credentials.
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
    from django.conf import settings as django_settings
    try:
        # Mandatory Django settings for quick init
        NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
        NEO4J_USERNAME = django_settings.NEO4J_USERNAME
        NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
    except AttributeError:
        # Django is present but not configured for Neo4j; keep the defaults.
        pass
    # Optional Django settings for quick init
    try:
        MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
    except AttributeError:
        pass
    try:
        ENCRYPTED = django_settings.NEO4J_ENCRYPTED
    except AttributeError:
        pass
except ImportError:
    # Running outside Django: no connection is opened at import time.
    logger.info('Starting up without a Django environment.')
    logger.info('Initial: norduniclient.neo4jdb == None.')
    logger.info('Use norduniclient.init_db to open a database connection.')
# Meta type labels recognised by the NORDUnet network inventory data model.
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
    """Process-wide singleton holder for a lazily created Neo4j session manager."""
    _instance = None  # singleton instance
    _manager = None   # cached session manager
    @classmethod
    def get_instance(cls):
        """Return the singleton instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance
    def __init__(self):
        # Touch the property once so the manager is created eagerly on
        # instantiation; a connection failure is logged there, not raised.
        self._manager = self.manager
    @property
    def manager(self):
        """Return the cached manager, attempting init_db() if none exists yet."""
        if self._manager is None:
            try:
                self._manager = init_db()
            except Exception as e:
                # Best effort: log and leave the manager unset; callers get None.
                logger.error('Could not create manager: {}'.format(e))
                self._manager = None
        return self._manager
    @manager.setter
    def manager(self, manager):
        self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
            max_pool_size=MAX_POOL_SIZE):
    """
    Create a session manager and ensure the handle_id uniqueness constraint
    and the name index exist.

    Returns None (implicitly) when no uri is supplied.

    :param uri: Bolt uri, e.g. bolt://localhost:7687
    :param username: Neo4j username
    :param password: Neo4j password
    :param encrypted: Use TLS
    :param max_pool_size: Maximum number of idle sessions
    :rtype: norduniclient.contextmanager.Neo4jDBSessionManager|None
    :raises neo4j.bolt.ProtocolError: when the database cannot be reached
    """
    if uri:
        try:
            # Imported here rather than at module top — presumably to avoid a
            # circular import with norduniclient.contextmanager; confirm.
            from norduniclient.contextmanager import Neo4jDBSessionManager
            manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
                                            max_pool_size=max_pool_size)
            try:
                with manager.session as s:
                    s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
            except Exception as e:
                logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
                raise e
            try:
                create_index(manager, 'name')
            except Exception as e:
                logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
                raise e
            return manager
        except ProtocolError as e:
            logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
            raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
    """
    Create a raw Neo4j bolt driver.

    :param uri: Bolt uri
    :type uri: str
    :param username: Neo4j username
    :type username: str
    :param password: Neo4j password
    :type password: str
    :param encrypted: Use TLS
    :type encrypted: Boolean
    :param max_pool_size: Maximum number of idle sessions
    :type max_pool_size: Integer
    :param trust: Trust cert on first use (0) or do not accept unknown cert (1)
    :type trust: Integer
    :return: Neo4j driver
    :rtype: neo4j.v1.session.Driver
    """
    auth_token = basic_auth(username, password)
    return GraphDatabase.driver(uri, auth=auth_token, encrypted=encrypted,
                                max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
    """Run *query* and flatten every returned record into a single dict.

    Later records overwrite earlier ones on key collision.
    """
    flattened = {}
    with manager.session as s:
        for record in s.run(query, kwargs):
            flattened.update(record.items())
    return flattened
def query_to_list(manager, query, **kwargs):
    """Run *query* and return one dict per returned record, in result order."""
    with manager.session as s:
        return [dict(record.items()) for record in s.run(query, kwargs)]
def query_to_iterator(manager, query, **kwargs):
    """Run *query* and lazily yield one dict per returned record."""
    with manager.session as s:
        for record in s.run(query, kwargs):
            yield dict(record.items())
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
    """
    Creates a node with the mandatory attributes name and handle_id also sets type label.

    :param manager: Manager to handle sessions and transactions
    :param name: Node name
    :param meta_type_label: Node meta type
    :param type_label: Node label
    :param handle_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type name: str|unicode
    :type meta_type_label: str|unicode
    :type type_label: str|unicode
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    :raises exceptions.MetaLabelNamingError: if meta_type_label is not in META_TYPES
    """
    if meta_type_label not in META_TYPES:
        raise exceptions.MetaLabelNamingError(meta_type_label)
    # Labels cannot be Cypher parameters, hence the %-interpolation below.
    # NOTE(review): type_label is not validated here — pass trusted label names
    # only, otherwise the query text can be malformed (injection risk).
    q = """
        CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
        RETURN n
        """ % (meta_type_label, type_label)
    with manager.session as s:
        if legacy:
            return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
        return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
    """
    Fetch a single node by its handle_id.

    :param manager: Manager to handle sessions and transactions
    :param handle_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    :raises exceptions.NodeNotFound: if no node carries the supplied handle_id
    """
    q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
    with manager.session as s:
        record = s.run(q, {'handle_id': handle_id}).single()
    if not record:
        raise exceptions.NodeNotFound(manager, handle_id)
    node = record['n']
    return node.properties if legacy else node
def get_node_bundle(manager, handle_id=None, node=None):
    """
    Create a dict describing a node: its properties, meta type and labels.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id, used to look the node up when *node* is None
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict with keys 'data', 'labels' and (when found) 'meta_type'
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    labels = list(node.labels)
    labels.remove('Node')  # All nodes have this label for indexing
    # Separate the meta type label from the ordinary labels. The previous code
    # removed items from `labels` while iterating over it, which skips the
    # element right after each removal; build a new list instead.
    d = {
        'data': node.properties
    }
    remaining = []
    for label in labels:
        if label in META_TYPES:
            d['meta_type'] = label
        else:
            remaining.append(label)
    d['labels'] = remaining
    return d
def delete_node(manager, handle_id):
    """
    Remove the node with the given handle_id together with every relationship
    attached to it.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :rtype: bool
    """
    cypher = """
    MATCH (n:Node {handle_id: {handle_id}})
    OPTIONAL MATCH (n)-[r]-()
    DELETE n,r
    """
    with manager.session as session:
        session.run(cypher, {'handle_id': handle_id})
    return True
def get_relationship(manager, relationship_id, legacy=True):
    """
    Fetch a single relationship by its internal Neo4j id.

    :param manager: Manager to handle sessions and transactions
    :param relationship_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type relationship_id: int
    :type legacy: Boolean
    :rtype int|neo4j.v1.types.Relationship
    :raises exceptions.RelationshipNotFound: if the id does not exist
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    RETURN r
    """
    with manager.session as s:
        record = s.run(q, {'relationship_id': int(relationship_id)}).single()
    if not record:
        raise exceptions.RelationshipNotFound(manager, int(relationship_id))
    rel = record['r']
    return rel.properties if legacy else rel
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
    """
    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j id
    :param legacy: Backwards compatibility
    :type relationship_id: int
    :type legacy: bool
    :rtype: dictionary
    :raises exceptions.RelationshipNotFound: if the id does not exist
    """
    q = """
        MATCH (start)-[r]->(end)
        WHERE ID(r) = {relationship_id}
        RETURN start, r, end
        """
    with manager.session as s:
        record = s.run(q, {'relationship_id': int(relationship_id)}).single()
    if record is None:
        raise exceptions.RelationshipNotFound(manager, int(relationship_id))
    if legacy:
        # Legacy callers get the endpoints collapsed to their handle_ids.
        bundle = {
            'type': record['r'].type,
            'id': int(relationship_id),
            'data': record['r'].properties,
            'start': record['start'].properties['handle_id'],
            'end': record['end'].properties['handle_id'],
        }
    else:
        # New-style callers get the full Node objects as endpoints.
        bundle = {
            'type': record['r'].type,
            'id': int(relationship_id),
            'data': record['r'].properties,
            'start': record['start'],
            'end': record['end'],
        }
    return bundle
def get_node_meta_type(manager, handle_id):
    """
    Return the meta type label ('Physical', 'Logical', 'Relation' or
    'Location') of the node with the given handle_id.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :return: string
    :raises exceptions.NoMetaLabelFound: if the node has no meta type label
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    meta_labels = [label for label in node.labels if label in META_TYPES]
    if meta_labels:
        return meta_labels[0]
    raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    if prop:
        # Exact (typed) comparison on a single property, evaluated in Neo4j.
        q = """
            MATCH (n:{label})
            USING SCAN n:{label}
            WHERE n.{prop} = {{value}}
            RETURN distinct n
            """.format(label=node_type, prop=prop)
        with manager.session as s:
            for result in s.run(q, {'value': value}):
                yield result['n']
    else:
        # No property given: stream every node with the label and regex-match
        # the value against each property value client side.
        q = """
            MATCH (n:{label})
            RETURN n
            """.format(label=node_type)
        # NOTE(review): value is compiled as a regex without re.escape(), so
        # regex metacharacters in it change matching semantics or raise —
        # confirm whether that is intended.
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as s:
            for result in s.run(q):
                for v in result['n'].properties.values():
                    if pattern.search(text_type(v)):
                        yield result['n']
                        break  # yield each matching node only once
def get_node_by_type(manager, node_type, legacy=True):
    """
    Yield every node carrying the given label.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match
    :param legacy: If True yield property dicts, otherwise Node objects
    """
    q = """
        MATCH (n:{label})
        RETURN distinct n
        """.format(label=node_type)
    with manager.session as s:
        for record in s.run(q):
            node = record['n']
            yield node.properties if legacy else node
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and fuzzy compares the
    property/properties of the node with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for (treated as a regex fragment)
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str
    :type prop: str
    :type node_type: str
    :return: neo4j Node objects
    """
    # Build the case-insensitive regex once and pass it as a query parameter.
    # The original implementation interpolated the user-supplied value directly
    # into the Cypher text, which allowed Cypher injection. Label and property
    # names cannot be parameterized in Cypher and are still formatted in.
    regex = u'(?i).*{0}.*'.format(value)
    if prop:
        q = """
            MATCH (n:{label})
            WHERE n.{prop} =~ {{regex}} OR any(x IN n.{prop} WHERE x =~ {{regex}})
            RETURN distinct n
            """.format(label=node_type, prop=prop)
    else:
        q = """
            MATCH (n:{label})
            WITH n, keys(n) as props
            WHERE any(prop in props WHERE n[prop] =~ {{regex}}) OR
            any(prop in props WHERE any(x IN n[prop] WHERE x =~ {{regex}}))
            RETURN distinct n
            """.format(label=node_type)
    with manager.session as s:
        for result in s.run(q, {'regex': regex}):
            yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """
    Yield all nodes with the given label.

    :param legacy: If True yield property dicts, otherwise Node objects
    """
    q = """
        MATCH (n:{label})
        RETURN n
        """.format(label=node_type)
    with manager.session as s:
        for record in s.run(q):
            node = record['n']
            yield node.properties if legacy else node
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """
    Yield all nodes whose name property matches exactly.

    :param legacy: If True yield property dicts, otherwise Node objects
    """
    q = """
        MATCH (n:Node {name: {name}})
        RETURN n
        """
    with manager.session as s:
        for record in s.run(q, {'name': name}):
            node = record['n']
            yield node.properties if legacy else node
def legacy_node_index_search(manager, lucene_query):
    """
    Unsupported since legacy indexes were dropped; always raises.

    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :raises NotImplementedError: always
    """
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
    """
    Create a schema index on node_type(prop).

    :param manager: Neo4jDBSessionManager
    :param prop: Property to index
    :param node_type: Label to create index on
    :type prop: str
    :type node_type: str
    """
    statement = 'CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop)
    with manager.session as s:
        s.run(statement)
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    Yield nodes matched through an indexed property, case insensitively.

    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: If True yield property dicts, otherwise Node objects
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool
    :rtype: dict|Node
    """
    q = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for record in s.run(q, {'value': value}):
            node = record['n']
            yield node.properties if legacy else node
def get_unique_node_by_name(manager, node_name, node_type):
    """
    Return the node model if exactly one node matches name and type, else None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :return: norduniclient node model or None
    :raises exceptions.MultipleNodesReturned: when more than one node matches
    """
    q = """
        MATCH (n:Node { name: {name} })
        WHERE {label} IN labels(n)
        RETURN n.handle_id as handle_id
        """
    with manager.session as s:
        hits = list(s.run(q, {'name': node_name, 'label': node_type}))
    if not hits:
        return None
    if len(hits) > 1:
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return get_node_model(manager, hits[0]['handle_id'])
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    Create a relationship of rel_type between two nodes.

    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id
    :param other_handle_id: Other node handle id
    :param rel_type: Relationship type
    :param legacy: If True return the internal id, otherwise the Relationship
    :rtype: int|neo4j.v1.types.Relationship
    """
    # Relationship types cannot be parameterized in Cypher; formatted in.
    q = """
        MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
        CREATE (a)-[r:%s]->(b)
        RETURN r
        """ % rel_type
    params = {'start': handle_id, 'end': other_handle_id}
    with manager.session as s:
        relationship = s.run(q, params).single()['r']
    return relationship.id if legacy else relationship
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """
    Create a Has relationship from a Location to another Location.

    Any other combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type != 'Has' or other_meta_type != 'Location':
        raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type,
                                                rel_type)
    return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Logical node.

    Allowed: Depends_on -> Logical|Physical, Part_of -> Physical.
    Any other combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_targets = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    if other_meta_type in allowed_targets.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Relation node.

    Allowed: Uses|Provides -> Logical, Responsible_for -> Location,
    Owns|Provides -> Physical. Any other combination raises
    NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Physical node.

    Allowed: Has|Connected_to -> Physical, Located_in -> Location.
    Any other combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Create a relationship from node to other_node, dispatching on the meta
    type of the start node. Returns the relationship or raises
    NoRelationshipPossible.
    """
    meta_type = get_node_meta_type(manager, handle_id)
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    creator = dispatch.get(meta_type)
    if creator is not None:
        return creator(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Return the relationships between two nodes, optionally restricted to a
    relationship type. Returns an empty list when none exist.

    :param legacy: If True return internal ids, otherwise Relationship objects
    """
    if rel_type:
        q = """
            MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
            RETURN collect(r) as relationships
            """.format(rel_type=rel_type)
    else:
        q = """
            MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
            RETURN collect(r) as relationships
            """
    params = {'handle_id1': handle_id1, 'handle_id2': handle_id2}
    with manager.session as s:
        relationships = s.run(q, params).single()['relationships']
    if legacy:
        return [relationship.id for relationship in relationships]
    return relationships
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """
    Replace all properties of a node with new_properties.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id of the node to update
    :param new_properties: Mapping of the properties the node should have
    :param legacy: If True return a property dict, otherwise the Node object
    :rtype: dict|neo4j.v1.types.Node
    """
    # Work on a copy so the caller's dict is not mutated as a side effect
    # (the original implementation wrote handle_id into the caller's dict).
    props = dict(new_properties)
    props['handle_id'] = handle_id  # Make sure the handle_id can't be changed
    q = """
        MATCH (n:Node {handle_id: {props}.handle_id})
        SET n = {props}
        RETURN n
        """
    with manager.session as s:
        node = s.run(q, {'handle_id': handle_id, 'props': props}).single()['n']
    return node.properties if legacy else node
def set_relationship_properties(manager, relationship_id, new_properties):
    """
    Replace all properties of a relationship and return the resulting record.
    """
    q = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        SET r = {props}
        RETURN r
        """
    params = {'relationship_id': int(relationship_id), 'props': new_properties}
    with manager.session as s:
        return s.run(q, params).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel

    Resolution order (most specific model class wins): for each label try
    '<MetaType><Label>Model', then '<Label>Model', then '<MetaType>Model',
    finally falling back to BaseNodeModel.
    """
    bundle = get_node_bundle(manager, handle_id, node)
    # 1) Meta type + label combination, e.g. 'PhysicalRouterModel'.
    for label in bundle.get('labels'):
        try:
            # Underscores are stripped since model class names contain none.
            classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass  # No such model class; try the next label.
    # 2) Label only, e.g. 'RouterModel'.
    for label in bundle.get('labels'):
        try:
            classname = '{base}Model'.format(base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    # 3) Meta type only, e.g. 'PhysicalModel'; otherwise the generic base.
    try:
        classname = '{base}Model'.format(base=bundle.get('meta_type'))
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    return models.BaseRelationshipModel(manager).load(get_relationship_bundle(manager, relationship_id))
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_node_meta_type | python | def get_node_meta_type(manager, handle_id):
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id) | Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L353-L365 | [
"def get_node(manager, handle_id, legacy=True):\n \"\"\"\n :param manager: Manager to handle sessions and transactions\n :param handle_id: Unique id\n :param legacy: Backwards compatibility\n\n :type manager: norduniclient.contextmanager.Neo4jDBSessionManager\n :type handle_id: str|unicode\n :t... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
# Connection defaults; overridden below when a Django settings module is
# importable and configured.
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
    from django.conf import settings as django_settings
    try:
        # Mandatory Django settings for quick init
        NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
        NEO4J_USERNAME = django_settings.NEO4J_USERNAME
        NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
    except AttributeError:
        # Settings module exists but lacks the Neo4j settings; keep defaults.
        pass
    # Optional Django settings for quick init
    try:
        MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
    except AttributeError:
        pass
    try:
        ENCRYPTED = django_settings.NEO4J_ENCRYPTED
    except AttributeError:
        pass
except ImportError:
    logger.info('Starting up without a Django environment.')
    logger.info('Initial: norduniclient.neo4jdb == None.')
    logger.info('Use norduniclient.init_db to open a database connection.')
# Meta type labels recognised by this module; every node must carry one.
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
    """Lazily initialised singleton wrapper around the session manager."""

    _instance = None  # Singleton instance cache.
    _manager = None   # Shared session manager, created on first access.

    @classmethod
    def get_instance(cls):
        # Classic lazy singleton accessor.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        # Touch the property so the manager is created eagerly on construction.
        self._manager = self.manager

    @property
    def manager(self):
        # Create the manager on demand; on failure log and keep None so a
        # later access can retry.
        if self._manager is None:
            try:
                self._manager = init_db()
            except Exception as e:
                logger.error('Could not create manager: {}'.format(e))
                self._manager = None
        return self._manager

    @manager.setter
    def manager(self, manager):
        self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
            max_pool_size=MAX_POOL_SIZE):
    """
    Create a Neo4jDBSessionManager and ensure the handle_id uniqueness
    constraint and name index exist.

    Returns None when no uri is configured; raises on connection or setup
    failure.
    """
    if uri:
        try:
            # Local import — presumably to avoid a circular import at module
            # load time; confirm before moving to the top of the file.
            from norduniclient.contextmanager import Neo4jDBSessionManager
            manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
                                            max_pool_size=max_pool_size)
            try:
                with manager.session as s:
                    # handle_id is the primary key of the data model.
                    s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
            except Exception as e:
                logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
                raise e
            try:
                create_index(manager, 'name')
            except Exception as e:
                logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
                raise e
            return manager
        except ProtocolError as e:
            logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
            raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
    """
    Create a Neo4j bolt driver.

    :param uri: Bolt uri
    :type uri: str
    :param username: Neo4j username
    :type username: str
    :param password: Neo4j password
    :type password: str
    :param encrypted: Use TLS
    :type encrypted: Boolean
    :param max_pool_size: Maximum number of idle sessions
    :type max_pool_size: Integer
    :param trust: Trust cert on first use (0) or do not accept unknown cert (1)
    :type trust: Integer
    :return: Neo4j driver
    :rtype: neo4j.v1.session.Driver
    """
    auth_token = basic_auth(username, password)
    return GraphDatabase.driver(uri, auth=auth_token, encrypted=encrypted,
                                max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
    """
    Run a query and merge all records into a single dict (later records win
    on key collisions).
    """
    merged = {}
    with manager.session as s:
        for record in s.run(query, kwargs):
            merged.update(record.items())
    return merged
def query_to_list(manager, query, **kwargs):
    """
    Run a query and return its records as a list of dicts.

    :param manager: Neo4jDBSessionManager
    :param query: Cypher query string
    :param kwargs: Query parameters
    :rtype: list of dict
    """
    # 'results' instead of the ambiguous single-letter name 'l' (PEP 8/E741).
    results = []
    with manager.session as s:
        for record in s.run(query, kwargs):
            results.append(dict(record.items()))
    return results
def query_to_iterator(manager, query, **kwargs):
    """
    Run a query and lazily yield each record as a dict.
    """
    with manager.session as s:
        for record in s.run(query, kwargs):
            yield dict(record.items())
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
    """
    Create a node with the mandatory name and handle_id properties plus the
    meta type and type labels.

    :param manager: Manager to handle sessions and transactions
    :param name: Node name
    :param meta_type_label: Node meta type, must be one of META_TYPES
    :param type_label: Node label
    :param handle_id: Unique id
    :param legacy: If True return a property dict, otherwise the Node object
    :raises exceptions.MetaLabelNamingError: for an unknown meta type
    :rtype: dict|neo4j.v1.types.Node
    """
    if meta_type_label not in META_TYPES:
        raise exceptions.MetaLabelNamingError(meta_type_label)
    # Labels cannot be parameterized in Cypher; formatted in.
    q = """
        CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
        RETURN n
        """ % (meta_type_label, type_label)
    with manager.session as s:
        node = s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
    return node.properties if legacy else node
def get_node(manager, handle_id, legacy=True):
    """
    Fetch a single node by its handle_id.

    :param manager: Manager to handle sessions and transactions
    :param handle_id: Unique id
    :param legacy: If True return a property dict, otherwise the Node object
    :raises exceptions.NodeNotFound: when no node matches
    :rtype: dict|neo4j.v1.types.Node
    """
    q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
    with manager.session as s:
        record = s.run(q, {'handle_id': handle_id}).single()
    if not record:
        raise exceptions.NodeNotFound(manager, handle_id)
    node = record['n']
    return node.properties if legacy else node
def get_node_bundle(manager, handle_id=None, node=None):
    """
    Build a dict bundle ('data', 'labels', optional 'meta_type') for a node.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id, used to fetch the node when node is None
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    bundle = {'data': node.properties}
    labels = []
    # The original implementation removed items from the list it was
    # iterating over, which can skip labels; classify each label instead.
    # 'Node' is dropped as it is only used for indexing (tolerated if absent,
    # where the original raised ValueError).
    for label in node.labels:
        if label == 'Node':
            continue
        if label in META_TYPES:
            bundle['meta_type'] = label
        else:
            labels.append(label)
    bundle['labels'] = labels
    return bundle
def delete_node(manager, handle_id):
    """
    Delete a node and all of its relationships.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :rtype: bool
    """
    q = """
        MATCH (n:Node {handle_id: {handle_id}})
        OPTIONAL MATCH (n)-[r]-()
        DELETE n,r
        """
    with manager.session as s:
        s.run(q, {'handle_id': handle_id})
    return True
def get_relationship(manager, relationship_id, legacy=True):
    """
    Fetch a relationship by its internal Neo4j id.

    :param manager: Manager to handle sessions and transactions
    :param relationship_id: Unique id
    :param legacy: If True return a property dict, otherwise the Relationship
    :raises exceptions.RelationshipNotFound: when no relationship matches
    :rtype: dict|neo4j.v1.types.Relationship
    """
    q = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        RETURN r
        """
    with manager.session as s:
        record = s.run(q, {'relationship_id': int(relationship_id)}).single()
    if not record:
        raise exceptions.RelationshipNotFound(manager, int(relationship_id))
    relationship = record['r']
    return relationship.properties if legacy else relationship
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
    """
    Build a dict bundle (type, id, data, start, end) for a relationship.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j id
    :param legacy: If True start/end are handle_ids, otherwise Node objects
    :type relationship_id: int
    :type legacy: bool
    :raises exceptions.RelationshipNotFound: when no relationship matches
    :rtype: dictionary
    """
    q = """
        MATCH (start)-[r]->(end)
        WHERE ID(r) = {relationship_id}
        RETURN start, r, end
        """
    with manager.session as s:
        record = s.run(q, {'relationship_id': int(relationship_id)}).single()
    if record is None:
        raise exceptions.RelationshipNotFound(manager, int(relationship_id))
    start, end = record['start'], record['end']
    if legacy:
        start = start.properties['handle_id']
        end = end.properties['handle_id']
    return {
        'type': record['r'].type,
        'id': int(relationship_id),
        'data': record['r'].properties,
        'start': start,
        'end': end,
    }
def delete_relationship(manager, relationship_id):
    """
    Delete a single relationship by its internal Neo4j id.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool
    """
    q = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        DELETE r
        """
    with manager.session as s:
        s.run(q, {'relationship_id': int(relationship_id)})
    return True
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Yield nodes of the given label whose properties match the supplied value.

    With ``prop`` set an exact match on that property is done in the database;
    without it every property of every node is regex matched (case
    insensitive) in Python.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str
    :return: neo4j Node objects
    """
    if not prop:
        q = """
            MATCH (n:{label})
            RETURN n
            """.format(label=node_type)
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as s:
            for record in s.run(q):
                node = record['n']
                # Yield as soon as any property value matches.
                if any(pattern.search(text_type(v)) for v in node.properties.values()):
                    yield node
    else:
        q = """
            MATCH (n:{label})
            USING SCAN n:{label}
            WHERE n.{prop} = {{value}}
            RETURN distinct n
            """.format(label=node_type, prop=prop)
        with manager.session as s:
            for record in s.run(q, {'value': value}):
                yield record['n']
def get_node_by_type(manager, node_type, legacy=True):
    """
    Yield every node carrying the given label.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match
    :param legacy: If True yield property dicts, otherwise Node objects
    """
    q = """
        MATCH (n:{label})
        RETURN distinct n
        """.format(label=node_type)
    with manager.session as s:
        for record in s.run(q):
            node = record['n']
            yield node.properties if legacy else node
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and fuzzy compares the
    property/properties of the node with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for (treated as a regex fragment)
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str
    :type prop: str
    :type node_type: str
    :return: neo4j Node objects
    """
    # Build the case-insensitive regex once and pass it as a query parameter.
    # The original implementation interpolated the user-supplied value directly
    # into the Cypher text, which allowed Cypher injection. Label and property
    # names cannot be parameterized in Cypher and are still formatted in.
    regex = u'(?i).*{0}.*'.format(value)
    if prop:
        q = """
            MATCH (n:{label})
            WHERE n.{prop} =~ {{regex}} OR any(x IN n.{prop} WHERE x =~ {{regex}})
            RETURN distinct n
            """.format(label=node_type, prop=prop)
    else:
        q = """
            MATCH (n:{label})
            WITH n, keys(n) as props
            WHERE any(prop in props WHERE n[prop] =~ {{regex}}) OR
            any(prop in props WHERE any(x IN n[prop] WHERE x =~ {{regex}}))
            RETURN distinct n
            """.format(label=node_type)
    with manager.session as s:
        for result in s.run(q, {'regex': regex}):
            yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """
    Yield all nodes with the given label.

    :param legacy: If True yield property dicts, otherwise Node objects
    """
    q = """
        MATCH (n:{label})
        RETURN n
        """.format(label=node_type)
    with manager.session as s:
        for record in s.run(q):
            node = record['n']
            yield node.properties if legacy else node
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """
    Yield all nodes whose name property matches exactly.

    :param legacy: If True yield property dicts, otherwise Node objects
    """
    q = """
        MATCH (n:Node {name: {name}})
        RETURN n
        """
    with manager.session as s:
        for record in s.run(q, {'name': name}):
            node = record['n']
            yield node.properties if legacy else node
def legacy_node_index_search(manager, lucene_query):
    """
    Unsupported since legacy indexes were dropped; always raises.

    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :raises NotImplementedError: always
    """
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
    """
    Create a schema index on node_type(prop).

    :param manager: Neo4jDBSessionManager
    :param prop: Property to index
    :param node_type: Label to create index on
    :type prop: str
    :type node_type: str
    """
    statement = 'CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop)
    with manager.session as s:
        s.run(statement)
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    Yield nodes matched through an indexed property, case insensitively.

    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: If True yield property dicts, otherwise Node objects
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool
    :rtype: dict|Node
    """
    q = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for record in s.run(q, {'value': value}):
            node = record['n']
            yield node.properties if legacy else node
def get_unique_node_by_name(manager, node_name, node_type):
    """
    Return the node model if exactly one node matches name and type, else None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :return: norduniclient node model or None
    :raises exceptions.MultipleNodesReturned: when more than one node matches
    """
    q = """
        MATCH (n:Node { name: {name} })
        WHERE {label} IN labels(n)
        RETURN n.handle_id as handle_id
        """
    with manager.session as s:
        hits = list(s.run(q, {'name': node_name, 'label': node_type}))
    if not hits:
        return None
    if len(hits) > 1:
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return get_node_model(manager, hits[0]['handle_id'])
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    Create a relationship of rel_type between two nodes.

    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id
    :param other_handle_id: Other node handle id
    :param rel_type: Relationship type
    :param legacy: If True return the internal id, otherwise the Relationship
    :rtype: int|neo4j.v1.types.Relationship
    """
    # Relationship types cannot be parameterized in Cypher; formatted in.
    q = """
        MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
        CREATE (a)-[r:%s]->(b)
        RETURN r
        """ % rel_type
    params = {'start': handle_id, 'end': other_handle_id}
    with manager.session as s:
        relationship = s.run(q, params).single()['r']
    return relationship.id if legacy else relationship
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """
    Create a Has relationship from a Location to another Location.

    Any other combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type != 'Has' or other_meta_type != 'Location':
        raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type,
                                                rel_type)
    return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Logical node.

    Allowed: Depends_on -> Logical|Physical, Part_of -> Physical.
    Any other combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_targets = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    if other_meta_type in allowed_targets.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Relation node.

    Allowed: Uses|Provides -> Logical, Responsible_for -> Location,
    Owns|Provides -> Physical. Any other combination raises
    NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Physical node.

    Allowed: Has|Connected_to -> Physical, Located_in -> Location.
    Any other combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Create a relationship from node to other_node, dispatching on the meta
    type of the start node. Returns the relationship or raises
    NoRelationshipPossible.
    """
    meta_type = get_node_meta_type(manager, handle_id)
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    creator = dispatch.get(meta_type)
    if creator is not None:
        return creator(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Return the relationships between two nodes, optionally restricted to a
    relationship type. Returns an empty list when none exist.

    :param legacy: If True return internal ids, otherwise Relationship objects
    """
    if rel_type:
        q = """
            MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
            RETURN collect(r) as relationships
            """.format(rel_type=rel_type)
    else:
        q = """
            MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
            RETURN collect(r) as relationships
            """
    params = {'handle_id1': handle_id1, 'handle_id2': handle_id2}
    with manager.session as s:
        relationships = s.run(q, params).single()['relationships']
    if legacy:
        return [relationship.id for relationship in relationships]
    return relationships
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """
    Replace all properties of a node with new_properties.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id of the node to update
    :param new_properties: Mapping of the properties the node should have
    :param legacy: If True return a property dict, otherwise the Node object
    :rtype: dict|neo4j.v1.types.Node
    """
    # Work on a copy so the caller's dict is not mutated as a side effect
    # (the original implementation wrote handle_id into the caller's dict).
    props = dict(new_properties)
    props['handle_id'] = handle_id  # Make sure the handle_id can't be changed
    q = """
        MATCH (n:Node {handle_id: {props}.handle_id})
        SET n = {props}
        RETURN n
        """
    with manager.session as s:
        node = s.run(q, {'handle_id': handle_id, 'props': props}).single()['n']
    return node.properties if legacy else node
def set_relationship_properties(manager, relationship_id, new_properties):
    """
    Replace all properties of a relationship and return the resulting record.
    """
    q = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        SET r = {props}
        RETURN r
        """
    params = {'relationship_id': int(relationship_id), 'props': new_properties}
    with manager.session as s:
        return s.run(q, params).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_nodes_by_value | python | def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break | Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L369-L406 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | search_nodes_by_value | python | def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n'] | Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L422-L454 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
    """
    Fetch a relationship together with its endpoint nodes and repackage
    everything as a plain dictionary.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j id
    :param legacy: Return endpoint handle_ids instead of node objects
    :type relationship_id: int
    :type legacy: bool
    :rtype: dictionary
    :raises exceptions.RelationshipNotFound: when no relationship matches
    """
    q = """
    MATCH (start)-[r]->(end)
    WHERE ID(r) = {relationship_id}
    RETURN start, r, end
    """
    with manager.session as session:
        record = session.run(q, {'relationship_id': int(relationship_id)}).single()
    if record is None:
        raise exceptions.RelationshipNotFound(manager, int(relationship_id))
    bundle = {
        'type': record['r'].type,
        'id': int(relationship_id),
        'data': record['r'].properties,
        'start': record['start'],
        'end': record['end'],
    }
    if legacy:
        # Older callers expect the handle_id of each endpoint, not the node object.
        bundle['start'] = bundle['start'].properties['handle_id']
        bundle['end'] = bundle['end'].properties['handle_id']
    return bundle
def delete_relationship(manager, relationship_id):
    """
    Remove a single relationship by its internal Neo4j id.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool
    """
    query = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    DELETE r
    """
    with manager.session as session:
        session.run(query, {'relationship_id': int(relationship_id)})
    return True
def get_node_meta_type(manager, handle_id):
    """
    Return the meta type label ('Physical', 'Logical', 'Relation' or
    'Location') of the supplied node.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :return: string
    :raises exceptions.NoMetaLabelFound: when the node has no meta label
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    meta_labels = [label for label in node.labels if label in META_TYPES]
    if meta_labels:
        return meta_labels[0]
    raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Yield every node (optionally restricted to *node_type*) whose property
    *prop* equals *value*, or -- when *prop* is omitted -- whose any property
    matches *value* as a case insensitive regular expression.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    if prop:
        q = """
        MATCH (n:{label})
        USING SCAN n:{label}
        WHERE n.{prop} = {{value}}
        RETURN distinct n
        """.format(label=node_type, prop=prop)
        with manager.session as session:
            for record in session.run(q, {'value': value}):
                yield record['n']
    else:
        q = """
        MATCH (n:{label})
        RETURN n
        """.format(label=node_type)
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as session:
            for record in session.run(q):
                # Yield the node as soon as any of its properties matches.
                if any(pattern.search(text_type(v)) for v in record['n'].properties.values()):
                    yield record['n']
def get_node_by_type(manager, node_type, legacy=True):
    """
    Yield every distinct node carrying the label *node_type*.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match
    :param legacy: Yield property dicts instead of node objects
    """
    q = """
    MATCH (n:{label})
    RETURN distinct n
    """.format(label=node_type)
    with manager.session as session:
        for record in session.run(q):
            yield record['n'].properties if legacy else record['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """
    Yield every node carrying the label *node_type*.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match
    :param legacy: Yield property dicts instead of node objects
    """
    q = """
    MATCH (n:{label})
    RETURN n
    """.format(label=node_type)
    with manager.session as session:
        for record in session.run(q):
            yield record['n'].properties if legacy else record['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """
    Yield every node whose name property equals *name*.

    :param manager: Neo4jDBSessionManager
    :param name: Node name to match
    :param legacy: Yield property dicts instead of node objects
    """
    q = """
    MATCH (n:Node {name: {name}})
    RETURN n
    """
    with manager.session as session:
        for record in session.run(q, {'name': name}):
            yield record['n'].properties if legacy else record['n']
def legacy_node_index_search(manager, lucene_query):
    """
    Unsupported: legacy (lucene) node indexes were removed in Neo4j 3.0.

    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :return: dict
    :raises NotImplementedError: always
    """
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
    """
    Create a schema index for *prop* on nodes labelled *node_type*.

    :param manager: Neo4jDBSessionManager
    :param prop: Property to index
    :param node_type: Label to create index on
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type node_type: str
    """
    query = 'CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop)
    with manager.session as session:
        session.run(query)
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    Yield nodes whose (indexed) property *prop* matches *value* using the
    given case insensitive string comparison.

    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: Yield property dicts instead of node objects
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool
    :rtype: dict|Node
    """
    q = """
    MATCH (n:{label})
    WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
    RETURN n
    """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as session:
        for record in session.run(q, {'value': value}):
            yield record['n'].properties if legacy else record['n']
def get_unique_node_by_name(manager, node_name, node_type):
    """
    Return the node model when exactly one node matches name and type,
    otherwise None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :return: norduniclient node model or None
    :raises exceptions.MultipleNodesReturned: when the name is ambiguous
    """
    q = """
    MATCH (n:Node { name: {name} })
    WHERE {label} IN labels(n)
    RETURN n.handle_id as handle_id
    """
    with manager.session as session:
        hits = list(session.run(q, {'name': node_name, 'label': node_type}))
    if not hits:
        return None
    if len(hits) > 1:
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return get_node_model(manager, hits[0]['handle_id'])
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """
    Create the relationship when it is valid for a Location node
    (Location -Has-> Location), otherwise raise NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Has' and other_meta_type == 'Location':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """
    Create the relationship when it is valid for a Logical node,
    otherwise raise NoRelationshipPossible.
    """
    # Valid peer meta types per relationship type.
    allowed = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if other_meta_type in allowed.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """
    Create the relationship when it is valid for a Relation node,
    otherwise raise NoRelationshipPossible.
    """
    # Valid relationship types per peer meta type.
    allowed = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type in allowed.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Create the relationship when it is valid for a Physical node,
    otherwise raise NoRelationshipPossible.
    """
    # Valid relationship types per peer meta type.
    allowed = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type in allowed.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Create a relationship from node to other node, dispatching on the meta
    type of the start node. Returns the relationship or raises
    NoRelationshipPossible.
    """
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    meta_type = get_node_meta_type(manager, handle_id)
    creator = dispatch.get(meta_type)
    if creator:
        return creator(manager, handle_id, other_handle_id, rel_type)
    # Unreachable with a well-formed node, kept for parity with the peers.
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Return the relationships between two nodes, optionally restricted to
    *rel_type*; an empty list when none exist.

    :param legacy: Return relationship ids instead of relationship objects
    """
    if rel_type:
        q = """
        MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
        RETURN collect(r) as relationships
        """.format(rel_type=rel_type)
    else:
        q = """
        MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
        RETURN collect(r) as relationships
        """
    with manager.session as session:
        relationships = session.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
    if legacy:
        return [relationship.id for relationship in relationships]
    return relationships
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """
    Replace all properties of a node with *new_properties*; the handle_id
    is always preserved.

    :param legacy: Return the property dict instead of the node object
    """
    new_properties['handle_id'] = handle_id  # Make sure the handle_id can't be changed
    q = """
    MATCH (n:Node {handle_id: {props}.handle_id})
    SET n = {props}
    RETURN n
    """
    with manager.session as session:
        node = session.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
    return node.properties if legacy else node
def set_relationship_properties(manager, relationship_id, new_properties):
    """
    Replace all properties of the relationship with *new_properties* and
    return the resulting record.
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    SET r = {props}
    RETURN r
    """
    with manager.session as session:
        return session.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    Return the most specific model available for the node.

    Resolution order: '<MetaType><Label>Model' for each label, then
    '<Label>Model' for each label, then '<MetaType>Model', finally
    BaseNodeModel.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    meta_type = bundle.get('meta_type')
    labels = bundle.get('labels')
    candidates = ['{0}{1}Model'.format(meta_type, label).replace('_', '') for label in labels]
    candidates += ['{0}Model'.format(label).replace('_', '') for label in labels]
    candidates.append('{0}Model'.format(meta_type))
    for classname in candidates:
        try:
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            continue
    return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """
    Load a relationship bundle and wrap it in the base relationship model.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    bundle = get_relationship_bundle(manager, relationship_id)
    return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | create_index | python | def create_index(manager, prop, node_type='Node'):
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop)) | :param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L494-L505 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
# Connection defaults; overridden below when a Django settings module is
# importable and defines the corresponding NEO4J_* attributes.
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
    from django.conf import settings as django_settings
    try:
        # Mandatory Django settings for quick init
        NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
        NEO4J_USERNAME = django_settings.NEO4J_USERNAME
        NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
    except AttributeError:
        # Settings module exists but lacks the Neo4j block; keep defaults.
        pass
    # Optional Django settings for quick init
    try:
        MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
    except AttributeError:
        pass
    try:
        ENCRYPTED = django_settings.NEO4J_ENCRYPTED
    except AttributeError:
        pass
except ImportError:
    # No Django at all: the caller must open a connection explicitly.
    logger.info('Starting up without a Django environment.')
    logger.info('Initial: norduniclient.neo4jdb == None.')
    logger.info('Use norduniclient.init_db to open a database connection.')
# The four meta type labels every node is classified under.
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
    """Process wide singleton holder for a lazily created Neo4jDBSessionManager."""

    # Shared singleton instance and its manager; class attributes so state
    # survives across get_instance() calls.
    _instance = None
    _manager = None

    @classmethod
    def get_instance(cls):
        """Return the shared GraphDB instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        # Touch the property so the manager is created eagerly on construction.
        self._manager = self.manager

    @property
    def manager(self):
        """Lazily initialised Neo4jDBSessionManager; None when init_db fails."""
        if self._manager is None:
            try:
                self._manager = init_db()
            except Exception as e:
                # Swallow connection errors here; callers must cope with None.
                logger.error('Could not create manager: {}'.format(e))
                self._manager = None
        return self._manager

    @manager.setter
    def manager(self, manager):
        # Allows tests or callers to inject a pre-built manager.
        self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
            max_pool_size=MAX_POOL_SIZE):
    """
    Create a Neo4jDBSessionManager and bootstrap the schema: a uniqueness
    constraint on Node.handle_id and an index on Node.name.

    Returns None when no uri is supplied; re-raises any error met while
    connecting or creating the schema.
    """
    if uri:
        try:
            # NOTE(review): imported here rather than at module level,
            # presumably to avoid a circular import — confirm before moving.
            from norduniclient.contextmanager import Neo4jDBSessionManager
            manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
                                            max_pool_size=max_pool_size)
            try:
                with manager.session as s:
                    s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
            except Exception as e:
                logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
                raise e
            try:
                create_index(manager, 'name')
            except Exception as e:
                logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
                raise e
            return manager
        except ProtocolError as e:
            logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
            raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
    """
    Build a bolt driver for the given connection settings.

    :param uri: Bolt uri
    :type uri: str
    :param username: Neo4j username
    :type username: str
    :param password: Neo4j password
    :type password: str
    :param encrypted: Use TLS
    :type encrypted: Boolean
    :param max_pool_size: Maximum number of idle sessions
    :type max_pool_size: Integer
    :param trust: Trust cert on first use (0) or do not accept unknown cert (1)
    :type trust: Integer
    :return: Neo4j driver
    :rtype: neo4j.v1.session.Driver
    """
    auth_token = basic_auth(username, password)
    return GraphDatabase.driver(uri, auth=auth_token, encrypted=encrypted,
                                max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
    """
    Run *query* and flatten every record into a single dict; later records
    overwrite earlier keys.
    """
    flattened = {}
    with manager.session as session:
        for record in session.run(query, kwargs):
            flattened.update(record.items())
    return flattened
def query_to_list(manager, query, **kwargs):
    """
    Run *query* and return every record as a dict, in result order.

    :param manager: Neo4jDBSessionManager
    :param query: Cypher query string
    :param kwargs: Query parameters
    :return: list of dicts
    """
    # Rewritten as a comprehension; the old accumulator was named 'l',
    # an ambiguous single-letter name flagged by PEP 8 (E741).
    with manager.session as session:
        return [dict(record.items()) for record in session.run(query, kwargs)]
def query_to_iterator(manager, query, **kwargs):
    """
    Run *query* and lazily yield each record as a dict.
    """
    with manager.session as session:
        for record in session.run(query, kwargs):
            yield dict(record.items())
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
    """
    Create a node with the mandatory attributes name and handle_id and
    attach the meta type and type labels.

    :param manager: Manager to handle sessions and transactions
    :param name: Node name
    :param meta_type_label: Node meta type
    :param type_label: Node label
    :param handle_id: Unique id
    :param legacy: Return the property dict instead of the node object
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type name: str|unicode
    :type meta_type_label: str|unicode
    :type type_label: str|unicode
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    :raises exceptions.MetaLabelNamingError: for an unknown meta type
    """
    if meta_type_label not in META_TYPES:
        raise exceptions.MetaLabelNamingError(meta_type_label)
    q = """
    CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
    RETURN n
    """ % (meta_type_label, type_label)
    with manager.session as session:
        node = session.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
    return node.properties if legacy else node
def get_node(manager, handle_id, legacy=True):
    """
    Fetch a single node by handle_id.

    :param manager: Manager to handle sessions and transactions
    :param handle_id: Unique id
    :param legacy: Return the property dict instead of the node object
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    :raises exceptions.NodeNotFound: when no node matches
    """
    q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
    with manager.session as session:
        record = session.run(q, {'handle_id': handle_id}).single()
        if record:
            return record['n'].properties if legacy else record['n']
    raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
    """
    Package a node as a dict of its properties, meta type and remaining labels.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id, used to fetch the node when *node* is not supplied
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict with keys 'data', 'labels' and (when a meta label is present) 'meta_type'
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    d = {
        'data': node.properties
    }
    # Partition the labels instead of calling list.remove() while iterating,
    # which could skip an element; also tolerate a missing 'Node' label
    # instead of raising ValueError.
    labels = []
    for label in node.labels:
        if label == 'Node':  # All nodes have this label for indexing
            continue
        if label in META_TYPES:
            d['meta_type'] = label
        else:
            labels.append(label)
    d['labels'] = labels
    return d
def delete_node(manager, handle_id):
    """
    Remove the node identified by *handle_id* together with every
    relationship attached to it.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :rtype: bool
    """
    query = """
    MATCH (n:Node {handle_id: {handle_id}})
    OPTIONAL MATCH (n)-[r]-()
    DELETE n,r
    """
    with manager.session as session:
        session.run(query, {'handle_id': handle_id})
    return True
def get_relationship(manager, relationship_id, legacy=True):
    """
    Fetch a single relationship by its internal Neo4j id.

    :param manager: Manager to handle sessions and transactions
    :param relationship_id: Unique id
    :param legacy: Return a plain property dict instead of the driver object
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type relationship_id: int
    :type legacy: Boolean
    :rtype: int|neo4j.v1.types.Relationship
    :raises exceptions.RelationshipNotFound: when no relationship matches
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    RETURN r
    """
    with manager.session as session:
        record = session.run(q, {'relationship_id': int(relationship_id)}).single()
        if record:
            return record['r'].properties if legacy else record['r']
    raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
    """
    Fetch a relationship together with its endpoint nodes and repackage
    everything as a plain dictionary.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j id
    :param legacy: Return endpoint handle_ids instead of node objects
    :type relationship_id: int
    :type legacy: bool
    :rtype: dictionary
    :raises exceptions.RelationshipNotFound: when no relationship matches
    """
    q = """
    MATCH (start)-[r]->(end)
    WHERE ID(r) = {relationship_id}
    RETURN start, r, end
    """
    with manager.session as session:
        record = session.run(q, {'relationship_id': int(relationship_id)}).single()
    if record is None:
        raise exceptions.RelationshipNotFound(manager, int(relationship_id))
    bundle = {
        'type': record['r'].type,
        'id': int(relationship_id),
        'data': record['r'].properties,
        'start': record['start'],
        'end': record['end'],
    }
    if legacy:
        # Older callers expect the handle_id of each endpoint, not the node object.
        bundle['start'] = bundle['start'].properties['handle_id']
        bundle['end'] = bundle['end'].properties['handle_id']
    return bundle
def delete_relationship(manager, relationship_id):
    """
    Remove a single relationship by its internal Neo4j id.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool
    """
    query = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    DELETE r
    """
    with manager.session as session:
        session.run(query, {'relationship_id': int(relationship_id)})
    return True
def get_node_meta_type(manager, handle_id):
    """
    Return the meta type label ('Physical', 'Logical', 'Relation' or
    'Location') of the supplied node.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :return: string
    :raises exceptions.NoMetaLabelFound: when the node has no meta label
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    meta_labels = [label for label in node.labels if label in META_TYPES]
    if meta_labels:
        return meta_labels[0]
    raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Yield every node (optionally restricted to *node_type*) whose property
    *prop* equals *value*, or -- when *prop* is omitted -- whose any property
    matches *value* as a case insensitive regular expression.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    if prop:
        q = """
        MATCH (n:{label})
        USING SCAN n:{label}
        WHERE n.{prop} = {{value}}
        RETURN distinct n
        """.format(label=node_type, prop=prop)
        with manager.session as session:
            for record in session.run(q, {'value': value}):
                yield record['n']
    else:
        q = """
        MATCH (n:{label})
        RETURN n
        """.format(label=node_type)
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as session:
            for record in session.run(q):
                # Yield the node as soon as any of its properties matches.
                if any(pattern.search(text_type(v)) for v in record['n'].properties.values()):
                    yield record['n']
def get_node_by_type(manager, node_type, legacy=True):
    """
    Yield every distinct node carrying the label *node_type*.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match
    :param legacy: Yield property dicts instead of node objects
    """
    q = """
    MATCH (n:{label})
    RETURN distinct n
    """.format(label=node_type)
    with manager.session as session:
        for record in session.run(q):
            yield record['n'].properties if legacy else record['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Yield nodes whose property *prop* (or, when *prop* is omitted, any
    property) fuzzy-matches *value*, case insensitively.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    if prop:
        q = """
        MATCH (n:{label})
        WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
        RETURN distinct n
        """.format(label=node_type, prop=prop, value=value)
    else:
        q = """
        MATCH (n:{label})
        WITH n, keys(n) as props
        WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
        any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
        RETURN distinct n
        """.format(label=node_type, value=value)
    with manager.session as session:
        for record in session.run(q):
            yield record['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """
    Yield every node carrying the label *node_type*.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match
    :param legacy: Yield property dicts instead of node objects
    """
    q = """
    MATCH (n:{label})
    RETURN n
    """.format(label=node_type)
    with manager.session as session:
        for record in session.run(q):
            yield record['n'].properties if legacy else record['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """
    Yield every node whose name property equals *name*.

    :param manager: Neo4jDBSessionManager
    :param name: Node name to match
    :param legacy: Yield property dicts instead of node objects
    """
    q = """
    MATCH (n:Node {name: {name}})
    RETURN n
    """
    with manager.session as session:
        for record in session.run(q, {'name': name}):
            yield record['n'].properties if legacy else record['n']
def legacy_node_index_search(manager, lucene_query):
    """
    Unsupported: legacy (lucene) node indexes were removed in Neo4j 3.0.

    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :return: dict
    :raises NotImplementedError: always
    """
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    Yield nodes whose (indexed) property *prop* matches *value* using the
    given case insensitive string comparison.

    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: Yield property dicts instead of node objects
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool
    :rtype: dict|Node
    """
    q = """
    MATCH (n:{label})
    WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
    RETURN n
    """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as session:
        for record in session.run(q, {'value': value}):
            yield record['n'].properties if legacy else record['n']
def get_unique_node_by_name(manager, node_name, node_type):
    """
    Return the node model when exactly one node matches name and type,
    otherwise None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :return: norduniclient node model or None
    :raises exceptions.MultipleNodesReturned: when the name is ambiguous
    """
    q = """
    MATCH (n:Node { name: {name} })
    WHERE {label} IN labels(n)
    RETURN n.handle_id as handle_id
    """
    with manager.session as session:
        hits = list(session.run(q, {'name': node_name, 'label': node_type}))
    if not hits:
        return None
    if len(hits) > 1:
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return get_node_model(manager, hits[0]['handle_id'])
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """
    Create the relationship when it is valid for a Location node
    (Location -Has-> Location), otherwise raise NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Has' and other_meta_type == 'Location':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """
    Create the relationship when it is valid for a Logical node,
    otherwise raise NoRelationshipPossible.
    """
    # Valid peer meta types per relationship type.
    allowed = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if other_meta_type in allowed.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """
    Create the relationship when it is valid for a Relation node,
    otherwise raise NoRelationshipPossible.
    """
    # Valid relationship types per peer meta type.
    allowed = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type in allowed.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Create the relationship when it is valid for a Physical node,
    otherwise raise NoRelationshipPossible.
    """
    # Valid relationship types per peer meta type.
    allowed = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type in allowed.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Create a relationship from node to other node, dispatching on the meta
    type of the start node. Returns the relationship or raises
    NoRelationshipPossible.
    """
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    meta_type = get_node_meta_type(manager, handle_id)
    creator = dispatch.get(meta_type)
    if creator:
        return creator(manager, handle_id, other_handle_id, rel_type)
    # Unreachable with a well-formed node, kept for parity with the peers.
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_indexed_node | python | def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n'] | :param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L508-L537 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_unique_node_by_name | python | def get_unique_node_by_name(manager, node_name, node_type):
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None | Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L540-L562 | [
"def get_node_model(manager, handle_id=None, node=None):\n \"\"\"\n :param manager: Context manager to handle transactions\n :type manager: Neo4jDBSessionManager\n :param handle_id: Nodes handle id\n :type handle_id: str|unicode\n :param node: Node object\n :type node: neo4j.v1.types.Node\n ... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
    """
    Assemble a dict bundle for a node: {'data': properties,
    'labels': type labels, 'meta_type': meta type label}.
    Fetches the node by handle_id if no node object is supplied.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    d = {
        'data': node.properties
    }
    # Build the label list without mutating it while iterating: the original
    # `labels.remove(label)` inside `for label in labels` advances past the
    # element that slides into the removed slot, so a label could be skipped.
    # 'Node' is a bookkeeping label present on all nodes for indexing.
    labels = []
    for label in node.labels:
        if label == 'Node':
            continue
        if label in META_TYPES:
            d['meta_type'] = label
        else:
            labels.append(label)
    d['labels'] = labels
    return d
def delete_node(manager, handle_id):
    """
    Delete the node identified by handle_id together with every
    relationship attached to it.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :rtype: bool
    """
    q = """
    MATCH (n:Node {handle_id: {handle_id}})
    OPTIONAL MATCH (n)-[r]-()
    DELETE n,r
    """
    with manager.session as session:
        session.run(q, {'handle_id': handle_id})
    return True
def get_relationship(manager, relationship_id, legacy=True):
    """
    Fetch a relationship by its internal Neo4j id.

    :param manager: Manager to handle sessions and transactions
    :param relationship_id: Internal Neo4j relationship id
    :param legacy: When True return the property dict, otherwise the object
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type relationship_id: int
    :type legacy: Boolean
    :rtype: int|neo4j.v1.types.Relationship
    :raises exceptions.RelationshipNotFound: if the id does not exist
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    RETURN r
    """
    rel_id = int(relationship_id)
    with manager.session as session:
        record = session.run(q, {'relationship_id': rel_id}).single()
    if not record:
        raise exceptions.RelationshipNotFound(manager, rel_id)
    relationship = record['r']
    return relationship.properties if legacy else relationship
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
    """
    Assemble a dict describing a relationship and its endpoint nodes.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j id
    :param legacy: When True start/end are handle ids, otherwise node objects
    :type relationship_id: int
    :type legacy: bool
    :rtype: dictionary
    :raises exceptions.RelationshipNotFound: if the id does not exist
    """
    q = """
    MATCH (start)-[r]->(end)
    WHERE ID(r) = {relationship_id}
    RETURN start, r, end
    """
    rel_id = int(relationship_id)
    with manager.session as session:
        record = session.run(q, {'relationship_id': rel_id}).single()
    if record is None:
        raise exceptions.RelationshipNotFound(manager, rel_id)
    # Common part, then endpoints in the representation the caller asked for.
    bundle = {
        'type': record['r'].type,
        'id': rel_id,
        'data': record['r'].properties,
    }
    if legacy:
        bundle['start'] = record['start'].properties['handle_id']
        bundle['end'] = record['end'].properties['handle_id']
    else:
        bundle['start'] = record['start']
        bundle['end'] = record['end']
    return bundle
def delete_relationship(manager, relationship_id):
    """
    Delete a relationship by its internal Neo4j id.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    DELETE r
    """
    with manager.session as session:
        session.run(q, {'relationship_id': int(relationship_id)})
    return True
def get_node_meta_type(manager, handle_id):
    """
    Returns the meta type of the supplied node as a string.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :return: string (one of META_TYPES)
    :raises exceptions.NoMetaLabelFound: if the node has no meta type label
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    # The meta type is stored as an extra node label; the first label found
    # in META_TYPES wins (a node is expected to carry exactly one).
    for label in node.labels:
        if label in META_TYPES:
            return label
    raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to

    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str

    :return: dicts
    """
    if prop:
        # Exact match on a single property; value is passed as a query
        # parameter, but node_type and prop are interpolated into the query
        # text and must come from trusted code.
        q = """
        MATCH (n:{label})
        USING SCAN n:{label}
        WHERE n.{prop} = {{value}}
        RETURN distinct n
        """.format(label=node_type, prop=prop)
        with manager.session as s:
            for result in s.run(q, {'value': value}):
                yield result['n']
    else:
        # No property given: fetch every node of the label and regex-match the
        # stringified value of every property client side (expensive fallback).
        q = """
        MATCH (n:{label})
        RETURN n
        """.format(label=node_type)
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as s:
            for result in s.run(q):
                for v in result['n'].properties.values():
                    if pattern.search(text_type(v)):
                        yield result['n']
                        break
def get_node_by_type(manager, node_type, legacy=True):
    """
    Yield every distinct node carrying the given label.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match
    :param legacy: When True yield property dicts, otherwise Node objects
    """
    q = """
    MATCH (n:{label})
    RETURN distinct n
    """.format(label=node_type)
    with manager.session as session:
        for record in session.run(q):
            node = record['n']
            yield node.properties if legacy else node
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the search to

    :type value: str
    :type prop: str
    :type node_type: str

    :return: dicts
    """
    # NOTE(review): value (as well as prop and node_type) is interpolated
    # straight into the query text, so regex metacharacters in value alter
    # the match and untrusted input is a Cypher injection vector -- confirm
    # callers sanitize these arguments.
    if prop:
        # Case insensitive substring match on one property, including
        # elements of list properties.
        q = """
        MATCH (n:{label})
        WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
        RETURN distinct n
        """.format(label=node_type, prop=prop, value=value)
    else:
        # Same match applied across every property of the node.
        q = """
        MATCH (n:{label})
        WITH n, keys(n) as props
        WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
        any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
        RETURN distinct n
        """.format(label=node_type, value=value)
    with manager.session as s:
        for result in s.run(q):
            yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """
    Yield every node carrying the given label.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match
    :param legacy: When True yield property dicts, otherwise Node objects
    """
    q = """
    MATCH (n:{label})
    RETURN n
    """.format(label=node_type)
    with manager.session as session:
        for record in session.run(q):
            node = record['n']
            yield node.properties if legacy else node
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """
    Yield every node whose name property equals the supplied name.

    :param manager: Neo4jDBSessionManager
    :param name: Node name to match
    :param legacy: When True yield property dicts, otherwise Node objects
    """
    q = """
    MATCH (n:Node {name: {name}})
    RETURN n
    """
    with manager.session as session:
        for record in session.run(q, {'name': name}):
            node = record['n']
            yield node.properties if legacy else node
def legacy_node_index_search(manager, lucene_query):
    """
    Former Lucene legacy-index search, kept only so old callers fail loudly.

    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :raises NotImplementedError: always
    """
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
    """
    Create a single-property index on the given label.

    :param manager: Neo4jDBSessionManager
    :param prop: Property to index
    :param node_type: Label to create index on
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type node_type: str
    """
    statement = 'CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop)
    with manager.session as session:
        session.run(statement)
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: Backwards compatibility

    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool

    :return: Dict or Node object
    :rtype: dict|Node
    """
    # value itself is passed as a query parameter, but prop, node_type and
    # lookup_func are interpolated into the query text -- they must come
    # from trusted code, not user input.
    q = """
    MATCH (n:{label})
    WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
    RETURN n
    """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for result in s.run(q, {'value': value}):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    Create a rel_type relationship from handle_id to other_handle_id.

    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id
    :param other_handle_id: Other node handle id
    :param rel_type: Relationship type
    :param legacy: Backwards compatibility

    :type manager: Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type other_handle_id: str|unicode
    :type rel_type: str|unicode
    :type legacy: Boolean

    :rtype: int|neo4j.v1.types.Relationship
    """
    # Relationship types cannot be parameterized in Cypher, hence the %s
    # interpolation; rel_type is expected to come from the fixed sets used
    # by the create_*_relationship helpers above this one.
    q = """
    MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
    CREATE (a)-[r:%s]->(b)
    RETURN r
    """ % rel_type
    with manager.session as s:
        if legacy:
            # Legacy callers get the internal relationship id only.
            return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
        return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """
    Makes relationship between the two nodes and returns the relationship.
    If a relationship is not possible NoRelationshipPossible exception is
    raised.

    Valid combination: Location -Has-> Location.

    :raises exceptions.NoRelationshipPossible: for any other combination
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if other_meta_type == 'Location' and rel_type == 'Has':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """
    Makes relationship between the two nodes and returns the relationship.
    If a relationship is not possible NoRelationshipPossible exception is
    raised.

    Valid combinations:
      Logical -Depends_on-> Logical|Physical
      Logical -Part_of->    Physical

    :raises exceptions.NoRelationshipPossible: for any other combination
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Depends_on':
        if other_meta_type == 'Logical' or other_meta_type == 'Physical':
            return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    elif rel_type == 'Part_of':
        if other_meta_type == 'Physical':
            return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """
    Makes relationship between the two nodes and returns the relationship.
    If a relationship is not possible NoRelationshipPossible exception is
    raised.

    Valid combinations:
      Relation -Uses|Provides->   Logical
      Relation -Responsible_for-> Location
      Relation -Owns|Provides->   Physical

    :raises exceptions.NoRelationshipPossible: for any other combination
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if other_meta_type == 'Logical':
        if rel_type in ['Uses', 'Provides']:
            return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    elif other_meta_type == 'Physical':
        if rel_type in ['Owns', 'Provides']:
            return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Makes relationship between the two nodes and returns the relationship.
    If a relationship is not possible NoRelationshipPossible exception is
    raised.

    Valid combinations:
      Physical -Has|Connected_to-> Physical
      Physical -Located_in->       Location

    :raises exceptions.NoRelationshipPossible: for any other combination
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if other_meta_type == 'Physical':
        if rel_type == 'Has' or rel_type == 'Connected_to':
            return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    elif other_meta_type == 'Location' and rel_type == 'Located_in':
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Makes a relationship from node to other_node depending on which
    meta_type the nodes are. Returns the relationship or raises
    NoRelationshipPossible exception.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Handle id of the start node
    :param other_handle_id: Handle id of the end node
    :param rel_type: Relationship type
    :raises exceptions.NoRelationshipPossible: if the meta type combination
        does not allow rel_type
    """
    meta_type = get_node_meta_type(manager, handle_id)
    # Dispatch on the start node's meta type; each helper validates the
    # other end and the relationship type.
    if meta_type == 'Location':
        return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
    elif meta_type == 'Logical':
        return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
    elif meta_type == 'Relation':
        return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
    elif meta_type == 'Physical':
        return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Fetch all relationships (optionally of one type) between two nodes.

    :param manager: Neo4jDBSessionManager
    :param handle_id1: Handle id of the first node
    :param handle_id2: Handle id of the second node
    :param rel_type: Optional relationship type to filter on
    :param legacy: When True return internal relationship ids, otherwise objects
    :return: list of relationship ids or relationship objects (may be empty)
    """
    if rel_type:
        q = """
        MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
        RETURN collect(r) as relationships
        """.format(rel_type=rel_type)
    else:
        q = """
        MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
        RETURN collect(r) as relationships
        """
    params = {'handle_id1': handle_id1, 'handle_id2': handle_id2}
    with manager.session as session:
        relationships = session.run(q, params).single()['relationships']
    if legacy:
        return [relationship.id for relationship in relationships]
    return relationships
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """
    Replace all properties of a node with new_properties (handle_id is
    always preserved and cannot be overridden).

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id of the node to update
    :param new_properties: Complete replacement property dict
    :param legacy: When True return the property dict, otherwise the Node
    :rtype: dict|neo4j.v1.types.Node
    """
    # Work on a copy: the original wrote handle_id into the caller's dict,
    # mutating the argument as a side effect.
    props = dict(new_properties)
    props['handle_id'] = handle_id  # Make sure the handle_id can't be changed
    q = """
    MATCH (n:Node {handle_id: {props}.handle_id})
    SET n = {props}
    RETURN n
    """
    with manager.session as s:
        if legacy:
            return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n'].properties
        return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
    """
    Replace all properties of a relationship with new_properties.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :param new_properties: Complete replacement property dict
    :return: the single result record (containing 'r') or None
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    SET r = {props}
    RETURN r
    """
    params = {'relationship_id': int(relationship_id), 'props': new_properties}
    with manager.session as session:
        return session.run(q, params).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    # Model class resolution order, most specific first (underscores are
    # stripped from label-derived names):
    #   1. '<MetaType><Label>Model'
    #   2. '<Label>Model'
    #   3. '<MetaType>Model'
    #   4. BaseNodeModel fallback
    for label in bundle.get('labels'):
        try:
            classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            # No such model class; fall through to the next candidate.
            pass
    for label in bundle.get('labels'):
        try:
            classname = '{base}Model'.format(base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    try:
        classname = '{base}Model'.format(base=bundle.get('meta_type'))
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    # All relationships share one model class; the bundle carries type,
    # endpoint handle ids and the property dict.
    bundle = get_relationship_bundle(manager, relationship_id)
    return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | _create_relationship | python | def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'] | :param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L565-L591 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
    """Process wide lazy singleton holding one Neo4jDBSessionManager."""

    # Singleton instance and lazily created manager.
    _instance = None
    _manager = None

    @classmethod
    def get_instance(cls):
        # Classic lazy singleton. NOTE(review): not thread safe -- two
        # threads racing here may each create an instance; confirm this is
        # only called from a single thread (e.g. during startup).
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        # Eagerly trigger manager creation via the property below.
        self._manager = self.manager

    @property
    def manager(self):
        # Lazily create the manager via init_db(); failures are logged and
        # swallowed so callers get None and can retry later.
        if self._manager is None:
            try:
                self._manager = init_db()
            except Exception as e:
                logger.error('Could not create manager: {}'.format(e))
                self._manager = None
        return self._manager

    @manager.setter
    def manager(self, manager):
        self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
            max_pool_size=MAX_POOL_SIZE):
    """
    Create a Neo4jDBSessionManager and bootstrap the database: a uniqueness
    constraint on Node.handle_id plus an index on the name property.
    Returns None when no uri is supplied.

    :param uri: Bolt uri
    :param username: Neo4j username
    :param password: Neo4j password
    :param encrypted: Use TLS
    :param max_pool_size: Maximum number of idle sessions
    :rtype: norduniclient.contextmanager.Neo4jDBSessionManager|None
    """
    if uri:
        try:
            # Local import -- presumably to avoid a circular import at module
            # load time; TODO confirm.
            from norduniclient.contextmanager import Neo4jDBSessionManager
            manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
                                            max_pool_size=max_pool_size)
            try:
                with manager.session as s:
                    s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
            except Exception as e:
                logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
                raise e
            try:
                create_index(manager, 'name')
            except Exception as e:
                logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
                raise e
            return manager
        except ProtocolError as e:
            logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
            raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
    """
    :param uri: Bolt uri
    :type uri: str
    :param username: Neo4j username
    :type username: str
    :param password: Neo4j password
    :type password: str
    :param encrypted: Use TLS
    :type encrypted: Boolean
    :param max_pool_size: Maximum number of idle sessions
    :type max_pool_size: Integer
    :param trust: Trust cert on first use (0) or do not accept unknown cert (1)
    :type trust: Integer
    :return: Neo4j driver
    :rtype: neo4j.v1.session.Driver
    """
    # Thin wrapper around the official driver factory. Note that the
    # defaults here (encrypted=True) differ from the module-level ENCRYPTED
    # default used by init_db.
    return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
                                max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
    """
    Run a Cypher query and merge every record into a single dict
    (keys from later records overwrite earlier ones).

    :param manager: Neo4jDBSessionManager
    :param query: Cypher query string
    :param kwargs: Query parameters
    :return: dict
    """
    merged = {}
    with manager.session as s:
        for record in s.run(query, kwargs):
            merged.update(record.items())
    return merged
def query_to_list(manager, query, **kwargs):
    """Run a Cypher query and return every record as a plain dict.

    :param manager: Neo4jDBSessionManager
    :param query: Cypher query string
    :param kwargs: Query parameters
    :return: list of dicts, one per result record
    """
    with manager.session as s:
        result = s.run(query, kwargs)
        # dict(record.items()) copies each record's key/value pairs in one step.
        return [dict(record.items()) for record in result]
def query_to_iterator(manager, query, **kwargs):
    """Run a Cypher query and lazily yield each record as a plain dict.

    :param manager: Neo4jDBSessionManager
    :param query: Cypher query string
    :param kwargs: Query parameters
    :return: generator of dicts
    """
    with manager.session as s:
        for record in s.run(query, kwargs):
            yield dict(record.items())
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
    """
    Assemble a dict bundle for a node: {'data': properties,
    'labels': type labels, 'meta_type': meta type label}.
    Fetches the node by handle_id if no node object is supplied.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    d = {
        'data': node.properties
    }
    # Build the label list without mutating it while iterating: the original
    # `labels.remove(label)` inside `for label in labels` advances past the
    # element that slides into the removed slot, so a label could be skipped.
    # 'Node' is a bookkeeping label present on all nodes for indexing.
    labels = []
    for label in node.labels:
        if label == 'Node':
            continue
        if label in META_TYPES:
            d['meta_type'] = label
        else:
            labels.append(label)
    d['labels'] = labels
    return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
    """
    Returns the node if the node is unique for name and type or None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :return: norduniclient node model or None
    :raises exceptions.MultipleNodesReturned: if more than one node matches
    """
    q = """
    MATCH (n:Node { name: {name} })
    WHERE {label} IN labels(n)
    RETURN n.handle_id as handle_id
    """
    with manager.session as session:
        matches = list(session.run(q, {'name': node_name, 'label': node_type}))
    if not matches:
        return None
    if len(matches) > 1:
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return get_node_model(manager, matches[0]['handle_id'])
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Makes relationship between the two nodes and returns the relationship.
    If a relationship is not possible NoRelationshipPossible exception is
    raised.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    # Relationship types a Physical node may use, per meta type of the target.
    allowed_types = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Makes a relationship from node to other_node depending on which
    meta_type the nodes are. Returns the relationship or raises
    NoRelationshipPossible exception.
    """
    meta_type = get_node_meta_type(manager, handle_id)
    # Dispatch on the start node's meta type; each helper enforces its own rules.
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    create_func = dispatch.get(meta_type)
    if create_func is not None:
        return create_func(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Takes a start and an end node with an optional relationship
    type.
    Returns the relationships between the nodes or an empty list.
    """
    if rel_type:
        q = """
            MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
            RETURN collect(r) as relationships
            """.format(rel_type=rel_type)
    else:
        q = """
            MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
            RETURN collect(r) as relationships
            """
    params = {'handle_id1': handle_id1, 'handle_id2': handle_id2}
    with manager.session as s:
        relationships = s.run(q, params).single()['relationships']
        if legacy:
            # Backwards compatibility: return internal Neo4j ids only.
            return [relationship.id for relationship in relationships]
        return relationships
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """
    Replaces ALL properties of the node with the supplied dictionary.

    :param manager: Manager to handle sessions and transactions
    :param handle_id: Unique id of the node
    :param new_properties: Complete set of properties the node should end up with
    :param legacy: Backwards compatibility; return a plain properties dict
    :type handle_id: str|unicode
    :type new_properties: dict
    :type legacy: bool
    :rtype: dict|neo4j.v1.types.Node
    """
    # Work on a copy so the caller's dict is not mutated as a side effect,
    # and make sure the handle_id can't be changed.
    props = dict(new_properties)
    props['handle_id'] = handle_id
    q = """
    MATCH (n:Node {handle_id: {props}.handle_id})
    SET n = {props}
    RETURN n
    """
    with manager.session as s:
        if legacy:
            return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n'].properties
        return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
    """
    Replaces all properties of the relationship with the supplied dictionary.

    :param manager: Manager to handle sessions and transactions
    :param relationship_id: Internal Neo4j relationship id
    :param new_properties: Properties the relationship should end up with
    :return: The single result record, or None if no relationship matched
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    SET r = {props}
    RETURN r
    """
    params = {'relationship_id': int(relationship_id), 'props': new_properties}
    with manager.session as s:
        return s.run(q, params).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    meta_type = bundle.get('meta_type')
    labels = bundle.get('labels')
    # Candidate model class names, most specific first:
    # 1. <MetaType><Label>Model for each label, 2. <Label>Model for each label,
    # 3. <MetaType>Model. Underscores in label names are stripped.
    candidates = ['{meta_type}{base}Model'.format(meta_type=meta_type, base=label).replace('_', '')
                  for label in labels]
    candidates += ['{base}Model'.format(base=label).replace('_', '') for label in labels]
    candidates.append('{base}Model'.format(base=meta_type))
    for classname in candidates:
        # Resolve the class with a default instead of catching AttributeError so
        # AttributeErrors raised inside a model's __init__/load are not silently
        # swallowed (which used to fall back to a less specific model).
        model_class = getattr(models, classname, None)
        if model_class is not None:
            return model_class(manager).load(bundle)
    return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    relationship_bundle = get_relationship_bundle(manager, relationship_id)
    model = models.BaseRelationshipModel(manager)
    return model.load(relationship_bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | create_location_relationship | python | def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type) | Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised. | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L594-L603 | [
"def get_node_meta_type(manager, handle_id):\n \"\"\"\n Returns the meta type of the supplied node as a string.\n\n :param manager: Neo4jDBSessionManager\n :param handle_id: Unique id\n :return: string\n \"\"\"\n node = get_node(manager=manager, handle_id=handle_id, legacy=False)\n for label... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | create_logical_relationship | python | def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type) | Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised. | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L606-L619 | [
"def get_node_meta_type(manager, handle_id):\n \"\"\"\n Returns the meta type of the supplied node as a string.\n\n :param manager: Neo4jDBSessionManager\n :param handle_id: Unique id\n :return: string\n \"\"\"\n node = get_node(manager=manager, handle_id=handle_id, legacy=False)\n for label... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
    """Run *query* and lazily yield one plain dict per result record."""
    with manager.session as s:
        for record in s.run(query, kwargs):
            yield dict(record.items())
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
    """
    Creates a node with the mandatory attributes name and handle_id also sets type label.

    :param manager: Manager to handle sessions and transactions
    :param name: Node name
    :param meta_type_label: Node meta type
    :param type_label: Node label
    :param handle_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type name: str|unicode
    :type meta_type_label: str|unicode
    :type type_label: str|unicode
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    """
    if meta_type_label not in META_TYPES:
        raise exceptions.MetaLabelNamingError(meta_type_label)
    # Labels cannot be Cypher parameters, so they are interpolated into the
    # query text. meta_type_label is validated above; type_label must be trusted.
    q = """
        CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
        RETURN n
        """ % (meta_type_label, type_label)
    with manager.session as s:
        if legacy:
            # Legacy callers get the plain property dict instead of the Node object.
            return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
        return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
    """
    :param manager: Manager to handle sessions and transactions
    :param handle_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node

    Raises NodeNotFound if no node carries *handle_id*.
    """
    q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
    with manager.session as s:
        result = s.run(q, {'handle_id': handle_id}).single()
    if result:
        if legacy:
            return result['n'].properties
        return result['n']
    raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
    """Return a dict bundle {'data', 'meta_type', 'labels'} for a node.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id (used to fetch the node when *node* is not given)
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict

    Bug fix: the original removed items from ``labels`` while iterating the
    same list, which can skip elements (the classic mutate-during-iteration
    pitfall). The labels are now partitioned without mutation.
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    d = {
        'data': node.properties
    }
    labels = list(node.labels)
    labels.remove('Node')  # All nodes have this label for indexing
    meta_labels = [label for label in labels if label in META_TYPES]
    if meta_labels:
        # A node is expected to carry exactly one meta type label.
        d['meta_type'] = meta_labels[-1]
    d['labels'] = [label for label in labels if label not in META_TYPES]
    return d
def delete_node(manager, handle_id):
    """
    Deletes the node and all its relationships.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :rtype: bool
    """
    # OPTIONAL MATCH lets the delete succeed for nodes without relationships.
    q = """
        MATCH (n:Node {handle_id: {handle_id}})
        OPTIONAL MATCH (n)-[r]-()
        DELETE n,r
        """
    with manager.session as s:
        s.run(q, {'handle_id': handle_id})
    return True
def get_relationship(manager, relationship_id, legacy=True):
    """
    :param manager: Manager to handle sessions and transactions
    :param relationship_id: Unique id
    :param legacy: Backwards compatibility
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type relationship_id: int
    :type legacy: Boolean
    :rtype int|neo4j.v1.types.Relationship

    Raises RelationshipNotFound if no relationship has that internal id.
    """
    q = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        RETURN r
        """
    with manager.session as s:
        # Coerce to int so string ids from web layers also work.
        record = s.run(q, {'relationship_id': int(relationship_id)}).single()
    if record:
        if legacy:
            return record['r'].properties
        return record['r']
    raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
    """Return a dict bundle describing a relationship and its endpoints.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j id
    :param legacy: Backwards compatibility; True replaces the start/end node
                   objects with their handle_id values
    :type relationship_id: int
    :type legacy: bool
    :rtype: dictionary

    Improvement: the two nearly identical bundle literals were merged — only
    the start/end representation differs between legacy and non-legacy mode.
    """
    q = """
        MATCH (start)-[r]->(end)
        WHERE ID(r) = {relationship_id}
        RETURN start, r, end
        """
    with manager.session as s:
        record = s.run(q, {'relationship_id': int(relationship_id)}).single()
    if record is None:
        raise exceptions.RelationshipNotFound(manager, int(relationship_id))
    bundle = {
        'type': record['r'].type,
        'id': int(relationship_id),
        'data': record['r'].properties,
        'start': record['start'],
        'end': record['end'],
    }
    if legacy:
        # Legacy callers expect handle ids rather than node objects.
        bundle['start'] = bundle['start'].properties['handle_id']
        bundle['end'] = bundle['end'].properties['handle_id']
    return bundle
def delete_relationship(manager, relationship_id):
    """
    Deletes the relationship.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool
    """
    cypher = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        DELETE r
        """
    params = {'relationship_id': int(relationship_id)}
    with manager.session as s:
        s.run(cypher, params)
    return True
def get_node_meta_type(manager, handle_id):
    """
    Returns the meta type of the supplied node as a string.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :return: string
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    meta_type = next((label for label in node.labels if label in META_TYPES), None)
    if meta_type is None:
        raise exceptions.NoMetaLabelFound(handle_id)
    return meta_type
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type:
    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    if prop:
        # Exact, parameterized match on a single property. USING SCAN forces a
        # label scan (the property may not be indexed).
        q = """
            MATCH (n:{label})
            USING SCAN n:{label}
            WHERE n.{prop} = {{value}}
            RETURN distinct n
            """.format(label=node_type, prop=prop)
        with manager.session as s:
            for result in s.run(q, {'value': value}):
                yield result['n']
    else:
        # No property given: fetch every node of the label and regex-match
        # *value* against all property values client side.
        q = """
            MATCH (n:{label})
            RETURN n
            """.format(label=node_type)
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as s:
            for result in s.run(q):
                for v in result['n'].properties.values():
                    if pattern.search(text_type(v)):
                        yield result['n']
                        break  # yield each node at most once
def get_node_by_type(manager, node_type, legacy=True):
    """Yield every distinct node carrying label *node_type*.

    With ``legacy`` True the node's property dict is yielded instead of
    the node object.
    """
    q = """
        MATCH (n:{label})
        RETURN distinct n
        """.format(label=node_type)
    with manager.session as s:
        for record in s.run(q):
            node = record['n']
            yield node.properties if legacy else node
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type:
    :type value: str
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    # NOTE(review): *value* and *prop* are interpolated directly into the
    # Cypher text, so quotes/regex metacharacters in them leak into the query.
    # Only call this with trusted input.
    if prop:
        q = """
            MATCH (n:{label})
            WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
            RETURN distinct n
            """.format(label=node_type, prop=prop, value=value)
    else:
        # Match against every property, including list-valued properties.
        q = """
            MATCH (n:{label})
            WITH n, keys(n) as props
            WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
            any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
            RETURN distinct n
            """.format(label=node_type, value=value)
    with manager.session as s:
        for result in s.run(q):
            yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """Yield every node with label *node_type*.

    ``legacy`` True yields property dicts, otherwise node objects.
    """
    q = """
        MATCH (n:{label})
        RETURN n
        """.format(label=node_type)
    with manager.session as s:
        for record in s.run(q):
            node = record['n']
            yield node.properties if legacy else node
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """Yield every node whose ``name`` property equals *name* exactly.

    With ``legacy`` True the node's property dict is yielded instead of the
    node object.
    """
    q = """
        MATCH (n:Node {name: {name}})
        RETURN n
        """
    with manager.session as s:
        for result in s.run(q, {'name': name}):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
def legacy_node_index_search(manager, lucene_query):
    """
    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :return: dict
    """
    # Kept as a stub so old callers fail loudly instead of silently misbehaving.
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
    """
    :param manager: Neo4jDBSessionManager
    :param prop: Property to index
    :param node_type: Label to create index on
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type node_type: str
    """
    # Index/label names cannot be Cypher parameters, hence the interpolation.
    with manager.session as s:
        s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: Backwards compatibility
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool
    :return: Dict or Node object
    :rtype: dict|Node
    """
    # prop/label/lookup_func are interpolated (they cannot be parameters);
    # only *value* is a real query parameter. Comparison is case-insensitive.
    q = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for result in s.run(q, {'value': value}):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
    """
    Returns the node if the node is unique for name and type or None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :return: norduniclient node model or None

    Raises MultipleNodesReturned when more than one node matches.
    """
    # node_type is passed as a parameter and checked against labels(n),
    # avoiding label interpolation into the query text.
    q = """
        MATCH (n:Node { name: {name} })
        WHERE {label} IN labels(n)
        RETURN n.handle_id as handle_id
        """
    with manager.session as s:
        result = list(s.run(q, {'name': node_name, 'label': node_type}))
    if result:
        if len(result) == 1:
            return get_node_model(manager, result[0]['handle_id'])
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id
    :param other_handle_id: Other node handle id
    :param rel_type: Relationship type
    :param legacy: Backwards compatibility
    :type manager: Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type other_handle_id: str|unicode
    :type rel_type: str|unicode
    :type legacy: Boolean
    :rtype: int|neo4j.v1.types.Relationship
    """
    # Relationship types cannot be Cypher parameters, so rel_type is
    # interpolated. Callers validate rel_type before reaching this helper.
    q = """
        MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
        CREATE (a)-[r:%s]->(b)
        RETURN r
        """ % rel_type
    with manager.session as s:
        if legacy:
            # Legacy callers get just the internal relationship id.
            return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
        return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """Create a relationship starting from a Location node.

    Only ``Has`` towards another Location node is allowed; every other
    combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Has' and other_meta_type == 'Location':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """Create a relationship starting from a Relation node.

    Allowed combinations: Uses/Provides -> Logical, Responsible_for ->
    Location, Owns/Provides -> Physical. Every other combination raises
    NoRelationshipPossible.
    """
    # Allowed relationship types per meta type of the end node.
    allowed = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type in allowed.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """Create a relationship starting from a Physical node.

    Allowed combinations: Has/Connected_to -> Physical, Located_in ->
    Location. Every other combination raises NoRelationshipPossible.
    """
    # Allowed relationship types per meta type of the end node.
    allowed = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type in allowed.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Makes a relationship from node to other_node depending on which
    meta_type the nodes are. Returns the relationship or raises
    NoRelationshipPossible exception.
    """
    # Dispatch on the start node's meta type.
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    meta_type = get_node_meta_type(manager, handle_id)
    creator = dispatch.get(meta_type)
    if creator is not None:
        return creator(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Takes a start and an end node with an optional relationship
    type.
    Returns the relationships between the nodes or an empty list.

    With ``legacy`` True a list of internal relationship ids is returned
    instead of relationship objects.
    """
    if rel_type:
        # Relationship types cannot be Cypher parameters, hence the format().
        q = """
            MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
            RETURN collect(r) as relationships
            """.format(rel_type=rel_type)
    else:
        q = """
            MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
            RETURN collect(r) as relationships
            """
    with manager.session as s:
        if legacy:
            relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
            return [relationship.id for relationship in relationships]
        return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """Replace ALL properties of a node with *new_properties*.

    NOTE(review): this mutates the caller's ``new_properties`` dict by
    writing ``handle_id`` into it.
    """
    new_properties['handle_id'] = handle_id  # Make sure the handle_id can't be changed
    # ``SET n = {props}`` overwrites the node's whole property map.
    q = """
        MATCH (n:Node {handle_id: {props}.handle_id})
        SET n = {props}
        RETURN n
        """
    with manager.session as s:
        if legacy:
            return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
        return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
    """Replace ALL properties of a relationship with *new_properties*.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :param new_properties: dict of properties to set
    :return: single result record or None
    """
    q = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        SET r = {props}
        RETURN r
        """
    with manager.session as s:
        return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel

    Model resolution tries, in order:
    1. '<MetaType><Label>Model' for each label (e.g. PhysicalRouterModel)
    2. '<Label>Model' for each label (e.g. RouterModel)
    3. '<MetaType>Model' (e.g. PhysicalModel)
    and finally falls back to BaseNodeModel.
    """
    bundle = get_node_bundle(manager, handle_id, node)
    for label in bundle.get('labels'):
        try:
            # Underscores in labels are stripped to match model class names.
            classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    for label in bundle.get('labels'):
        try:
            classname = '{base}Model'.format(base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    try:
        classname = '{base}Model'.format(base=bundle.get('meta_type'))
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """Load a BaseRelationshipModel for an internal Neo4j relationship id.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    return models.BaseRelationshipModel(manager).load(
        get_relationship_bundle(manager, relationship_id))
|
NORDUnet/python-norduniclient | norduniclient/core.py | create_relation_relationship | python | def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type) | Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised. | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L622-L637 | [
"def get_node_meta_type(manager, handle_id):\n \"\"\"\n Returns the meta type of the supplied node as a string.\n\n :param manager: Neo4jDBSessionManager\n :param handle_id: Unique id\n :return: string\n \"\"\"\n node = get_node(manager=manager, handle_id=handle_id, legacy=False)\n for label... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    Create a typed relationship between two nodes identified by handle id.

    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id
    :param other_handle_id: Other node handle id
    :param rel_type: Relationship type
    :param legacy: When True return the internal relationship id instead of
        the Relationship object
    :type manager: Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type other_handle_id: str|unicode
    :type rel_type: str|unicode
    :type legacy: Boolean
    :rtype: int|neo4j.v1.types.Relationship
    """
    q = """
    MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
    CREATE (a)-[r:%s]->(b)
    RETURN r
    """ % rel_type
    params = {'start': handle_id, 'end': other_handle_id}
    with manager.session as session:
        rel = session.run(q, params).single()['r']
    return rel.id if legacy else rel
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Location node to another node.

    The only combination a Location node supports is ``Has`` towards
    another Location node; anything else raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Has' and other_meta_type == 'Location':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Logical node to another node.

    Allowed combinations:
      * ``Depends_on`` -> Logical or Physical
      * ``Part_of``    -> Physical

    Anything else raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_targets = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    if other_meta_type in allowed_targets.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Physical node to another node.

    Allowed combinations:
      * ``Has`` / ``Connected_to`` -> Physical
      * ``Located_in``             -> Location

    Anything else raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    physical_ok = other_meta_type == 'Physical' and rel_type in ('Has', 'Connected_to')
    location_ok = other_meta_type == 'Location' and rel_type == 'Located_in'
    if physical_ok or location_ok:
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Create a relationship between two nodes, dispatching on the meta type
    of the start node. Returns the relationship or raises
    NoRelationshipPossible if no valid combination exists.
    """
    meta_type = get_node_meta_type(manager, handle_id)
    # Dispatch to the meta-type specific creator; each one enforces its
    # own set of allowed relationship types.
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    creator = dispatch.get(meta_type)
    if creator is not None:
        return creator(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Return the relationships between two nodes, optionally restricted to a
    relationship type. Yields an empty list when the nodes are unrelated.

    :param manager: Neo4jDBSessionManager
    :param handle_id1: Handle id of one node
    :param handle_id2: Handle id of the other node
    :param rel_type: Optional relationship type filter
    :param legacy: When True return internal relationship ids instead of
        Relationship objects
    """
    if rel_type:
        q = """
        MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
        RETURN collect(r) as relationships
        """.format(rel_type=rel_type)
    else:
        q = """
        MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
        RETURN collect(r) as relationships
        """
    params = {'handle_id1': handle_id1, 'handle_id2': handle_id2}
    with manager.session as session:
        relationships = session.run(q, params).single()['relationships']
    if legacy:
        return [rel.id for rel in relationships]
    return relationships
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """
    Replace all properties of a node.

    The node is matched by ``handle_id`` and its entire property map is
    overwritten (``SET n = {props}``). The ``handle_id`` key is always
    forced into the written map so it can never be changed or dropped.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id of the node to update
    :param new_properties: Property map to write; the caller's dict is
        not mutated
    :param legacy: When True return a plain property dict instead of a
        Node object
    :rtype: dict|neo4j.v1.types.Node
    """
    # Work on a copy so the caller's dict is not mutated as a side effect
    # (the original implementation wrote handle_id into the caller's dict).
    props = dict(new_properties)
    props['handle_id'] = handle_id  # Make sure the handle_id can't be changed
    q = """
    MATCH (n:Node {handle_id: {props}.handle_id})
    SET n = {props}
    RETURN n
    """
    with manager.session as s:
        if legacy:
            return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n'].properties
        return s.run(q, {'handle_id': handle_id, 'props': props}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
    """
    Overwrite all properties on a relationship.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :param new_properties: Property map to write
    :return: The single result record for the updated relationship
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    SET r = {props}
    RETURN r
    """
    params = {'relationship_id': int(relationship_id), 'props': new_properties}
    with manager.session as session:
        return session.run(q, params).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    Load a node and wrap it in the most specific model class available.

    Model class resolution order (first match wins):
      1. ``<meta_type><label>Model`` for each label, e.g. ``PhysicalPortModel``
      2. ``<label>Model`` for each label, e.g. ``PortModel``
      3. ``<meta_type>Model``, e.g. ``PhysicalModel``
      4. ``models.BaseNodeModel`` as the final fallback

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    # 1. Most specific: meta type + label, underscores stripped to match
    #    the model class naming convention.
    for label in bundle.get('labels'):
        try:
            classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            # No such model class in `models` (note: an AttributeError
            # raised inside load() is swallowed here too) - try the next.
            pass
    # 2. Label only.
    for label in bundle.get('labels'):
        try:
            classname = '{base}Model'.format(base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    # 3. Meta type only, falling back to the generic base model.
    try:
        classname = '{base}Model'.format(base=bundle.get('meta_type'))
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """
    Fetch a relationship bundle and wrap it in a BaseRelationshipModel.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    bundle = get_relationship_bundle(manager, relationship_id)
    model = models.BaseRelationshipModel(manager)
    return model.load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | create_physical_relationship | python | def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type) | Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised. | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L640-L652 | [
"def get_node_meta_type(manager, handle_id):\n \"\"\"\n Returns the meta type of the supplied node as a string.\n\n :param manager: Neo4jDBSessionManager\n :param handle_id: Unique id\n :return: string\n \"\"\"\n node = get_node(manager=manager, handle_id=handle_id, legacy=False)\n for label... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | create_relationship | python | def create_relationship(manager, handle_id, other_handle_id, rel_type):
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type) | Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception. | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L655-L671 | [
"def get_node_meta_type(manager, handle_id):\n \"\"\"\n Returns the meta type of the supplied node as a string.\n\n :param manager: Neo4jDBSessionManager\n :param handle_id: Unique id\n :return: string\n \"\"\"\n node = get_node(manager=manager, handle_id=handle_id, legacy=False)\n for label... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
    """Lazy singleton wrapper around a Neo4jDBSessionManager."""

    _instance = None  # cached singleton instance
    _manager = None   # cached session manager (None until successfully created)

    @classmethod
    def get_instance(cls):
        """Return the shared GraphDB instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        # Eagerly resolve the manager; creation failures are logged by the
        # property and surface here only as self._manager == None.
        self._manager = self.manager

    @property
    def manager(self):
        """Return the cached manager, retrying init_db() on every access while unset."""
        if self._manager is None:
            try:
                self._manager = init_db()
            except Exception as e:
                # Deliberate best-effort: connection problems are logged,
                # not raised, so import-time use does not crash callers.
                logger.error('Could not create manager: {}'.format(e))
                self._manager = None
        return self._manager

    @manager.setter
    def manager(self, manager):
        # Allows tests / callers to inject a preconfigured manager.
        self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
            max_pool_size=MAX_POOL_SIZE):
    """Open a Neo4j session manager and ensure schema constraint and index exist.

    Returns the manager on success, None when no URI is configured.
    Re-raises any failure during constraint/index creation or connection.
    """
    if uri:
        try:
            # Imported here, not at module level -- presumably to avoid a
            # circular import with norduniclient.contextmanager; confirm.
            from norduniclient.contextmanager import Neo4jDBSessionManager
            manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
                                            max_pool_size=max_pool_size)
            try:
                with manager.session as s:
                    # handle_id acts as the primary key for all Node rows.
                    s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
            except Exception as e:
                logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
                raise e
            try:
                create_index(manager, 'name')
            except Exception as e:
                logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
                raise e
            return manager
        except ProtocolError as e:
            logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
            raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
    """Create and return a Neo4j bolt driver.

    :param uri: Bolt uri
    :type uri: str
    :param username: Neo4j username
    :type username: str
    :param password: Neo4j password
    :type password: str
    :param encrypted: Use TLS
    :type encrypted: Boolean
    :param max_pool_size: Maximum number of idle sessions
    :type max_pool_size: Integer
    :param trust: Trust cert on first use (0) or do not accept unknown cert (1)
    :type trust: Integer
    :return: Neo4j driver
    :rtype: neo4j.v1.session.Driver
    """
    auth_token = basic_auth(username, password)
    return GraphDatabase.driver(
        uri,
        auth=auth_token,
        encrypted=encrypted,
        max_pool_size=max_pool_size,
        trust=trust,
    )
def query_to_dict(manager, query, **kwargs):
    """Run *query* with **kwargs as parameters and merge all records into one dict.

    On key collisions, later records overwrite earlier ones.
    """
    merged = {}
    with manager.session as s:
        for record in s.run(query, kwargs):
            merged.update(record.items())
    return merged
def query_to_list(manager, query, **kwargs):
    """Run *query* with **kwargs as parameters; return one dict per result record."""
    with manager.session as s:
        return [dict(record.items()) for record in s.run(query, kwargs)]
def query_to_iterator(manager, query, **kwargs):
    """Run *query* with **kwargs as parameters; lazily yield one dict per record."""
    with manager.session as s:
        for record in s.run(query, kwargs):
            yield dict(record.items())
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
    """
    Creates a node with the mandatory attributes name and handle_id also sets type label.

    :param manager: Manager to handle sessions and transactions
    :param name: Node name
    :param meta_type_label: Node meta type
    :param type_label: Node label
    :param handle_id: Unique id
    :param legacy: When True return the node property dict, else the Node object
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type name: str|unicode
    :type meta_type_label: str|unicode
    :type type_label: str|unicode
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    :raises exceptions.MetaLabelNamingError: if meta_type_label is not in META_TYPES
    """
    if meta_type_label not in META_TYPES:
        raise exceptions.MetaLabelNamingError(meta_type_label)
    # Labels cannot be Cypher parameters, so they are %-interpolated into the
    # query. meta_type_label is validated above; type_label is not --
    # NOTE(review): callers must supply a trusted label string.
    q = """
    CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
    RETURN n
    """ % (meta_type_label, type_label)
    with manager.session as s:
        if legacy:
            return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
        return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
    """
    Fetch a single node by its handle_id.

    :param manager: Manager to handle sessions and transactions
    :param handle_id: Unique id
    :param legacy: When True return the node property dict, else the Node object
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    :raises exceptions.NodeNotFound: if no node carries the handle_id
    """
    q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
    with manager.session as s:
        result = s.run(q, {'handle_id': handle_id}).single()
        if result:
            if legacy:
                return result['n'].properties
            return result['n']
    # Reached only when the query matched nothing.
    raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
    """
    Build a dict bundle describing a node: its data, meta type and labels.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id (used only when node is not supplied)
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict with keys 'data', 'labels' and (when present) 'meta_type'
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    d = {
        'data': node.properties
    }
    # Fix: the original removed items from `labels` while iterating it, which
    # can skip entries, and `labels.remove('Node')` raised ValueError for a
    # node missing the indexing label. Rebuild the partitions instead.
    labels = [label for label in node.labels if label != 'Node']
    plain_labels = []
    for label in labels:
        if label in META_TYPES:
            # Last meta label wins, mirroring the original's overwrite.
            d['meta_type'] = label
        else:
            plain_labels.append(label)
    d['labels'] = plain_labels
    return d
def delete_node(manager, handle_id):
    """
    Delete the node and every relationship attached to it.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :rtype: bool (always True)
    """
    cypher = """
    MATCH (n:Node {handle_id: {handle_id}})
    OPTIONAL MATCH (n)-[r]-()
    DELETE n,r
    """
    with manager.session as session:
        session.run(cypher, {'handle_id': handle_id})
    return True
def get_relationship(manager, relationship_id, legacy=True):
    """
    Fetch a single relationship by its internal Neo4j id.

    :param manager: Manager to handle sessions and transactions
    :param relationship_id: Internal Neo4j relationship id
    :param legacy: When True return the property dict, else the Relationship object
    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type relationship_id: int
    :type legacy: Boolean
    :rtype dict|neo4j.v1.types.Relationship
    :raises exceptions.RelationshipNotFound: if the id matches nothing
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    RETURN r
    """
    with manager.session as s:
        record = s.run(q, {'relationship_id': int(relationship_id)}).single()
        if record:
            if legacy:
                return record['r'].properties
            return record['r']
    raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
    """
    Build a dict bundle describing a relationship and its endpoints.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j id
    :param legacy: When True 'start'/'end' hold handle_ids, else Node objects
    :type relationship_id: int
    :type legacy: bool
    :rtype: dictionary
    :raises exceptions.RelationshipNotFound: if the id matches nothing
    """
    q = """
    MATCH (start)-[r]->(end)
    WHERE ID(r) = {relationship_id}
    RETURN start, r, end
    """
    with manager.session as s:
        record = s.run(q, {'relationship_id': int(relationship_id)}).single()
        if record is None:
            raise exceptions.RelationshipNotFound(manager, int(relationship_id))
        start_node, end_node = record['start'], record['end']
        if legacy:
            start_node = start_node.properties['handle_id']
            end_node = end_node.properties['handle_id']
        return {
            'type': record['r'].type,
            'id': int(relationship_id),
            'data': record['r'].properties,
            'start': start_node,
            'end': end_node,
        }
def delete_relationship(manager, relationship_id):
    """
    Delete the relationship with the given internal Neo4j id.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool (always True)
    """
    rel_id = int(relationship_id)
    cypher = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    DELETE r
    """
    with manager.session as session:
        session.run(cypher, {'relationship_id': rel_id})
    return True
def get_node_meta_type(manager, handle_id):
    """
    Return the meta type label of the supplied node as a string.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :return: string
    :raises exceptions.NoMetaLabelFound: if the node has no meta type label
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    meta = next((label for label in node.labels if label in META_TYPES), None)
    if meta is None:
        raise exceptions.NoMetaLabelFound(handle_id)
    return meta
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str
    :return: generator of neo4j.v1.types.Node
    """
    if prop:
        # NOTE(review): prop and node_type are format()-interpolated into the
        # query (labels/property names cannot be parameters) -- they must come
        # from trusted input.
        q = """
        MATCH (n:{label})
        USING SCAN n:{label}
        WHERE n.{prop} = {{value}}
        RETURN distinct n
        """.format(label=node_type, prop=prop)
        with manager.session as s:
            for result in s.run(q, {'value': value}):
                yield result['n']
    else:
        # No property given: fetch everything and regex-match each property
        # value client side (case insensitive).
        q = """
        MATCH (n:{label})
        RETURN n
        """.format(label=node_type)
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as s:
            for result in s.run(q):
                for v in result['n'].properties.values():
                    if pattern.search(text_type(v)):
                        yield result['n']
                        break  # one match is enough; avoid duplicate yields
def get_node_by_type(manager, node_type, legacy=True):
    """
    Yield every distinct node carrying the given label.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match (format()-interpolated into the query)
    :param legacy: When True yield property dicts, else Node objects
    :return: generator
    """
    q = """
    MATCH (n:{label})
    RETURN distinct n
    """.format(label=node_type)
    with manager.session as s:
        for result in s.run(q):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str
    :type prop: str
    :type node_type: str
    :return: generator of neo4j.v1.types.Node
    """
    # NOTE(review): value (and prop/node_type) are format()-interpolated
    # straight into the Cypher text and into a regex literal -- unescaped
    # input can alter the query. Only pass trusted values here.
    if prop:
        q = """
        MATCH (n:{label})
        WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
        RETURN distinct n
        """.format(label=node_type, prop=prop, value=value)
    else:
        # No property given: match against every property (scalar or list).
        q = """
        MATCH (n:{label})
        WITH n, keys(n) as props
        WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
        any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
        RETURN distinct n
        """.format(label=node_type, value=value)
    with manager.session as s:
        for result in s.run(q):
            yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """
    Yield every node carrying the given label.

    :param manager: Neo4jDBSessionManager
    :param node_type: Label to match (interpolated into the query)
    :param legacy: When True yield property dicts, else Node objects
    :return: generator
    """
    q = """
    MATCH (n:{label})
    RETURN n
    """.format(label=node_type)
    with manager.session as s:
        for result in s.run(q):
            node = result['n']
            yield node.properties if legacy else node
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """
    Yield every node whose name property equals *name* exactly.

    :param manager: Neo4jDBSessionManager
    :param name: Node name to match
    :param legacy: When True yield property dicts, else Node objects
    :return: generator
    """
    q = """
    MATCH (n:Node {name: {name}})
    RETURN n
    """
    with manager.session as s:
        for result in s.run(q, {'name': name}):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
def legacy_node_index_search(manager, lucene_query):
    """
    Kept only for API compatibility; legacy (lucene) indexes no longer exist.

    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :return: dict
    :raises NotImplementedError: always
    """
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
    """
    Create a schema index on :node_type(prop).

    :param manager: Neo4jDBSessionManager
    :param prop: Property to index
    :param node_type: Label to create index on
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type node_type: str
    """
    # Index targets cannot be query parameters, hence the interpolation;
    # both arguments must be trusted identifiers.
    with manager.session as s:
        s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    Yield nodes whose indexed property case-insensitively matches *value*.

    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: When True yield property dicts, else Node objects
    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool
    :return: generator of dict or Node -- NOTE(review): this is a generator,
        despite the singular name; callers must iterate it.
    :rtype: dict|Node
    """
    # prop, node_type and lookup_func are interpolated (not parameters) and
    # must be trusted; only value travels as a query parameter.
    q = """
    MATCH (n:{label})
    WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
    RETURN n
    """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for result in s.run(q, {'value': value}):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
    """
    Returns the node if the node is unique for name and type or None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :return: norduniclient node model or None
    :raises exceptions.MultipleNodesReturned: if name+type match several nodes
    """
    # node_type is passed as the {label} parameter and compared against
    # labels(n) in Cypher, so no string interpolation is needed here.
    q = """
    MATCH (n:Node { name: {name} })
    WHERE {label} IN labels(n)
    RETURN n.handle_id as handle_id
    """
    with manager.session as s:
        result = list(s.run(q, {'name': node_name, 'label': node_type}))
    if result:
        if len(result) == 1:
            return get_node_model(manager, result[0]['handle_id'])
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    Create a typed relationship between two existing nodes.

    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id (start of the relationship)
    :param other_handle_id: Other node handle id (end of the relationship)
    :param rel_type: Relationship type
    :param legacy: When True return the internal relationship id, else the object
    :type manager: Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type other_handle_id: str|unicode
    :type rel_type: str|unicode
    :type legacy: Boolean
    :rtype: int|neo4j.v1.types.Relationship
    """
    # rel_type cannot be a Cypher parameter and is %-interpolated into the
    # query; the public create_*_relationship wrappers restrict it to a
    # fixed set of known types.
    q = """
    MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
    CREATE (a)-[r:%s]->(b)
    RETURN r
    """ % rel_type
    with manager.session as s:
        if legacy:
            return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
        return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Location node and return it.

    Only Location-(Has)->Location is allowed; anything else raises
    NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Has' and other_meta_type == 'Location':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Logical node and return it.

    Allowed combinations: Depends_on -> Logical|Physical, Part_of -> Physical.
    Anything else raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_targets = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    if other_meta_type in allowed_targets.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Relation node and return it.

    Allowed combinations: Uses|Provides -> Logical, Responsible_for -> Location,
    Owns|Provides -> Physical. Anything else raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship from a Physical node and return it.

    Allowed combinations: Has|Connected_to -> Physical, Located_in -> Location.
    Anything else raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    allowed_types = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Takes a start and an end node with an optional relationship
    type.
    Returns the relationships between the nodes or an empty list.

    :param legacy: When True return internal relationship ids, else objects
    """
    if rel_type:
        # rel_type must be interpolated (relationship types cannot be
        # parameters); it should come from trusted input.
        q = """
        MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
        RETURN collect(r) as relationships
        """.format(rel_type=rel_type)
    else:
        q = """
        MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
        RETURN collect(r) as relationships
        """
    with manager.session as s:
        if legacy:
            relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
            return [relationship.id for relationship in relationships]
        return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """
    Replace all of a node's properties with *new_properties*.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id of the node to update
    :param new_properties: Replacement property dict (handle_id is forced in)
    :param legacy: When True return the property dict, else the Node object
    :rtype: dict|neo4j.v1.types.Node
    """
    # Fix: work on a copy so the caller's dict is not mutated as a side
    # effect; handle_id must never be changed via this call.
    props = dict(new_properties)
    props['handle_id'] = handle_id
    q = """
    MATCH (n:Node {handle_id: {props}.handle_id})
    SET n = {props}
    RETURN n
    """
    with manager.session as s:
        record = s.run(q, {'handle_id': handle_id, 'props': props}).single()
        if legacy:
            return record['n'].properties
        return record['n']
def set_relationship_properties(manager, relationship_id, new_properties):
    """
    Replace all properties of the relationship with *new_properties*.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :param new_properties: Replacement property dict
    :return: the full result record containing 'r'
    """
    # NOTE(review): unlike set_node_properties this returns the whole record
    # (.single()), not r.properties -- confirm callers expect that.
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    SET r = {props}
    RETURN r
    """
    with manager.session as s:
        return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel

    Model resolution order (first class found in models wins):
    1. '<MetaType><Label>Model' for each label,
    2. '<Label>Model' for each label,
    3. '<MetaType>Model',
    4. BaseNodeModel as the final fallback.
    """
    bundle = get_node_bundle(manager, handle_id, node)
    for label in bundle.get('labels'):
        try:
            # Underscores are stripped because model class names are CamelCase.
            classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    for label in bundle.get('labels'):
        try:
            classname = '{base}Model'.format(base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    try:
        classname = '{base}Model'.format(base=bundle.get('meta_type'))
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """
    Load a relationship bundle into a BaseRelationshipModel.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    bundle = get_relationship_bundle(manager, relationship_id)
    model = models.BaseRelationshipModel(manager)
    return model.load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_relationships | python | def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships'] | Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list. | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L674-L694 | null | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    Yield nodes whose indexed property matches the value case-insensitively.

    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: If True yield property dicts, otherwise Node objects

    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool
    :rtype: dict|Node
    """
    q = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for record in s.run(q, {'value': value}):
            node = record['n']
            yield node.properties if legacy else node
def get_unique_node_by_name(manager, node_name, node_type):
    """
    Return the node model if exactly one node matches name and type, else None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :return: norduniclient node model or None
    :raises exceptions.MultipleNodesReturned: if more than one node matches
    """
    q = """
        MATCH (n:Node { name: {name} })
        WHERE {label} IN labels(n)
        RETURN n.handle_id as handle_id
        """
    with manager.session as s:
        hits = list(s.run(q, {'name': node_name, 'label': node_type}))
        if not hits:
            return None
        if len(hits) > 1:
            raise exceptions.MultipleNodesReturned(node_name, node_type)
        return get_node_model(manager, hits[0]['handle_id'])
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    Create a rel_type relationship from the node with handle_id to the node
    with other_handle_id.

    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id (start node)
    :param other_handle_id: Other node handle id (end node)
    :param rel_type: Relationship type
    :param legacy: Backwards compatibility; if True return only the internal
                   Neo4j relationship id instead of the Relationship object

    :type manager: Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type other_handle_id: str|unicode
    :type rel_type: str|unicode
    :type legacy: Boolean

    :rtype: int|neo4j.v1.types.Relationship
    """
    # NOTE(review): rel_type is spliced into the query with %-formatting
    # (Cypher cannot parameterize relationship types). The create_*_relationship
    # wrappers only pass hard-coded types, but an externally supplied rel_type
    # would allow Cypher injection -- confirm callers before widening this API.
    q = """
    MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
    CREATE (a)-[r:%s]->(b)
    RETURN r
    """ % rel_type
    with manager.session as s:
        if legacy:
            # Legacy callers expect the internal Neo4j relationship id only.
            return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
        return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """
    Create a relationship starting from a Location node.

    Only Location -[Has]-> Location is allowed; any other combination
    raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Has' and other_meta_type == 'Location':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship starting from a Logical node.

    Allowed combinations: Depends_on -> Logical|Physical, Part_of -> Physical.
    Any other combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    # Map relationship type to the meta types it may point at.
    allowed_targets = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    if other_meta_type in allowed_targets.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """
    Create a relationship starting from a Relation node.

    Allowed combinations: Uses|Provides -> Logical, Responsible_for -> Location,
    Owns|Provides -> Physical. Any other combination raises
    NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    # Map the other node's meta type to the relationship types allowed to it.
    allowed_types = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """
    Create a relationship starting from a Physical node.

    Allowed combinations: Has|Connected_to -> Physical, Located_in -> Location.
    Any other combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    # Map the other node's meta type to the relationship types allowed to it.
    allowed_types = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    if rel_type in allowed_types.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """
    Create a relationship between two nodes based on their meta types.

    Dispatches to the meta-type specific helper. Returns the relationship
    or raises NoRelationshipPossible.
    """
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    meta_type = get_node_meta_type(manager, handle_id)
    handler = dispatch.get(meta_type)
    if handler is not None:
        return handler(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """
    Replace all of a node's properties with the supplied dict.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id of the node to update
    :param new_properties: Properties that will replace the node's current ones
    :param legacy: If True return the property dict, otherwise the Node object

    :rtype: dict|neo4j.v1.types.Node
    """
    # Work on a copy so the caller's dict is not mutated as a side effect.
    props = dict(new_properties)
    props['handle_id'] = handle_id  # Make sure the handle_id can't be changed
    q = """
    MATCH (n:Node {handle_id: {props}.handle_id})
    SET n = {props}
    RETURN n
    """
    with manager.session as s:
        record = s.run(q, {'handle_id': handle_id, 'props': props}).single()
        if legacy:
            return record['n'].properties
        return record['n']
def set_relationship_properties(manager, relationship_id, new_properties):
    """
    Replace all properties of a relationship with the supplied dict.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :param new_properties: Properties that will replace the current ones
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    SET r = {props}
    RETURN r
    """
    params = {'relationship_id': int(relationship_id), 'props': new_properties}
    with manager.session as s:
        return s.run(q, params).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    Instantiate the most specific node model available for a node.

    Candidate class names are tried in order: '<meta_type><label>Model' for
    every label, then '<label>Model' for every label (underscores stripped),
    then '<meta_type>Model', finally falling back to BaseNodeModel.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    labels = bundle.get('labels')
    meta_type = bundle.get('meta_type')
    # Most specific candidates first: meta type + label, then label alone.
    candidates = ['{0}{1}'.format(meta_type, label) for label in labels]
    candidates += list(labels)
    for base in candidates:
        classname = '{0}Model'.format(base).replace('_', '')
        try:
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    try:
        classname = '{0}Model'.format(meta_type)
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        return models.BaseNodeModel(manager).load(bundle)
def get_relationship_model(manager, relationship_id):
    """
    Load a relationship bundle and wrap it in a BaseRelationshipModel.

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :type relationship_id: int
    :return: Relationship model
    :rtype: models.BaseRelationshipModel
    """
    model = models.BaseRelationshipModel(manager)
    return model.load(get_relationship_bundle(manager, relationship_id))
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_node_model | python | def get_node_model(manager, handle_id=None, node=None):
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle) | :param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L723-L751 | [
"def get_node_bundle(manager, handle_id=None, node=None):\n \"\"\"\n :param manager: Neo4jDBSessionManager\n :param handle_id: Unique id\n :type handle_id: str|unicode\n :param node: Node object\n :type node: neo4j.v1.types.Node\n :return: dict\n \"\"\"\n if not node:\n node = get_... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
_instance = None
_manager = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self._manager = self.manager
@property
def manager(self):
if self._manager is None:
try:
self._manager = init_db()
except Exception as e:
logger.error('Could not create manager: {}'.format(e))
self._manager = None
return self._manager
@manager.setter
def manager(self, manager):
self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
max_pool_size=MAX_POOL_SIZE):
if uri:
try:
from norduniclient.contextmanager import Neo4jDBSessionManager
manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
max_pool_size=max_pool_size)
try:
with manager.session as s:
s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
except Exception as e:
logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
raise e
try:
create_index(manager, 'name')
except Exception as e:
logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
raise e
return manager
except ProtocolError as e:
logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
"""
:param uri: Bolt uri
:type uri: str
:param username: Neo4j username
:type username: str
:param password: Neo4j password
:type password: str
:param encrypted: Use TLS
:type encrypted: Boolean
:param max_pool_size: Maximum number of idle sessions
:type max_pool_size: Integer
:param trust: Trust cert on first use (0) or do not accept unknown cert (1)
:type trust: Integer
:return: Neo4j driver
:rtype: neo4j.v1.session.Driver
"""
return GraphDatabase.driver(uri, auth=basic_auth(username, password), encrypted=encrypted,
max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
d = {}
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
for key, value in record.items():
d[key] = value
return d
def query_to_list(manager, query, **kwargs):
l = []
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
l.append(d)
return l
def query_to_iterator(manager, query, **kwargs):
with manager.session as s:
result = s.run(query, kwargs)
for record in result:
d = {}
for key, value in record.items():
d[key] = value
yield d
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
with manager.session as s:
result = s.run(q, {'handle_id': handle_id}).single()
if result:
if legacy:
return result['n'].properties
return result['n']
raise exceptions.NodeNotFound(manager, handle_id)
def get_node_bundle(manager, handle_id=None, node=None):
"""
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: dict
"""
if not node:
node = get_node(manager, handle_id=handle_id, legacy=False)
d = {
'data': node.properties
}
labels = list(node.labels)
labels.remove('Node') # All nodes have this label for indexing
for label in labels:
if label in META_TYPES:
d['meta_type'] = label
labels.remove(label)
d['labels'] = labels
return d
def delete_node(manager, handle_id):
"""
Deletes the node and all its relationships.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:rtype: bool
"""
q = """
MATCH (n:Node {handle_id: {handle_id}})
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
"""
with manager.session as s:
s.run(q, {'handle_id': handle_id})
return True
def get_relationship(manager, relationship_id, legacy=True):
"""
:param manager: Manager to handle sessions and transactions
:param relationship_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type relationship_id: int
:type legacy: Boolean
:rtype int|neo4j.v1.types.Relationship
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
RETURN r
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record:
if legacy:
return record['r'].properties
return record['r']
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle
def delete_relationship(manager, relationship_id):
"""
Deletes the relationship.
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:return: bool
"""
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
DELETE r
"""
with manager.session as s:
s.run(q, {'relationship_id': int(relationship_id)})
return True
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str|list|bool|int
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
USING SCAN n:{label}
WHERE n.{prop} = {{value}}
RETURN distinct n
""".format(label=node_type, prop=prop)
with manager.session as s:
for result in s.run(q, {'value': value}):
yield result['n']
else:
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
with manager.session as s:
for result in s.run(q):
for v in result['n'].properties.values():
if pattern.search(text_type(v)):
yield result['n']
break
def get_node_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN distinct n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
"""
Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
with the supplied string.
:param manager: Neo4jDBSessionManager
:param value: Value to search for
:param prop: Which property to look for value in
:param node_type:
:type value: str
:type prop: str
:type node_type: str
:return: dicts
"""
if prop:
q = """
MATCH (n:{label})
WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
RETURN distinct n
""".format(label=node_type, prop=prop, value=value)
else:
q = """
MATCH (n:{label})
WITH n, keys(n) as props
WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
RETURN distinct n
""".format(label=node_type, value=value)
with manager.session as s:
for result in s.run(q):
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
q = """
MATCH (n:{label})
RETURN n
""".format(label=node_type)
with manager.session as s:
for result in s.run(q):
if legacy:
yield result['n'].properties
else:
yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
q = """
MATCH (n:Node {name: {name}})
RETURN n
"""
with manager.session as s:
for result in s.run(q, {'name': name}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def legacy_node_index_search(manager, lucene_query):
"""
:param manager: Neo4jDBSessionManager
:param lucene_query: string
:return: dict
"""
raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
"""
:param manager: Neo4jDBSessionManager
:param prop: Property to index
:param node_type: Label to create index on
:type manager: Neo4jDBSessionManager
:type prop: str
:type node_type: str
"""
with manager.session as s:
s.run('CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop))
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param prop: Indexed property
:param value: Indexed value
:param node_type: Label used for index
:param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type prop: str
:type value: str
:type node_type: str
:type lookup_func: str
:type legacy: bool
:return: Dict or Node object
:rtype: dict|Node
"""
q = """
MATCH (n:{label})
WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
RETURN n
""".format(label=node_type, prop=prop, lookup_func=lookup_func)
with manager.session as s:
for result in s.run(q, {'value': value}):
if legacy:
yield result['n'].properties
else:
yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
"""
Returns the node if the node is unique for name and type or None.
:param manager: Neo4jDBSessionManager
:param node_name: string
:param node_type: str|unicode
:return: norduniclient node model or None
"""
q = """
MATCH (n:Node { name: {name} })
WHERE {label} IN labels(n)
RETURN n.handle_id as handle_id
"""
with manager.session as s:
result = list(s.run(q, {'name': node_name, 'label': node_type}))
if result:
if len(result) == 1:
return get_node_model(manager, result[0]['handle_id'])
raise exceptions.MultipleNodesReturned(node_name, node_type)
return None
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
"""
:param manager: Context manager to handle transactions
:param handle_id: Node handle id
:param other_handle_id: Other node handle id
:param rel_type: Relationship type
:param legacy: Backwards compatibility
:type manager: Neo4jDBSessionManager
:type handle_id: str|unicode
:type other_handle_id: str|unicode
:type rel_type: str|unicode
:type legacy: Boolean
:rtype: int|neo4j.v1.types.Relationship
"""
q = """
MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
CREATE (a)-[r:%s]->(b)
RETURN r
""" % rel_type
with manager.session as s:
if legacy:
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Location' and rel_type == 'Has':
return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if rel_type == 'Depends_on':
if other_meta_type == 'Logical' or other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
elif rel_type == 'Part_of':
if other_meta_type == 'Physical':
return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Logical':
if rel_type in ['Uses', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Responsible_for':
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Physical':
if rel_type in ['Owns', 'Provides']:
return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
"""
Makes relationship between the two nodes and returns the relationship.
If a relationship is not possible NoRelationshipPossible exception is
raised.
"""
other_meta_type = get_node_meta_type(manager, other_handle_id)
if other_meta_type == 'Physical':
if rel_type == 'Has' or rel_type == 'Connected_to':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
elif other_meta_type == 'Location' and rel_type == 'Located_in':
return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
"""
Takes a start and an end node with an optional relationship
type.
Returns the relationships between the nodes or an empty list.
"""
if rel_type:
q = """
MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
RETURN collect(r) as relationships
""".format(rel_type=rel_type)
else:
q = """
MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
RETURN collect(r) as relationships
"""
with manager.session as s:
if legacy:
relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
return [relationship.id for relationship in relationships]
return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
new_properties['handle_id'] = handle_id # Make sure the handle_id can't be changed
q = """
MATCH (n:Node {handle_id: {props}.handle_id})
SET n = {props}
RETURN n
"""
with manager.session as s:
if legacy:
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n'].properties
return s.run(q, {'handle_id': handle_id, 'props': new_properties}).single()['n']
def set_relationship_properties(manager, relationship_id, new_properties):
q = """
MATCH ()-[r]->()
WHERE ID(r) = {relationship_id}
SET r = {props}
RETURN r
"""
with manager.session as s:
return s.run(q, {'relationship_id': int(relationship_id), 'props': new_properties}).single()
def get_relationship_model(manager, relationship_id):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel
"""
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/core.py | get_relationship_model | python | def get_relationship_model(manager, relationship_id):
bundle = get_relationship_bundle(manager, relationship_id)
return models.BaseRelationshipModel(manager).load(bundle) | :param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j relationship id
:type relationship_id: int
:return: Relationship model
:rtype: models.BaseRelationshipModel | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L754-L764 | [
"def get_relationship_bundle(manager, relationship_id=None, legacy=True):\n \"\"\"\n :param manager: Neo4jDBSessionManager\n :param relationship_id: Internal Neo4j id\n :param legacy: Backwards compatibility\n\n :type relationship_id: int\n :type legacy: bool\n\n :rtype: dictionary\n \"\"\"\... | # -*- coding: utf-8 -*-
#
# core.py
#
# Copyright 2016 Johan Lundberg <lundberg@nordu.net>
#
# This started as an extension to the Neo4j REST client made by Versae, continued
# as an extension for the official Neo4j python bindings when they were released
# (Neo4j 1.5, python-embedded).
#
# After the release of neo4j 3.0 and the bolt protocol we replaced neo4jdb-python with
# the official Neo4j driver.
#
# The goal is to make it easier to add and retrieve data from a Neo4j database
# according to the NORDUnet Network Inventory data model.
#
# More information about NORDUnet Network Inventory:
# https://portal.nordu.net/display/NI/
from __future__ import absolute_import
import re
from six import text_type
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.bolt import ProtocolError
from norduniclient import exceptions
from norduniclient import models
import logging
logger = logging.getLogger(__name__)
# Load Django settings
NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD = None, None, None
MAX_POOL_SIZE = 50
ENCRYPTED = False
try:
from django.conf import settings as django_settings
try:
# Mandatory Django settings for quick init
NEO4J_URI = django_settings.NEO4J_RESOURCE_URI
NEO4J_USERNAME = django_settings.NEO4J_USERNAME
NEO4J_PASSWORD = django_settings.NEO4J_PASSWORD
except AttributeError:
pass
# Optional Django settings for quick init
try:
MAX_POOL_SIZE = django_settings.NEO4J_MAX_POOL_SIZE
except AttributeError:
pass
try:
ENCRYPTED = django_settings.NEO4J_ENCRYPTED
except AttributeError:
pass
except ImportError:
logger.info('Starting up without a Django environment.')
logger.info('Initial: norduniclient.neo4jdb == None.')
logger.info('Use norduniclient.init_db to open a database connection.')
META_TYPES = ['Physical', 'Logical', 'Relation', 'Location']
class GraphDB(object):
    """Process-wide singleton wrapper around a Neo4jDBSessionManager."""
    _instance = None  # singleton instance, created lazily
    _manager = None   # cached Neo4jDBSessionManager (None until created)
    @classmethod
    def get_instance(cls):
        # Lazily create the singleton on first access.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance
    def __init__(self):
        # Reading the property triggers lazy creation of the manager.
        self._manager = self.manager
    @property
    def manager(self):
        """Return the session manager, creating it via init_db() on first use.

        On failure the error is logged and None is kept, so a later access
        retries the connection.
        """
        if self._manager is None:
            try:
                self._manager = init_db()
            except Exception as e:
                logger.error('Could not create manager: {}'.format(e))
                self._manager = None
        return self._manager
    @manager.setter
    def manager(self, manager):
        # Allow callers/tests to inject a preconfigured manager.
        self._manager = manager
def init_db(uri=NEO4J_URI, username=NEO4J_USERNAME, password=NEO4J_PASSWORD, encrypted=ENCRYPTED,
            max_pool_size=MAX_POOL_SIZE):
    """Create a Neo4jDBSessionManager and ensure schema constraints/indexes exist.

    Returns the manager on success; returns None implicitly when no uri is
    configured.  Raises on connection, constraint or index creation failure.
    """
    if uri:
        try:
            # Imported here to avoid a circular/hard import at module load time.
            from norduniclient.contextmanager import Neo4jDBSessionManager
            manager = Neo4jDBSessionManager(uri=uri, username=username, password=password, encrypted=encrypted,
                                            max_pool_size=max_pool_size)
            try:
                with manager.session as s:
                    # handle_id is the primary lookup key for all nodes.
                    s.run('CREATE CONSTRAINT ON (n:Node) ASSERT n.handle_id IS UNIQUE')
            except Exception as e:
                logger.error('Could not create constraints for Neo4j database: {!s}'.format(uri))
                raise e
            try:
                create_index(manager, 'name')
            except Exception as e:
                logger.error('Could not create index for Neo4j database: {!s}'.format(uri))
                raise e
            return manager
        except ProtocolError as e:
            logger.warning('Could not connect to Neo4j database: {!s}'.format(uri))
            raise e
def get_db_driver(uri, username=None, password=None, encrypted=True, max_pool_size=50, trust=0):
    """Build and return a Neo4j bolt driver.

    :param uri: Bolt uri
    :param username: Neo4j username
    :param password: Neo4j password
    :param encrypted: Use TLS
    :param max_pool_size: Maximum number of idle sessions
    :param trust: Trust cert on first use (0) or do not accept unknown cert (1)
    :rtype: neo4j.v1.session.Driver
    """
    credentials = basic_auth(username, password)
    return GraphDatabase.driver(uri, auth=credentials, encrypted=encrypted,
                                max_pool_size=max_pool_size, trust=trust)
def query_to_dict(manager, query, **kwargs):
    """Run *query* and flatten all returned records into a single dict.

    On key collisions, later records overwrite earlier ones.
    """
    merged = {}
    with manager.session as s:
        for record in s.run(query, kwargs):
            merged.update(record.items())
    return merged
def query_to_list(manager, query, **kwargs):
    """Run *query* and return every record as its own dict, in result order."""
    with manager.session as s:
        return [dict(record.items()) for record in s.run(query, kwargs)]
def query_to_iterator(manager, query, **kwargs):
    """Run *query* and lazily yield each record as a dict."""
    with manager.session as s:
        for record in s.run(query, kwargs):
            yield dict(record.items())
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
    """
    Creates a node with the mandatory attributes name and handle_id also sets type label.

    :param manager: Manager to handle sessions and transactions
    :param name: Node name
    :param meta_type_label: Node meta type (must be one of META_TYPES)
    :param type_label: Node label
    :param handle_id: Unique id
    :param legacy: Return a plain property dict instead of a Node object

    :type manager: norduniclient.contextmanager.Neo4jDBSessionManager
    :type name: str|unicode
    :type meta_type_label: str|unicode
    :type type_label: str|unicode
    :type handle_id: str|unicode
    :type legacy: Boolean
    :rtype: dict|neo4j.v1.types.Node
    """
    if meta_type_label not in META_TYPES:
        raise exceptions.MetaLabelNamingError(meta_type_label)
    # NOTE(review): type_label is interpolated into the Cypher statement
    # unvalidated (labels cannot be parameterized) -- callers must only pass
    # trusted label names.
    q = """
        CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
        RETURN n
        """ % (meta_type_label, type_label)
    with manager.session as s:
        if legacy:
            return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
        return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
def get_node(manager, handle_id, legacy=True):
    """Fetch the node with the given handle_id.

    Returns the node's property dict when *legacy* is True, otherwise the
    raw Node object.  Raises NodeNotFound when no node matches.
    """
    q = 'MATCH (n:Node { handle_id: {handle_id} }) RETURN n'
    with manager.session as s:
        record = s.run(q, {'handle_id': handle_id}).single()
    if not record:
        raise exceptions.NodeNotFound(manager, handle_id)
    node = record['n']
    return node.properties if legacy else node
def get_node_bundle(manager, handle_id=None, node=None):
    """
    Collect a node's properties, meta type and remaining labels into a dict.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id (used to look the node up when *node* is not given)
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict with 'data', 'labels' and (when present) 'meta_type' keys
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    d = {
        'data': node.properties
    }
    labels = list(node.labels)
    labels.remove('Node')  # All nodes have this label for indexing
    # Partition the remaining labels.  The previous implementation removed
    # items from `labels` while iterating over it, which skips the element
    # following each removal; build a new list instead.
    plain_labels = []
    for label in labels:
        if label in META_TYPES:
            d['meta_type'] = label
        else:
            plain_labels.append(label)
    d['labels'] = plain_labels
    return d
def delete_node(manager, handle_id):
    """Delete the node identified by *handle_id* and every relationship
    attached to it.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :rtype: bool
    """
    query = """
    MATCH (n:Node {handle_id: {handle_id}})
    OPTIONAL MATCH (n)-[r]-()
    DELETE n,r
    """
    params = {'handle_id': handle_id}
    with manager.session as s:
        s.run(query, params)
    return True
def get_relationship(manager, relationship_id, legacy=True):
    """Fetch a relationship by its internal Neo4j id.

    Returns the relationship's property dict when *legacy* is True,
    otherwise the raw relationship object.  Raises RelationshipNotFound
    when no relationship has that id.
    """
    q = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    RETURN r
    """
    rel_id = int(relationship_id)
    with manager.session as s:
        record = s.run(q, {'relationship_id': rel_id}).single()
    if not record:
        raise exceptions.RelationshipNotFound(manager, rel_id)
    rel = record['r']
    return rel.properties if legacy else rel
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
    """Return a dict describing a relationship and its endpoint nodes.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j id
    :param legacy: When True, 'start'/'end' hold the endpoints' handle_ids
                   instead of the raw Node objects.
    :type relationship_id: int
    :type legacy: bool
    :rtype: dictionary
    """
    q = """
    MATCH (start)-[r]->(end)
    WHERE ID(r) = {relationship_id}
    RETURN start, r, end
    """
    rel_id = int(relationship_id)
    with manager.session as s:
        record = s.run(q, {'relationship_id': rel_id}).single()
    if record is None:
        raise exceptions.RelationshipNotFound(manager, rel_id)
    start, end = record['start'], record['end']
    if legacy:
        start = start.properties['handle_id']
        end = end.properties['handle_id']
    return {
        'type': record['r'].type,
        'id': rel_id,
        'data': record['r'].properties,
        'start': start,
        'end': end,
    }
def delete_relationship(manager, relationship_id):
    """Delete the relationship with the given internal Neo4j id.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool
    """
    query = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    DELETE r
    """
    params = {'relationship_id': int(relationship_id)}
    with manager.session as s:
        s.run(query, params)
    return True
def get_node_meta_type(manager, handle_id):
    """Return the meta type label ('Physical', 'Logical', ...) of a node.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :return: string
    :raises exceptions.NoMetaLabelFound: when the node has no meta type label
    """
    node = get_node(manager=manager, handle_id=handle_id, legacy=False)
    meta_labels = [label for label in node.labels if label in META_TYPES]
    if meta_labels:
        return meta_labels[0]
    raise exceptions.NoMetaLabelFound(handle_id)
# TODO: Try out elasticsearch
def get_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and compares the property/properties of the node
    with the supplied string.

    When *prop* is given an exact (parameterized) comparison is done in Cypher;
    otherwise every property of every node is scanned client-side with a
    case-insensitive regex built from *value*.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str|list|bool|int
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    if prop:
        # node_type and prop are interpolated (labels/properties cannot be
        # parameterized); {{value}} escapes to the Cypher parameter {value}.
        q = """
            MATCH (n:{label})
            USING SCAN n:{label}
            WHERE n.{prop} = {{value}}
            RETURN distinct n
            """.format(label=node_type, prop=prop)
        with manager.session as s:
            for result in s.run(q, {'value': value}):
                yield result['n']
    else:
        q = """
            MATCH (n:{label})
            RETURN n
            """.format(label=node_type)
        # Fall back to fetching all nodes and regex-matching each property
        # value on the client; note *value* is used as a regex pattern.
        pattern = re.compile(u'{0}'.format(value), re.IGNORECASE)
        with manager.session as s:
            for result in s.run(q):
                for v in result['n'].properties.values():
                    if pattern.search(text_type(v)):
                        yield result['n']
                        break
def get_node_by_type(manager, node_type, legacy=True):
    """Yield every distinct node carrying the *node_type* label."""
    q = """
    MATCH (n:{label})
    RETURN distinct n
    """.format(label=node_type)
    with manager.session as s:
        for record in s.run(q):
            node = record['n']
            yield node.properties if legacy else node
def search_nodes_by_value(manager, value, prop=None, node_type='Node'):
    """
    Traverses all nodes or nodes of specified label and fuzzy compares the property/properties of the node
    with the supplied string.

    :param manager: Neo4jDBSessionManager
    :param value: Value to search for
    :param prop: Which property to look for value in
    :param node_type: Label to restrict the scan to
    :type value: str
    :type prop: str
    :type node_type: str
    :return: dicts
    """
    # NOTE(review): *value* (and prop/node_type) is interpolated directly into
    # the Cypher regex literal without escaping -- quotes or regex
    # metacharacters in the input will change or break the query.  Only pass
    # trusted input.
    if prop:
        q = """
            MATCH (n:{label})
            WHERE n.{prop} =~ "(?i).*{value}.*" OR any(x IN n.{prop} WHERE x =~ "(?i).*{value}.*")
            RETURN distinct n
            """.format(label=node_type, prop=prop, value=value)
    else:
        # No property given: test every property (scalar or list valued).
        q = """
            MATCH (n:{label})
            WITH n, keys(n) as props
            WHERE any(prop in props WHERE n[prop] =~ "(?i).*{value}.*") OR
                  any(prop in props WHERE any(x IN n[prop] WHERE x =~ "(?i).*{value}.*"))
            RETURN distinct n
            """.format(label=node_type, value=value)
    with manager.session as s:
        for result in s.run(q):
            yield result['n']
# TODO: Try out elasticsearch
def get_nodes_by_type(manager, node_type, legacy=True):
    """Yield every node carrying the *node_type* label."""
    q = """
    MATCH (n:{label})
    RETURN n
    """.format(label=node_type)
    with manager.session as s:
        for record in s.run(q):
            node = record['n']
            yield node.properties if legacy else node
# TODO: Try out elasticsearch
def get_nodes_by_name(manager, name, legacy=True):
    """Yield every node whose name property equals *name* exactly."""
    q = """
    MATCH (n:Node {name: {name}})
    RETURN n
    """
    with manager.session as s:
        for record in s.run(q, {'name': name}):
            node = record['n']
            yield node.properties if legacy else node
def legacy_node_index_search(manager, lucene_query):
    """Legacy Lucene index search -- unsupported since Neo4j 3.0.

    :param manager: Neo4jDBSessionManager
    :param lucene_query: string
    :raises NotImplementedError: always
    """
    raise NotImplementedError('Legacy index removed from Neo4j 3.0')
def create_index(manager, prop, node_type='Node'):
    """Create a Neo4j schema index on :node_type(prop).

    :param manager: Neo4jDBSessionManager
    :param prop: Property to index
    :param node_type: Label to create index on
    :type prop: str
    :type node_type: str
    """
    statement = 'CREATE INDEX ON :{node_type}({prop})'.format(node_type=node_type, prop=prop)
    with manager.session as s:
        s.run(statement)
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: Backwards compatibility

    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool

    :return: Dict or Node object
    :rtype: dict|Node
    """
    # node_type, prop and lookup_func are interpolated into the statement
    # (labels/operators cannot be parameterized); only *value* goes through a
    # Cypher parameter.  Callers must pass trusted values for the former.
    q = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)
    with manager.session as s:
        for result in s.run(q, {'value': value}):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
def get_unique_node_by_name(manager, node_name, node_type):
    """Return the node model that uniquely matches (name, type), or None.

    :param manager: Neo4jDBSessionManager
    :param node_name: string
    :param node_type: str|unicode
    :raises exceptions.MultipleNodesReturned: when the pair is ambiguous
    """
    q = """
        MATCH (n:Node { name: {name} })
        WHERE {label} IN labels(n)
        RETURN n.handle_id as handle_id
        """
    with manager.session as s:
        records = list(s.run(q, {'name': node_name, 'label': node_type}))
    if not records:
        return None
    if len(records) > 1:
        raise exceptions.MultipleNodesReturned(node_name, node_type)
    return get_node_model(manager, records[0]['handle_id'])
def _create_relationship(manager, handle_id, other_handle_id, rel_type, legacy=True):
    """
    :param manager: Context manager to handle transactions
    :param handle_id: Node handle id
    :param other_handle_id: Other node handle id
    :param rel_type: Relationship type
    :param legacy: Backwards compatibility

    :type manager: Neo4jDBSessionManager
    :type handle_id: str|unicode
    :type other_handle_id: str|unicode
    :type rel_type: str|unicode
    :type legacy: Boolean

    :rtype: int|neo4j.v1.types.Relationship
    """
    # rel_type is interpolated with %s (relationship types cannot be
    # parameterized in Cypher); callers validate it against an allow-list
    # in the public create_*_relationship helpers.
    q = """
        MATCH (a:Node {handle_id: {start}}),(b:Node {handle_id: {end}})
        CREATE (a)-[r:%s]->(b)
        RETURN r
        """ % rel_type
    with manager.session as s:
        if legacy:
            return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r'].id
        return s.run(q, {'start': handle_id, 'end': other_handle_id}).single()['r']
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type):
    """Create a relationship from a Location node.

    Only 'Has' towards another Location node is allowed; any other
    combination raises NoRelationshipPossible.
    """
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type == 'Has' and other_meta_type == 'Location':
        return _create_relationship(manager, location_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
def create_logical_relationship(manager, logical_handle_id, other_handle_id, rel_type):
    """Create a relationship from a Logical node.

    Allowed combinations:
      Depends_on -> Logical or Physical
      Part_of    -> Physical
    Raises NoRelationshipPossible for anything else.
    """
    allowed_targets = {
        'Depends_on': ('Logical', 'Physical'),
        'Part_of': ('Physical',),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if other_meta_type in allowed_targets.get(rel_type, ()):
        return _create_relationship(manager, logical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(logical_handle_id, 'Logical', other_handle_id, other_meta_type, rel_type)
def create_relation_relationship(manager, relation_handle_id, other_handle_id, rel_type):
    """Create a relationship from a Relation node.

    Allowed combinations:
      Logical  <- Uses, Provides
      Location <- Responsible_for
      Physical <- Owns, Provides
    Raises NoRelationshipPossible for anything else.
    """
    allowed_rels = {
        'Logical': ('Uses', 'Provides'),
        'Location': ('Responsible_for',),
        'Physical': ('Owns', 'Provides'),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type in allowed_rels.get(other_meta_type, ()):
        return _create_relationship(manager, relation_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(relation_handle_id, 'Relation', other_handle_id, other_meta_type, rel_type)
def create_physical_relationship(manager, physical_handle_id, other_handle_id, rel_type):
    """Create a relationship from a Physical node.

    Allowed combinations:
      Physical <- Has, Connected_to
      Location <- Located_in
    Raises NoRelationshipPossible for anything else.
    """
    allowed_rels = {
        'Physical': ('Has', 'Connected_to'),
        'Location': ('Located_in',),
    }
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    if rel_type in allowed_rels.get(other_meta_type, ()):
        return _create_relationship(manager, physical_handle_id, other_handle_id, rel_type)
    raise exceptions.NoRelationshipPossible(physical_handle_id, 'Physical', other_handle_id, other_meta_type, rel_type)
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """Create a relationship, dispatching on the source node's meta type.

    Returns the new relationship, or raises NoRelationshipPossible when the
    combination of meta types and rel_type is not allowed.
    """
    dispatch = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    meta_type = get_node_meta_type(manager, handle_id)
    handler = dispatch.get(meta_type)
    if handler is not None:
        return handler(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
def get_relationships(manager, handle_id1, handle_id2, rel_type=None, legacy=True):
    """
    Takes a start and an end node with an optional relationship
    type.
    Returns the relationships between the nodes or an empty list.
    When *legacy* is True the internal relationship ids are returned
    instead of the relationship objects.
    """
    if rel_type:
        # rel_type is interpolated (relationship types cannot be
        # parameterized); the doubled braces escape to literal Cypher braces.
        q = """
            MATCH (a:Node {{handle_id: {{handle_id1}}}})-[r:{rel_type}]-(b:Node {{handle_id: {{handle_id2}}}})
            RETURN collect(r) as relationships
            """.format(rel_type=rel_type)
    else:
        q = """
            MATCH (a:Node {handle_id: {handle_id1}})-[r]-(b:Node {handle_id: {handle_id2}})
            RETURN collect(r) as relationships
            """
    with manager.session as s:
        if legacy:
            relationships = s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
            return [relationship.id for relationship in relationships]
        return s.run(q, {'handle_id1': handle_id1, 'handle_id2': handle_id2}).single()['relationships']
def set_node_properties(manager, handle_id, new_properties, legacy=True):
    """Replace all of a node's properties with *new_properties*.

    The handle_id property is always pinned to *handle_id* and cannot be
    overridden through this call.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id of the node to update
    :param new_properties: Mapping of the properties the node should have
    :param legacy: Return a plain dict instead of a Node object
    :rtype: dict|neo4j.v1.types.Node
    """
    # Work on a copy so the caller's dict is not mutated as a side effect
    # (the previous implementation wrote handle_id back into the argument).
    props = dict(new_properties)
    props['handle_id'] = handle_id
    q = """
    MATCH (n:Node {handle_id: {props}.handle_id})
    SET n = {props}
    RETURN n
    """
    with manager.session as s:
        node = s.run(q, {'handle_id': handle_id, 'props': props}).single()['n']
    return node.properties if legacy else node
def set_relationship_properties(manager, relationship_id, new_properties):
    """Replace all properties on the relationship with *new_properties*."""
    query = """
    MATCH ()-[r]->()
    WHERE ID(r) = {relationship_id}
    SET r = {props}
    RETURN r
    """
    params = {'relationship_id': int(relationship_id), 'props': new_properties}
    with manager.session as s:
        return s.run(query, params).single()
def get_node_model(manager, handle_id=None, node=None):
    """
    Resolve and load the most specific node model class for a node.

    Resolution order (first existing class wins):
      1. '<MetaType><Label>Model' for each label
      2. '<Label>Model' for each label
      3. '<MetaType>Model'
      4. models.BaseNodeModel as the final fallback

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    meta_type = bundle.get('meta_type')
    labels = bundle.get('labels')
    # Candidate class names, most specific first.  Underscores are stripped
    # to match the model naming convention (e.g. 'Optical_Node' -> 'OpticalNodeModel').
    candidates = ['{meta_type}{base}Model'.format(meta_type=meta_type, base=label).replace('_', '')
                  for label in labels]
    candidates += ['{base}Model'.format(base=label).replace('_', '') for label in labels]
    candidates.append('{base}Model'.format(base=meta_type))
    for classname in candidates:
        # Use getattr with a default rather than try/except AttributeError:
        # the old pattern also swallowed AttributeErrors raised *inside* a
        # model's constructor or load(), silently falling back to the wrong
        # model class and hiding real bugs.
        model_class = getattr(models, classname, None)
        if model_class is not None:
            return model_class(manager).load(bundle)
    return models.BaseNodeModel(manager).load(bundle)
|
NORDUnet/python-norduniclient | norduniclient/helpers.py | merge_properties | python | def merge_properties(item_properties, prop_name, merge_value):
existing_value = item_properties.get(prop_name, None)
if not existing_value: # A node without existing values for the property
item_properties[prop_name] = merge_value
else:
if type(merge_value) is int or type(merge_value) is str:
item_properties[prop_name] = existing_value + merge_value
elif type(merge_value) is list:
item_properties[prop_name] = merge_list(existing_value, merge_value)
else:
return False
return item_properties | Tries to figure out which type of property value that should be merged and
invoke the right function.
Returns new properties if the merge was successful otherwise False. | train | https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/helpers.py#L34-L50 | [
"def merge_list(existing_value, new_value):\n \"\"\"\n Takes the name of a property, a list of new property values and the existing\n node values.\n Returns the merged properties.\n \"\"\"\n new_set = set(existing_value + new_value)\n return list(new_set)\n"
] | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from six import text_type
__author__ = 'lundberg'
def normalize_whitespace(s):
    """Collapse internal whitespace runs to single spaces and trim both ends."""
    tokens = s.split()
    return ' '.join(tokens)
def lowerstr(s):
    """Coerce *s* to text, lower-case it, and normalize its whitespace."""
    text = text_type(s).lower()
    return normalize_whitespace(text)
def update_item_properties(item_properties, new_properties):
    """Merge *new_properties* into *item_properties* in place.

    Truthy values (and integer zero) are stored; a falsy value removes the
    key from *item_properties* when present.  Returns the mutated dict.
    """
    for key, value in new_properties.items():
        # Keep integer 0 as a legitimate value.  The original test was
        # ``value is 0``, which relies on CPython's small-int caching and
        # emits a SyntaxWarning on modern Pythons; bool is excluded so that
        # False still removes the key, matching the old identity check.
        keep_zero = isinstance(value, int) and not isinstance(value, bool) and value == 0
        if value or keep_zero:
            item_properties[key] = value
        elif key in item_properties:
            del item_properties[key]
    return item_properties
# TODO: Does this helper make any sense?
def merge_list(existing_value, new_value):
    """Return the deduplicated union of two lists (element order unspecified)."""
    return list(set(existing_value) | set(new_value))
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | make_gpg_home | python | def make_gpg_home(appname, config_dir=None):
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
path = os.path.join( config_dir, "gpgkeys", appname )
if not os.path.exists(path):
os.makedirs( path, 0700 )
else:
os.chmod( path, 0700 )
return path | Make GPG keyring dir for a particular application.
Return the path. | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L87-L102 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def is_valid_appname(appname):\n \"\"\"\n Appname must be url-safe\n \... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """Return *config_dir*, falling back to the configured default directory."""
    if config_dir is not None:
        return config_dir
    return get_config()['dir']
def is_valid_appname(appname):
    """Return True when *appname* contains only URL-safe characters."""
    # RFC 3986 unreserved characters, except for '.'
    url_regex = '^[a-zA-Z0-9-_~]+$'
    return re.match(url_regex, appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe
    """
    # Key names share the same URL-safe character rules as app names.
    return is_valid_appname(keyname)
def get_gpg_home( appname, config_dir=None ):
    """Return the path of the GPG keyring directory for *appname*.

    The directory is not created; see make_gpg_home for that.
    """
    assert is_valid_appname(appname)
    base = get_config_dir( config_dir )
    return os.path.join( base, "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """Former default GPG home lookup; kept only for API compatibility.

    :raises Exception: always -- this code path must not be used.
    """
    # Fixed the message typo ('Should ever' -> 'Should never') and removed
    # the unreachable ``return os.path.expanduser(...)`` that followed it.
    raise Exception("Should never be called")
def make_gpg_tmphome( prefix=None, config_dir=None ):
"""
Make a temporary directory to hold GPG keys that are not
going to be stored to the application's keyring.
"""
if prefix is None:
prefix = "tmp"
config_dir = get_config_dir( config_dir )
tmppath = os.path.join( config_dir, "tmp" )
if not os.path.exists( tmppath ):
os.makedirs( tmppath, 0700 )
tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0
    # Default to the per-app keyring unless an explicit gpghome is given.
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )
    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None
    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; remove the secret part too
        res = gpg.delete_keys( [key_id], secret=True )
    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False
    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success
    Return None on error
    """
    config_dir = get_config_dir( config_dir )
    # use a throwaway keyring so the downloaded key never touches app keyrings
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        # the server must have returned exactly one key
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID (fingerprint) of a given serialized key
    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # import into a throwaway keyring just to read the fingerprint
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )
    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        # tmpdir is removed on both the success and failure paths
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """Check that *key_data*, once imported, matches *key_id*.

    Returns True when the fingerprint of the serialized key equals (or ends
    with) the sanitized key id; False on any mismatch or failure.
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # normalize: upper-case and drop any embedded spaces
    sanitized_key_id = "".join( key_id.upper().split(" ") )
    if len(sanitized_key_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False
    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False
    # A short id is accepted when it matches the tail of the full fingerprint
    # (a full-length match is just the degenerate case of endswith).
    return fingerprint.endswith( sanitized_key_id )
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key from the app keyring, given the ID.
    Set include_private=True to export the secret key as well.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
    # NOTE(review): assert is stripped under ``python -O``; consider raising
    # an explicit exception here instead.
    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """List all PGP keys in a user profile.

    Returns a list of {'identifier': key ID, 'contentUrl': URL to the key
    data, ['keyName': name]} on success; {'error': ...} on failure.
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    # keep only the pgp accounts, carrying keyName through when present
    listing = []
    for account in accounts['accounts']:
        if account['service'] != 'pgp':
            continue
        entry = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }
        if 'keyName' in account:
            entry['keyName'] = account['keyName']
        listing.append(entry)
    return listing
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: this function is currently disabled -- it raises immediately, and
    everything after the first raise below is dead code kept for reference
    (it depends on the removed list_mutable_data API).
    """
    raise Exception("BROKEN; depends on list_mutable_data")
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    key_info = []
    key_prefix = "gpg.%s." % appname
    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])
    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })
    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])
    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })
    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs and bare
    key-server names (optionally prefixed with iks://).
    The key is not accepted into any keyrings.

    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    # normalize up front: a None config_dir previously crashed os.path.join()
    # in the blockstack:// branch below
    config_dir = get_config_dir( config_dir )

    dat = None
    from_blockstack = False

    # make sure it's a parseable URL
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None

            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])

            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()
        except Exception as e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)

            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol,
        # using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol or bare hostname: fetch from a PGP key server
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no pre-existing URL: we must replicate the key data ourselves
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # gnupg reports send errors via non-empty result data
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # fixed typo: was "repliate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage (zonefile update; slow)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the key in the profile's account listing
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    delete_errors = []

    # best-effort: blow away the replicated key data for each removed account
    for removed in res['removed']:
        key_url = removed.get('contentUrl')
        if key_url is None:
            continue

        if not key_url.startswith("blockstack://"):
            continue

        # delete the underlying data
        try:
            delete_res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in delete_res:
                delete_errors.append({'key_url': key_url, 'message': delete_res['error']})

        except AssertionError as ae:
            log.exception(ae)
            log.error("Failed to delete '%s'" % key_url)
            raise

        except Exception as e:
            log.exception(e)
            log.error("Failed to delete '%s'" % key_url)
            continue

    ret = {'status': True}
    if delete_errors:
        ret['delete_errors'] = delete_errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.
    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate the key pair in a throwaway keyring
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint

    # export the *secret* key as well: the temporary keydir is destroyed
    # below, so stashing only the public half would irrecoverably lose the
    # private key we just generated (mirrors gpg_app_create_key)
    key_data = gpg.export_keys( [key_id], secret=True )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack (replicates only the public half)
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if not accounts:
        return {'error': 'No accounts in this profile'}

    # narrow down to PGP accounts
    all_gpg_accounts = [a for a in accounts if a['service'] == 'pgp']
    if not all_gpg_accounts:
        return {'error': 'No GPG accounts in this profile'}

    # match by key name, or by key ID if one was given
    gpg_accounts = [ga for ga in all_gpg_accounts
                    if ('keyName' in ga and ga['keyName'] == keyname) or
                       (key_id is not None and ga['identifier'] == key_id)]

    if not gpg_accounts:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    account = gpg_accounts[0]
    key_url = account.get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=account['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    return {
        'status': True,
        'key_id': account['identifier'],
        'key_data': key_data
    }
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.
    Return {'status': True, 'key_url': ..., 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.
    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    # stash the given key material (may include the private half) locally
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}
    # get public key... (only the public half is replicated off-host)
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}
    # fully-qualified name under which the public key is replicated
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if not immutable:
        # mutable storage: available immediately, versioned URL
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
    else:
        # immutable storage: zonefile update, requires blockchain confirmation
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error
    If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take
    on the order of an hour to complete on the blockchain. A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    # fetch the stored public key FIRST: we need its fingerprint in order to
    # remove the matching private key from the local keyring after the delete
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    # stored as a single-entry dict {fully-qualified key name: key data}
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete the replicated public key
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash the local private key (best-effort outside of testing)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate the key pair in a throwaway keyring
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )

    # verify key generation succeeded before trusting the fingerprint
    # (same sanity checks as gpg_profile_create_key)
    assert key_res
    key_id = key_res.fingerprint

    # export both halves; the throwaway keyring is destroyed below
    key_data = gpg.export_keys( [key_id], secret=True )
    assert key_data

    shutil.rmtree(keydir)

    # propagate to blockstack (this also stashes the key to the app keyring)
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # construct the URL to the replicated key data
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    # derive the fingerprint if the caller did not supply one
    if key_id is None:
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the signing key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        try:
            sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
        except Exception as e:
            log.exception(e)
            return {'error': 'No such private key'}

        res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            return {'error': 'Failed to load sender private key'}

        # do the (detached) signature
        gpg = gnupg.GPG( homedir=tmpdir )
        res = None
        with open(path_to_sign, "r") as fd_in:
            res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    finally:
        # always remove the temporary keyring, even if open()/sign_file raised
        # (previously the tmpdir leaked on exceptions)
        shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash the detached signature to a temporary file
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    try:
        f = os.fdopen(fd, "w")
        f.write( sigdata )
        f.flush()
        os.fsync(f.fileno())
        f.close()

        # verify
        gpg = gnupg.GPG( homedir=tmpdir )
        with open(path, "r") as fd_in:
            res = gpg.verify_file( fd_in, data_filename=path_to_verify )
    finally:
        # always clean up the keyring and signature file, even on exception
        # (previously both leaked if verify_file raised)
        shutil.rmtree(tmpdir)
        try:
            os.unlink(path)
        except:
            pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # fixed copy-paste error: this is a verification failure, not decryption
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    @recipient_key_infos is a list of dicts of the same shape.
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our key (the private half, so we can sign the ciphertext)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption, signed as the sender
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring:
    # the sender's public key (for signature verification)...
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # ...and our private key (for decryption)
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | get_gpg_home | python | def get_gpg_home( appname, config_dir=None ):
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
path = os.path.join( config_dir, "gpgkeys", appname )
return path | Get the GPG keyring directory for a particular application.
Return the path. | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L105-L113 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def is_valid_appname(appname):\n \"\"\"\n Appname must be url-safe\n \... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.
    If config_dir is given, return it unchanged; otherwise fall back to
    the directory named in the loaded client config.
    """
    if config_dir is not None:
        return config_dir

    config = get_config()
    return config['dir']
def is_valid_appname(appname):
    """
    An application name must be URL-safe: one or more of the
    RFC 3986 unreserved characters, except for '.'.
    """
    # letters, digits, '-', '_', '~' only; the empty string is invalid
    return re.match(r'^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    A key name must be URL-safe, under the same rules as application names.
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Create (or re-permission) the GPG keyring directory for an application.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    # make sure the keyring dir exists and is private to this user
    if os.path.exists(path):
        os.chmod( path, 0o700 )
    else:
        os.makedirs( path, 0o700 )

    return path
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (global) GPG keyring directory.

    NOTE: this code path is intentionally disabled -- callers are expected
    to use per-application keyrings instead.  Raises unconditionally.
    """
    # fixed typo in the exception message (was "Should ever be called")
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Create a unique temporary directory to hold GPG keys that are not
    going to be stored to any application keyring.
    Return the path; the caller is responsible for removing it.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmp_root = os.path.join( config_dir, "tmp" )

    # make sure the parent tmp dir exists and is private
    if not os.path.exists( tmp_root ):
        os.makedirs( tmp_root, 0o700 )

    return tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmp_root )
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0
    if gpghome is None:
        # default destination: the app-specific keyring
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )
    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None
    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; remove the secret half
        # NOTE(review): only the secret half is deleted on this retry; the
        # public half may remain in the keyring -- confirm this is intended
        res = gpg.delete_keys( [key_id], secret=True )
    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False
    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success
    Return None on error
    """
    config_dir = get_config_dir( config_dir )
    # receive into a throwaway keyring so nothing persists locally
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        # expect exactly one received key
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key
    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # import into a throwaway keyring just to read the fingerprint back
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )
    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize the expected ID: upper-case, spaces stripped
    sanitized_key_id = "".join( key_id.upper().split(" ") )
    if len(sanitized_key_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept an exact fingerprint match, or a match against the trailing
    # (long key ID) portion of the fingerprint
    if fingerprint == sanitized_key_id or fingerprint.endswith( sanitized_key_id ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID

    @appname: application whose keyring holds the key
    @key_id: fingerprint (or key ID) of the key to export
    @include_private: if True, export the secret key instead of the public one

    Return the ASCII-armored key data
    Raise AssertionError if the key could not be exported
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile.
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data}
    (plus 'keyName' when the account has one) on success.
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    # extract only the PGP accounts
    ret = []
    for account in accounts.pop('accounts'):
        if account['service'] != 'pgp':
            continue

        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }
        if 'keyName' in account:
            info['keyName'] = account['keyName']

        ret.append(info)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: intentionally disabled -- it depends on list_mutable_data, which is
    not available.  Everything after the raise below is dead code, kept for
    reference until the dependency is restored.
    """
    raise Exception("BROKEN; depends on list_mutable_data")
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    key_info = []
    # keys for this app are namespaced as "gpg.<appname>.<keyname>"
    key_prefix = "gpg.%s." % appname
    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])
    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })
    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])
    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })
    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs.
    If the URL has no scheme (or uses iks://), then assume it's a PGP key server,
    and use GPG to go get it.
    The key is not accepted into any keyrings.

    Return the key data on success. If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    # Bugfix: normalize config_dir up front.  The blockstack:// branch below
    # joins it into a path, which would crash if it were left as None
    # (its default value).
    config_dir = get_config_dir( config_dir )

    dat = None
    from_blockstack = False

    # make sure it's a parseable URL at all
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None

            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])

            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()

        except Exception as e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)

            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol,
        # using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol: fetch from a PGP keyserver instead
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key
    server and to either immutable (if @immutable) or mutable data.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no caller-supplied location: replicate the key ourselves
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # non-empty output from gpg indicates an upload error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # bugfix: typo "repliate" -> "replicate" in the error message
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage, named by key_name if given
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # register the key under the profile's 'pgp' accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all state: for each removed account entry that points at
    # blockstack-hosted data, delete the replicated key data too
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete the replicated key data; best-effort
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError, e:
                # assertion failures indicate a bug; re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception, e:
                # anything else: log and keep deleting the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate in a throwaway keyring first, so a failed run leaves no state
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res

    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Look up a named PGP key in a blockchain ID's profile and download it.
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    # narrow down to PGP accounts...
    pgp_accounts = [a for a in accounts if a['service'] == 'pgp']
    if len(pgp_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # ...then to those matching this key name (or the given key ID)
    matches = [a for a in pgp_accounts
               if ('keyName' in a and a['keyName'] == keyname) or (key_id is not None and a['identifier'] == key_id)]

    if len(matches) == 0:
        return {'error': 'No such GPG key found'}

    if len(matches) > 1:
        return {'error': 'Multiple keys with that name'}

    account = matches[0]
    key_url = account.get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=account['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    return {
        'status': True,
        'key_id': account['identifier'],
        'key_data': key_data
    }
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.
    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    # stash the given key material to the app-specific local keyring first
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    # replicate only the *public* key to blockstack storage
    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    # fetch the public key first: we need its fingerprint to locate
    # and remove the matching private key in the local keyring later
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete the replicated public key
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key; failure here is only a warning
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    config_dir = get_config_dir() if config_dir is None else config_dir
    conf_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    proxy = blockstack_client.get_default_proxy(config_path=conf_path) if proxy is None else proxy

    # generate the key pair inside a throwaway keyring
    scratch_home = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg_ctx = gnupg.GPG( homedir=scratch_home )

    log.debug("Generating GPG key (this may take a while)")
    gen_params = gpg_ctx.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    generated = gpg_ctx.gen_key( gen_params )

    fingerprint = generated.fingerprint
    exported_secret = gpg_ctx.export_keys( [fingerprint], secret=True )
    shutil.rmtree(scratch_home)

    # stash locally and propagate the public half to blockstack
    return gpg_app_put_key( blockchain_id, appname, keyname, exported_secret, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG public key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    # build the blockstack URL for where the key's data lives
    if immutable:
        # try immutable
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        # try mutable
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # derive the fingerprint from the downloaded key material
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }

    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys into a throwaway keyring, so we don't touch app keyrings
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )

    # load the sender's private key from its app keyring
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature (detached: the signed file itself is unmodified)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None

    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify that a detached signature over a file on disk was made by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a temp file, since gnupg verifies
    # detached signatures from a file path
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        # best-effort cleanup of the temporary signature file
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # bugfix: this is a verification failure, not a decryption failure
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys, signed by the sender.
    @sender_key_info and each entry of @recipient_key_infos should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest all recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key, so we can sign the ciphertext
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key (for signature checks) into a
    # throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # load our private key from its app keyring
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | make_gpg_tmphome | python | def make_gpg_tmphome( prefix=None, config_dir=None ):
if prefix is None:
prefix = "tmp"
config_dir = get_config_dir( config_dir )
tmppath = os.path.join( config_dir, "tmp" )
if not os.path.exists( tmppath ):
os.makedirs( tmppath, 0700 )
tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
return tmpdir | Make a temporary directory to hold GPG keys that are not
going to be stored to the application's keyring. | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L125-L139 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n"
] | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.

    If a directory is supplied, return it unchanged; otherwise fall back
    to the 'dir' entry of the loaded Blockstack client configuration.
    """
    if config_dir is not None:
        return config_dir

    return get_config()['dir']
def is_valid_appname(appname):
    """
    Check that an application name is URL-safe.

    Allowed characters are the RFC 3986 unreserved set minus '.':
    letters, digits, '-', '_' and '~'.  The name must be non-empty.
    Return True if valid, False otherwise.
    """
    return re.match(r'^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe.
    Same character rules as application names.
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make (and permission) the GPG keyring dir for a particular application.
    The directory is created with mode 0700 if missing, and chmod'ed to
    0700 if it already exists, to keep the keyring private (it may hold
    secret keys).
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    if not os.path.exists(path):
        # 0o700 replaces the Python-2-only octal literal 0700 (same value)
        os.makedirs( path, 0o700 )
    else:
        os.chmod( path, 0o700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Locate (but do not create) the GPG keyring directory for an application.
    Return the path.
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Path to the user's default GPG home (~/.gnupg).

    Intentionally disabled: always raises.  (The original docstring was
    copy-pasted from get_gpg_home and did not describe this function.)
    """
    # bugfix: the original message read "Should ever be called"
    raise Exception("Should never be called")

    # unreachable; kept to document what this would have returned
    return os.path.expanduser("~/.gnupg")
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)

    # python-gnupg wants str, not unicode
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # expect exactly one key imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a key pair: GPG requires deleting the secret part first
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success
    Return None on failure
    """
    config_dir = get_config_dir( config_dir )

    # receive into a throwaway keyring, then export and discard it
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        # expect exactly one key back from the server
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID (fingerprint) of a given serialized key
    Return the fingerprint on success
    Return None on error
    """
    # python-gnupg wants str, not unicode
    key_data = str(key_data)

    config_dir = get_config_dir( config_dir )

    # import into a throwaway keyring just to read back the fingerprint
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )

    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Return True on success
    Return False on error
    """
    keyblob = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize the expected ID: uppercase, spaces stripped
    wanted = "".join( key_id.upper().split(" ") )
    if len(wanted) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    actual = gpg_key_fingerprint( keyblob, config_dir=config_dir )
    if actual is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept an exact match, or a match on the trailing part of the
    # fingerprint (short IDs are suffixes of the full fingerprint)
    if actual == wanted or actual.endswith( wanted ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key from an app-specific keyring, given the ID.
    If include_private is True, export the private key instead of the public one.
    Raises AssertionError if the key could not be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all PGP keys registered in a user's profile.
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data}
    (plus 'keyName', when the account entry carries one) on success.
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    conf_path = os.path.join( config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=conf_path )

    listing = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in listing:
        return listing

    # keep only the 'pgp' accounts, projecting out the fields callers use
    results = []
    for acct in listing.pop('accounts'):
        if acct['service'] != 'pgp':
            continue

        entry = {
            "identifier": acct['identifier'],
            "contentUrl": acct['contentUrl']
        }
        if 'keyName' in acct.keys():
            entry['keyName'] = acct['keyName']

        results.append(entry)

    return results
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: intentionally disabled -- the mutable-data branch below depends on
    list_mutable_data, which is unavailable, so this raises unconditionally.
    Everything after the raise is dead code, kept for reference.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs, and falls
    back to treating the URL as a PGP key server name (or iks:// URL).
    The key is not accepted into any keyrings.
    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False     # True when data comes via the blockstack:// handler (already verified by the protocol)

    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None

            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])

            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()

        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)

            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol (or bare hostname): fetch from a PGP keyserver
        key_server = key_url
        if '://' in key_server:
            # keep only the host portion
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
        # NOTE: this path asserts (rather than returning None) on download failure
        assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)

    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.

    @key_id is the fingerprint of a key already present in @gpghome.
    @key_name, if given, must be URL-safe and is recorded as the account's 'keyName'.
    @txid, if given, resumes a pending immutable-data write.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}            # holds tx/zonefile hashes from an immutable-data write
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no URL given; replicate the key data ourselves and derive a URL for it
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # gnupg reports send errors through res.data
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # bugfix: error message previously read "repliate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage (updates the zonefile; slow)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain.  Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage, named by key_name when available
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s.  Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record an account entry that points at the key data
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all state: also delete the replicated key data each removed account pointed to
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # the key data lives in blockstack storage; delete it too
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError, e:
                # assertion failures indicate a bug rather than a transient error: re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception, e:
                # best-effort: record nothing here, just log and keep going
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.
    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate in a throwaway GPG home, then stash to the global keyring
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res

    key_id = key_res.fingerprint
    # NOTE(review): only the public part is exported here (no secret=True); the
    # private key is destroyed with the temp dir below -- confirm this is intended
    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Look up a named GPG key among a blockchain ID's profile accounts, then
    fetch and verify it.
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    account_info = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in account_info:
        return account_info

    account_list = account_info.pop('accounts')
    if len(account_list) == 0:
        return {'error': 'No accounts in this profile'}

    # keep only PGP accounts
    pgp_accounts = [a for a in account_list if a['service'] == 'pgp']
    if len(pgp_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # narrow down to the account with the requested key name (or the given fingerprint)
    matches = [a for a in pgp_accounts if ('keyName' in a and a['keyName'] == keyname) or (key_id is not None and a['identifier'] == key_id)]
    if len(matches) == 0:
        return {'error': 'No such GPG key found'}

    if len(matches) > 1:
        return {'error': 'Multiple keys with that name'}

    key_account = matches[0]
    key_url = key_account.get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=key_account['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    return {
        'status': True,
        'key_id': key_account['identifier'],
        'key_data': key_data
    }
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.
    Return {'status': True, 'key_url': ..., 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    # stash the given (private) key into the app-specific keyring
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    # published keys are namespaced as "gpg.<appname>.<keyname>"
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        # replicate the public key to mutable storage
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        # replicate the public key to immutable storage (zonefile update; slow)
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    # fetch the published public key first: its fingerprint is needed to find
    # and remove the matching private key from the local keyring afterward
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # expect the single {fq_key_name: pubkey} mapping written by gpg_app_put_key
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete the published key
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key (best-effort outside of tests)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096), generated in a throwaway GPG home.
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")

    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )

    # bugfix: verify generation succeeded before touching key_res.fingerprint
    # (consistent with gpg_profile_create_key)
    assert key_res

    key_id = key_res.fingerprint
    # secret=True: the app keyring needs the private part for signing/decryption
    key_data = gpg.export_keys( [key_id], secret=True )

    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Fetch an app-specific GPG public key from immutable or mutable storage.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # immutable keys are addressed by hash; mutable ones by version
    if immutable:
        pubkey_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        pubkey_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % pubkey_url)

    pubkey_data = gpg_fetch_key( pubkey_url, key_id, config_dir=config_dir )
    if pubkey_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # caller did not pin a fingerprint; derive it from the fetched key
        key_id = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': pubkey_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk with the sender's locally-stashed private key.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: export our private key from the app keyring into a throwaway GPG home
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature (detached, so the data file itself is untouched)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None

    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify that a file on disk was signed by the given sender.

    @path_to_verify: path to the data to check.
    @sigdata: detached signature data (as produced by gpg_sign).
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a temp file, since gnupg works on file handles
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )

    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # bugfix: this error previously said 'Failed to decrypt data'
        # (copy-pasted from gpg_decrypt), which was misleading
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of recipient keys, signed by the sender.
    @sender_key_info and each entry of @recipient_key_infos should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: every recipient public key goes into a throwaway GPG home
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key (private, since we sign with it)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption
    gpg = gnupg.GPG( homedir=tmpdir )
    # always_trust: the throwaway keyring has no trust web, so trust must be forced
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: the sender's public key (presumably so GPG can check an
    # embedded signature -- TODO confirm), plus our own private key
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    # always_trust: the throwaway keyring has no trust web, so trust must be forced
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_stash_key | python | def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
assert is_valid_appname(appname)
key_bin = str(key_bin)
assert len(key_bin) > 0
if gpghome is None:
config_dir = get_config_dir( config_dir )
keydir = make_gpg_home( appname, config_dir=config_dir )
else:
keydir = gpghome
gpg = gnupg.GPG( homedir=keydir )
res = gpg.import_keys( key_bin )
try:
assert res.count == 1, "Failed to store key (%s)" % res
except AssertionError, e:
log.exception(e)
log.error("Failed to store key to %s" % keydir)
log.debug("res: %s" % res.__dict__)
log.debug("(%s)\n%s" % (len(key_bin), key_bin))
return None
return res.fingerprints[0] | Store a key locally to our app keyring.
Does NOT put it into a blockchain ID
Return the key ID on success
Return None on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L142-L172 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def is_valid_appname(appname):\n \"\"\"\n Appname must be url-safe\n \... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.

    A directory supplied by the caller is returned unchanged; otherwise
    the path is taken from the client's loaded configuration.
    """
    if config_dir is not None:
        return config_dir

    conf = get_config()
    return conf['dir']
def is_valid_appname(appname):
    """
    Check that an application name is URL-safe.

    Accepts only RFC 3986 unreserved characters, except for '.':
    letters, digits, '-', '_', and '~'.
    """
    return re.match(r'^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be URL-safe (same character rules as application names).
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application.
    The directory is created with (or tightened to) mode 0700, since GPG
    requires the keyring directory to be private.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    if not os.path.exists(path):
        os.makedirs( path, 0700 )
    else:
        os.chmod( path, 0700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Compute the path to an application's GPG keyring directory.
    The directory is not created; see make_gpg_home() for that.
    """
    assert is_valid_appname(appname)
    base_dir = get_config_dir( config_dir )
    return os.path.join( base_dir, "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (user-global) GPG keyring directory.

    Deliberately disabled: callers are expected to use app-specific
    keyrings, so reaching this function indicates a bug.

    Raises:
        Exception: always.
    """
    # bugfix: message previously read "Should ever be called"
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")   # unreachable; documents the old behavior
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.
    The caller is responsible for removing it (shutil.rmtree).
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # GPG requires private (0700) directories
        os.makedirs( tmppath, 0700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; GPG demands the secret part be deleted first
        # NOTE(review): only the secret part is deleted here -- the public part
        # may remain in the keyring; confirm against python-gnupg semantics
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )

    # receive into a throwaway GPG home so no real keyring is touched
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        # expect exactly one key back
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]

    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
"""
Get the key ID of a given serialized key
Return the fingerprint on success
Return None on error
"""
key_data = str(key_data)
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.import_keys( key_data )
try:
assert res.count == 1, "Failed to import key"
assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
fingerprint = res.fingerprints[0]
shutil.rmtree(tmpdir)
return fingerprint
except AssertionError, e:
log.exception(e)
shutil.rmtree(tmpdir)
return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Check that serialized key data, when imported, carries the given key ID
    (full fingerprint, or a suffix of it at least 16 hex digits long).
    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize the expected ID: uppercase, spaces removed
    expected_id = "".join( key_id.upper().split(" ") )
    if len(expected_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    if fingerprint == expected_id or fingerprint.endswith( expected_id ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Export the ASCII-armored key with the given ID from an app's keyring.
    Asserts that the key exists (and, if include_private, that the secret
    part is present).
    """
    assert is_valid_appname(appname)

    config_dir = get_config_dir( config_dir )
    keyring_dir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keyring_dir )

    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keyring_dir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile.
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data}
    (plus 'keyName' for accounts that carry one) on success.
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    account_listing = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in account_listing:
        return account_listing

    # extract just the PGP accounts
    pgp_keys = []
    for acct in account_listing.pop('accounts'):
        if acct['service'] != 'pgp':
            continue

        entry = {
            "identifier": acct['identifier'],
            "contentUrl": acct['contentUrl']
        }
        if 'keyName' in acct:
            entry['keyName'] = acct['keyName']

        pgp_keys.append(entry)

    return pgp_keys
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: intentionally disabled -- it depends on list_mutable_data, which is
    unavailable.  Everything after the raise below is unreachable until fixed.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    # app keys are namespaced as "gpg.<appname>.<keyname>"
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs and
    (for URLs with no scheme, or iks://) PGP key servers via GPG itself.
    The key is not accepted into any keyrings.

    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False

    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    if "://" in key_url and not key_url.lower().startswith("iks://"):

        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            # data will be verified by the Blockstack protocol itself
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None

            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])

            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            # cross-check the fetched key's fingerprint against the expected ID
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            # strip the scheme; GPG expects a bare host name
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key
    server and to either immutable (if @immutable) or mutable data.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no pre-existing URL: export the key ourselves and replicate it
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            # (gnupg reports failure by returning data from send_keys)
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # BUGFIX: error message used to read "repliate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage (hash goes into the zonefile)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage (stored in the profile)
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the key in the profile's account list
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all state: best-effort removal of the replicated key data
    # referenced by each deleted account entry
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError, e:
                # assertion failures indicate a bug, so propagate
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception, e:
                # any other failure is tolerated; record it and keep going
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    try:
        gpg = gnupg.GPG( homedir=keydir )

        log.debug("Generating GPG key (this may take a while)")
        key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
        key_res = gpg.gen_key( key_input )
        assert key_res

        key_id = key_res.fingerprint
        key_data = gpg.export_keys( [key_id] )
        assert key_data

        # save the key itself, to the global keyring
        rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
        assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    finally:
        # never leave the temporary keyring (and the fresh private key in it)
        # on disk, even if generation/export/stashing fails
        shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Look up a named GPG key in a blockchain ID's profile accounts.

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    # narrow down to PGP accounts only
    pgp_accounts = [acct for acct in accounts if acct['service'] == 'pgp']
    if len(pgp_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name (or, if given, this key ID)
    matches = [acct for acct in pgp_accounts
               if ('keyName' in acct and acct['keyName'] == keyname) or (key_id is not None and acct['identifier'] == key_id)]

    if len(matches) == 0:
        return {'error': 'No such GPG key found'}

    if len(matches) > 1:
        return {'error': 'Multiple keys with that name'}

    account = matches[0]
    key_url = account.get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=account['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    return {
        'status': True,
        'key_id': account['identifier'],
        'key_data': key_data
    }
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.
    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    try:
        # keep the given (possibly private) key material in the
        # app-specific local keyring
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    # (only the public part is replicated to Blockstack storage)
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    # fully-qualified storage name: gpg.<app>.<keyname>
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        # store to the profile; address the key by version
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        # store to the zonefile; address the key by hash
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    # first fetch the public key we are about to delete, so we can learn
    # its fingerprint and remove the matching private key locally below
    if not immutable:
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # data is stored as a single-entry {name: PEM} dict (see gpg_app_put_key)
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash (best-effort: failure is reported as a warning, not an error)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    try:
        gpg = gnupg.GPG( homedir=keydir )

        log.debug("Generating GPG key (this may take a while)")
        key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
        key_res = gpg.gen_key( key_input )

        # BUGFIX: key generation can fail (e.g. insufficient entropy); fail
        # loudly instead of proceeding with an empty fingerprint
        # (consistent with gpg_profile_create_key)
        assert key_res, "Failed to generate GPG key"

        key_id = key_res.fingerprint
        key_data = gpg.export_keys( [key_id], secret=True )
    finally:
        # never leave the temporary keyring (with the new private key)
        # behind, even on failure
        shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Fetch an app-specific GPG key from a blockchain ID's storage.

    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    if immutable:
        # immutable data lives in the zonefile; address it by hash
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        # mutable data lives in the profile; address it by version
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # caller didn't tell us the fingerprint; derive it from the key itself
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
"""
Sign a file on disk.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True, 'sig': ...} on success
Return {'error': ...} on error
"""
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
try:
sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
except Exception, e:
log.exception(e)
shutil.rmtree(tmpdir)
return {'error': 'No such private key'}
res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load sender private key'}
# do the signature
gpg = gnupg.GPG( homedir=tmpdir )
res = None
with open(path_to_sign, "r") as fd_in:
res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
shutil.rmtree(tmpdir)
if not res:
log.debug("sign_file error: %s" % res.__dict__)
log.debug("signer: %s" % sender_key_info['key_id'])
return {'error': 'Failed to sign data'}
return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    sigfile_path = None
    try:
        res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

        # stash detached signature to a temporary file for gpg to read
        fd, sigfile_path = tempfile.mkstemp( prefix=".sig-verify-" )
        f = os.fdopen(fd, "w")
        f.write( sigdata )
        f.flush()
        os.fsync(f.fileno())
        f.close()

        # verify
        gpg = gnupg.GPG( homedir=tmpdir )
        with open(sigfile_path, "r") as fd_in:
            res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    finally:
        # BUGFIX: always clean up the temporary keyring and signature file,
        # even if an intermediate step raises
        shutil.rmtree(tmpdir)
        if sigfile_path is not None:
            try:
                os.unlink(sigfile_path)
            except:
                pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # BUGFIX: message used to say 'Failed to decrypt data' (copy-paste
        # from gpg_decrypt); this function verifies, it does not decrypt
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: all recipient public keys go into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key (the sender's private key, used for signing)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption, signing with the sender's key so recipients can
    # authenticate the origin
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys into a throwaway keyring: the sender's public key
    # (for signature verification) and our private key (for decryption)
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_unstash_key | python | def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
assert is_valid_appname(appname)
if gpghome is None:
config_dir = get_config_dir( config_dir )
keydir = get_gpg_home( appname, config_dir=config_dir )
else:
keydir = gpghome
gpg = gnupg.GPG( homedir=keydir )
res = gpg.delete_keys( [key_id] )
if res.status == 'Must delete secret key first':
# this is a private key
res = gpg.delete_keys( [key_id], secret=True )
try:
assert res.status == 'ok', "Failed to delete key (%s)" % res
except AssertionError, e:
log.exception(e)
log.error("Failed to delete key '%s'" % key_id)
log.debug("res: %s" % res.__dict__)
return False
return True | Remove a public key locally from our local app keyring
Return True on success
Return False on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L175-L203 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def is_valid_appname(appname):\n \"\"\"\n Appname must be url-safe\n \... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory: use the caller's value if given,
    otherwise fall back to the directory named in the loaded config.
    """
    if config_dir is not None:
        return config_dir

    return get_config()['dir']
def is_valid_appname(appname):
    """
    Appname must be url-safe
    """
    # RFC 3896 unreserved characters, except for .
    return re.match('^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe.
    Key names follow the same rules as app names.
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Create (or re-permission) the GPG keyring directory for an application.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    if os.path.exists(path):
        # already there; just make sure it's private to this user
        os.chmod( path, 0o700 )
    else:
        os.makedirs( path, 0o700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Compute the GPG keyring directory path for an application.
    Does not create the directory.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    return os.path.join( config_dir, "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the GPG keyring directory for a particular application.
    This code path is believed dead; the guard below makes any remaining
    caller fail loudly instead of silently touching the user's personal
    keyring.
    """
    # BUGFIX: message used to read "Should ever be called"
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Create a fresh temporary directory for holding GPG keys that are not
    going to be stored to any application keyring.
    Return the new directory's path.
    """
    prefix = prefix if prefix is not None else "tmp"
    config_dir = get_config_dir( config_dir )
    tmp_root = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmp_root ):
        os.makedirs( tmp_root, 0o700 )

    return tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmp_root )
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    # explicit gpghome overrides the per-app keyring directory
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )
    # receive into a throwaway keyring so nothing persists
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        # expect exactly one key back
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    # re-export the received key as ASCII armor
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
"""
Get the key ID of a given serialized key
Return the fingerprint on success
Return None on error
"""
key_data = str(key_data)
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.import_keys( key_data )
try:
assert res.count == 1, "Failed to import key"
assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
fingerprint = res.fingerprints[0]
shutil.rmtree(tmpdir)
return fingerprint
except AssertionError, e:
log.exception(e)
shutil.rmtree(tmpdir)
return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Check that a given serialized key has the given key ID when imported.
    Return True if it matches.
    Return False otherwise.
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize: strip spaces, uppercase
    expected_id = "".join( key_id.upper().split(" ") )
    if len(expected_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept a full match, or a match against a (long enough) fingerprint suffix
    if expected_id == fingerprint or fingerprint.endswith( expected_id ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Export the ASCII-armored key with the given ID from an app's keyring.
    Asserts (raises AssertionError) if the key cannot be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keyring_dir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keyring_dir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keyring_dir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    Return {'error': ...} on failure
    Raise on error
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')

    # keep only the PGP accounts, projecting out the fields we care about
    listing = []
    for acct in [a for a in accounts if a['service'] == 'pgp']:
        entry = {
            "identifier": acct['identifier'],
            "contentUrl": acct['contentUrl']
        }
        if 'keyName' in acct:
            entry['keyName'] = acct['keyName']

        listing.append(entry)

    return listing
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: this function is currently disabled (it raises unconditionally
    below) because it depends on list_mutable_data, which is unavailable.
    Everything after the raise is dead code kept for reference.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    # --- unreachable from here on ---
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.

    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False

    # make sure it's a parseable URL before trying anything
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    if "://" in key_url and not key_url.lower().startswith("iks://"):
        # generic URL fetch: blockstack://, http(s)://, or anything urllib2 knows
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None

            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])

            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()

        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)

            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            # check the fetched key's fingerprint against the expected ID
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol: fetch from a PGP keyserver (requires key_id)
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key
    server and to either immutable (if @immutable) or mutable data.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # non-empty output from gpg indicates an error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # BUGFIX: corrected typo in error message ("repliate" -> "replicate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage (zonefile hash; requires a transaction)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain.  Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage, named by key_name if given
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s.  Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # register the key URL in the profile's "pgp" accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.

    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all key data referenced by the removed accounts
    for account in removed_accounts:
        # IMPROVED: use 'in' instead of dict.has_key() (deprecated; removed in Python 3)
        if 'contentUrl' not in account:
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # the key data lives in blockstack storage -- delete it too
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError as e:
                # assertion failures indicate a bug; surface them
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception as e:
                # best-effort: log and keep deleting the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key with good defaults (4096-bit RSA/RSA)
    and add it to the user's profile.

    Note that without rngd running, key generation may take a while.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate the keypair inside a throwaway GPG home
    tmp_keyring = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg_ctx = gnupg.GPG( homedir=tmp_keyring )

    log.debug("Generating GPG key (this may take a while)")
    gen_params = gpg_ctx.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    gen_result = gpg_ctx.gen_key( gen_params )
    assert gen_result

    fingerprint = gen_result.fingerprint
    exported_key = gpg_ctx.export_keys( [fingerprint] )
    assert exported_key

    # save the key itself, to the global keyring
    stored = gpg_stash_key( keyname, exported_key, gpghome=gpghome )
    assert stored, "Failed to store key '%s' (%s)" % (keyname, fingerprint)

    shutil.rmtree(tmp_keyring)

    # propagate to blockstack
    return gpg_profile_put_key( blockchain_id, fingerprint, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key with the given name (or key ID).

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    # IMPROVED: list comprehensions instead of filter()+lambda, and 'in'
    # instead of dict.has_key() (deprecated; removed in Python 3)
    all_gpg_accounts = [a for a in accounts if a['service'] == 'pgp']
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name (or matching key ID, if given)
    gpg_accounts = [ga for ga in all_gpg_accounts
                    if ('keyName' in ga and ga['keyName'] == keyname) or
                       (key_id is not None and ga['identifier'] == key_id)]

    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # where to fetch the key from (fall back to the default key server)
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.

    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    try:
        # stash the (private) key into the app-specific local keyring
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        # only the public half gets replicated to blockstack storage
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    # fully-qualified key name in storage: "gpg.<appname>.<keyname>"
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        # store to mutable profile data
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        # store the hash to the zonefile (slow; requires a blockchain transaction)
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # stored as a single-entry {fq_key_name: key data} dict (see gpg_app_put_key)
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key; remote deletion already succeeded,
    # so a failure here is reported only as a warning
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"

        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key with good defaults (RSA-4096),
    stash it to the app-specific keyring locally, and replicate it
    to blockstack.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate the keypair inside a throwaway GPG home
    tmp_keyring = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg_ctx = gnupg.GPG( homedir=tmp_keyring )

    log.debug("Generating GPG key (this may take a while)")
    gen_params = gpg_ctx.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    gen_result = gpg_ctx.gen_key( gen_params )
    fingerprint = gen_result.fingerprint

    # export the private key so gpg_app_put_key can stash it locally
    exported_privkey = gpg_ctx.export_keys( [fingerprint], secret=True )

    shutil.rmtree(tmp_keyring)

    # propagate to blockstack
    return gpg_app_put_key( blockchain_id, appname, keyname, exported_privkey, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # build the URL to the key, depending on where it was stored
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # derive the fingerprint from the fetched key itself
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        # load our private key from the app-specific keyring
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature (detached: the original file is left untouched)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a temporary file
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        # best-effort cleanup of the signature tempfile
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # BUGFIX: this was a copy-paste from gpg_decrypt ("Failed to decrypt data"),
        # which misreported a verification failure as a decryption failure
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: every recipient's public key goes into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key (private, so the ciphertext can also be signed)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption (sign-and-encrypt in one pass)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: the sender's public key (for signature verification)...
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # ...and our private key (to actually decrypt)
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_download_key | python | def gpg_download_key( key_id, key_server, config_dir=None ):
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
recvdat = gpg.recv_keys( key_server, key_id )
fingerprint = None
try:
assert recvdat.count == 1
assert len(recvdat.fingerprints) == 1
fingerprint = recvdat.fingerprints[0]
except AssertionError, e:
log.exception(e)
log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
shutil.rmtree( tmpdir )
return None
keydat = gpg.export_keys( [fingerprint] )
shutil.rmtree( tmpdir )
return str(keydat) | Download a GPG key from a key server.
Do not import it into any keyrings.
Return the ASCII-armored key | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L206-L232 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def make_gpg_tmphome( prefix=None, config_dir=None ):\n \"\"\"\n Make a t... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Get the default configuration directory.

    If config_dir is given, it is returned unchanged; otherwise the
    directory is read from the loaded client config.
    """
    if config_dir is not None:
        return config_dir

    conf = get_config()
    return conf['dir']
def is_valid_appname(appname):
    """
    Appname must be url-safe
    """
    # RFC 3896 unreserved characters, except for .
    return re.match('^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    A key name must satisfy the same URL-safety rules as an app name.
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keyring_dir = os.path.join( config_dir, "gpgkeys", appname )

    # create (or re-tighten permissions on) the per-app keyring directory;
    # 0700 since GPG refuses group/world-accessible home directories
    if os.path.exists(keyring_dir):
        os.chmod( keyring_dir, 0o700 )
    else:
        os.makedirs( keyring_dir, 0o700 )

    return keyring_dir
def get_gpg_home( appname, config_dir=None ):
    """
    Get the GPG keyring directory for a particular application.
    Return the path.
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (user-global) GPG keyring directory (~/.gnupg).

    NOTE: use of the user's global keyring is currently disabled, so this
    always raises.

    Raises:
        Exception: always -- callers are expected to use app-specific
        keyrings (see get_gpg_home) instead.
    """
    # BUGFIX: corrected guard message typo ("Should ever" -> "Should never")
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.
    """
    prefix = prefix if prefix is not None else "tmp"

    config_dir = get_config_dir( config_dir )
    tmp_root = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmp_root ):
        # 0700: GPG requires a private home directory
        os.makedirs( tmp_root, 0o700 )

    return tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmp_root )
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    # an explicit gpghome overrides the per-app keyring
    if gpghome is not None:
        home = gpghome
    else:
        config_dir = get_config_dir( config_dir )
        home = make_gpg_home( appname, config_dir=config_dir )

    gpg_ctx = gnupg.GPG( homedir=home )
    import_result = gpg_ctx.import_keys( key_bin )

    try:
        assert import_result.count == 1, "Failed to store key (%s)" % import_result
    except AssertionError as e:
        log.exception(e)
        log.error("Failed to store key to %s" % home)
        log.debug("res: %s" % import_result.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return import_result.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)

    # an explicit gpghome overrides the per-app keyring
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; GPG requires the secret half be
        # removed before (with) the public half
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key.

    Imports the key into a throwaway keyring so GPG can parse it.

    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)

    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    try:
        gpg = gnupg.GPG( homedir=tmpdir )
        res = gpg.import_keys( key_data )

        try:
            assert res.count == 1, "Failed to import key"
            assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
            return res.fingerprints[0]
        except AssertionError as e:
            log.exception(e)
            return None

    finally:
        # BUGFIX: the original leaked tmpdir if gnupg raised anything other
        # than AssertionError; always clean up the throwaway keyring
        shutil.rmtree(tmpdir)
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize: upper-case and strip spaces from the user-supplied ID
    expected_id = "".join( key_id.upper().split(" ") )
    if len(expected_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept an exact match, or a match on the fingerprint's suffix
    # (short/long key IDs are suffixes of the full fingerprint)
    if fingerprint == expected_id or fingerprint.endswith( expected_id ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key from the app-specific keyring, given the ID.
    Optionally include the private half.  Asserts on failure.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keyring_dir = get_gpg_home( appname, config_dir=config_dir )
    gpg_ctx = gnupg.GPG( homedir=keyring_dir )

    keydat = gpg_ctx.export_keys( [key_id], secret=include_private )
    if not keydat:
        # surface the failure before the assert below fires
        log.debug("Failed to export key %s from '%s'" % (key_id, keyring_dir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    # keep only the PGP accounts, projecting out the fields we care about
    ret = []
    for account in accounts.pop('accounts'):
        if account['service'] != 'pgp':
            continue

        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }
        if 'keyName' in account:
            info['keyName'] = account['keyName']

        ret.append(info)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error
    """
    # NOTE(review): deliberately disabled -- everything below this raise is
    # unreachable until list_mutable_data is restored upstream.
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    # app keys are namespaced as "gpg.<appname>.<keyname>"
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.
    Return the key data on success. If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False

    # make sure it's a parseable URL at all
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    if "://" in key_url and not key_url.lower().startswith("iks://"):
        # URL with a scheme (and not a keyserver URL): fetch over the wire
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None

            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])

            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()

        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)

            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.

    Args:
        blockchain_id: name whose profile receives the key link
        key_id: GPG fingerprint of the key to publish
        key_name: optional URL-safe key name, recorded as 'keyName'
        immutable: if True, replicate the key via immutable (zonefile) data;
            otherwise via mutable profile data
        txid: optional transaction ID to resume an immutable-data write
        key_url: if given, skip replication entirely and just link this URL
        use_key_server: also upload the key to @key_server first
        key_server: PGP key server hostname (default DEFAULT_KEY_SERVER)

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no pre-existing URL: export the key and replicate it ourselves
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # gnupg signals send_keys failure via non-empty res.data
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # BUGFIX: error message previously read "repliate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage, keyed by key name if we have one
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the account entry that links the key URL into the profile
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all replicated key data we control (blockstack:// URLs only)
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError, e:
                # programming error: propagate
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception, e:
                # best-effort: record and keep going
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.
    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """

    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate the key in a temporary keyring first
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")

    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint

    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get a profile key by name (or by key ID, if given).
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """

    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name (or the given key ID)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )

    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # fall back to the default key server if no URL was recorded
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key; verify it matches the recorded fingerprint
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
"""
Put an application GPG key.
Stash the private key locally to an app-specific keyring.
Return {'status': True, 'key_url': ..., 'key_data': ...} on success
Return {'error': ...} on error
If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.
Otherwise, the key is stored to mutable storage.
"""
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
try:
keydir = make_gpg_home( appname, config_dir=config_dir )
key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
assert key_id is not None, "Failed to stash key"
log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
except Exception, e:
log.exception(e)
log.error("Failed to store GPG key '%s'" % keyname)
return {'error': "Failed to store GPG key locally"}
# get public key...
assert is_valid_appname(appname)
try:
pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
except:
return {'error': 'Failed to load key'}
fq_key_name = "gpg.%s.%s" % (appname, keyname)
key_url = None
if not immutable:
res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
else:
res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
res['key_url'] = key_url
res['key_data'] = pubkey_data
res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take
    on the order of an hour to complete on the blockchain. A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """

    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # stored format is a single {name: PEM} mapping
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key (best-effort outside of testing)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """

    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate the key in a throwaway keyring
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")

    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    # export the private key too, so gpg_app_put_key can stash it locally
    key_data = gpg.export_keys( [key_id], secret=True )

    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """

    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if immutable:
        # try immutable (zonefile-anchored) data
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )

    else:
        # try mutable (profile) data
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # caller didn't give a fingerprint; derive it from the fetched key
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }

    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk (detached signature).
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a file, since gnupg's verify_file needs one
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except OSError:
        # best-effort cleanup of the temporary signature file
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # BUGFIX: message previously said "decrypt" (copy-paste from gpg_decrypt)
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """

    if config_dir is None:
        config_dir = get_config_dir()

    # ingest each recipient public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key (private, since the output is signed by the sender)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption, signed by the sender
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """

    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key so the embedded signature can be checked
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # load our private key for the actual decryption
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_key_fingerprint | python | def gpg_key_fingerprint( key_data, config_dir=None ):
key_data = str(key_data)
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.import_keys( key_data )
try:
assert res.count == 1, "Failed to import key"
assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
fingerprint = res.fingerprints[0]
shutil.rmtree(tmpdir)
return fingerprint
except AssertionError, e:
log.exception(e)
shutil.rmtree(tmpdir)
return None | Get the key ID of a given serialized key
Return the fingerprint on success
Return None on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L235-L258 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def make_gpg_tmphome( prefix=None, config_dir=None ):\n \"\"\"\n Make a t... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.

    If a directory is supplied, return it unchanged; otherwise fall back
    to the 'dir' entry of the loaded client configuration.
    """
    if config_dir is not None:
        return config_dir

    return get_config()['dir']
def is_valid_appname(appname):
    """
    Check that an application name is URL-safe.

    Only RFC 3986 unreserved characters (minus '.') are accepted:
    letters, digits, '-', '_' and '~'.
    """
    return re.match(r'^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe (same character set as application names).
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application.
    Creates <config_dir>/gpgkeys/<appname> if it does not exist.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    if not os.path.exists(path):
        # GPG requires the keyring dir to be owner-only (0700)
        os.makedirs( path, 0700 )
    else:
        os.chmod( path, 0700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Compute (but do not create) the GPG keyring directory
    for a particular application.
    Return the path <config_dir>/gpgkeys/<appname>.
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the user's default GPG keyring directory (~/.gnupg).

    Deliberately disabled: callers are expected to use per-application
    keyrings instead, so reaching this function indicates a bug.

    Raises:
        Exception: always.
    """
    # BUGFIX: message previously read "Should ever be called"; the
    # unreachable expanduser() fallback after the raise was dead code.
    raise Exception("Should never be called")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.

    The caller is responsible for removing the directory when done.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # keyring dirs must be owner-only
        os.makedirs( tmppath, 0700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID (fingerprint) on success
    Return None on error
    """

    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    if gpghome is None:
        # default to the per-app keyring
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # expect exactly one imported key
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """

    assert is_valid_appname(appname)

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key
        # NOTE(review): after deleting the secret key the public half is not
        # deleted again -- confirm that leaving the public key behind is intended
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success
    Return None on error
    """

    config_dir = get_config_dir( config_dir )
    # use a throwaway keyring so nothing is permanently imported
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        # expect exactly one key back
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Accepts either the full fingerprint or a long key ID (a suffix of the
    fingerprint, at least 16 hex characters).
    Return True on success
    Return False on error
    """

    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize: upper-case, strip spaces
    sanitized_key_id = "".join( key_id.upper().split(" ") )

    if len(sanitized_key_id) < 16:
        # short key IDs are too easy to collide
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    if sanitized_key_id != fingerprint and not fingerprint.endswith( sanitized_key_id ):
        log.debug("Imported key does not match the given ID")
        return False

    else:
        return True
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID.
    Raises AssertionError if the key could not be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    (entries also carry 'keyName' when the account recorded one)
    Return {'error': ...} on failure
    """

    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')

    # extract only the 'pgp' service accounts
    ret = []
    for account in accounts:
        if account['service'] != 'pgp':
            continue

        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }

        if 'keyName' in account.keys():
            info['keyName'] = account['keyName']

        ret.append(info)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: deliberately disabled -- everything below the initial raise is
    unreachable until list_mutable_data is available again.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    # keys for this app are namespaced as "gpg.<appname>.<keyname>"
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.
    Return the key data on success. If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False

    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    # URLs with a scheme (other than iks://) are fetched over urllib2;
    # schemeless or iks:// URLs are treated as PGP keyserver addresses below.
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None

            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])

            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()

        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)

            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no pre-existing URL; replicate the key data ourselves
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # bugfix: message previously read "repliate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the key in the profile's account list
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all state: delete any blockstack-hosted copies of the key data
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError, e:
                # assertion failures indicate a bug; re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception, e:
                # best-effort: record nothing, move on to the next account
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.
    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate in a throwaway keyring first
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res

    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name (or, if given, the explicit key ID)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # fall back to the default key server if the account has no content URL
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.
    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.
    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    # stash the (possibly private) key material to the app keyring first
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    # fully-qualified storage name: gpg.<app>.<key>
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take
    on the order of an hour to complete on the blockchain. A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # stored shape is {fq_key_name: pubkey PEM}; exactly one entry expected
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key; failure is a warning, not an error
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate in a throwaway keyring
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    # export the secret key too, so gpg_app_put_key can stash it locally
    key_data = gpg.export_keys( [key_id], secret=True )

    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if immutable:
        # try immutable
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )

    else:
        # try mutable
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # derive the fingerprint from the fetched key material
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }

    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys into a temporary keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature (detached)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a temporary keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a temporary file
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    # best-effort cleanup of the signature tempfile
    try:
        os.unlink(path)
    except:
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest all recipient public keys into a temporary keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key (private, so we can sign)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption (sign-and-encrypt)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a temporary keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # bring in our private key so we can decrypt
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_verify_key | python | def gpg_verify_key( key_id, key_data, config_dir=None ):
key_data = str(key_data)
config_dir = get_config_dir( config_dir )
sanitized_key_id = "".join( key_id.upper().split(" ") )
if len(sanitized_key_id) < 16:
log.debug("Fingerprint is too short to be secure")
return False
fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
if fingerprint is None:
log.debug("Failed to fingerprint key")
return False
if sanitized_key_id != fingerprint and not fingerprint.endswith( sanitized_key_id ):
log.debug("Imported key does not match the given ID")
return False
else:
return True | Verify that a given serialized key, when imported, has the given key ID.
Return True on success
Return False on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L261-L287 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def gpg_key_fingerprint( key_data, config_dir=None ):\n \"\"\"\n Get the ... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
# short alias so storage calls below read as client.<api>()
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
# default PGP key server used when the caller does not supply one
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.

    An explicitly-passed directory wins; otherwise fall back to the
    'dir' entry of the global client configuration.
    """
    if config_dir is not None:
        return config_dir

    return get_config()['dir']
def is_valid_appname(appname):
    """
    Check whether an application name is URL-safe.

    Allowed characters are the RFC 3986 unreserved set minus '.':
    letters, digits, '-', '_' and '~'.  Empty names are invalid.
    """
    return re.match(r'^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be URL-safe (same character rules as application names)
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    # keyrings hold secrets: enforce owner-only permissions either way
    if not os.path.exists(path):
        os.makedirs( path, 0700 )
    else:
        os.chmod( path, 0700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Locate (but do not create) the GPG keyring directory for an application.

    Returns <config_dir>/gpgkeys/<appname>.
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (user-global) GPG keyring directory (~/.gnupg).

    This module keeps all keys in per-application keyrings, so reaching
    the user's global keyring indicates a bug in the caller.

    Raises:
        Exception: always; this code path must never be used.
    """
    # bugfix: message previously read "Should ever be called"
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.
    Return the path to the new directory.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # owner-only: the tempdir will hold key material
        os.makedirs( tmppath, 0700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    # explicit gpghome overrides the per-app keyring
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    # explicit gpghome overrides the per-app keyring
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success
    Return None on error
    """
    config_dir = get_config_dir( config_dir )
    # receive into a throwaway keyring, then export and discard it
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key
    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # import into a throwaway keyring just to read the fingerprint
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )

    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize the expected ID: upper-case, spaces stripped
    wanted = "".join( key_id.upper().split(" ") )
    if len(wanted) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept either the exact fingerprint or a sufficiently-long suffix of it
    if fingerprint == wanted or fingerprint.endswith( wanted ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID

    Looks the key up in the application's keyring.  Pass include_private=True
    to export the secret key as well.  Asserts that the export succeeded.
    """
    assert is_valid_appname(appname)
    keyring_dir = get_gpg_home( appname, config_dir=get_config_dir( config_dir ) )
    exported = gnupg.GPG( homedir=keyring_dir ).export_keys( [key_id], secret=include_private )
    if not exported:
        log.debug("Failed to export key %s from '%s'" % (key_id, keyring_dir))

    assert exported
    return exported
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    (each entry also carries 'keyName' when the account has one)
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')

    # extract only the 'pgp' service accounts
    ret = []
    for account in accounts:
        if account['service'] != 'pgp':
            continue

        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }

        if 'keyName' in account.keys():
            info['keyName'] = account['keyName']

        ret.append(info)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error
    """
    # NOTE: disabled; everything below the raise is dead code kept for reference
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.

    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification

    NOTE(review): despite "Return None on error" above, the final assert will
    raise AssertionError (not return None) if the keyserver path yields no
    data -- confirm which contract callers rely on.
    """
    dat = None
    from_blockstack = False

    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    # URL with a scheme (other than iks://) -- fetch over urllib2
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            # NOTE(review): config_dir may still be None here, which would make
            # os.path.join raise -- confirm callers always pass config_dir for
            # blockstack:// URLs.
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            # check the imported fingerprint against the requested ID
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key
    server and to either immutable (if @immutable) or mutable data.

    Args:
        blockchain_id: the blockchain ID whose profile receives the key
        key_id: fingerprint of the local key to publish
        key_name: optional human-readable name stored as 'keyName'
        immutable: store key data as immutable (zonefile) vs mutable data
        txid, key_url, use_key_server, key_server, proxy, wallet_keys, gpghome:
            pass-through/replication options

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # non-empty response data indicates a send_keys error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # fixed typo: "repliate" -> "replicate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # register the key URL as a 'pgp' account in the profile
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.

    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all state: best-effort delete of each key's backing data
    for account in removed_accounts:
        # accounts without a contentUrl have no stored key data to remove
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                # assertion failures indicate a bug; propagate
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                # other failures are recorded but do not abort the loop
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error

    NOTE(review): when gpghome is None this calls get_default_gpg_home(),
    which unconditionally raises -- confirm callers always supply gpghome.
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate in a throwaway keyring first
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res

    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Look up a named GPG key in a blockchain ID's profile accounts and
    download its key data.

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    # keep only PGP accounts
    pgp_accounts = [acct for acct in accounts if acct['service'] == 'pgp']
    if len(pgp_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # select by key name, or (if given) by explicit key ID
    matches = [acct for acct in pgp_accounts
               if ('keyName' in acct and acct['keyName'] == keyname) or (key_id is not None and acct['identifier'] == key_id)]

    if len(matches) == 0:
        return {'error': 'No such GPG key found'}

    if len(matches) > 1:
        return {'error': 'Multiple keys with that name'}

    match = matches[0]
    key_url = match.get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=match['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    return {
        'status': True,
        'key_id': match['identifier'],
        'key_data': key_data
    }
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.

    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    # NOTE(review): this assert duplicates the one at the top of the function
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    # app keys are namespaced as "gpg.<appname>.<keyname>"
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # stored format is a single-entry {fq_key_name: key data} dict
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash: best-effort; remote delete already succeeded at this point
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Generate a new 4096-bit RSA application GPG key pair, stash it to the
    app-specific keyring locally, and publish it to blockstack.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate in a throwaway keyring; gpg_app_put_key does the local stash
    tmp_keyring = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmp_keyring )

    log.debug("Generating GPG key (this may take a while)")
    gen_input = gpg.gen_key_input( key_type="RSA", name_email="%s/%s" % (blockchain_id, appname), key_length=4096, name_real=keyname )
    gen_res = gpg.gen_key( gen_input )

    fingerprint = gen_res.fingerprint
    secret_key_data = gpg.export_keys( [fingerprint], secret=True )

    shutil.rmtree(tmp_keyring)

    # propagate to blockstack
    return gpg_app_put_key( blockchain_id, appname, keyname, secret_key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Fetch an app-specific GPG key from a blockchain ID's data.

    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # immutable keys are addressed by hash; mutable ones by version
    if immutable:
        url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % url)
    fetched = gpg_fetch_key( url, key_id, config_dir=config_dir )
    if fetched is None:
        return {'error': 'Failed to fetch key'}

    # derive the fingerprint from the data if the caller didn't supply one
    fingerprint = key_id
    if fingerprint is None:
        fingerprint = gpg_key_fingerprint( fetched, config_dir=config_dir )

    return {
        'status': True,
        'key_id': fingerprint,
        'key_data': fetched,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk (detached signature).

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify that a file on disk was signed by the given sender.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a temporary file
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)

    # best-effort cleanup of the signature tempfile
    try:
        os.unlink(path)
    except:
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # fixed copy-paste bug: message said "decrypt" in this verify routine
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys, signing with the sender's key.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest all recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info for a private key we own.

    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID.
    Looks the key up in the application's local keyring.

    Return the exported key data on success; assert on failure.
    """
    assert is_valid_appname(appname)

    config_dir = get_config_dir( config_dir )
    keyring_dir = get_gpg_home( appname, config_dir=config_dir )

    gpg = gnupg.GPG( homedir=keyring_dir )
    exported = gpg.export_keys( [key_id], secret=include_private )

    if not exported:
        log.debug("Failed to export key %s from '%s'" % (key_id, keyring_dir))
    assert exported

    return exported
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def is_valid_appname(appname):\n \"\"\"\n Appname must be url-safe\n \... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.

    If config_dir is given, return it unchanged; otherwise fall back to
    the directory recorded in the loaded client configuration.
    """
    if config_dir is not None:
        return config_dir

    conf = get_config()
    return conf['dir']
def is_valid_appname(appname):
    """
    Check whether an application name is URL-safe.

    A valid name is one or more characters from the RFC 3986 "unreserved"
    set, minus '.' (dots would break the "gpg.<app>.<key>" namespacing).

    Return True if valid, False otherwise.
    """
    # RFC 3986 unreserved characters, except for .
    # (fixed comment typo: was "RFC 3896")
    url_regex = '^[a-zA-Z0-9-_~]+$'
    # return the boolean directly instead of an if/else on the match object
    return re.match(url_regex, appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe (same character rules as application names).
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make the GPG keyring dir for a particular application, creating it
    (mode 0700) if it does not exist and tightening the mode if it does.

    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    # 0o700: owner-only access, since this directory holds private keys.
    # (modern octal literal; same value as the old-style 0700)
    if not os.path.exists(path):
        os.makedirs( path, 0o700 )
    else:
        os.chmod( path, 0o700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Compute the GPG keyring directory path for a particular application.
    Does not create the directory.

    Return the path.
    """
    assert is_valid_appname(appname)
    base_dir = get_config_dir( config_dir )
    return os.path.join( base_dir, "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the user's default GPG keyring directory.

    This code path is deliberately disabled: callers are expected to use
    per-application keyrings instead, so reaching this is a bug.

    Raises:
        Exception: always.
    """
    # fixed message typo: was "Should ever be called"
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.

    The caller is responsible for removing the directory when done.

    Return the path to the new temporary directory.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )

    # 0o700: owner-only, since temporary keyrings may hold private keys
    # (modern octal literal; same value as the old-style 0700)
    if not os.path.exists( tmppath ):
        os.makedirs( tmppath, 0o700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID

    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    # explicit gpghome overrides the per-app keyring location
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a key from our local app keyring.

    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)

    # explicit gpghome overrides the per-app keyring location
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.

    Return the ASCII-armored key on success
    Return None on error
    """
    config_dir = get_config_dir( config_dir )

    # receive into a throwaway keyring, then export from it
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key.
    Imports into a throwaway keyring to read the fingerprint.

    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)

    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )

    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"

        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Short key IDs are rejected outright; the given ID may be a full
    fingerprint or a trailing portion of one.

    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize: uppercase and strip spaces
    expected = "".join( key_id.upper().split(" ") )
    if len(expected) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    actual = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if actual is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept an exact match, or a match on the fingerprint's tail
    if expected == actual or actual.endswith( expected ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile.

    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    (each entry also carries 'keyName' when the account has one).
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    # keep only PGP accounts
    pgp_accounts = [acct for acct in accounts.pop('accounts') if acct['service'] == 'pgp']

    results = []
    for acct in pgp_accounts:
        entry = {
            "identifier": acct['identifier'],
            "contentUrl": acct['contentUrl']
        }
        if 'keyName' in acct:
            entry['keyName'] = acct['keyName']

        results.append(entry)

    return results
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE(review): this function is intentionally disabled -- the raise on the
    first line makes everything after it unreachable until a working
    list_mutable_data implementation exists.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    # ------- dead code below; preserved for when list_mutable_data returns -------
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    # app keys are namespaced as "gpg.<appname>.<keyname>"
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs.
    If the URL has no scheme (or is iks://), then assume it's a PGP key
    server and use GPG to go get it.
    The key is not accepted into any keyrings.

    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification.
    """
    dat = None
    from_blockstack = False
    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None
        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            # NOTE(review): assumes config_dir is not None here -- os.path.join
            # would raise otherwise; verify callers always pass it for these URLs
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol, or bare host name: fetch from keyserver
        # NOTE(review): key_id may be None on this path -- presumably callers
        # always supply it for keyserver URLs; confirm
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.

    If @key_url is not given, the key will be replicated to @key_server
    (DEFAULT_KEY_SERVER unless overridden) and to either immutable
    (if @immutable) or mutable storage, and a key URL derived from that.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)
    if key_server is None:
        key_server = DEFAULT_KEY_SERVER
    if gpghome is None:
        gpghome = get_default_gpg_home()
    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}
    if key_url is None:
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to the key server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # gpg reported an error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # bugfix: was misspelled "repliate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}
        key_data = gpg.export_keys( [key_id] )
        if immutable:
            # replicate to immutable storage (requires a zonefile update transaction)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']
            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
        else:
            # replicate to mutable storage; name the record after the key name if given
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id
            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
    # register the key under the profile's 'pgp' accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res
    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account and blow away
    any key data we replicated for it.  The local keyring is NOT touched.

    Return {'status': True, ...} on success.  May include 'delete_errors'
    if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    delete_errors = []

    # clean up replicated key data for every account entry we just removed
    for account in res['removed']:
        if 'contentUrl' not in account:
            continue

        key_url = account['contentUrl']
        if not key_url.startswith("blockstack://"):
            # not data we replicated; nothing to delete
            continue

        try:
            delete_res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in delete_res:
                delete_errors.append({'key_url': key_url, 'message': delete_res['error']})
        except AssertionError as ae:
            # programming error: surface it
            log.exception(ae)
            log.error("Failed to delete '%s'" % key_url)
            raise
        except Exception as e:
            # best-effort: record nothing, move on to the next URL
            log.exception(e)
            log.error("Failed to delete '%s'" % key_url)
            continue

    ret = {'status': True}
    if len(delete_errors) > 0:
        ret['delete_errors'] = delete_errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA).
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    # generate into a throwaway keyring first
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data
    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Look up a named GPG key in a blockchain ID's profile accounts and
    download it.

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    pgp_accounts = [a for a in accounts if a['service'] == 'pgp']
    if len(pgp_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # match on key name, or on key ID when one was given
    matches = [a for a in pgp_accounts
               if ('keyName' in a and a['keyName'] == keyname) or (key_id is not None and a['identifier'] == key_id)]

    if len(matches) == 0:
        return {'error': 'No such GPG key found'}

    if len(matches) > 1:
        return {'error': 'Multiple keys with that name'}

    account = matches[0]
    key_url = account.get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key data itself, verifying against the recorded fingerprint
    key_data = gpg_fetch_key( key_url, key_id=account['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    return {
        'status': True,
        'key_id': account['identifier'],
        'key_data': key_data
    }
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the given (private) key locally to an app-specific keyring, and
    replicate the public half to blockstack storage.

    Return {'status': True, 'key_url': ..., 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}
    # get public key...
    assert is_valid_appname(appname)
    try:
        # export only the public half for replication
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}
    # fully-qualified record name: gpg.<appname>.<keyname>
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    # records we wrote hold exactly one {name: key data} pair
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash the local private key; best-effort outside of testing
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096).
    Stash it to the app-specific keyring locally.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Raise AssertionError if key generation or export fails.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
    # generate into a throwaway keyring first
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    # fail fast on generation/export errors, consistent with gpg_profile_create_key
    assert key_res, "Failed to generate GPG key"
    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id], secret=True )
    assert key_data, "Failed to export key %s" % key_id
    shutil.rmtree(keydir)
    # propagate to blockstack (this also stashes the private key locally)
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Fetch an app-specific GPG key.

    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # build the URL for either the immutable or mutable replica
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # derive the fingerprint from the key data itself
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk, producing a detached signature.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the signing key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    # do the signature
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    shutil.rmtree(tmpdir)
    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify that a file on disk was signed by the given sender.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # stash the detached signature to a temporary file (gpg wants a file path)
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )
    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass
    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # bugfix: this is a verification failure, not a decryption failure
        return {'error': 'Failed to verify data'}
    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt (and sign) a stream of data for a set of recipient keys.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest all recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our key (private half, needed for signing)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.

    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # load our private key for the actual decryption
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_list_profile_keys | python | def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
config_dir = get_config_dir( config_dir )
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
if proxy is None:
proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
if 'error' in accounts:
return accounts
accounts = accounts.pop('accounts')
# extract
ret = []
for account in accounts:
if account['service'] != 'pgp':
continue
info = {
"identifier": account['identifier'],
"contentUrl": account['contentUrl']
}
if 'keyName' in account.keys():
info['keyName'] = account['keyName']
ret.append(info)
return ret | List all GPG keys in a user profile:
Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
Raise on error
Return {'error': ...} on failure | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L308-L344 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n"
] | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory: keep the caller's value when
    given; otherwise fall back to the 'dir' setting of the client config.
    """
    if config_dir is not None:
        return config_dir

    return get_config()['dir']
def is_valid_appname(appname):
    """
    Check that an app name is URL-safe: only RFC 3986 unreserved
    characters (minus '.') are allowed, and it must be non-empty.
    """
    return re.match(r'^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be URL-safe (same character set as app names).
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Create (or re-permission) the GPG keyring directory for an
    application and return its path.  The directory is always left
    with mode 0700.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    if os.path.exists(path):
        os.chmod( path, 0o700 )
    else:
        os.makedirs( path, 0o700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Return the path to an application's GPG keyring directory
    (without creating it).
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (user-wide) GPG keyring directory.

    This code path is intentionally disabled: per-app keyrings
    (see get_gpg_home) must be used instead, so any call is a bug.
    """
    # bugfix: message previously read "Should ever be called"
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Create a fresh temporary directory (under <config_dir>/tmp) to hold
    GPG keys that should not land in an application keyring.
    Return the path; the caller is responsible for removing it.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmp_root = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmp_root ):
        os.makedirs( tmp_root, 0o700 )

    return tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmp_root )
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Import a key into our local app keyring.
    Does NOT put it into a blockchain ID.

    Return the key's fingerprint on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    if gpghome is not None:
        keydir = gpghome
    else:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError as e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a key from our local app keyring.
    If the keyring holds the secret key as well, both halves of the
    key pair are removed.

    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a key pair: GnuPG refuses to delete a public key while its
        # secret key exists, so delete the secret key and then retry the
        # public key (previously the public half was left in the keyring)
        res = gpg.delete_keys( [key_id], secret=True )
        if res.status == 'ok':
            res = gpg.delete_keys( [key_id] )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError as e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings (a throwaway keyring is used
    internally and removed afterwards).

    Return the ASCII-armored key on success
    Return None on error
    """
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        # expect exactly one key back
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Compute the fingerprint of a serialized key by importing it into a
    throwaway keyring.

    Return the fingerprint string on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    import_res = gpg.import_keys( key_data )

    try:
        assert import_res.count == 1, "Failed to import key"
        assert len(import_res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        return import_res.fingerprints[0]
    except AssertionError as e:
        log.exception(e)
        return None
    finally:
        # always discard the throwaway keyring
        shutil.rmtree(tmpdir)
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Check that a serialized key, when imported, really has the given key
    ID.  Full fingerprints and (sufficiently long) suffixes are accepted.

    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize: upper-case, spaces removed
    expected = key_id.upper().replace(" ", "")
    if len(expected) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    if fingerprint == expected or fingerprint.endswith( expected ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key from an app keyring, given the key ID.
    If include_private is True, export the secret key instead.
    Raises AssertionError if the key could not be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
    assert keydat
    return keydat
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: intentionally disabled -- the unconditional raise below makes
    everything after it dead code until list_mutable_data exists again.
    """
    raise Exception("BROKEN; depends on list_mutable_data")
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    key_info = []
    key_prefix = "gpg.%s." % appname
    # immutable data key listing (look for data IDs that start with 'gpg.<appname>.')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])
    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })
    # mutable data key listing (same 'gpg.<appname>.' prefix convention)
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])
    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })
    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.
    Return the key data on success. If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False
    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    # any scheme other than iks:// is fetched over urllib2; iks:// (or a
    # bare hostname) falls through to the keyserver path at the bottom
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None
        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            # check the fetched key's fingerprint against the expected ID
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)
    if key_server is None:
        key_server = DEFAULT_KEY_SERVER
    if gpghome is None:
        gpghome = get_default_gpg_home()
    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}
    if key_url is None:
        # no existing URL: we must replicate the key data ourselves
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # BUGFIX: error message typo ("repliate" -> "replicate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}
        key_data = gpg.export_keys( [key_id] )
        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']
            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id
            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
    # record the key under the profile's "pgp" accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res
    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res
    removed_accounts = res['removed']
    errors = []
    # blow away all state
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue
        key_url = account['contentUrl']
        # only data we replicated ourselves (blockstack:// URLs) is deleted;
        # keyserver copies are left alone
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                # assertion failures indicate a bug; do not mask them
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                # best-effort: record and keep removing the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue
    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors
    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.
    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    # generate in a throwaway keyring, then stash to the global one
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data
    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}
    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}
    # find the one with this key name
    # (a match on 'keyName', or on 'identifier' when key_id was supplied)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}
    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}
    # fall back to the default keyserver if no content URL was recorded
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)
    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}
    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }
    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
"""
Put an application GPG key.
Stash the private key locally to an app-specific keyring.
Return {'status': True, 'key_url': ..., 'key_data': ...} on success
Return {'error': ...} on error
If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.
Otherwise, the key is stored to mutable storage.
"""
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
try:
keydir = make_gpg_home( appname, config_dir=config_dir )
key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
assert key_id is not None, "Failed to stash key"
log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
except Exception, e:
log.exception(e)
log.error("Failed to store GPG key '%s'" % keyname)
return {'error': "Failed to store GPG key locally"}
# get public key...
assert is_valid_appname(appname)
try:
pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
except:
return {'error': 'Failed to load key'}
fq_key_name = "gpg.%s.%s" % (appname, keyname)
key_url = None
if not immutable:
res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
else:
res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
res['key_url'] = key_url
res['key_data'] = pubkey_data
res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error
    If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take
    on the order of an hour to complete on the blockchain. A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    # stored form is {fq_key_name: pubkey PEM}; extract the single value
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        # non-fatal in production: remote state is already deleted
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
    # generate in a throwaway keyring; gpg_app_put_key stashes it for keeps
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    # export the private key too, so it can be stashed to the app keyring
    key_data = gpg.export_keys( [key_id], secret=True )
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if immutable:
        # try immutable
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        # try mutable
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )
    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}
    if key_id is None:
        # derive the fingerprint from the fetched key itself
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )
    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success (sig is a detached signature)
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        # pull our private key from the app keyring into the throwaway keyring
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    # do the signature
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    shutil.rmtree(tmpdir)
    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # stash detached signature to a temporary file (gpg verifies from a path)
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )
    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass
    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # BUGFIX: this is a verification failure (message was copy-pasted
        # from gpg_decrypt and wrongly said "decrypt")
        return {'error': 'Failed to verify data'}
    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    # load every recipient's public key into the throwaway keyring
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our key
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption (also signs with the sender's key)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    # sender's public key (needed to verify the embedded signature)
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    try:
        # our private key, from the app keyring
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_list_app_keys | python | def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
raise Exception("BROKEN; depends on list_mutable_data")
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
if proxy is None:
proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
key_info = []
key_prefix = "gpg.%s." % appname
# immutable data key listing (look for keys that start with 'appname:')
immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
if 'error' in immutable_listing:
raise Exception("Blockstack error: %s" % immutable_listing['error'])
for immutable in immutable_listing['data']:
name = immutable['data_id']
data_hash = immutable['hash']
if name.startswith( key_prefix ):
key_info.append( {
'keyName': name[len(key_prefix):],
'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
})
# mutable data key listing (look for keys that start with 'appname:')
# TODO: use 'accounts'
mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in mutable_listing:
raise Exception("Blockstack error: %s" % mutable_listing['error'])
for mutable in mutable_listing['data']:
name = mutable['data_id']
version = mutable['version']
if name.startswith( key_prefix ):
key_info.append( {
'keyName': name[len(key_prefix):],
'contentUrl': make_mutable_data_url( blockchain_id, name, version )
})
return key_info | List the set of available GPG keys tagged for a given application.
Return list of {'keyName': key name, 'contentUrl': URL to key data}
Raise on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L347-L396 | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
# shorthand alias: workflow functions below call client.put_mutable(), etc.
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
# PGP key server used when a key has no explicit content URL
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.

    Return the caller-supplied directory unchanged if given; otherwise
    fall back to the 'dir' entry of the loaded client configuration.
    """
    if config_dir is not None:
        return config_dir
    return get_config()['dir']
def is_valid_appname(appname):
    """
    Check whether an application name is URL-safe.

    Valid names are non-empty and consist only of RFC 3986 unreserved
    characters, except '.' (i.e. letters, digits, '-', '_', '~').

    Return True if valid, False otherwise.
    """
    # RFC 3986 unreserved characters, except for .
    url_regex = r'^[a-zA-Z0-9-_~]+$'
    # idiom: return the match test directly instead of if/else True/False
    return re.match(url_regex, appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe (same character rules as application names).
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Create (or re-permission) the GPG keyring directory for an application.
    Return the path to it.
    """
    assert is_valid_appname(appname)
    keyring_path = os.path.join(get_config_dir(config_dir), "gpgkeys", appname)
    # owner-only access: the keyring may hold private key material
    if os.path.exists(keyring_path):
        os.chmod(keyring_path, 0o700)
    else:
        os.makedirs(keyring_path, 0o700)
    return keyring_path
def get_gpg_home( appname, config_dir=None ):
    """
    Return the path to an application's GPG keyring directory,
    without creating it.
    """
    assert is_valid_appname(appname)
    base_dir = get_config_dir( config_dir )
    return os.path.join( base_dir, "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (user-global) GPG keyring directory.

    NOTE: this code path is believed to be dead; the guard below raises
    unconditionally so any remaining caller is surfaced during testing.
    """
    # BUGFIX: message typo ("Should ever be called" -> "Should never be called")
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.
    Return the path to the new directory.
    """
    prefix = prefix if prefix is not None else "tmp"
    staging_root = os.path.join( get_config_dir( config_dir ), "tmp" )
    if not os.path.exists( staging_root ):
        # owner-only: may briefly hold private key material
        os.makedirs( staging_root, 0o700 )
    return tempfile.mkdtemp( prefix=("%s-" % prefix), dir=staging_root )
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0
    if gpghome is None:
        # default: the app-specific keyring (created on demand)
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )
    try:
        # exactly one key is expected per stash
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None
    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    # GPG refuses to delete a public key while its secret key exists;
    # detect that and delete the secret key instead
    if res.status == 'Must delete secret key first':
        # this is a private key
        res = gpg.delete_keys( [key_id], secret=True )
    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False
    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key
    Return None on error
    """
    config_dir = get_config_dir( config_dir )
    # use a throwaway keyring so nothing is permanently imported
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        # exactly one key must have been received
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key
    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # import into a throwaway keyring just to read the fingerprint
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )
    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # normalize: upper-case and drop any spaces the caller included
    expected_id = "".join( key_id.upper().split(" ") )
    if len(expected_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False
    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False
    # accept either the full fingerprint or a (>= 16 char) suffix of it
    if fingerprint == expected_id or fingerprint.endswith( expected_id ):
        return True
    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID
    (the secret key too, if include_private is True).
    Raises AssertionError if the key cannot be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile.
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    (each entry also carries 'keyName' when the account recorded one).
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join( config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    listing = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in listing:
        # propagate the lookup failure as-is
        return listing
    pgp_keys = []
    for account in listing.pop('accounts'):
        if account['service'] != 'pgp':
            continue
        entry = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }
        if 'keyName' in account.keys():
            entry['keyName'] = account['keyName']
        pgp_keys.append(entry)
    return pgp_keys
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs.
    If the URL has no scheme (or is iks://), then assume it's a PGP key
    server, and use GPG to go get it.
    The key is not accepted into any keyrings.
    Return the key data on success. If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False
    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    # URL with a scheme (other than iks://): fetch over the network directly
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None
        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            # confirm the fetched data actually carries the expected fingerprint
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP
    key server and to either immutable (if @immutable) or mutable data.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)
    if key_server is None:
        key_server = DEFAULT_KEY_SERVER
    if gpghome is None:
        gpghome = get_default_gpg_home()
    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}
    if key_url is None:
        # no pre-existing URL: replicate the key data ourselves
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # FIX: corrected typo "repliate" -> "replicate" in user-facing error
                return {'error': 'Failed to replicate GPG key to default keyserver'}
        key_data = gpg.export_keys( [key_id] )
        if immutable:
            # replicate to immutable storage (updates the zonefile; returns tx info)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']
            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
        else:
            # replicate to mutable storage; use key_name as the record name if given
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id
            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
    # register the key under the profile's "pgp" accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res
    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.

    Also deletes any blockstack://-hosted key data referenced by the
    removed account entries.

    Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res
    removed_accounts = res['removed']
    errors = []
    # blow away all state
    for account in removed_accounts:
        # skip accounts without hosted data
        if not account.has_key('contentUrl'):
            continue
        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                # assertion failures are programming errors; propagate them
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                # best-effort: record-and-continue for runtime failures
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue
    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors
    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Generate a new account key with good defaults (RSA, 4096-bit),
    save it to the user's global keyring, and publish it to the
    blockchain ID's profile.

    Note that without rngd running, key generation may take a while.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate inside a throwaway keyring
    scratch_keyring = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=scratch_keyring )

    log.debug("Generating GPG key (this may take a while)")
    gen_params = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    gen_result = gpg.gen_key( gen_params )
    assert gen_result

    key_id = gen_result.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # persist the new key to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(scratch_keyring)

    # publish to blockstack
    return gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Look up a named GPG key in a blockchain ID's profile and fetch
    its data, verifying it against the recorded fingerprint.

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    listing = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in listing:
        return listing

    all_accounts = listing.pop('accounts')
    if len(all_accounts) == 0:
        return {'error': 'No accounts in this profile'}

    pgp_accounts = [a for a in all_accounts if a['service'] == 'pgp']
    if len(pgp_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # select by key name, or by the explicit key ID if one was given
    matches = [a for a in pgp_accounts if ('keyName' in a and a['keyName'] == keyname) or (key_id is not None and a['identifier'] == key_id)]
    if len(matches) == 0:
        return {'error': 'No such GPG key found'}

    if len(matches) > 1:
        return {'error': 'Multiple keys with that name'}

    account = matches[0]
    key_url = account.get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key, verifying against the account's fingerprint
    key_data = gpg_fetch_key( key_url, key_id=account['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    return {
        'status': True,
        'key_id': account['identifier'],
        'key_data': key_data
    }
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.
    Replicate the public half to mutable or immutable storage.

    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.
    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    try:
        # stash the (private) key into the app-specific keyring
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}
    # get public key...
    assert is_valid_appname(appname)
    try:
        # export only the public half for replication
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}
    # fully-qualified record name: gpg.<app>.<key>
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    # fetch the stored public key first, so we know which local private key to unstash
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    dead_pubkey_kv = dead_pubkey_dict['data']
    # the stored record is a single {fq_key_name: pubkey} pair
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        # remote delete succeeded; local cleanup failure is only a warning
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key with good defaults (RSA-4096),
    stash it to the app-specific keyring locally, and replicate the
    public half to blockstack.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if proxy is None:
        client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate inside a throwaway keyring
    scratch_keyring = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=scratch_keyring )

    log.debug("Generating GPG key (this may take a while)")
    gen_params = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    gen_result = gpg.gen_key( gen_params )

    key_id = gen_result.fingerprint
    # include the secret key: gpg_app_put_key stashes it locally
    key_data = gpg.export_keys( [key_id], secret=True )
    shutil.rmtree(scratch_keyring)

    # propagate to blockstack
    return gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Fetch an app-specific GPG public key from mutable or immutable storage.

    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # immutable records are addressed by hash; mutable ones by version
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # derive the fingerprint from the fetched data
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk, producing a detached signature.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        # need the private key locally in order to sign
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    # do the signature (detached: signature returned separately from the data)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    shutil.rmtree(tmpdir)
    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # stash detached signature to a temporary file for gpg to read
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )
    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        # best-effort cleanup of the temp signature file
        pass
    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # FIX: this is a verify (not decrypt) failure; the old message
        # ('Failed to decrypt data') was a copy-paste from gpg_decrypt
        return {'error': 'Failed to verify data'}
    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of recipient keys, signing
    with the sender's key.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    (recipient_key_infos is a list of dicts of the same shape)

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our key
    try:
        # the private key is needed for signing
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption (always_trust: the throwaway keyring has no trust db)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.

    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    try:
        # need our private key in order to decrypt
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption (always_trust: the throwaway keyring has no trust db)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_fetch_key | python | def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
dat = None
from_blockstack = False
# make sure it's valid
try:
urlparse.urlparse(key_url)
except:
log.error("Invalid URL")
return None
if "://" in key_url and not key_url.lower().startswith("iks://"):
opener = None
key_data = None
# handle blockstack:// URLs
if key_url.startswith("blockstack://"):
blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
opener = urllib2.build_opener( blockstack_opener )
from_blockstack = True
elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
# fetch, but at least try not to look like a bot
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
else:
# defaults
opener = urllib2.build_opener()
try:
f = opener.open( key_url )
key_data_str = f.read()
key_data = None
if from_blockstack:
# expect: {'key name': 'PEM string'}
key_data_dict = json.loads(key_data_str)
assert len(key_data_dict) == 1, "Got multiple keys"
key_data = str(key_data_dict[key_data_dict.keys()[0]])
else:
# expect: PEM string
key_data = key_data_str
f.close()
except Exception, e:
log.exception(e)
if key_id is not None:
log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
else:
log.error("Failed to fetch key from '%s'" % key_url)
return None
# verify, if we have the ID.
# if we don't have the key ID, then we must be fetching from blockstack
# (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
if not from_blockstack and key_id is None:
log.error( "No key ID given for key located at %s" % key_url )
return None
if key_id is not None:
rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
if not rc:
log.error("Failed to verify key %s" % key_id)
return None
dat = key_data
else:
# iks protocol, fetch from keyserver
key_server = key_url
if '://' in key_server:
key_server = urlparse.urlparse(key_server).netloc
dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
return dat | Fetch a GPG public key from the given URL.
Supports anything urllib2 supports.
If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
The key is not accepted into any keyrings.
Return the key data on success. If key_id is given, verify the key matches.
Return None on error, or on failure to carry out any key verification | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L399-L488 | [
"def gpg_download_key( key_id, key_server, config_dir=None ):\n \"\"\"\n Download a GPG key from a key server.\n Do not import it into any keyrings.\n Return the ASCII-armored key\n \"\"\"\n\n config_dir = get_config_dir( config_dir )\n tmpdir = make_gpg_tmphome( prefix=\"download\", config_dir... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Return the configuration directory to use: the caller's value if
    given, otherwise the directory named in the loaded client config.
    """
    if config_dir is not None:
        return config_dir
    return get_config()['dir']
def is_valid_appname(appname):
    """
    Determine whether an application name is URL-safe.

    A valid name is a non-empty string of RFC 3986 unreserved
    characters minus '.' (i.e. letters, digits, '-', '_', '~').

    Return True if valid, False otherwise.
    """
    # RFC 3986 unreserved characters, except for .
    url_regex = '^[a-zA-Z0-9-_~]+$'
    # idiomatic: return the boolean directly instead of if/else
    return re.match(url_regex, appname) is not None
def is_valid_keyname(keyname):
    """
    Determine whether a key name is URL-safe.
    Key names follow the same rules as application names.
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application,
    creating it (mode 0700) if it does not yet exist.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )
    if not os.path.exists(path):
        os.makedirs( path, 0700 )
    else:
        # ensure the permissions are private even if the dir pre-exists
        os.chmod( path, 0700 )
    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Compute the path to the GPG keyring directory for a particular
    application, without creating it. Return the path.
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default user GPG keyring directory (~/.gnupg).

    NOTE: this code path has been deliberately disabled -- app-specific
    keyrings must be used instead -- so calling it always raises.

    Raises:
        Exception: always.
    """
    # FIX: corrected error-message typo "Should ever" -> "Should never"
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")  # unreachable, kept for reference
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.
    The caller is responsible for removing the directory.
    Return the path.
    """
    if prefix is None:
        prefix = "tmp"
    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # keep temporary keyrings private
        os.makedirs( tmppath, 0700 )
    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID

    appname -- URL-safe application name (selects the keyring when gpghome is None)
    key_bin -- serialized key material to import
    gpghome -- explicit keyring directory; overrides the app keyring

    Return the key ID on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )
    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None
    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    (also removes the secret key first, if one exists).
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key
        res = gpg.delete_keys( [key_id], secret=True )
    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False
    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )
    # use a throwaway keyring so nothing is permanently imported
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        # exactly one key must have been received
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key, by importing it
    into a throwaway keyring and reading the resulting fingerprint.
    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )
    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Check that a serialized key, once imported, carries the expected
    key ID (full match, or the given ID as a fingerprint suffix).
    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize the caller's ID: uppercase, spaces stripped
    expected = "".join( key_id.upper().split(" ") )
    if len(expected) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    actual = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if actual is None:
        log.debug("Failed to fingerprint key")
        return False

    if actual == expected or actual.endswith( expected ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Export the ASCII-armored key with the given ID from the
    application's keyring. Optionally include the private key.
    Raises AssertionError if the export fails.
    """
    assert is_valid_appname(appname)

    keyring_dir = get_gpg_home( appname, config_dir=get_config_dir( config_dir ) )
    gpg = gnupg.GPG( homedir=keyring_dir )

    key_data = gpg.export_keys( [key_id], secret=include_private )
    if not key_data:
        log.debug("Failed to export key %s from '%s'" % (key_id, keyring_dir))

    assert key_data
    return key_data
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile.

    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data}
    on success; each entry may also carry 'keyName' if one was recorded.
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')

    # extract: keep only PGP accounts and the fields callers need
    ret = []
    for account in accounts:
        if account['service'] != 'pgp':
            continue

        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }

        if 'keyName' in account.keys():
            info['keyName'] = account['keyName']

        ret.append(info)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.

    NOTE: currently disabled -- this unconditionally raises, since the
    implementation depends on the (removed) list_mutable_data API.
    Everything after the raise is dead code kept for reference.

    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.

    If @key_url is not given, the key will be replicated to the key server
    (when @use_key_server is True) and to either immutable (if @immutable)
    or mutable blockstack data, and a URL to that data is generated.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to the key server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # gnupg signals errors through res.data / res.stderr
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # BUG FIX: corrected typo in error message ("repliate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage (updates the zonefile; costs a transaction)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage, keyed by key name (or key ID if unnamed)
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the account entry that points at the replicated key
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.

    Return {'status': True, ...} on success.  May include 'delete_errors'
    if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all replicated key data referenced by the removed accounts
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete the underlying blockstack data
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError, e:
                # assertion failures indicate a programming error; re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception, e:
                # best-effort: record and keep deleting the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key with good defaults (4096-bit RSA/RSA).

    Note that without rngd running, key generation may take a while.
    The key is generated in a throwaway keyring, stashed to the user's
    global keyring, and then added to the user's blockchain account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate in a temporary keyring so partial failures don't pollute the real one
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")

    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint

    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get a profile key by name (or, optionally, by key ID).

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name (or the given key ID); it must be unique
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # fall back to the default key server if no content URL was recorded
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key, verifying it against the recorded identifier
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring, and
    replicate the public key to blockstack storage.

    Return {'status': True, 'key_url': ..., 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    # stash the (private) key material into the app-specific keyring first
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    # fully-qualified name under which the public key is published
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))

    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    Return {'status': True, ...} on success (may carry a 'warning' if the
    local private key could not be removed)
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    # fetch the published public key first, so we can derive the key ID
    # needed to unstash the local private key afterwards
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete the published key
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key (best-effort; NOTE: bare except is deliberate)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key with good defaults (4096-bit RSA/RSA),
    stash it to the app-specific keyring locally, and publish the public
    key via gpg_app_put_key.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate in a throwaway keyring; gpg_app_put_key stashes it permanently
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")

    # the user ID encodes both the blockchain ID and the application
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id], secret=True )

    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG public key.

    If @immutable, fetch via an immutable data URL built from @key_hash;
    otherwise build a mutable data URL from @key_version.

    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if immutable:
        # try immutable
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )

    else:
        # try mutable
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # derive the fingerprint from the fetched key data
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }

    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk, producing a detached signature.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the signing key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature (detached, so the file itself is not modified)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify that a file on disk was signed by the given sender.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a file, since gnupg wants a path
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)

    # best-effort cleanup of the signature tempfile
    try:
        os.unlink(path)
    except:
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # BUG FIX: this routine verifies signatures; the old message said
        # "Failed to decrypt data" (copy-paste from gpg_decrypt)
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of recipient keys, signing it
    with the sender's key.

    @sender_key_info and each recipient entry should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest all recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key (the private half, for signing)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption; always_trust since the keyring is ephemeral
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )

    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info for a private key we own.

    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # load our private key for decryption
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption; always_trust since the keyring is ephemeral
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )

    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_profile_put_key | python | def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
if key_name is not None:
assert is_valid_keyname(key_name)
if key_server is None:
key_server = DEFAULT_KEY_SERVER
if gpghome is None:
gpghome = get_default_gpg_home()
put_res = {}
extra_fields = {}
key_data = None
if key_name is not None:
extra_fields = {'keyName': key_name}
if key_url is None:
gpg = gnupg.GPG( homedir=gpghome )
if use_key_server:
# replicate key data to default server first
res = gpg.send_keys( key_server, key_id )
if len(res.data) > 0:
# error
log.error("GPG failed to upload key '%s'" % key_id)
log.error("GPG error:\n%s" % res.stderr)
return {'error': 'Failed to repliate GPG key to default keyserver'}
key_data = gpg.export_keys( [key_id] )
if immutable:
# replicate to immutable storage
immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
if 'error' in immutable_result:
return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
else:
put_res['transaction_hash'] = immutable_result['transaction_hash']
put_res['zonefile_hash'] = immutable_result['zonefile_hash']
key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
else:
# replicate to mutable storage
mutable_name = key_name
if key_name is None:
mutable_name = key_id
mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in mutable_result:
return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
if 'error' in put_account_res:
return put_account_res
else:
put_account_res.update( put_res )
put_account_res['key_url'] = key_url
put_account_res['key_id'] = key_id
return put_account_res | Put a local GPG key into a blockchain ID's global account.
If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.
Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
Return {'error': ...} on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L491-L562 | [
"def is_valid_keyname(keyname):\n \"\"\"\n Keyname must be url-save\n \"\"\"\n return is_valid_appname(keyname)\n",
"def get_default_gpg_home( config_dir=None ):\n \"\"\"\n Get the GPG keyring directory for a particular application.\n Return the path.\n \"\"\"\n raise Exception(\"Should... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.

    If @config_dir is given, return it unchanged; otherwise look it up
    from the loaded client configuration.
    """
    if config_dir is not None:
        return config_dir

    return get_config()['dir']
def is_valid_appname(appname):
    """
    Is the given application name URL-safe?

    Only RFC 3986 unreserved characters, minus '.', are allowed:
    alphanumerics, '-', '_', and '~'.  The name must be non-empty.
    """
    # RFC 3986 unreserved characters, except for .
    url_regex = '^[a-zA-Z0-9-_~]+$'
    # idiom: return the boolean directly instead of an if/else on the match
    return re.match(url_regex, appname) is not None
def is_valid_keyname(keyname):
    """
    Is the given key name URL-safe?  Key names obey the same rules
    as application names.
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make the GPG keyring directory for a particular application,
    creating it with mode 0700 if missing (and tightening permissions
    to 0700 if it already exists).  Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    if not os.path.exists(path):
        os.makedirs( path, 0700 )
    else:
        os.chmod( path, 0700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Compute the GPG keyring directory for a particular application.
    Does not create the directory.  Return the path.
    """
    assert is_valid_appname(appname)
    base_dir = get_config_dir( config_dir )
    return os.path.join( base_dir, "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the user's default GPG keyring directory (~/.gnupg).

    Deprecated: callers are expected to use per-application keyrings
    (see get_gpg_home), so reaching this code path is a bug.

    Raises:
        Exception: always.
    """
    # BUG FIX: message typo ("Should ever be called"); also removed the
    # unreachable `return os.path.expanduser("~/.gnupg")` after the raise
    raise Exception("Should never be called")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.

    The parent "tmp" directory is created with mode 0700 if needed.
    The caller is responsible for removing the returned directory
    (e.g. with shutil.rmtree).
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        os.makedirs( tmppath, 0700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring (or to @gpghome, if given).
    Does NOT put it into a blockchain ID.

    Return the key fingerprint on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a key locally from our local app keyring (or from @gpghome,
    if given).  If the key has a secret half, it is deleted as well.

    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; GPG requires deleting the secret half first
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings; a throwaway keyring is used
    for the fetch and removed afterwards.

    Return the ASCII-armored key (str) on success
    Return None on error
    """
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        # exactly one key must have been received
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]

    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the fingerprint of a given serialized (ASCII-armored) key.

    The key is imported into a throwaway GPG home directory, so the
    caller's keyrings are never modified.

    Return the fingerprint (str) on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )

    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"

        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint

    except AssertionError, e:
        log.exception(e)
        # always clean up the throwaway keyring, even on failure
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.

    @key_id may be a full fingerprint or a suffix of one (at least 16 hex
    characters); spaces are ignored and case is normalized.

    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize: upper-case and strip spaces (fingerprints are often grouped in 4s)
    sanitized_key_id = "".join( key_id.upper().split(" ") )

    if len(sanitized_key_id) < 16:
        # too short to be collision-resistant
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept an exact match, or a match on the fingerprint's suffix
    if sanitized_key_id != fingerprint and not fingerprint.endswith( sanitized_key_id ):
        log.debug("Imported key does not match the given ID")
        return False

    else:
        return True
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key from the app-specific keyring, given its ID.

    If @include_private is True, export the private key instead of the
    public one.

    Raises AssertionError if the key could not be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    listing = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in listing:
        return listing

    # keep only PGP accounts; carry the key name through when present
    ret = []
    for acct in listing.pop('accounts'):
        if acct['service'] != 'pgp':
            continue

        entry = {
            "identifier": acct['identifier'],
            "contentUrl": acct['contentUrl']
        }
        if 'keyName' in acct.keys():
            entry['keyName'] = acct['keyName']

        ret.append(entry)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: deliberately disabled -- the unconditional raise below makes
    everything after it dead code.  Kept for reference until
    list_mutable_data is available again.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
"""
Fetch a GPG public key from the given URL.
Supports anything urllib2 supports.
If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
The key is not accepted into any keyrings.
Return the key data on success. If key_id is given, verify the key matches.
Return None on error, or on failure to carry out any key verification
"""
dat = None
from_blockstack = False
# make sure it's valid
try:
urlparse.urlparse(key_url)
except:
log.error("Invalid URL")
return None
if "://" in key_url and not key_url.lower().startswith("iks://"):
opener = None
key_data = None
# handle blockstack:// URLs
if key_url.startswith("blockstack://"):
blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
opener = urllib2.build_opener( blockstack_opener )
from_blockstack = True
elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
# fetch, but at least try not to look like a bot
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
else:
# defaults
opener = urllib2.build_opener()
try:
f = opener.open( key_url )
key_data_str = f.read()
key_data = None
if from_blockstack:
# expect: {'key name': 'PEM string'}
key_data_dict = json.loads(key_data_str)
assert len(key_data_dict) == 1, "Got multiple keys"
key_data = str(key_data_dict[key_data_dict.keys()[0]])
else:
# expect: PEM string
key_data = key_data_str
f.close()
except Exception, e:
log.exception(e)
if key_id is not None:
log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
else:
log.error("Failed to fetch key from '%s'" % key_url)
return None
# verify, if we have the ID.
# if we don't have the key ID, then we must be fetching from blockstack
# (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
if not from_blockstack and key_id is None:
log.error( "No key ID given for key located at %s" % key_url )
return None
if key_id is not None:
rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
if not rc:
log.error("Failed to verify key %s" % key_id)
return None
dat = key_data
else:
# iks protocol, fetch from keyserver
key_server = key_url
if '://' in key_server:
key_server = urlparse.urlparse(key_server).netloc
dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
return dat
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all state: removed accounts may reference key data stored
    # via blockstack:// URLs, which we also try to delete (best-effort)
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                # assertion failures indicate a bug; propagate
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                # other failures: log and keep deleting the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    Raises AssertionError if key generation or local storage fails.
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate the key in a scratch keyring
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    try:
        gpg = gnupg.GPG( homedir=keydir )

        log.debug("Generating GPG key (this may take a while)")
        key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
        key_res = gpg.gen_key( key_input )
        assert key_res

        key_id = key_res.fingerprint
        key_data = gpg.export_keys( [key_id] )
        assert key_data

        # save the key itself, to the global keyring
        rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
        assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    finally:
        # previously only removed on success; remove unconditionally so a
        # failed generation does not leak the scratch keyring
        shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key with the given key name (or key ID).
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name, or with the given key ID
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # NOTE(review): falls back to DEFAULT_KEY_SERVER (a bare hostname) when no
    # contentUrl is present; gpg_fetch_key treats scheme-less values as key servers
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key, verifying it against the account's key ID
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the (possibly private) key locally to an app-specific keyring, and
    replicate only the public part to blockstack storage.
    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    # stash to the local app-specific keyring first
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        # only the public part goes to blockstack storage
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        # profile (mutable) storage; addressed by version
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        # zonefile (immutable) storage; addressed by hash
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    # fetch the key first: we need its fingerprint to remove it from the local keyring
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # stored as a single-entry {fq_key_name: pubkey} dict (see gpg_app_put_key)
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete from blockstack storage
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash from the local app keyring (best-effort outside of testing)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate the key in a scratch keyring
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    try:
        gpg = gnupg.GPG( homedir=keydir )

        log.debug("Generating GPG key (this may take a while)")
        key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
        key_res = gpg.gen_key( key_input )
        key_id = key_res.fingerprint
        # export the private part too, so gpg_app_put_key can stash it locally
        key_data = gpg.export_keys( [key_id], secret=True )
    finally:
        # previously only removed on success; remove unconditionally so a
        # failed generation does not leak the scratch keyring
        shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # immutable keys are addressed by hash; mutable keys by version
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # no ID given; derive the fingerprint from the data itself
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: copy the sender's private key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature (detached, so the original file is unmodified)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: the sender's public key goes into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a temporary file,
    # since gpg.verify_file() reads it from disk
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # was 'Failed to decrypt data' -- a copy/paste from gpg_decrypt
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys, signed by the sender.
    @sender_key_info and each recipient key info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: all recipient public keys go into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key (private part needed for signing)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption, signed by the sender.
    # always_trust: the throwaway keyring has no trust database
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys: the sender's public key (presumably for signature
    # verification -- TODO confirm) ...
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # ... and our private key, which actually decrypts
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_profile_delete_key | python | def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
removed_accounts = res['removed']
errors = []
# blow away all state
for account in removed_accounts:
if not account.has_key('contentUrl'):
continue
key_url = account['contentUrl']
if key_url.startswith("blockstack://"):
# delete
try:
res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
errors.append({'key_url': key_url, 'message': res['error']})
except AssertionError, e:
log.exception(e)
log.error("Failed to delete '%s'" % key_url)
raise
except Exception, e:
log.exception(e)
log.error("Failed to delete '%s'" % key_url)
continue
ret = {'status': True}
if len(errors) > 0:
ret['delete_errors'] = errors
return ret | Remove a GPG from a blockchain ID's global account.
Do NOT remove it from the local keyring.
Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
Return {'error': ...} on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L565-L606 | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Get the default configuration directory.

    If config_dir is given, it is returned unchanged; otherwise the
    directory is taken from the client configuration.
    """
    if config_dir is not None:
        return config_dir

    return get_config()['dir']
def is_valid_appname(appname):
    """
    Appname must be url-safe
    """
    # RFC 3896 unreserved characters, except for .
    return re.match('^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe (same rules as appnames).
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application,
    creating it if it does not yet exist.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    if not os.path.exists(path):
        # keyring directories must be private to the user
        os.makedirs( path, 0700 )
    else:
        # make sure existing directories are private too
        os.chmod( path, 0700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Get the GPG keyring directory for a particular application.
    Return the path (the directory is not created here).
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default GPG keyring directory (~/.gnupg).

    NOTE: deliberately disabled -- callers are expected to use
    app-specific keyrings, so reaching this is a bug.
    """
    # was "Should ever be called" -- message typo
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.

    The caller is responsible for removing the directory when done.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # keyrings must be private to the user
        os.makedirs( tmppath, 0700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring (or to gpghome, if given).
    Does NOT put it into a blockchain ID
    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # exactly one key must have come out of the import
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring (or gpghome, if given)
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; GPG requires the secret part be deleted first
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success
    Return None on error
    """
    config_dir = get_config_dir( config_dir )
    # receive into a throwaway keyring, then export it back out
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )

    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1

        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID (fingerprint) of a given ASCII-serialized key.

    The key is imported into a throwaway keyring (removed afterwards),
    so no persistent keyring is modified.

    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # import into a temporary keyring just to read the fingerprint
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )

    try:
        # exactly one key must have come out of the import
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"

        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Return True on success
    Return False on error
    """
    serialized_key = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize the expected ID: uppercase, drop embedded spaces
    wanted_id = "".join( key_id.upper().split(" ") )
    if len(wanted_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    actual_fingerprint = gpg_key_fingerprint( serialized_key, config_dir=config_dir )
    if actual_fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept an exact match, or a (>=16 char) suffix of the full fingerprint
    if wanted_id == actual_fingerprint or actual_fingerprint.endswith( wanted_id ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID.

    Reads from the app-specific keyring under the config directory.
    If include_private is True, the secret key is exported as well.

    Raises AssertionError if the key could not be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    listing = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in listing:
        return listing

    # keep only PGP accounts; carry the key name through when present
    ret = []
    for acct in listing.pop('accounts'):
        if acct['service'] != 'pgp':
            continue

        entry = {
            "identifier": acct['identifier'],
            "contentUrl": acct['contentUrl']
        }
        if 'keyName' in acct.keys():
            entry['keyName'] = acct['keyName']

        ret.append(entry)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: deliberately disabled -- the unconditional raise below makes
    everything after it dead code.  Kept for reference until
    list_mutable_data is available again.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.
    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False
    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    # anything with a scheme other than iks:// goes through urllib2;
    # iks:// (or a bare hostname) is treated as a PGP key server below
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None
        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key
    server and to either immutable (if @immutable) or mutable data.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)
    if key_server is None:
        key_server = DEFAULT_KEY_SERVER
    if gpghome is None:
        gpghome = get_default_gpg_home()
    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}
    if key_url is None:
        # no pre-existing URL: we must replicate the key data ourselves
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # non-empty response data from gnupg indicates an error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # BUGFIX: corrected typo "repliate" -> "replicate" in error message
                return {'error': 'Failed to replicate GPG key to default keyserver'}
        key_data = gpg.export_keys( [key_id] )
        if immutable:
            # replicate to immutable storage (updates the zonefile)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain.  Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']
            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id
            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s.  Error message: "%s"' % (key_id, mutable_result['error'])}
            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
    # record the (possibly newly-minted) key URL in the profile's accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res
    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res
    removed_accounts = res['removed']
    errors = []
    # blow away all state: best-effort deletion of each removed account's
    # backing data; individual failures are collected, not fatal
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue
        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                # assertion failures indicate a bug; re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                # any other failure is best-effort; keep going
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue
    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors
    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.
    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    # generate the key in a throwaway keyring, so a failure leaves no residue
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data
    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}
    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}
    # find the one with this key name (or, if given, the matching key ID)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}
    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}
    # fall back to the default key server if the account has no contentUrl
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)
    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}
    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }
    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.
    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}
    # get public key...
    assert is_valid_appname(appname)
    try:
        # export only the public portion for replication
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}
    # fully-qualified name: "gpg.<appname>.<keyname>"
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    # stored as a single-entry {fq_key_name: pubkey} dict
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash the local private key; failure here is a warning, not an error
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
    # generate in a throwaway keyring; gpg_app_put_key stashes the result
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    # export the private key too, so it can be stashed locally
    key_data = gpg.export_keys( [key_id], secret=True )
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if immutable:
        # try immutable
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        # try mutable
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )
    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}
    if key_id is None:
        # derive the fingerprint from the fetched key data
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )
    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    # do the signature (detached)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    shutil.rmtree(tmpdir)
    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name'; ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # stash detached signature to a temporary file, since gnupg wants a path
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )
    shutil.rmtree(tmpdir)
    # best-effort cleanup of the temporary signature file
    try:
        os.unlink(path)
    except:
        pass
    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name'; ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our key (the private key, needed for signing)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption, signed by the sender
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # load our private key for decryption
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_profile_create_key | python | def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
assert is_valid_keyname(keyname)
if config_dir is None:
config_dir = get_config_dir()
if gpghome is None:
gpghome = get_default_gpg_home()
keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=keydir )
log.debug("Generating GPG key (this may take a while)")
key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
key_res = gpg.gen_key( key_input )
assert key_res
key_id = key_res.fingerprint
key_data = gpg.export_keys( [key_id] )
assert key_data
# save the key itself, to the global keyring
rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
shutil.rmtree(keydir)
# propagate to blockstack
add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
return add_res | Create a new account key.
Select good default GPG values (4096-bit, RSA/RSA)
Note that without rngd running, this may take a while.
Add the new key to the user's account.
Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
Return {'error': ...} on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L609-L649 | [
"def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):\n \"\"\"\n Put a local GPG key into a blockchain ID's global account.\n If the URL is not given, the key will be repli... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory.

    If @config_dir is given, it is returned unchanged; otherwise the
    directory is taken from the client configuration ('dir' field).
    """
    if config_dir is not None:
        return config_dir
    return get_config()['dir']
def is_valid_appname(appname):
    """
    Check whether an application name is URL-safe.

    Allowed characters are the RFC 3896 unreserved set, except for '.':
    letters, digits, '-', '_', and '~'.  Returns True/False.
    """
    # RFC 3896 unreserved characters, except for .
    return re.match('^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe.
    Same character rules as application names (see is_valid_appname).
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application.
    Creates <config_dir>/gpgkeys/<appname> if missing, and forces
    owner-only permissions (0700) either way.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )
    if not os.path.exists(path):
        # Python 2 octal literal: 0700 == owner rwx only
        os.makedirs( path, 0700 )
    else:
        os.chmod( path, 0700 )
    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Compute the GPG keyring directory path for a particular application.
    Unlike make_gpg_home, this does not create the directory.
    Return the path.
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (user-global) GPG keyring directory (~/.gnupg).

    Deliberately disabled: this code path must never be taken, so it
    raises unconditionally.  The return below is intentionally
    unreachable, kept to document the would-be behavior.
    """
    # BUGFIX: error message said "Should ever be called" (typo)
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.
    The directory is created under <config_dir>/tmp with owner-only
    permissions; the caller is responsible for removing it.
    """
    if prefix is None:
        prefix = "tmp"
    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # Python 2 octal literal: 0700 == owner rwx only
        os.makedirs( tmppath, 0700 )
    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0
    # explicit gpghome overrides the per-app keyring location
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )
    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None
    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; gnupg requires deleting the secret part first
        res = gpg.delete_keys( [key_id], secret=True )
    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False
    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )
    # receive into a throwaway keyring, export, then destroy the keyring
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key
    (by importing it into a throwaway keyring and reading the fingerprint).
    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )
    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    The given ID may be a full fingerprint or a (>= 16 hex char) suffix of it.
    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # normalize: uppercase, drop spaces
    expected = "".join( key_id.upper().split(" ") )
    if len(expected) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False
    fp = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fp is None:
        log.debug("Failed to fingerprint key")
        return False
    # accept an exact match, or a match on the fingerprint's suffix
    if fp == expected or fp.endswith( expected ):
        return True
    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID.
    If include_private is True, export the secret key instead of the public one.
    Raises AssertionError if the key cannot be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    ('keyName' is included when the account carries one.)
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')
    # extract only the 'pgp' service accounts
    ret = []
    for account in accounts:
        if account['service'] != 'pgp':
            continue
        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }
        if 'keyName' in account.keys():
            info['keyName'] = account['keyName']
        ret.append(info)
    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error
    """
    # NOTE(review): intentionally disabled -- everything below the raise is
    # dead code, kept for reference until list_mutable_data is reimplemented.
    raise Exception("BROKEN; depends on list_mutable_data")
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    key_info = []
    # app keys are namespaced as "gpg.<appname>.<keyname>"
    key_prefix = "gpg.%s." % appname
    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])
    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            # strip the namespace prefix to recover the bare key name
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })
    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])
    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })
    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
"""
Fetch a GPG public key from the given URL.
Supports anything urllib2 supports.
If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
The key is not accepted into any keyrings.
Return the key data on success. If key_id is given, verify the key matches.
Return None on error, or on failure to carry out any key verification
"""
dat = None
from_blockstack = False
# make sure it's valid
try:
urlparse.urlparse(key_url)
except:
log.error("Invalid URL")
return None
if "://" in key_url and not key_url.lower().startswith("iks://"):
opener = None
key_data = None
# handle blockstack:// URLs
if key_url.startswith("blockstack://"):
blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
opener = urllib2.build_opener( blockstack_opener )
from_blockstack = True
elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
# fetch, but at least try not to look like a bot
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
else:
# defaults
opener = urllib2.build_opener()
try:
f = opener.open( key_url )
key_data_str = f.read()
key_data = None
if from_blockstack:
# expect: {'key name': 'PEM string'}
key_data_dict = json.loads(key_data_str)
assert len(key_data_dict) == 1, "Got multiple keys"
key_data = str(key_data_dict[key_data_dict.keys()[0]])
else:
# expect: PEM string
key_data = key_data_str
f.close()
except Exception, e:
log.exception(e)
if key_id is not None:
log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
else:
log.error("Failed to fetch key from '%s'" % key_url)
return None
# verify, if we have the ID.
# if we don't have the key ID, then we must be fetching from blockstack
# (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
if not from_blockstack and key_id is None:
log.error( "No key ID given for key located at %s" % key_url )
return None
if key_id is not None:
rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
if not rc:
log.error("Failed to verify key %s" % key_id)
return None
dat = key_data
else:
# iks protocol, fetch from keyserver
key_server = key_url
if '://' in key_server:
key_server = urlparse.urlparse(key_server).netloc
dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)
    if key_server is None:
        key_server = DEFAULT_KEY_SERVER
    if gpghome is None:
        gpghome = get_default_gpg_home()
    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}
    if key_url is None:
        # no pre-existing URL; replicate the key data ourselves
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # gnupg reports failure by returning stderr output here
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # bugfix: message previously read "repliate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}
        key_data = gpg.export_keys( [key_id] )
        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']
            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
        else:
            # replicate to mutable storage, named after the key (or its ID)
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id
            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
    # record the account entry that points at the replicated key
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res
    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res
    removed_accounts = res['removed']
    errors = []
    # blow away all state: delete the replicated key data behind each removed account
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            # nothing replicated for this account entry
            continue
        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # key data lives in blockstack-managed storage; delete it too
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                # precondition violation inside the client; treat as fatal
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                # best-effort: log and keep deleting the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue
    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors
    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.
    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    # generate in a throwaway keyring so a failure can't pollute the global one
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data
    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}
    # only consider PGP accounts
    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}
    # find the one with this key name (or, if given, the matching key ID)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}
    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}
    # fall back to the default key server if no URL was recorded
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)
    # go get the key, verifying it against the recorded fingerprint
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}
    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }
    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
"""
Put an application GPG key.
Stash the private key locally to an app-specific keyring.
Return {'status': True, 'key_url': ..., 'key_data': ...} on success
Return {'error': ...} on error
If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.
Otherwise, the key is stored to mutable storage.
"""
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
try:
keydir = make_gpg_home( appname, config_dir=config_dir )
key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
assert key_id is not None, "Failed to stash key"
log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
except Exception, e:
log.exception(e)
log.error("Failed to store GPG key '%s'" % keyname)
return {'error': "Failed to store GPG key locally"}
# get public key...
assert is_valid_appname(appname)
try:
pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
except:
return {'error': 'Failed to load key'}
fq_key_name = "gpg.%s.%s" % (appname, keyname)
key_url = None
if not immutable:
res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
else:
res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
res['key_url'] = key_url
res['key_data'] = pubkey_data
res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error
    If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take
    on the order of an hour to complete on the blockchain. A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    # fetch the public key first: we need its fingerprint to unstash the
    # local private key after the remote delete
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    dead_pubkey_kv = dead_pubkey_dict['data']
    # stored as a single {fq_key_name: key data} pair
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash the local private key; treated as non-fatal in production
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
    # generate in a throwaway keyring; gpg_app_put_key stashes it for real
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    # bugfix: bail if key generation failed instead of exporting a bogus
    # fingerprint (consistent with gpg_profile_create_key)
    assert key_res
    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id], secret=True )
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Look up an app-specific GPG public key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    # immutable keys are addressed by hash; mutable ones by version
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )
    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}
    if key_id is None:
        # derive the fingerprint from the data we just fetched
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )
    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring, so we never touch the app keyrings
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        # we need the sender's *private* key to sign
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    # do the signature (detached, so the file itself is left untouched)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    shutil.rmtree(tmpdir)
    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # stash the detached signature to a temporary file, since gpg wants a path
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )
    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass
    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # bugfix: this is a signature-verification failure; the old message
        # ('Failed to decrypt data') was copy-pasted from gpg_decrypt
        return {'error': 'Failed to verify data'}
    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info and each recipient entry should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our key (the private half, since we sign as well as encrypt)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption, signed by the sender
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring: the sender's public key (to check
    # the signature) and our private key (to decrypt)
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_profile_get_key | python | def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
assert is_valid_keyname( keyname )
if config_dir is None:
config_dir = get_config_dir()
if gpghome is None:
gpghome = get_default_gpg_home()
accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
if 'error' in accounts:
return accounts
accounts = accounts.pop('accounts')
if len(accounts) == 0:
return {'error': 'No accounts in this profile'}
all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
if len(all_gpg_accounts) == 0:
return {'error': 'No GPG accounts in this profile'}
# find the one with this key name
gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
if len(gpg_accounts) == 0:
return {'error': 'No such GPG key found'}
if len(gpg_accounts) > 1:
return {'error': 'Multiple keys with that name'}
key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)
# go get the key
key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
if key_data is None:
return {'error': 'Failed to download and verify key'}
ret = {
'status': True,
'key_id': gpg_accounts[0]['identifier'],
'key_data': key_data
}
return ret | Get the profile key
Return {'status': True, 'key_data': ..., 'key_id': ...} on success
Return {'error': ...} on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L652-L700 | [
"def gpg_fetch_key( key_url, key_id=None, config_dir=None ):\n \"\"\"\n Fetch a GPG public key from the given URL.\n Supports anything urllib2 supports.\n If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.\n The key is not accepted into any keyrings.\n Return th... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.
    If config_dir is given, it is returned unchanged; otherwise the
    'dir' entry of the loaded client configuration is used.
    """
    if config_dir is not None:
        return config_dir
    return get_config()['dir']
def is_valid_appname(appname):
    """
    Check that an application name is URL-safe.
    Allowed characters are the RFC 3986 unreserved set, minus '.'.
    Return True if valid, False otherwise.
    """
    # RFC 3986 unreserved characters, except for '.'
    return re.match('^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be URL-safe.
    Uses the same character rules as application names.
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
"""
Make GPG keyring dir for a particular application.
Return the path.
"""
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
path = os.path.join( config_dir, "gpgkeys", appname )
if not os.path.exists(path):
os.makedirs( path, 0700 )
else:
os.chmod( path, 0700 )
return path
def get_gpg_home( appname, config_dir=None ):
    """
    Compute the path to an application's GPG keyring directory.
    Does not create it.  Return the path.
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the user's default GPG keyring directory (~/.gnupg).
    NOTE: this code path is deliberately disabled; callers must always
    use an app-specific keyring instead.
    """
    # bugfix: message previously read "Should ever be called"
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.
    Return the path.  The caller is responsible for removing it.
    """
    if prefix is None:
        prefix = "tmp"
    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # owner-only, since these dirs may hold private keys
        os.makedirs( tmppath, 0700 )
    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0
    if gpghome is None:
        # default to the app-specific keyring
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )
    try:
        # expect exactly one key imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None
    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    (deletes the secret key first, if one is present).
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; delete the secret key instead
        res = gpg.delete_keys( [key_id], secret=True )
    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False
    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )
    # receive into a throwaway keyring so nothing is permanently trusted
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        # expect exactly one key received
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key
    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # import into a throwaway keyring just to read the fingerprint
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )
    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a serialized key, once imported, carries the given key ID.
    The ID may be a full fingerprint or a suffix of one (at least 16 hex
    characters); embedded spaces and lowercase hex are tolerated.
    Return True on success, False on error.
    """
    conf_dir = get_config_dir( config_dir )
    wanted_id = "".join( key_id.upper().split(" ") )
    if len(wanted_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False
    fingerprint = gpg_key_fingerprint( str(key_data), config_dir=conf_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False
    if fingerprint == wanted_id or fingerprint.endswith( wanted_id ):
        return True
    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID
    """
    assert is_valid_appname(appname)

    # export straight from the app-specific keyring
    keyring_dir = get_gpg_home( appname, config_dir=get_config_dir( config_dir ) )
    exported = gnupg.GPG( homedir=keyring_dir ).export_keys( [key_id], secret=include_private )

    if not exported:
        log.debug("Failed to export key %s from '%s'" % (key_id, keyring_dir))

    assert exported
    return exported
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    listing = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in listing:
        return listing

    # keep only PGP accounts, projecting out the fields we care about
    results = []
    for acct in listing.pop('accounts'):
        if acct['service'] != 'pgp':
            continue

        entry = {
            "identifier": acct['identifier'],
            "contentUrl": acct['contentUrl']
        }

        # keyName is optional on an account entry
        if 'keyName' in acct.keys():
            entry['keyName'] = acct['keyName']

        results.append(entry)

    return results
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE(review): currently disabled -- everything after the raise below
    is unreachable until list_mutable_data is available again.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    # app keys are namespaced as "gpg.<appname>.<keyname>"
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            # strip the "gpg.<appname>." prefix to recover the bare key name
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
"""
Fetch a GPG public key from the given URL.
Supports anything urllib2 supports.
If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
The key is not accepted into any keyrings.
Return the key data on success. If key_id is given, verify the key matches.
Return None on error, or on failure to carry out any key verification
"""
dat = None
from_blockstack = False
# make sure it's valid
try:
urlparse.urlparse(key_url)
except:
log.error("Invalid URL")
return None
if "://" in key_url and not key_url.lower().startswith("iks://"):
opener = None
key_data = None
# handle blockstack:// URLs
if key_url.startswith("blockstack://"):
blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
opener = urllib2.build_opener( blockstack_opener )
from_blockstack = True
elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
# fetch, but at least try not to look like a bot
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
else:
# defaults
opener = urllib2.build_opener()
try:
f = opener.open( key_url )
key_data_str = f.read()
key_data = None
if from_blockstack:
# expect: {'key name': 'PEM string'}
key_data_dict = json.loads(key_data_str)
assert len(key_data_dict) == 1, "Got multiple keys"
key_data = str(key_data_dict[key_data_dict.keys()[0]])
else:
# expect: PEM string
key_data = key_data_str
f.close()
except Exception, e:
log.exception(e)
if key_id is not None:
log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
else:
log.error("Failed to fetch key from '%s'" % key_url)
return None
# verify, if we have the ID.
# if we don't have the key ID, then we must be fetching from blockstack
# (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
if not from_blockstack and key_id is None:
log.error( "No key ID given for key located at %s" % key_url )
return None
if key_id is not None:
rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
if not rc:
log.error("Failed to verify key %s" % key_id)
return None
dat = key_data
else:
# iks protocol, fetch from keyserver
key_server = key_url
if '://' in key_server:
key_server = urlparse.urlparse(key_server).netloc
dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key
    server and to either immutable (if @immutable) or mutable data.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no pre-existing URL; replicate the key data ourselves
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # NOTE: fixed typo in this message (was "repliate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage (hash goes into the zonefile)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage, named after the key (or its ID)
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the account entry that points at the replicated key
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all state: also delete the replicated key data that each
    # removed account entry pointed at (blockstack:// URLs only)
    for account in removed_accounts:

        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError, e:
                # assertion failures indicate a bug; re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception, e:
                # best-effort: log and keep deleting the remaining entries
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        # partial success: report which URLs could not be deleted
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate in a scratch keyring; remove it no matter what happens
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    try:
        gpg = gnupg.GPG( homedir=keydir )

        log.debug("Generating GPG key (this may take a while)")
        key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
        key_res = gpg.gen_key( key_input )
        assert key_res

        key_id = key_res.fingerprint
        key_data = gpg.export_keys( [key_id] )
        assert key_data

        # save the key itself, to the global keyring
        rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
        assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    finally:
        # previously the scratch keyring leaked if any step above failed
        shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    # only 'pgp' accounts can hold keys
    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name (or, if given, this exact key ID)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # where the key data lives; fall back to the default key server
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key, verifying it against the account's recorded fingerprint
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.
    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    # stash the (possibly private) key into the app-specific local keyring
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    # fully-qualified name under which the public key is replicated
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        # replicate the public key to mutable storage (fast path)
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        # replicate the public key hash to the zonefile (slow; requires a blockchain transaction)
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    # fetch the replicated public key first: we need its fingerprint in order
    # to unstash the private key from the local keyring afterwards
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # payload is a single-entry dict: {fq_key_name: pubkey PEM}
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete the replicated public key
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the private key from the local app keyring (best-effort)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate in a scratch keyring; remove it no matter what happens
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    try:
        gpg = gnupg.GPG( homedir=keydir )

        log.debug("Generating GPG key (this may take a while)")
        key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
        key_res = gpg.gen_key( key_input )

        # fail fast on generation errors (consistent with gpg_profile_create_key)
        assert key_res, "Failed to generate GPG key"

        key_id = key_res.fingerprint
        key_data = gpg.export_keys( [key_id], secret=True )
    finally:
        # previously the scratch keyring leaked if generation or export failed
        shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # resolve to an immutable-data or mutable-data URL, depending on how
    # the key was replicated
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    # fingerprint the fetched key if the caller didn't supply the ID
    if key_id is None:
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
"""
Sign a file on disk.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True, 'sig': ...} on success
Return {'error': ...} on error
"""
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
try:
sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
except Exception, e:
log.exception(e)
shutil.rmtree(tmpdir)
return {'error': 'No such private key'}
res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load sender private key'}
# do the signature
gpg = gnupg.GPG( homedir=tmpdir )
res = None
with open(path_to_sign, "r") as fd_in:
res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
shutil.rmtree(tmpdir)
if not res:
log.debug("sign_file error: %s" % res.__dict__)
log.debug("signer: %s" % sender_key_info['key_id'])
return {'error': 'Failed to sign data'}
return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys into a scratch keyring; always remove it
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    try:
        res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

        # stash detached signature to a temporary file
        fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
        try:
            f = os.fdopen(fd, "w")
            f.write( sigdata )
            f.flush()
            os.fsync(f.fileno())
            f.close()

            # verify
            gpg = gnupg.GPG( homedir=tmpdir )
            with open(path, "r") as fd_in:
                res = gpg.verify_file( fd_in, data_filename=path_to_verify )
        finally:
            # best-effort removal of the signature tempfile
            try:
                os.unlink(path)
            except:
                pass
    finally:
        # previously tmpdir (and the sig tempfile) leaked if verify_file raised
        shutil.rmtree(tmpdir)

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # NOTE: fixed error message (was 'Failed to decrypt data')
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
"""
Encrypt a stream of data for a set of keys.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name'; ...
}
Return {'status': True} on success
Return {'error': ...} on error
"""
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
for key_info in recipient_key_infos:
res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to stash key %s' % key_info['key_id']}
# copy over our key
try:
sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
except Exception, e:
log.exception(e)
shutil.rmtree(tmpdir)
return {'error': 'No such private key'}
res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load sender private key'}
recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
# do the encryption
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
shutil.rmtree(tmpdir)
if res.status != 'encryption ok':
log.debug("encrypt_file error: %s" % res.__dict__)
log.debug("recipients: %s" % recipient_key_ids)
log.debug("signer: %s" % sender_key_info['key_id'])
return {'error': 'Failed to encrypt data'}
return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys into a scratch keyring; always remove it
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    try:
        res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

        try:
            my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
        except Exception:
            return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

        res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            return {'error': 'Failed to load private key'}

        # do the decryption
        gpg = gnupg.GPG( homedir=tmpdir )
        res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    finally:
        # previously tmpdir leaked if decrypt_file raised
        shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_app_put_key | python | def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
try:
keydir = make_gpg_home( appname, config_dir=config_dir )
key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
assert key_id is not None, "Failed to stash key"
log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
except Exception, e:
log.exception(e)
log.error("Failed to store GPG key '%s'" % keyname)
return {'error': "Failed to store GPG key locally"}
# get public key...
assert is_valid_appname(appname)
try:
pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
except:
return {'error': 'Failed to load key'}
fq_key_name = "gpg.%s.%s" % (appname, keyname)
key_url = None
if not immutable:
res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
else:
res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
res['key_url'] = key_url
res['key_data'] = pubkey_data
res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
return res | Put an application GPG key.
Stash the private key locally to an app-specific keyring.
Return {'status': True, 'key_url': ..., 'key_data': ...} on success
Return {'error': ...} on error
If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.
Otherwise, the key is stored to mutable storage. | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L703-L759 | [
"def is_valid_appname(appname):\n \"\"\"\n Appname must be url-safe\n \"\"\"\n # RFC 3896 unreserved characters, except for .\n url_regex = '^[a-zA-Z0-9-_~]+$'\n if re.match(url_regex, appname) is None:\n return False\n else:\n return True\n",
"def is_valid_keyname(keyname):\n ... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
Blockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Get the default configuration directory.
    Pass through a non-None config_dir unchanged.
    """
    if config_dir is not None:
        return config_dir

    # fall back to the directory recorded in the client config
    return get_config()['dir']
def is_valid_appname(appname):
    """
    Is the given application name url-safe?

    Only RFC 3986 unreserved characters, except for '.', are allowed:
    letters, digits, '-', '_' and '~'.

    Return True if valid, False otherwise.
    """
    # NOTE: fixed RFC number in the comment (was "3896")
    # RFC 3986 unreserved characters, except for .
    url_regex = '^[a-zA-Z0-9-_~]+$'
    return re.match(url_regex, appname) is not None
def is_valid_keyname(keyname):
    """
    Is the given key name url-safe?
    Key names follow the same character rules as application names.
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    # keyrings can hold private keys; keep the directory owner-only.
    # 0o700 spelling is valid in Python 2.6+ and 3.x (was py2-only 0700).
    if not os.path.exists(path):
        os.makedirs( path, 0o700 )
    else:
        os.chmod( path, 0o700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Get the GPG keyring directory for a particular application.
    Return the path (it is not created here).
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (user-global) GPG keyring directory.

    NOTE: deliberately disabled -- callers must use per-app keyrings.
    Kept so that accidental callers fail loudly.

    Raises:
        Exception: always.
    """
    # fixed message typo (was "Should ever be called")
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.
    Return the path.  The caller is responsible for removing it.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # may briefly hold private key material; owner-only perms.
        # 0o700 spelling is valid in Python 2.6+ and 3.x (was py2-only 0700).
        os.makedirs( tmppath, 0o700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
"""
Store a key locally to our app keyring.
Does NOT put it into a blockchain ID
Return the key ID on success
Return None on error
"""
assert is_valid_appname(appname)
key_bin = str(key_bin)
assert len(key_bin) > 0
if gpghome is None:
config_dir = get_config_dir( config_dir )
keydir = make_gpg_home( appname, config_dir=config_dir )
else:
keydir = gpghome
gpg = gnupg.GPG( homedir=keydir )
res = gpg.import_keys( key_bin )
try:
assert res.count == 1, "Failed to store key (%s)" % res
except AssertionError, e:
log.exception(e)
log.error("Failed to store key to %s" % keydir)
log.debug("res: %s" % res.__dict__)
log.debug("(%s)\n%s" % (len(key_bin), key_bin))
return None
return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
"""
Remove a public key locally from our local app keyring
Return True on success
Return False on error
"""
assert is_valid_appname(appname)
if gpghome is None:
config_dir = get_config_dir( config_dir )
keydir = get_gpg_home( appname, config_dir=config_dir )
else:
keydir = gpghome
gpg = gnupg.GPG( homedir=keydir )
res = gpg.delete_keys( [key_id] )
if res.status == 'Must delete secret key first':
# this is a private key
res = gpg.delete_keys( [key_id], secret=True )
try:
assert res.status == 'ok', "Failed to delete key (%s)" % res
except AssertionError, e:
log.exception(e)
log.error("Failed to delete key '%s'" % key_id)
log.debug("res: %s" % res.__dict__)
return False
return True
def gpg_download_key( key_id, key_server, config_dir=None ):
"""
Download a GPG key from a key server.
Do not import it into any keyrings.
Return the ASCII-armored key
"""
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
recvdat = gpg.recv_keys( key_server, key_id )
fingerprint = None
try:
assert recvdat.count == 1
assert len(recvdat.fingerprints) == 1
fingerprint = recvdat.fingerprints[0]
except AssertionError, e:
log.exception(e)
log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
shutil.rmtree( tmpdir )
return None
keydat = gpg.export_keys( [fingerprint] )
shutil.rmtree( tmpdir )
return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
"""
Get the key ID of a given serialized key
Return the fingerprint on success
Return None on error
"""
key_data = str(key_data)
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.import_keys( key_data )
try:
assert res.count == 1, "Failed to import key"
assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
fingerprint = res.fingerprints[0]
shutil.rmtree(tmpdir)
return fingerprint
except AssertionError, e:
log.exception(e)
shutil.rmtree(tmpdir)
return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a serialized key, once imported, carries the given key ID.
    Return True on success; False otherwise.
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # normalize: uppercase, strip spaces
    sanitized_key_id = "".join( key_id.upper().split(" ") )
    if len(sanitized_key_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False
    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False
    # accept an exact match, or a match on the fingerprint's suffix
    if fingerprint == sanitized_key_id or fingerprint.endswith( sanitized_key_id ):
        return True
    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Return the ASCII-armored key with the given ID from the app's
    keyring.  Asserts that the key exists.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    home = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=home )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, home))
    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile.
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data}
    (plus 'keyName' when the account carries one) on success.
    Return {'error': ...} on failure.
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    # keep only the PGP accounts
    pgp_accounts = [a for a in accounts.pop('accounts') if a['service'] == 'pgp']
    listing = []
    for acct in pgp_accounts:
        entry = {
            "identifier": acct['identifier'],
            "contentUrl": acct['contentUrl']
        }
        if 'keyName' in acct:
            entry['keyName'] = acct['keyName']
        listing.append(entry)
    return listing
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error
    """
    # NOTE: deliberately disabled -- this depends on the removed
    # list_mutable_data API.  Everything after this raise is dead code,
    # preserved for when the dependency is restored.
    raise Exception("BROKEN; depends on list_mutable_data")
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    key_info = []
    key_prefix = "gpg.%s." % appname
    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])
    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })
    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])
    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })
    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.
    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False
    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    # anything with a scheme other than iks:// goes through urllib2
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None
        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            # strip the scheme; gnupg wants a bare host
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)
    if key_server is None:
        key_server = DEFAULT_KEY_SERVER
    if gpghome is None:
        gpghome = get_default_gpg_home()
    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}
    if key_url is None:
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # non-empty response data indicates an error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # BUG FIX: message previously misspelled as "repliate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}
        key_data = gpg.export_keys( [key_id] )
        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']
            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
        else:
            # replicate to mutable storage, named after the key (or its ID)
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id
            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
    # register the key with the profile's 'pgp' account
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res
    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res
    removed_accounts = res['removed']
    errors = []
    # blow away all state
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue
        key_url = account['contentUrl']
        # only blockstack:// data is ours to delete
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                # assertion failures indicate a bug; re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                # best-effort: keep deleting the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue
    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors
    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA).
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    # BUG FIX: remove the temporary keygen keyring even when one of the
    # asserts below fails (it used to leak on the failure paths).
    try:
        gpg = gnupg.GPG( homedir=keydir )
        log.debug("Generating GPG key (this may take a while)")
        key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
        key_res = gpg.gen_key( key_input )
        assert key_res
        key_id = key_res.fingerprint
        key_data = gpg.export_keys( [key_id] )
        assert key_data
        # save the key itself, to the global keyring
        rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
        assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    finally:
        shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Fetch a named PGP key from a user's profile.
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}
    all_gpg_accounts = [a for a in accounts if a['service'] == 'pgp']
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}
    # select by key name, or (when given) by explicit key ID
    def _matches(ga):
        if ga.has_key('keyName') and ga['keyName'] == keyname:
            return True
        return key_id is not None and ga['identifier'] == key_id
    gpg_accounts = [ga for ga in all_gpg_accounts if _matches(ga)]
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}
    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}
    account = gpg_accounts[0]
    key_url = account.get('contentUrl', DEFAULT_KEY_SERVER)
    # go get the key, verifying it against the account's fingerprint
    key_data = gpg_fetch_key( key_url, key_id=account['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}
    return {
        'status': True,
        'key_id': account['identifier'],
        'key_data': key_data
    }
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
"""
Put an application GPG key.
Stash the private key locally to an app-specific keyring.
Return {'status': True, 'key_url': ..., 'key_data': ...} on success
Return {'error': ...} on error
If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.
Otherwise, the key is stored to mutable storage.
"""
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
try:
keydir = make_gpg_home( appname, config_dir=config_dir )
key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
assert key_id is not None, "Failed to stash key"
log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
except Exception, e:
log.exception(e)
log.error("Failed to store GPG key '%s'" % keyname)
return {'error': "Failed to store GPG key locally"}
# get public key...
assert is_valid_appname(appname)
try:
pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
except:
return {'error': 'Failed to load key'}
fq_key_name = "gpg.%s.%s" % (appname, keyname)
key_url = None
if not immutable:
res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
else:
res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
res['key_url'] = key_url
res['key_data'] = pubkey_data
res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error
    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    # we only ever store a single {name: key} pair
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    # fingerprint identifies the key to remove from the local keyring
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        # best-effort: the remote delete succeeded, so only warn
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
    # generate in a throwaway keyring first
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    # export the secret key too, so gpg_app_put_key can stash it locally
    key_data = gpg.export_keys( [key_id], secret=True )
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    # build the data URL for either immutable or mutable storage
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )
    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}
    # derive the fingerprint if the caller didn't supply one
    fingerprint = key_id if key_id is not None else gpg_key_fingerprint( key_data, config_dir=config_dir )
    return {
        'status': True,
        'key_id': fingerprint,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        # our signing key must exist in the sender's app keyring
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    # do the signature
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        # detached signature, so the original file is left untouched
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    shutil.rmtree(tmpdir)
    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # stash detached signature to a temporary file
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )
    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass
    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # BUG FIX: error message said 'Failed to decrypt data'
        # (copy-pasted from gpg_decrypt); this is a verification failure.
        return {'error': 'Failed to verify data'}
    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys: each recipient's public key goes into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our key, so we can sign the ciphertext
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption
    gpg = gnupg.GPG( homedir=tmpdir )
    # always_trust: the keys were just verified/stashed by us above
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys: sender's public key for signature checking
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # our private key does the actual decryption
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_app_delete_key | python | def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
fq_key_name = "gpg.%s.%s" % (appname, keyname)
result = {}
dead_pubkey_dict = None
dead_pubkey = None
key_id = None
if not immutable:
# find the key first, so we can get the key ID and then remove it locally
dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in dead_pubkey_dict:
return dead_pubkey_dict
else:
# need the key ID so we can unstash locally
dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
if 'error' in dead_pubkey_dict:
return dead_pubkey_dict
dead_pubkey_kv = dead_pubkey_dict['data']
assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
assert key_id is not None, "Failed to load pubkey fingerprint"
# actually delete
if not immutable:
result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
else:
result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
if 'error' in result:
return result
# unstash
try:
rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
assert rc, "Failed to unstash key"
except:
log.warning("Failed to remove private key for '%s'" % key_id )
result['warning'] = "Failed to remove private key"
if os.environ.get('BLOCKSTACK_TEST') is not None:
# make sure this never happens in testing
raise
return result | Remove an application GPG key.
Unstash the local private key.
Return {'status': True, ...} on success
Return {'error': ...} on error
If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take
on the order of an hour to complete on the blockchain. A transaction ID will be returned to you
on successful deletion, and it will be up to you to wait for the transaction to get confirmed. | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L762-L824 | [
"def is_valid_appname(appname):\n \"\"\"\n Appname must be url-safe\n \"\"\"\n # RFC 3896 unreserved characters, except for .\n url_regex = '^[a-zA-Z0-9-_~]+$'\n if re.match(url_regex, appname) is None:\n return False\n else:\n return True\n",
"def is_valid_keyname(keyname):\n ... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client   # short alias used throughout this module
log = get_logger("blockstack-gpg")
import urllib
# default PGP key server used when no key URL is supplied
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Return the configuration directory, falling back to the
    directory named in the loaded client config when none is given.
    """
    if config_dir is not None:
        return config_dir
    conf = get_config()
    return conf['dir']
def is_valid_appname(appname):
    """
    An application name must be URL-safe: only the RFC 3986
    unreserved characters (minus '.') are allowed.
    """
    # letters, digits, '-', '_', '~'; must be non-empty
    return re.match('^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe (same rule as app names)
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )
    if not os.path.exists(path):
        # keyrings hold key material; user-only permissions
        os.makedirs( path, 0700 )
    else:
        # re-assert permissions on an existing keyring dir
        os.chmod( path, 0700 )
    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Return the path to the GPG keyring directory for an application.
    Does not create the directory.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    return os.path.join( config_dir, "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (user-global) GPG keyring directory.

    NOTE: intentionally disabled -- callers are expected to use
    app-specific keyrings; reaching this is a programming error.

    Raises:
        Exception: always.
    """
    # BUG FIX: message was "Should ever be called" (missing "never")
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")   # unreachable; kept from original
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Create a fresh temporary directory for holding GPG keys that are
    not meant to persist in the application's keyring.
    Return the path to the new directory.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmp_root = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmp_root ):
        os.makedirs( tmp_root, 0o700 )

    return tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmp_root )
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID
    Return the key ID on success
    Return None on error
    """
    assert is_valid_appname(appname)
    # gnupg needs str (not unicode) key material
    key_bin = str(key_bin)
    assert len(key_bin) > 0
    if gpghome is None:
        # default: the application's persistent keyring
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )
    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None
    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        # default: the application's persistent keyring
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key
        res = gpg.delete_keys( [key_id], secret=True )
    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False
    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )
    # use a throwaway keyring so the fetched key never lands in an app keyring
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        # exactly one key must have been received
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    # re-export as ASCII-armored text
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key
    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # import into a throwaway keyring just to read the fingerprint
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )
    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Check that serialized key data matches an expected key ID.

    @key_id may be a full fingerprint or a suffix of one (at least
    16 hex characters), optionally containing spaces.

    Return True if the key's fingerprint matches.
    Return False otherwise.
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize: strip spaces, upper-case
    sanitized_key_id = "".join( key_id.upper().split(" ") )

    if len(sanitized_key_id) < 16:
        # 64 bits minimum to be considered a secure identifier
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    if fingerprint == sanitized_key_id or fingerprint.endswith( sanitized_key_id ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Export a key from an application keyring as ASCII-armored text.

    Set @include_private to also export the secret key material.
    Asserts that the key exists.
    """
    assert is_valid_appname(appname)

    config_dir = get_config_dir( config_dir )
    keyring_dir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keyring_dir )

    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keyring_dir))

    # assert (rather than return None) so callers fail fast on a missing key
    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    (entries also carry 'keyName' when the account has one)
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')
    # extract only the 'pgp' service accounts
    ret = []
    for account in accounts:
        if account['service'] != 'pgp':
            continue
        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }
        # key name is optional on an account
        if 'keyName' in account.keys():
            info['keyName'] = account['keyName']
        ret.append(info)
    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: deliberately disabled -- everything after the raise below is
    unreachable until list_mutable_data is available again.
    """
    raise Exception("BROKEN; depends on list_mutable_data")
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    key_info = []
    key_prefix = "gpg.%s." % appname
    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])
    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })
    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])
    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })
    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.
    Return the key data on success. If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False
    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        # fetch over urllib2 (iks:// URLs go through GPG instead, below)
        opener = None
        key_data = None
        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            # NOTE(review): config_dir may still be None here, which would
            # crash os.path.join -- confirm callers always pass config_dir
            # for blockstack:// URLs.
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.

    If @key_url is not given, the key will be replicated to @key_server
    (when @use_key_server) and to either immutable storage (when
    @immutable) or mutable storage; otherwise the given URL is linked
    directly.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)
    if key_server is None:
        key_server = DEFAULT_KEY_SERVER
    if gpghome is None:
        # NOTE(review): get_default_gpg_home() unconditionally raises, so
        # callers are expected to always pass gpghome -- confirm.
        gpghome = get_default_gpg_home()
    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}
    if key_url is None:
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to the key server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # gnupg reports send_keys errors via res.data
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # fixed typo in the original message ("repliate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}
        key_data = gpg.export_keys( [key_id] )
        if immutable:
            # replicate to immutable storage (updates the zonefile)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']
            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
        else:
            # replicate to mutable storage, named after the key (or its ID)
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id
            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
    # record the key in the profile's 'pgp' accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res
    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG from a blockchain ID's global account.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res
    removed_accounts = res['removed']
    errors = []
    # blow away all state
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue
        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete the replicated key data as well (best-effort;
            # failures are collected rather than fatal)
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue
    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors
    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.
    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        # NOTE(review): get_default_gpg_home() unconditionally raises --
        # confirm callers always pass gpghome.
        gpghome = get_default_gpg_home()
    # generate in a throwaway keyring first
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint
    # export_keys without secret=True -- presumably only the public key
    # lands in key_data here; verify against python-gnupg semantics
    key_data = gpg.export_keys( [key_id] )
    assert key_data
    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        # NOTE(review): get_default_gpg_home() unconditionally raises --
        # confirm callers always pass gpghome.
        gpghome = get_default_gpg_home()
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}
    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}
    # find the one with this key name (or, if given, this key ID)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}
    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}
    # fall back to the default key server when the account has no URL
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)
    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}
    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }
    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.
    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error
    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.
    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    try:
        # keep the (possibly private) key locally in the app keyring
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}
    # get public key...
    assert is_valid_appname(appname)
    try:
        # only the public portion is replicated remotely
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error
    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    # stored as a single-entry {fq_key_name: key data} dict
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash (best-effort; remote delete already succeeded)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
    # generate in a throwaway keyring first
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    # export with secret material so gpg_app_put_key can stash the private key
    key_data = gpg.export_keys( [key_id], secret=True )
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if immutable:
        # try immutable (addressed by hash)
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        # try mutable (addressed by version)
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )
    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}
    if key_id is None:
        # derive the fingerprint from the fetched data
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )
    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    # do the signature (detached)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    shutil.rmtree(tmpdir)
    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name'; ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # stash detached signature to a temp file for gpg
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )
    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass
    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # NOTE(review): error message says "decrypt" but this is a
        # verification failure -- likely copy-paste from gpg_decrypt
        return {'error': 'Failed to decrypt data'}
    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name'; ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our key (private material, needed to sign)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption (signed by the sender; recipients are trusted
    # explicitly since the throwaway keyring has no trust DB)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    try:
        # our private key, needed to actually decrypt
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_app_create_key | python | def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
if config_dir is None:
config_dir = get_config_dir()
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
if proxy is None:
proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=keydir )
log.debug("Generating GPG key (this may take a while)")
key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
key_res = gpg.gen_key( key_input )
key_id = key_res.fingerprint
key_data = gpg.export_keys( [key_id], secret=True )
shutil.rmtree(keydir)
# propagate to blockstack
add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
return add_res | Create a new application GPG key.
Use good defaults (RSA-4096)
Stash it to the app-specific keyring locally.
Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L827-L860 | [
"def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):\n \"\"\"\n Put an application GPG key.\n Stash the private key locally to an app-specific keyring.\n\n Return {'status': True, 'key_url': ..., 'key_data': ...} on... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
    Blockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.

    If no directory is given, fall back to the 'dir' setting
    from the client configuration file.
    """
    if config_dir is not None:
        return config_dir

    conf = get_config()
    return conf['dir']
def is_valid_appname(appname):
    """
    Check whether an application name is URL-safe.

    Allowed characters are the RFC 3986 unreserved set minus '.',
    i.e. letters, digits, '-', '_' and '~'.
    """
    return re.match('^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Check whether a key name is URL-safe (same rules as app names).
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Create (or re-permission) the GPG keyring directory for an application.

    The directory lives under <config_dir>/gpgkeys/<appname> and is kept
    owner-only, since it may hold private key material.

    Return the path to the keyring directory.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )
    # 0o700 (was py2-only literal 0700): owner-only, private keys live here
    if not os.path.exists(path):
        os.makedirs( path, 0o700 )
    else:
        os.chmod( path, 0o700 )
    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Compute the path to an application's GPG keyring directory.
    Does not create it.
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the default GPG keyring directory (~/.gnupg).

    Deliberately disabled: callers are expected to use per-application
    keyrings instead (see get_gpg_home).

    Raises:
        Exception: always.
    """
    # Fixed typo in the original message ("Should ever be called"),
    # and removed the unreachable return that followed the raise.
    raise Exception("Should never be called")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.

    Return the path to the new (empty) temporary directory.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # 0o700 (was py2-only literal 0700): temp keyrings can hold private keys
        os.makedirs( tmppath, 0o700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID.

    Return the key ID (fingerprint) on success.
    Return None on error.
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError as e:
        # modernized from py2-only "except AssertionError, e"
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a key from our local app keyring.
    Handles both public and private keys (private keys must be
    deleted with secret=True).

    Return True on success.
    Return False on error.
    """
    assert is_valid_appname(appname)

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; delete the secret part too
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError as e:
        # modernized from py2-only "except AssertionError, e"
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings (a throwaway keyring is used).

    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )

    fingerprint = None
    try:
        # exactly one key must have come back
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError as e:
        # modernized from py2-only "except AssertionError, e"
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the fingerprint of a serialized key by importing it into a
    throwaway keyring.

    Return the fingerprint on success.
    Return None on error.
    """
    key_data = str(key_data)

    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )

    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError as e:
        # modernized from py2-only "except AssertionError, e"
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Return True on success
    Return False on error
    """
    config_dir = get_config_dir( config_dir )

    # normalize the requested ID: upper-case, no spaces
    wanted = key_id.upper().replace(" ", "")
    if len(wanted) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( str(key_data), config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept either an exact fingerprint or a trailing-suffix (short ID) match
    if fingerprint == wanted or fingerprint.endswith( wanted ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID.
    Asserts that the key exists in the app's keyring.
    """
    assert is_valid_appname(appname)
    keydir = get_gpg_home( appname, config_dir=get_config_dir( config_dir ) )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile.

    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data}
    (plus 'keyName' when the account carries one) on success.
    Return {'error': ...} on failure.
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    # keep only the PGP accounts, extracting the fields we expose
    ret = []
    for account in accounts['accounts']:
        if account['service'] != 'pgp':
            continue

        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }
        if 'keyName' in account:
            info['keyName'] = account['keyName']

        ret.append(info)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE(review): deliberately disabled -- the unconditional raise below
    makes everything after it dead code, kept for reference until
    list_mutable_data works again.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    # --- dead code below this line ---
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme (or is iks://), assume it's a PGP key server
    and use GPG to go get it.  The key is not accepted into any keyrings.

    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification.
    """
    dat = None
    from_blockstack = False

    # make sure it's a parseable URL
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None

            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()
        except Exception as e:
            # modernized from py2-only "except Exception, e"
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol,
        # using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol: fetch from a PGP keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP
    key server and to either immutable (if @immutable) or mutable data.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no pre-existing location: replicate the key data ourselves
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to the default key server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # fixed typo in the error message (was "repliate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the key's location in the profile's accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.

    Return {'status': True, ...} on success.  May include 'delete_errors'
    if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all stored key data referenced by the removed accounts
    for account in removed_accounts:
        # 'in' instead of py2-only dict.has_key()
        if 'contentUrl' not in account:
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError as e:
                # modernized from py2-only "except AssertionError, e"
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception as e:
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key with good defaults (4096-bit RSA/RSA).
    Note that without rngd running, generation may take a while.
    The new key is stashed locally and added to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate into a throwaway keyring first
    scratch_keyring = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=scratch_keyring )

    log.debug("Generating GPG key (this may take a while)")
    gen_params = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    generated = gpg.gen_key( gen_params )
    assert generated

    fingerprint = generated.fingerprint
    exported = gpg.export_keys( [fingerprint] )
    assert exported

    # save the key itself to the global keyring
    stashed = gpg_stash_key( keyname, exported, gpghome=gpghome )
    assert stashed, "Failed to store key '%s' (%s)" % (keyname, fingerprint)

    shutil.rmtree(scratch_keyring)

    # propagate to blockstack
    return gpg_profile_put_key( blockchain_id, fingerprint, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get a profile key by name (or key ID).

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    # narrow down to PGP accounts...
    all_gpg_accounts = [a for a in accounts if a['service'] == 'pgp']
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # ...then to the one matching the requested name (or key ID)
    gpg_accounts = [ga for ga in all_gpg_accounts
                    if ('keyName' in ga and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id)]

    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    match = gpg_accounts[0]
    key_url = match.get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=match['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    return {
        'status': True,
        'key_id': match['identifier'],
        'key_data': key_data
    }
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.

    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update
    the zonefile with the key hash).  This is a time-consuming operation (on the
    order of an hour), and you will get back the transaction ID on a successful
    execution.  It is up to you to wait until the transaction is confirmed before
    using the key.  Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception as e:
        # modernized from py2-only "except Exception, e"
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    # (dropped a redundant duplicate is_valid_appname assert here)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not
    profile.  The delete may take on the order of an hour to complete on the
    blockchain.  A transaction ID will be returned to you on successful
    deletion, and it will be up to you to wait for the transaction to get
    confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    # fetch the public key first, so we can learn its fingerprint and
    # remove the matching private key from the local keyring afterwards
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # stored as a single-entry {name: key data} mapping
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete from blockstack storage
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key; failure here is only a warning in
    # production, but fatal under BLOCKSTACK_TEST
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.

    Return {'status': True, 'key_id': ..., 'key': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # immutable keys live behind a hash; mutable keys behind a version
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's private key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception as e:
        # modernized from py2-only "except Exception, e"
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature (detached)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a temp file for gpg
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        # best-effort cleanup of the signature temp file
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # fixed copy-paste error message (was 'Failed to decrypt data')
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt (and sign) a stream of data for a set of recipient keys.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our private key, for signing
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception as e:
        # modernized from py2-only "except Exception, e"
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )

    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info for a private key we own.

    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key:
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error

    (Fixed the original docstring, which carried a stray duplicate
    "Return {'status': True, 'sig': ...}" line copied from gpg_sign.)
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # ingest our private key
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )

    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_app_get_key | python | def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
if config_dir is None:
config_dir = get_config_dir()
fq_key_name = "gpg.%s.%s" % (appname, keyname)
key_url = None
if immutable:
# try immutable
key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
else:
# try mutable
key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )
log.debug("fetch '%s'" % key_url)
key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
if key_data is None:
return {'error': 'Failed to fetch key'}
if key_id is None:
key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )
ret = {
'status': True,
'key_id': key_id,
'key_data': key_data,
'app_name': appname
}
return ret | Get an app-specific GPG key.
Return {'status': True, 'key_id': ..., 'key': ..., 'app_name': ...} on success
return {'error': ...} on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L863-L902 | [
"def gpg_fetch_key( key_url, key_id=None, config_dir=None ):\n \"\"\"\n Fetch a GPG public key from the given URL.\n Supports anything urllib2 supports.\n If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.\n The key is not accepted into any keyrings.\n Return th... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Return the configuration directory, loading it from the client
    configuration only when the caller did not supply one.
    """
    if config_dir is None:
        config_dir = get_config()['dir']
    return config_dir
def is_valid_appname(appname):
    """
    Is this application name URL-safe?

    Only the RFC 3986 unreserved characters, excluding '.', are allowed
    (letters, digits, '-', '_', '~').
    """
    return bool(re.match('^[a-zA-Z0-9-_~]+$', appname))
def is_valid_keyname(keyname):
    """
    Is this key name URL-safe?  Key names follow the same rules as
    application names.
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Create (or re-secure) the GPG keyring directory for an application.

    The directory lives under <config_dir>/gpgkeys/<appname> and is kept
    owner-only (0700) whether freshly created or already present.

    Return the directory path.
    """
    assert is_valid_appname(appname)
    keyring_dir = os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
    if os.path.exists(keyring_dir):
        # already there; just re-assert strict permissions
        os.chmod( keyring_dir, 0o700 )
    else:
        os.makedirs( keyring_dir, 0o700 )
    return keyring_dir
def get_gpg_home( appname, config_dir=None ):
    """
    Resolve the GPG keyring directory path for an application
    (<config_dir>/gpgkeys/<appname>).  Does not create the directory.

    Return the path.
    """
    assert is_valid_appname(appname)
    base_dir = get_config_dir( config_dir )
    return os.path.join( base_dir, "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Get the user's default GPG home directory (~/.gnupg).

    Deliberately disabled: this module must only use per-application
    keyrings, so any call raises an Exception.
    """
    # bug fix: the message previously read "Should ever be called"
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")   # unreachable, kept for reference
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Create a throwaway GPG home directory for keys that should not be
    persisted to any application keyring.

    The directory is created under <config_dir>/tmp with the given name
    prefix.  The caller is responsible for removing it when done.

    Return the new directory path.
    """
    if prefix is None:
        prefix = "tmp"

    tmp_root = os.path.join( get_config_dir( config_dir ), "tmp" )
    if not os.path.exists( tmp_root ):
        os.makedirs( tmp_root, 0o700 )

    return tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmp_root )
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID.

    @appname must be URL-safe (see is_valid_appname).
    @key_bin is the serialized key material (coerced to str).
    If @gpghome is given, use it as the keyring directory; otherwise
    use (and create if needed) the app-specific keyring.

    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )
    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None
    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring.

    @appname must be URL-safe (see is_valid_appname).
    If @gpghome is given, use it as the keyring directory; otherwise
    use the app-specific keyring.

    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; delete the secret part as well
        res = gpg.delete_keys( [key_id], secret=True )
    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False
    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings (a throwaway GPG home is used
    and removed afterwards).

    Return the ASCII-armored key data on success
    Return None on error
    """
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        # exactly one key must have been received
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the fingerprint of a given serialized key, by importing it into
    a throwaway keyring (removed afterwards).

    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )
    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Check that serialized key data has the expected key ID.

    The given ID is upper-cased and stripped of spaces, and must either
    equal the key's fingerprint or be a suffix of it (at least 16
    characters long, to be meaningful).

    Return True if the key matches
    Return False otherwise
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    expected = "".join( key_id.upper().split(" ") )
    if len(expected) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    if fingerprint == expected or fingerprint.endswith( expected ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key from an app-specific keyring, given its ID.

    If @include_private is True, export the secret key instead of the
    public key.

    Return the key data on success.
    Raises AssertionError if the key could not be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all PGP keys registered in a blockchain ID's profile accounts.

    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data}
    dicts (plus 'keyName' when the account carries one) on success
    Return {'error': ...} on failure
    Raise on error
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    # keep only the PGP accounts
    listing = []
    for account in accounts.pop('accounts'):
        if account['service'] != 'pgp':
            continue

        entry = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }
        if 'keyName' in account:
            entry['keyName'] = account['keyName']

        listing.append(entry)

    return listing
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.

    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: currently disabled -- it depends on list_mutable_data, which is
    unavailable.  Everything past the raise below is unreachable.
    """
    raise Exception("BROKEN; depends on list_mutable_data")
    # --- unreachable below this point ---
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    key_info = []
    key_prefix = "gpg.%s." % appname
    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])
    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })
    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])
    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })
    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs.
    If the URL has no scheme (or is iks://), then assume it's a PGP key
    server, and use GPG to go get it.
    The key is not accepted into any keyrings.

    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False
    # make sure it's at least parseable as a URL
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None
        # handle blockstack:// URLs (protocol-verified data)
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol (or bare host): fetch from a PGP keyserver
        key_server = key_url
        if '://' in key_server:
            # strip the iks:// scheme down to the host
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.

    If @key_url is not given, the key will be replicated to the default
    PGP key server (when @use_key_server) and to either immutable (if
    @immutable) or mutable data, and the key URL will be derived from
    where it was stored.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        # NOTE(review): get_default_gpg_home() unconditionally raises;
        # callers are expected to supply gpghome explicitly.
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to the default key server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # bug fix: error message typo ("repliate" -> "replicate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage (hash goes into the zonefile)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the key in the profile's accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.

    Return {'status': True, ...} on success.  May include 'delete_errors'
    if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all replicated key data as well
    for account in removed_accounts:
        # idiom fix: dict.has_key() is Python-2-only; use 'in' instead
        if 'contentUrl' not in account:
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # the key data is hosted with blockstack; delete it too
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError as e:
                # assertion failures indicate a bug; do not mask them
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception as e:
                # best-effort: record the failure and keep deleting the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA).
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        # NOTE(review): get_default_gpg_home() unconditionally raises;
        # callers are expected to supply gpghome explicitly.
        gpghome = get_default_gpg_home()
    # generate the key in a throwaway GPG home
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data
    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get a profile GPG key by name (or by key ID).

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        # NOTE(review): get_default_gpg_home() unconditionally raises;
        # callers are expected to supply gpghome explicitly.
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name (or matching key ID)
    # idiom fix: dict.has_key() is Python-2-only; use 'in' instead
    gpg_accounts = filter( lambda ga: ('keyName' in ga and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )

    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # fall back to the default key server if the account has no URL
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key, verifying it against the account's key ID
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the key (including any private part) locally to an app-specific
    keyring, and publish the public half under the blockchain ID.

    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update
    the zonefile with the key hash).  This is a time-consuming operation (on the
    order of an hour), and you will get back the transaction ID on a successful
    execution.  It is up to you to wait until the transaction is confirmed before
    using the key.  Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    # stash the key to the app keyring
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception as e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # export the public key for replication
    # (a redundant duplicate is_valid_appname assertion was removed here)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate
        return {'error': 'Failed to load key'}

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        # publish to mutable storage
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        # publish to immutable storage (zonefile update)
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    Return {'status': True, ...} on success (possibly with a 'warning' if the
    local private key could not be removed)
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not
    profile.  The delete may take on the order of an hour to complete on the
    blockchain.  A transaction ID will be returned to you on successful
    deletion, and it will be up to you to wait for the transaction to get
    confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    # the stored record is a single {name: key data} pair
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete the replicated public key
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash the local private key (best-effort)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096).
    Stash it to the app-specific keyring locally.

    Note that without rngd running, key generation may take a while.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate the key in a throwaway GPG home
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )

    # consistency fix: verify generation/export succeeded, as
    # gpg_profile_create_key does, instead of silently continuing
    assert key_res

    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id], secret=True )
    assert key_data

    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk with the sender's stashed private key.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    @passphrase unlocks the private key, if it is protected.

    Return {'status': True, 'sig': detached signature data} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the signing key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    # make the detached signature
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    shutil.rmtree(tmpdir)
    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify that a file on disk was signed by the given sender.

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    @sigdata is the detached signature data.

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # write the detached signature to a temporary file
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except OSError:
        # best-effort cleanup of the signature tempfile
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # bug fix: this message previously read "Failed to decrypt data",
        # copied from gpg_decrypt; this function verifies, not decrypts
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt (and sign) a stream of data for a set of recipient keys.

    @sender_key_info and each member of @recipient_key_infos should be
    a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    The ciphertext is written to @path_out.

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the recipients' public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our signing key
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption, signed by the sender
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info for a private key we own,
    checking it against the sender's public key.

    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key:
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    The plaintext is written to @path_out.

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # pull in our own private key
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_sign | python | def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
try:
sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
except Exception, e:
log.exception(e)
shutil.rmtree(tmpdir)
return {'error': 'No such private key'}
res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load sender private key'}
# do the signature
gpg = gnupg.GPG( homedir=tmpdir )
res = None
with open(path_to_sign, "r") as fd_in:
res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
shutil.rmtree(tmpdir)
if not res:
log.debug("sign_file error: %s" % res.__dict__)
log.debug("signer: %s" % sender_key_info['key_id'])
return {'error': 'Failed to sign data'}
return {'status': True, 'sig': res.data } | Sign a file on disk.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True, 'sig': ...} on success
Return {'error': ...} on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L905-L950 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def make_gpg_tmphome( prefix=None, config_dir=None ):\n \"\"\"\n Make a t... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory.

    When no directory is given, consult the client configuration's
    'dir' setting.
    """
    if config_dir is None:
        config_dir = get_config()['dir']
    return config_dir
def is_valid_appname(appname):
    """
    Decide whether an application name is URL-safe.

    Allowed characters are the RFC 3986 unreserved set, minus '.'.
    """
    # one or more of [a-zA-Z0-9-_~], nothing else
    return re.match(r'^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Decide whether a key name is URL-safe.

    Key names follow the same character policy as application names
    (RFC 3986 unreserved characters, minus '.').
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Make the GPG keyring dir for a particular application,
    creating it if it does not exist.
    The directory is forced to owner-only (0700) permissions either way.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    if not os.path.exists(path):
        # 0700: GPG requires its homedir to be private to the owner
        os.makedirs( path, 0700 )
    else:
        # already exists; make sure permissions are still restrictive
        os.chmod( path, 0700 )

    return path
def get_gpg_home( appname, config_dir=None ):
    """
    Get the GPG keyring directory for a particular application.
    Unlike make_gpg_home(), this does NOT create the directory.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )
    return path
def get_default_gpg_home( config_dir=None ):
    """
    Get the default (user-global) GPG keyring directory.

    This code path is intentionally disabled: callers are expected to
    pass an explicit GPG home instead, so this always raises Exception.
    """
    # fixed typo in the guard message (was "Should ever be called")
    raise Exception("Should never be called")
    # unreachable; kept to document the value this would otherwise return
    return os.path.expanduser("~/.gnupg")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are not
    going to be stored to the application's keyring.
    The caller is responsible for removing it (shutil.rmtree).
    Return the path to the new directory.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # 0700: GPG requires its homedir to be private to the owner
        os.makedirs( tmppath, 0700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring (or to @gpghome if given).
    Does NOT put it into a blockchain ID.
    Exactly one key must be contained in @key_bin.
    Return the key's fingerprint on success.
    Return None on error.
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    if gpghome is None:
        # default to the app-specific keyring, creating it if needed
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # we expect to have imported exactly one key
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a key locally from our local app keyring (or @gpghome if given).
    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; GPG demands the secret part go first
        # NOTE(review): only the secret-key delete result is checked after
        # this; confirm against gnupg whether the public half also needs a
        # second delete_keys() call.
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings; a throwaway homedir is used
    and removed before returning.
    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        # exactly one key should have come back
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the fingerprint of a given serialized key by importing it
    into a throwaway keyring (removed before returning).
    Return the fingerprint on success.
    Return None on error.
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )

    try:
        # exactly one key should have been given
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    @key_id may be a full fingerprint or a (>= 16 hex char) suffix of one;
    spaces and case are ignored.
    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize: upper-case and strip spaces, to match GPG fingerprint form
    sanitized_key_id = "".join( key_id.upper().split(" ") )

    if len(sanitized_key_id) < 16:
        # short IDs are trivially forgeable; refuse them
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept an exact match, or a sufficiently-long fingerprint suffix
    if sanitized_key_id != fingerprint and not fingerprint.endswith( sanitized_key_id ):
        log.debug("Imported key does not match the given ID")
        return False

    else:
        return True
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key from the app-specific keyring, given its ID.
    If @include_private, export the secret key instead of the public one.
    Raises AssertionError if the key cannot be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile (accounts with service == 'pgp').
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data,
    ['keyName': key name]} on success.
    Return {'error': ...} on failure.
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        # propagate the error dict as-is
        return accounts

    accounts = accounts.pop('accounts')

    # extract the PGP accounts only
    ret = []
    for account in accounts:
        if account['service'] != 'pgp':
            continue

        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }

        # keyName is optional on an account record
        if 'keyName' in account.keys():
            info['keyName'] = account['keyName']

        ret.append(info)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: this function is disabled -- it raises immediately because it
    depends on list_mutable_data, which is unavailable.  Everything past
    the raise is dead code kept for reference.
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs.
    If the URL has no scheme (or is iks://), then assume it's a PGP key
    server, and use GPG to go get it.
    The key is not accepted into any keyrings.
    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification.
    """
    dat = None
    from_blockstack = False

    # make sure it's a parseable URL at all
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    # iks:// is the key-server protocol; it is handled in the else-branch
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None

        # handle blockstack:// URLs via the custom protocol handler
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'} -- exactly one entry
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])

            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()

        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)

            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol,
        # using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol (or bare hostname): fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            # strip the scheme; gpg wants just the host
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If @key_url is not given, the key will be replicated to the default PGP
    key server (if @use_key_server) and to either immutable (if @immutable)
    or mutable blockstack data, and a URL to that copy is generated.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        # keyName is an optional account field
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no existing copy of the key; replicate it ourselves
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # non-empty response data indicates a GPG error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # fixed typo in this message (was "repliate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage (updates the zonefile, so a
            # blockchain transaction is involved)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage, named by key_name if given
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the key as a 'pgp' account on the profile
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account, and delete the
    replicated key data for any removed account whose contentUrl is a
    blockstack:// URL.
    Do NOT remove it from the local keyring.
    Return {'status': True, ...} on success.  May include 'delete_errors'
    if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all replicated key state
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # we replicated this copy; delete it
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError, e:
                # assertion failures indicate a bug; re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception, e:
                # best-effort: record and keep going
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA).
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.
    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate in a throwaway keyring
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint

    # NOTE(review): only the public key is exported before the throwaway
    # keyring is removed below -- confirm whether the secret key is meant
    # to survive, since rmtree(keydir) discards it.
    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get a profile key by name (or by @key_id, if given).
    Exactly one matching 'pgp' account must exist on the profile.
    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name (or this key ID)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # fall back to the default key server if there's no contentUrl
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key, verifying it against the account's fingerprint
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the given key (private material included, if present) locally to
    an app-specific keyring, and replicate the public key to blockstack.
    Return {'status': True, 'key_url': ..., 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key... (re-export so only the public part is replicated)
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    # fully-qualified storage name: gpg.<app>.<key>
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    # fetch the public key first: we need its fingerprint so we can
    # unstash the matching local key afterwards
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete the replicated copy
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local copy; best-effort outside of testing
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096).
    Stash it (including the private key) to the app-specific keyring locally.
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate in a throwaway keyring
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    # secret=True: keep the private key, since the tmp keyring goes away
    key_data = gpg.export_keys( [key_id], secret=True )

    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG public key.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if immutable:
        # try immutable (addressed by content hash)
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )

    else:
        # try mutable (addressed by version)
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # no expected fingerprint given; derive it from the fetched key
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }

    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk, producing a detached signature.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # load the sender's private key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature (detached, so the file itself is unchanged)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sigdata is the detached signature produced by gpg_sign().
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash detached signature to a temp file, synced to disk
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # fixed copy-pasted message (previously said "decrypt" on the verify path)
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of recipient keys, signing it
    with the sender's private key.
    @sender_key_info and each entry of @recipient_key_infos should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest all recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our private key (needed for signing)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption; always_trust because the tmp keyring has no trust db
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )

    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key (for signature checking) into a
    # throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # our private key is what actually decrypts
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption; always_trust because the tmp keyring has no trust db
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )

    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_verify | python | def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
# stash detached signature
fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
f = os.fdopen(fd, "w")
f.write( sigdata )
f.flush()
os.fsync(f.fileno())
f.close()
# verify
gpg = gnupg.GPG( homedir=tmpdir )
with open(path, "r") as fd_in:
res = gpg.verify_file( fd_in, data_filename=path_to_verify )
shutil.rmtree(tmpdir)
try:
os.unlink(path)
except:
pass
if not res:
log.debug("verify_file error: %s" % res.__dict__)
return {'error': 'Failed to decrypt data'}
log.debug("verification succeeded from keys in %s" % config_dir)
return {'status': True} | Verify a file on disk was signed by the given sender.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True} on success
Return {'error': ...} on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L953-L1001 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def make_gpg_tmphome( prefix=None, config_dir=None ):\n \"\"\"\n Make a t... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory.

    If a directory is given, it is returned unchanged; otherwise the
    'dir' entry of the client configuration is used.
    """
    if config_dir is not None:
        return config_dir

    return get_config()['dir']
def is_valid_appname(appname):
    """
    Check that an application name is URL-safe.

    Allowed characters are the RFC 3986 "unreserved" set minus '.':
    letters, digits, '-', '_' and '~'.  The name must be non-empty.
    """
    return re.match(r'^[a-zA-Z0-9-_~]+$', appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe (same character rules as app names).
    """
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Create (if needed) the GPG keyring directory for an application,
    forcing its permissions to owner-only (0700) either way.

    The directory lives at <config_dir>/gpgkeys/<appname>.
    Return the path to the directory.
    """
    assert is_valid_appname(appname)

    keyring_dir = os.path.join(get_config_dir(config_dir), "gpgkeys", appname)
    if os.path.exists(keyring_dir):
        os.chmod(keyring_dir, 0o700)
    else:
        os.makedirs(keyring_dir, 0o700)

    return keyring_dir
def get_gpg_home( appname, config_dir=None ):
    """
    Compute the path to an application's GPG keyring directory.
    The directory is not created if it does not already exist.
    """
    assert is_valid_appname(appname)
    return os.path.join(get_config_dir(config_dir), "gpgkeys", appname)
def get_default_gpg_home( config_dir=None ):
    """
    Get the user's default GPG keyring directory (~/.gnupg).

    NOTE: this code path is intentionally disabled -- callers are
    expected to pass an explicit GPG home rather than rely on the
    user's personal keyring.

    Raises:
        Exception: always; this function must not be called.
    """
    # Bug fix: the original message read "Should ever be called",
    # which states the opposite of the intent.
    raise Exception("Should never be called")
    return os.path.expanduser("~/.gnupg")   # unreachable; documents the original default
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Create a fresh temporary directory (under <config_dir>/tmp) to hold
    transient GPG keyrings that will not be stored to an application's
    keyring.  The caller is responsible for removing it when done.
    Return the path to the new directory.
    """
    if prefix is None:
        prefix = "tmp"

    parent_dir = os.path.join(get_config_dir(config_dir), "tmp")
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir, 0o700)

    return tempfile.mkdtemp(prefix=("%s-" % prefix), dir=parent_dir)
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Import a serialized key into the local keyring for an application.

    Does NOT put it into a blockchain ID.
    Return the key's fingerprint on success.
    Return None on error.
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    # default to the application's own keyring directory
    if gpghome is not None:
        keydir = gpghome
    else:
        keydir = make_gpg_home(appname, config_dir=get_config_dir(config_dir))

    gpg = gnupg.GPG(homedir=keydir)
    res = gpg.import_keys(key_bin)

    try:
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError as e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Delete a public key from the local keyring for an application.
    If the key turns out to be a private key, its secret half is
    deleted instead.
    Return True on success.
    Return False on error.
    """
    assert is_valid_appname(appname)

    if gpghome is not None:
        keydir = gpghome
    else:
        keydir = get_gpg_home(appname, config_dir=get_config_dir(config_dir))

    gpg = gnupg.GPG(homedir=keydir)
    res = gpg.delete_keys([key_id])
    if res.status == 'Must delete secret key first':
        # this is a private key; delete the secret half
        res = gpg.delete_keys([key_id], secret=True)

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError as e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a key from a PGP key server into a throwaway keyring,
    export it, and remove the keyring.  Nothing is imported into any
    long-lived keyring.
    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir(config_dir)
    tmpdir = make_gpg_tmphome(prefix="download", config_dir=config_dir)
    gpg = gnupg.GPG(homedir=tmpdir)

    recvdat = gpg.recv_keys(key_server, key_id)
    try:
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
    except AssertionError as e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree(tmpdir)
        return None

    keydat = gpg.export_keys([recvdat.fingerprints[0]])
    shutil.rmtree(tmpdir)
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Compute the fingerprint of a serialized key by importing it into
    a throwaway keyring (removed afterwards).
    Return the fingerprint string on success.
    Return None on error.
    """
    key_data = str(key_data)
    tmpdir = make_gpg_tmphome(prefix="key_id-", config_dir=get_config_dir(config_dir))
    res = gnupg.GPG(homedir=tmpdir).import_keys(key_data)

    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        return res.fingerprints[0]
    except AssertionError as e:
        log.exception(e)
        return None
    finally:
        # always clean up the throwaway keyring
        shutil.rmtree(tmpdir)
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a serialized key, when imported, matches the given key ID.
    The ID may be a full fingerprint (possibly space-separated) or a
    suffix of the fingerprint that is at least 16 characters long.
    Return True on success.
    Return False on error.
    """
    key_data = str(key_data)
    config_dir = get_config_dir(config_dir)

    # normalize: uppercase, strip spaces
    wanted = "".join(key_id.upper().split(" "))
    if len(wanted) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint(key_data, config_dir=config_dir)
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    if fingerprint == wanted or fingerprint.endswith(wanted):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Serialize a key from an application keyring to ASCII armor.
    If include_private is True, the secret key is exported instead
    of the public one.
    Raises (AssertionError) if the key cannot be exported.
    """
    assert is_valid_appname(appname)

    keydir = get_gpg_home(appname, config_dir=get_config_dir(config_dir))
    gpg = gnupg.GPG(homedir=keydir)
    keydat = gpg.export_keys([key_id], secret=include_private)
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all PGP keys registered in a user's profile accounts.
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data,
    ['keyName': name]} on success.
    Return {'error': ...} on failure.
    """
    config_dir = get_config_dir( config_dir )
    if proxy is None:
        client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    # keep only PGP accounts
    pgp_keys = []
    for acct in accounts.pop('accounts'):
        if acct['service'] != 'pgp':
            continue

        entry = {
            "identifier": acct['identifier'],
            "contentUrl": acct['contentUrl']
        }
        if 'keyName' in acct:
            entry['keyName'] = acct['keyName']

        pgp_keys.append(entry)

    return pgp_keys
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.

    NOTE: this function is intentionally disabled -- it raises
    immediately because it depends on list_mutable_data, which is
    unavailable.  The code below is kept for reference only.

    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error
    """
    raise Exception("BROKEN; depends on list_mutable_data")
    # --- unreachable from here on ---
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    key_info = []
    key_prefix = "gpg.%s." % appname
    # immutable data key listing (look for data IDs that start with 'gpg.<appname>.')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])
    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })
    # mutable data key listing (look for data IDs that start with 'gpg.<appname>.')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])
    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })
    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs.
    If the URL has no scheme (or is iks://), then assume it's a PGP
    key server, and use GPG to go get it.
    The key is not accepted into any keyrings.
    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False
    # make sure it's a parseable URL
    # NOTE(review): bare except; urlparse rarely raises, so this is best-effort
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None
        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol (or bare hostname): fetch from a PGP keyserver instead
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.

    If @key_url is not given, the key is read from the local keyring
    (@gpghome), replicated to the default PGP key server (when
    @use_key_server), and stored to either immutable (if @immutable)
    or mutable profile storage.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        # NOTE(review): get_default_gpg_home() currently raises
        # unconditionally; callers must pass gpghome explicitly.
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # bug fix: error message previously read "repliate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the key in the profile's accounts list
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.

    Also deletes replicated key data for any removed account whose
    contentUrl points into blockstack storage (blockstack:// URLs).

    Return {'status': True, ...} on success.  May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all replicated key state for the removed accounts
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                # assertion failures indicate a bug; do not mask them
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                # best-effort: record and keep deleting the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA).
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        # NOTE(review): get_default_gpg_home() currently raises
        # unconditionally; callers must pass gpghome explicitly.
        gpghome = get_default_gpg_home()

    # generate the key pair in a throwaway keyring
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint

    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get a profile key by name (or, if key_id is given, by fingerprint).

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        # NOTE(review): get_default_gpg_home() currently raises
        # unconditionally; callers must pass gpghome explicitly.
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name (or the given key ID)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # if the account does not say where the key lives, fall back to the
    # default key server (gpg_fetch_key treats a non-URL as a key server)
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the (possibly private) key locally to an app-specific keyring,
    and publish the public half under the blockchain ID.
    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution.  It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    # stash the key into the application's local keyring first
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        # NOTE(review): bare except -- swallows all export errors
        return {'error': 'Failed to load key'}

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    # replicate the public key to mutable (profile) or immutable (zonefile) storage
    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.
    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile.  The delete may take
    on the order of an hour to complete on the blockchain.  A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    # fetch the doomed key first, so we can learn its fingerprint
    # and later remove it from the local keyring
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key (best-effort; the on-chain delete already succeeded)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Generate a fresh RSA-4096 GPG key pair for an application in a
    throwaway keyring, then stash it locally and publish it under the
    blockchain ID (via gpg_app_put_key).
    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if proxy is None:
        client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    keydir = make_gpg_tmphome("create-app-", config_dir=config_dir)
    gpg = gnupg.GPG(homedir=keydir)

    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input(key_type="RSA", name_email="%s/%s" % (blockchain_id, appname), key_length=4096, name_real=keyname)
    key_res = gpg.gen_key(key_input)
    key_id = key_res.fingerprint

    # export the full (secret) key pair, then discard the throwaway keyring
    key_data = gpg.export_keys([key_id], secret=True)
    shutil.rmtree(keydir)

    # propagate to blockstack
    return gpg_app_put_key(blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir)
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Fetch an app-specific GPG public key from a blockchain ID's
    profile (mutable) or zonefile (immutable) storage.
    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)

    # pick the URL scheme matching where the key was stored
    if immutable:
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    # derive the fingerprint if the caller did not supply one
    if key_id is None:
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    return {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk with an app-specific private key, producing
    a detached signature.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the (detached) signature
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None

    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of recipient keys, signing it
    with the sender's private key.
    @sender_key_info and each recipient entry should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.
    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }
    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # copy over our private key
    try:
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        # NOTE(review): bare except -- swallows all export errors
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}

    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_encrypt | python | def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
for key_info in recipient_key_infos:
res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to stash key %s' % key_info['key_id']}
# copy over our key
try:
sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
except Exception, e:
log.exception(e)
shutil.rmtree(tmpdir)
return {'error': 'No such private key'}
res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load sender private key'}
recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
# do the encryption
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
shutil.rmtree(tmpdir)
if res.status != 'encryption ok':
log.debug("encrypt_file error: %s" % res.__dict__)
log.debug("recipients: %s" % recipient_key_ids)
log.debug("signer: %s" % sender_key_info['key_id'])
return {'error': 'Failed to encrypt data'}
return {'status': True} | Encrypt a stream of data for a set of keys.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name'; ...
}
Return {'status': True} on success
Return {'error': ...} on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L1004-L1054 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def make_gpg_tmphome( prefix=None, config_dir=None ):\n \"\"\"\n Make a t... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Resolve the configuration directory to use.

    If no directory is given, fall back to the 'dir' entry of the
    loaded client configuration.
    """
    if config_dir is not None:
        return config_dir

    conf = get_config()
    return conf['dir']
def is_valid_appname(appname):
    """
    Check that an application name is URL-safe.

    Accepts only RFC 3986 unreserved characters minus '.':
    ASCII letters, digits, '-', '_' and '~'.  Empty names are rejected.
    """
    pattern = r'^[a-zA-Z0-9-_~]+$'
    return re.match(pattern, appname) is not None
def is_valid_keyname(keyname):
    """
    Keyname must be url-safe (same character rules as application names).
    """
    # key names share the URL-safety rules of app names
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Create (or re-permission) the GPG keyring directory for an application,
    and return its path.

    The directory lives at <config_dir>/gpgkeys/<appname> and is kept
    owner-only (mode 0700).
    """
    assert is_valid_appname(appname)
    keyring_dir = os.path.join(get_config_dir(config_dir), "gpgkeys", appname)
    if os.path.exists(keyring_dir):
        # already present; just enforce the owner-only permissions
        os.chmod(keyring_dir, 0o700)
    else:
        os.makedirs(keyring_dir, 0o700)
    return keyring_dir
def get_gpg_home( appname, config_dir=None ):
    """
    Return the path of the GPG keyring directory for an application,
    without creating it.
    """
    assert is_valid_appname(appname)
    base_dir = get_config_dir( config_dir )
    return os.path.join(base_dir, "gpgkeys", appname)
def get_default_gpg_home( config_dir=None ):
    """
    Default GPG home (historically ~/.gnupg).

    Deliberately disabled: callers must use an app-specific keyring
    instead of the user's global GPG home.

    Raises:
        Exception: always.
    """
    # Guard against accidental use of the global ~/.gnupg keyring.
    # (Original message read "Should ever be called" — fixed typo.)
    raise Exception("Should never be called")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Create a throwaway GPG home directory for keys that should not be
    written to any application keyring.  The caller is responsible for
    removing it afterwards.
    """
    if prefix is None:
        prefix = "tmp"

    tmp_root = os.path.join(get_config_dir(config_dir), "tmp")
    if not os.path.exists(tmp_root):
        # owner-only scratch area under the config directory
        os.makedirs(tmp_root, 0o700)

    return tempfile.mkdtemp(prefix="%s-" % prefix, dir=tmp_root)
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID

    @appname: application whose keyring receives the key (must be URL-safe)
    @key_bin: serialized key material (ASCII-armored or binary)
    @gpghome: optional explicit keyring directory; overrides @appname/@config_dir

    Return the key ID on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0
    if gpghome is None:
        # use (and create if needed) the app-specific keyring
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        # caller supplied an explicit keyring directory
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )
    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None
    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a key locally from our local app keyring.

    @gpghome: optional explicit keyring directory; overrides @appname/@config_dir

    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome
    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; delete the secret half first
        res = gpg.delete_keys( [key_id], secret=True )
    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False
    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.

    Return the ASCII-armored key on success
    Return None on error
    """
    config_dir = get_config_dir( config_dir )
    # use a throwaway keyring so nothing persists
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None
    try:
        # expect exactly one key back from the server
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None
    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key fingerprint of a given serialized key.

    The key is imported into a throwaway keyring purely to compute the
    fingerprint; nothing persists.

    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )
    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a serialized key, once imported, carries the given key ID.

    The ID may be a full fingerprint or a suffix of one (at least 16 hex
    digits); embedded spaces are ignored and case is normalized.

    Return True if the key matches
    Return False otherwise
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )
    # normalize: uppercase, drop embedded spaces
    expected_id = "".join( key_id.upper().split(" ") )
    if len(expected_id) < 16:
        # too few digits to be a trustworthy identifier
        log.debug("Fingerprint is too short to be secure")
        return False

    actual_fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if actual_fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    if actual_fingerprint == expected_id or actual_fingerprint.endswith( expected_id ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key from the app-specific keyring, given its ID.

    @include_private: if True, export the secret key instead of the public one.

    Return the key data on success; asserts (raises AssertionError) on failure.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
    # hard failure: callers rely on non-empty key data
    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:
    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    (each entry also carries 'keyName' when the account recorded one).
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')
    # extract only the PGP accounts
    ret = []
    for account in accounts:
        if account['service'] != 'pgp':
            continue
        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }
        if 'keyName' in account.keys():
            # optional human-readable name
            info['keyName'] = account['keyName']
        ret.append(info)
    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.
    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error

    NOTE: deliberately disabled — it depends on list_mutable_data, which is
    unavailable; everything below the raise is currently unreachable.
    """
    raise Exception("BROKEN; depends on list_mutable_data")
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
    key_info = []
    key_prefix = "gpg.%s." % appname
    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])
    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })
    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])
    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })
    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme (or uses iks://), then assume it's a PGP key
    server, and use GPG to go get it.
    The key is not accepted into any keyrings.

    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False
    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None
        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            # strip scheme; keyserver name only
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key
    server and to either immutable (if @immutable) or mutable data, and a URL
    to that replica is recorded in the account.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)
    if key_server is None:
        key_server = DEFAULT_KEY_SERVER
    if gpghome is None:
        gpghome = get_default_gpg_home()
    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}
    if key_url is None:
        # no caller-supplied location: replicate the key ourselves
        gpg = gnupg.GPG( homedir=gpghome )
        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # NOTE: fixed typo in this message ("repliate" -> "replicate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}
        key_data = gpg.export_keys( [key_id] )
        if immutable:
            # replicate to immutable storage (zonefile-backed)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']
            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id
            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
    # record the key's location in the profile's accounts
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res
    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG key from a blockchain ID's global account.
    Do NOT remove it from the local keyring.

    Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res
    removed_accounts = res['removed']
    errors = []
    # blow away all state
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue
        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete the replicated key data itself
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})
            except AssertionError, e:
                # assertion failures indicate bugs; re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise
            except Exception, e:
                # best-effort: keep deleting the remaining replicas
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue
    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors
    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    # generate in a throwaway keyring; only the exported key is kept
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res
    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data
    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get a profile key by name (or by key ID, if @key_id is given).

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )
    if config_dir is None:
        config_dir = get_config_dir()
    if gpghome is None:
        gpghome = get_default_gpg_home()
    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts
    accounts = accounts.pop('accounts')
    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}
    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}
    # find the one with this key name (or the given key ID)
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}
    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}
    # fall back to the default keyserver if no URL was recorded
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)
    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}
    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }
    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.

    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.
    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    try:
        # stash the (possibly private) key to the app keyring first
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}
    # get public key...
    assert is_valid_appname(appname)
    try:
        # only the public half is replicated
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )
    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res
        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )
    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )
    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take
    on the order of an hour to complete on the blockchain. A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None
    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    dead_pubkey_kv = dead_pubkey_dict['data']
    # we stored exactly one {name: key} pair
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"
    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )
    if 'error' in result:
        return result
    # unstash the local private key (best-effort outside of testing)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise
    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
    # generate in a throwaway keyring; gpg_app_put_key stashes the result
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )
    log.debug("Generating GPG key (this may take a while)")
    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    # export the secret key, so it can be stashed to the app keyring
    key_data = gpg.export_keys( [key_id], secret=True )
    shutil.rmtree(keydir)
    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.

    Return {'status': True, 'key_id': ..., 'key_data': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    if config_dir is None:
        config_dir = get_config_dir()
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None
    if immutable:
        # try immutable
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
    else:
        # try mutable
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )
    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}
    if key_id is None:
        # derive the fingerprint from the fetched data
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )
    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }
    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk (detached signature).

    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    # do the signature
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
    shutil.rmtree(tmpdir)
    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.

    @sigdata: the detached signature bytes
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    # stash detached signature to a temporary file for gpg
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )
    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass
    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # NOTE: fixed copy-paste error message (previously said "decrypt")
        return {'error': 'Failed to verify data'}
    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys, signing it with the sender's key.

    @sender_key_info (and each recipient entry) should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}
    # copy over our key
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}
    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}
    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    # do the encryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
    """
    Decrypt a stream of data using key info
    for a private key we own.

    @my_key_info and @sender_key_info should be data returned by gpg_app_get_key
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()
    # ingest keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
    res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
    try:
        # our secret key, needed to decrypt
        my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
    except:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
    res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}
    # do the decryption
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
    shutil.rmtree(tmpdir)
    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
|
blockstack-packages/blockstack-gpg | blockstack_gpg/gpg.py | gpg_decrypt | python | def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
try:
my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
except:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load private key'}
# do the decryption
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
shutil.rmtree(tmpdir)
if res.status != 'decryption ok':
log.debug("decrypt_file: %s" % res.__dict__)
return {'error': 'Failed to decrypt data'}
log.debug("decryption succeeded from keys in %s" % config_dir)
return {'status': True} | Decrypt a stream of data using key info
for a private key we own.
@my_key_info and @sender_key_info should be data returned by gpg_app_get_key
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True, 'sig': ...} on success
Return {'status': True} on success
Return {'error': ...} on error | train | https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L1057-L1103 | [
"def get_config_dir( config_dir=None ):\n \"\"\"\n Get the default configuration directory.\n \"\"\"\n if config_dir is None:\n config = get_config()\n config_dir = config['dir']\n\n return config_dir\n",
"def make_gpg_tmphome( prefix=None, config_dir=None ):\n \"\"\"\n Make a t... | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack-gpg
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of blockstack-gpg.
BLockstack-gpg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack-gpg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with blockstack-gpg. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import traceback
import logging
import gnupg
logging.getLogger("gnupg").setLevel( logging.CRITICAL )
import urllib2
import urlparse
import tempfile
import shutil
import base64
import copy
import json
import re
from ConfigParser import SafeConfigParser
import blockstack_client
from blockstack_client import get_logger, get_config
from blockstack_client import BlockstackHandler
from blockstack_client import list_immutable_data
from blockstack_client import make_mutable_data_url, make_immutable_data_url
import blockstack_profiles
client = blockstack_client
log = get_logger("blockstack-gpg")
import urllib
DEFAULT_KEY_SERVER = 'pgp.mit.edu'
def get_config_dir( config_dir=None ):
    """
    Return the configuration directory to use.

    If @config_dir is given, it is returned unchanged; otherwise the
    directory named by the client configuration ('dir') is used.
    """
    if config_dir is not None:
        return config_dir
    return get_config()['dir']
def is_valid_appname(appname):
    """
    Is the given application name safe to embed in a URL?

    Allowed characters are the RFC 3986 "unreserved" set minus '.'
    (i.e. letters, digits, '-', '_' and '~').  The name must be
    non-empty.

    Return True if valid, False otherwise.
    """
    # NOTE(review): '.' appears to be excluded deliberately, presumably
    # because app names become path/URL components -- TODO confirm.
    # (Original comment cited RFC 3896; the correct reference is RFC 3986.)
    url_regex = r'^[a-zA-Z0-9-_~]+$'
    # return the match result directly instead of an if/else on it
    return re.match(url_regex, appname) is not None
def is_valid_keyname(keyname):
    """
    Is the given key name safe to embed in a URL?

    Key names obey exactly the same character rules as application
    names.
    """
    # delegate to the application-name validator
    return is_valid_appname(keyname)
def make_gpg_home(appname, config_dir=None):
    """
    Ensure the GPG keyring directory for @appname exists, creating it
    (mode 0700) if necessary and forcing mode 0700 if it already exists.

    Return the path to the keyring directory.
    """
    assert is_valid_appname(appname)
    keyring_dir = os.path.join(get_config_dir(config_dir), "gpgkeys", appname)

    if os.path.exists(keyring_dir):
        # keep it private to this user
        os.chmod(keyring_dir, 0o700)
    else:
        os.makedirs(keyring_dir, 0o700)

    return keyring_dir
def get_gpg_home( appname, config_dir=None ):
    """
    Compute (but do not create) the path to the GPG keyring directory
    for the given application.

    Return the path.
    """
    assert is_valid_appname(appname)
    return os.path.join( get_config_dir( config_dir ), "gpgkeys", appname )
def get_default_gpg_home( config_dir=None ):
    """
    Legacy accessor for the user's global GPG keyring (~/.gnupg).

    This code path is intentionally disabled -- presumably because
    keyrings are now app-specific (see get_gpg_home) -- so any call
    to this function is a bug.

    Raises:
        Exception: always.
    """
    # BUGFIX: corrected the typo in the guard message ("Should ever be
    # called" -> "Should never be called") and removed the unreachable
    # `return os.path.expanduser("~/.gnupg")` that followed the raise.
    raise Exception("Should never be called")
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Create a fresh temporary directory (under <config_dir>/tmp) for
    holding GPG keys that should not be stored to any application
    keyring.

    Return the path to the new directory.  The caller is responsible
    for removing it when done.
    """
    if prefix is None:
        prefix = "tmp"

    tmp_root = os.path.join( get_config_dir( config_dir ), "tmp" )
    if not os.path.exists( tmp_root ):
        os.makedirs( tmp_root, 0o700 )

    return tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmp_root )
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID

    @appname: application whose keyring receives the key (only used
        when @gpghome is not given).
    @key_bin: ASCII-armored key data (coerced to str).
    @gpghome: optional explicit keyring directory; overrides @appname.

    Return the key ID (fingerprint) on success
    Return None on error
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # exactly one key must have been imported
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring

    @appname: application whose keyring to remove from (only used
        when @gpghome is not given).
    @key_id: fingerprint of the key to delete.

    Return True on success
    Return False on error
    """
    assert is_valid_appname(appname)
    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; GPG requires the secret part go first
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.

    @key_id: key fingerprint (or key ID) to fetch.
    @key_server: host name of the PGP key server.

    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        # expect exactly one key back
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID (fingerprint) of a given serialized key.

    Imports @key_data into a throwaway keyring so GPG computes the
    fingerprint for us.

    Return the fingerprint on success
    Return None on error
    """
    key_data = str(key_data)

    config_dir = get_config_dir( config_dir )
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )

    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.

    @key_id may contain spaces and lowercase hex; it is normalized
    before comparison.  IDs shorter than 16 hex characters are rejected
    outright as too short to be secure.

    Return True on success
    Return False on error
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize: uppercase, strip spaces
    sanitized_key_id = "".join( key_id.upper().split(" ") )

    if len(sanitized_key_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept an exact fingerprint match, or a match on the fingerprint
    # suffix (i.e. a long key ID)
    if sanitized_key_id != fingerprint and not fingerprint.endswith( sanitized_key_id ):
        log.debug("Imported key does not match the given ID")
        return False

    else:
        return True
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key, given the ID.

    @appname: application keyring to export from.
    @include_private: if True, export the private key instead of the
        public key.

    Return the ASCII-armored key data on success.
    Raises AssertionError if the key could not be exported.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    keydat = gpg.export_keys( [key_id], secret=include_private )
    if not keydat:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))

    assert keydat
    return keydat
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List all GPG keys in a user profile:

    Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
    (entries also carry 'keyName' when the account record has one).
    Raise on error
    Return {'error': ...} on failure
    """
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')

    # extract the PGP accounts only
    ret = []
    for account in accounts:
        if account['service'] != 'pgp':
            continue

        info = {
            "identifier": account['identifier'],
            "contentUrl": account['contentUrl']
        }

        if 'keyName' in account.keys():
            info['keyName'] = account['keyName']

        ret.append(info)

    return ret
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
    """
    List the set of available GPG keys tagged for a given application.

    NOTE: currently disabled -- raises immediately, since it depends on
    the list_mutable_data API.  The code below is kept for reference.

    Return list of {'keyName': key name, 'contentUrl': URL to key data}
    Raise on error
    """
    raise Exception("BROKEN; depends on list_mutable_data")

    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
    if proxy is None:
        proxy = blockstack_client.get_default_proxy( config_path=client_config_path )

    key_info = []
    key_prefix = "gpg.%s." % appname

    # immutable data key listing (look for keys that start with 'appname:')
    immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
    if 'error' in immutable_listing:
        raise Exception("Blockstack error: %s" % immutable_listing['error'])

    for immutable in immutable_listing['data']:
        name = immutable['data_id']
        data_hash = immutable['hash']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
            })

    # mutable data key listing (look for keys that start with 'appname:')
    # TODO: use 'accounts'
    mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in mutable_listing:
        raise Exception("Blockstack error: %s" % mutable_listing['error'])

    for mutable in mutable_listing['data']:
        name = mutable['data_id']
        version = mutable['version']
        if name.startswith( key_prefix ):
            key_info.append( {
                'keyName': name[len(key_prefix):],
                'contentUrl': make_mutable_data_url( blockchain_id, name, version )
            })

    return key_info
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.

    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    dat = None
    from_blockstack = False

    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    # URLs with a scheme (other than iks://) go through urllib2;
    # everything else is treated as a key-server address.
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None

            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])

            else:
                # expect: PEM string
                key_data = key_data_str

            f.close()

        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)

            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.
    If the URL is not given, the key will be replicated to the default PGP key
    server and to either immutable (if @immutable) or mutable data.

    @key_name, if given, must be URL-safe (see is_valid_keyname).

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        # no pre-existing location for this key; replicate it ourselves
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # non-empty res.data indicates a send_keys error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # BUGFIX: corrected typo in error message ("repliate")
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage (updates the zonefile)
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}

            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage, keyed by name if we have one
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the key in the profile's accounts list
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
    """
    Remove a GPG from a blockchain ID's global account.
    Do NOT remove it from the local keyring.

    Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
    Return {'error': ...} on error
    """
    res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        return res

    removed_accounts = res['removed']
    errors = []

    # blow away all state referenced by the removed accounts
    for account in removed_accounts:
        if not account.has_key('contentUrl'):
            continue

        key_url = account['contentUrl']
        if key_url.startswith("blockstack://"):
            # delete the stored key data itself
            try:
                res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
                if 'error' in res:
                    errors.append({'key_url': key_url, 'message': res['error']})

            except AssertionError, e:
                # assertion failures indicate a bug; re-raise
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                raise

            except Exception, e:
                # best-effort: keep deleting the rest
                log.exception(e)
                log.error("Failed to delete '%s'" % key_url)
                continue

    ret = {'status': True}
    if len(errors) > 0:
        ret['delete_errors'] = errors

    return ret
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
    """
    Create a new account key.
    Select good default GPG values (4096-bit, RSA/RSA)
    Note that without rngd running, this may take a while.
    Add the new key to the user's account.

    Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    # generate the key in a throwaway keyring first
    keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")

    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    assert key_res

    key_id = key_res.fingerprint
    key_data = gpg.export_keys( [key_id] )
    assert key_data

    # save the key itself, to the global keyring
    rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
    assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
    return add_res
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
    """
    Get the profile key

    Looks through the profile's 'pgp' accounts for one whose keyName
    matches @keyname (or whose identifier matches @key_id, if given),
    then downloads and verifies the key data.

    Return {'status': True, 'key_data': ..., 'key_id': ...} on success
    Return {'error': ...} on error
    """
    assert is_valid_keyname( keyname )

    if config_dir is None:
        config_dir = get_config_dir()

    if gpghome is None:
        gpghome = get_default_gpg_home()

    accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
    if 'error' in accounts:
        return accounts

    accounts = accounts.pop('accounts')

    if len(accounts) == 0:
        return {'error': 'No accounts in this profile'}

    all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
    if len(all_gpg_accounts) == 0:
        return {'error': 'No GPG accounts in this profile'}

    # find the one with this key name
    gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )

    if len(gpg_accounts) == 0:
        return {'error': 'No such GPG key found'}

    if len(gpg_accounts) > 1:
        return {'error': 'Multiple keys with that name'}

    # fall back to the default key server if the account has no URL
    key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)

    # go get the key
    key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to download and verify key'}

    ret = {
        'status': True,
        'key_id': gpg_accounts[0]['identifier'],
        'key_data': key_data
    }

    return ret
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.

    Return {'status': True, 'key_url': ..., 'key_data': ...} on success
    Return {'error': ...} on error

    If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash)
    This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID
    on a successful execution. It is up to you to wait until the transaction is confirmed before using the key.

    Otherwise, the key is stored to mutable storage.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    try:
        # store the (possibly private) key to the app-specific keyring
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    Return {'status': True, ...} on success
    Return {'error': ...} on error

    If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take
    on the order of an hour to complete on the blockchain. A transaction ID will be returned to you
    on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # expect a single {name: key data} mapping
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )

    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key (best-effort)
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Create a new application GPG key.
    Use good defaults (RSA-4096)
    Stash it to the app-specific keyring locally.

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    if proxy is None:
        proxy = blockstack_client.get_default_proxy(config_path=client_config_path)

    # generate the key in a throwaway keyring
    keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=keydir )

    log.debug("Generating GPG key (this may take a while)")

    key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
    key_res = gpg.gen_key( key_input )
    key_id = key_res.fingerprint
    # export the private key, so gpg_app_put_key can stash it locally
    key_data = gpg.export_keys( [key_id], secret=True )

    shutil.rmtree(keydir)

    # propagate to blockstack
    add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
    return add_res
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
    """
    Get an app-specific GPG key.

    @key_hash is used to build the URL for immutable lookups;
    @key_version for mutable lookups.

    Return {'status': True, 'key_id': ..., 'key': ..., 'app_name': ...} on success
    return {'error': ...} on error
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    if config_dir is None:
        config_dir = get_config_dir()

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if immutable:
        # try immutable
        key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )

    else:
        # try mutable
        key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )

    log.debug("fetch '%s'" % key_url)
    key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
    if key_data is None:
        return {'error': 'Failed to fetch key'}

    if key_id is None:
        # derive the fingerprint from the downloaded data
        key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )

    ret = {
        'status': True,
        'key_id': key_id,
        'key_data': key_data,
        'app_name': appname
    }

    return ret
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
    """
    Sign a file on disk.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name': ...
    }

    Return {'status': True, 'sig': ...} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest our private key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    # do the signature (detached)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = None
    with open(path_to_sign, "r") as fd_in:
        res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )

    shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}

    return {'status': True, 'sig': res.data }
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
    """
    Verify a file on disk was signed by the given sender.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name'; ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest the sender's public key into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
    res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # stash the detached signature to a temporary file,
    # since gnupg's verify_file() needs it on disk
    fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
    f = os.fdopen(fd, "w")
    f.write( sigdata )
    f.flush()
    os.fsync(f.fileno())
    f.close()

    # verify
    gpg = gnupg.GPG( homedir=tmpdir )
    with open(path, "r") as fd_in:
        res = gpg.verify_file( fd_in, data_filename=path_to_verify )

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except:
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        # BUGFIX: error message used to read 'Failed to decrypt data'
        # (copy-paste from gpg_decrypt); this function verifies a signature.
        return {'error': 'Failed to verify data'}

    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
    """
    Encrypt a stream of data for a set of keys.
    @sender_key_info should be a dict with
    {
        'key_id': ...
        'key_data': ...
        'app_name'; ...
    }

    Return {'status': True} on success
    Return {'error': ...} on error
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # ingest all recipient public keys into a throwaway keyring
    tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
    for key_info in recipient_key_infos:
        res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    # copy over our key (used for signing)
    try:
        sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
    except Exception, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]

    # do the encryption (sign-and-encrypt to all recipients)
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )

    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}

    return {'status': True}
|
hyde/fswrap | fswrap.py | FS.fully_expanded_path | python | def fully_expanded_path(self):
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path))))) | Returns the absolutely absolute path. Calls os.(
normpath, normcase, expandvars and expanduser). | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L68-L77 | null | class FS(object):
"""
The base file system object
"""
def __init__(self, path):
super(FS, self).__init__()
if path == os.sep:
self.path = path
else:
self.path = os.path.expandvars(os.path.expanduser(
unicode(path).strip().rstrip(os.sep)))
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
return unicode(self) == unicode(other)
def __ne__(self, other):
return unicode(self) != unicode(other)
@property
@property
def exists(self):
"""
Does the file system object exist?
"""
return os.path.exists(self.path)
@property
def name(self):
"""
Returns the name of the FS object with its extension
"""
return os.path.basename(self.path)
@property
def parent(self):
"""
The parent folder. Returns a `Folder` object.
"""
return Folder(os.path.dirname(self.path))
@property
def depth(self):
"""
Returns the number of ancestors of this directory.
"""
return len(self.path.rstrip(os.sep).split(os.sep))
def ancestors(self, stop=None):
"""
Generates the parents until stop or the absolute
root directory is reached.
"""
folder = self
while folder.parent != stop:
if folder.parent == folder:
return
yield folder.parent
folder = folder.parent
def is_descendant_of(self, ancestor):
"""
Checks if this folder is inside the given ancestor.
"""
stop = Folder(ancestor)
for folder in self.ancestors():
if folder == stop:
return True
if stop.depth > folder.depth:
return False
return False
def get_relative_path(self, root):
"""
Gets the fragment of the current path starting at root.
"""
if self.path == root:
return ''
ancestors = self.ancestors(stop=root)
return functools.reduce(lambda f, p: Folder(p.name).child(f),
ancestors,
self.name)
def get_mirror(self, target_root, source_root=None):
"""
Returns a File or Folder object that reperesents if the entire
fragment of this directory starting with `source_root` were copied
to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
"""
fragment = self.get_relative_path(
source_root if source_root else self.parent)
return Folder(target_root).child(fragment)
@staticmethod
def file_or_folder(path):
"""
Returns a File or Folder object that would represent the given path.
"""
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target)
def __get_destination__(self, destination):
"""
Returns a File or Folder object that would represent this entity
if it were copied or moved to `destination`.
"""
if isinstance(destination,
File) or os.path.isfile(unicode(destination)):
return destination
else:
return FS.file_or_folder(Folder(destination).child(self.name))
|
hyde/fswrap | fswrap.py | FS.depth | python | def depth(self):
return len(self.path.rstrip(os.sep).split(os.sep)) | Returns the number of ancestors of this directory. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L101-L105 | null | class FS(object):
"""
The base file system object
"""
def __init__(self, path):
super(FS, self).__init__()
if path == os.sep:
self.path = path
else:
self.path = os.path.expandvars(os.path.expanduser(
unicode(path).strip().rstrip(os.sep)))
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
return unicode(self) == unicode(other)
def __ne__(self, other):
return unicode(self) != unicode(other)
@property
def fully_expanded_path(self):
"""
Returns the absolutely absolute path. Calls os.(
normpath, normcase, expandvars and expanduser).
"""
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path)))))
@property
def exists(self):
"""
Does the file system object exist?
"""
return os.path.exists(self.path)
@property
def name(self):
"""
Returns the name of the FS object with its extension
"""
return os.path.basename(self.path)
@property
def parent(self):
"""
The parent folder. Returns a `Folder` object.
"""
return Folder(os.path.dirname(self.path))
@property
def ancestors(self, stop=None):
"""
Generates the parents until stop or the absolute
root directory is reached.
"""
folder = self
while folder.parent != stop:
if folder.parent == folder:
return
yield folder.parent
folder = folder.parent
def is_descendant_of(self, ancestor):
"""
Checks if this folder is inside the given ancestor.
"""
stop = Folder(ancestor)
for folder in self.ancestors():
if folder == stop:
return True
if stop.depth > folder.depth:
return False
return False
def get_relative_path(self, root):
"""
Gets the fragment of the current path starting at root.
"""
if self.path == root:
return ''
ancestors = self.ancestors(stop=root)
return functools.reduce(lambda f, p: Folder(p.name).child(f),
ancestors,
self.name)
def get_mirror(self, target_root, source_root=None):
"""
Returns a File or Folder object that reperesents if the entire
fragment of this directory starting with `source_root` were copied
to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
"""
fragment = self.get_relative_path(
source_root if source_root else self.parent)
return Folder(target_root).child(fragment)
@staticmethod
def file_or_folder(path):
"""
Returns a File or Folder object that would represent the given path.
"""
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target)
def __get_destination__(self, destination):
"""
Returns a File or Folder object that would represent this entity
if it were copied or moved to `destination`.
"""
if isinstance(destination,
File) or os.path.isfile(unicode(destination)):
return destination
else:
return FS.file_or_folder(Folder(destination).child(self.name))
|
hyde/fswrap | fswrap.py | FS.ancestors | python | def ancestors(self, stop=None):
folder = self
while folder.parent != stop:
if folder.parent == folder:
return
yield folder.parent
folder = folder.parent | Generates the parents until stop or the absolute
root directory is reached. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L107-L117 | null | class FS(object):
"""
The base file system object
"""
def __init__(self, path):
super(FS, self).__init__()
if path == os.sep:
self.path = path
else:
self.path = os.path.expandvars(os.path.expanduser(
unicode(path).strip().rstrip(os.sep)))
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
return unicode(self) == unicode(other)
def __ne__(self, other):
return unicode(self) != unicode(other)
@property
def fully_expanded_path(self):
"""
Returns the absolutely absolute path. Calls os.(
normpath, normcase, expandvars and expanduser).
"""
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path)))))
@property
def exists(self):
"""
Does the file system object exist?
"""
return os.path.exists(self.path)
@property
def name(self):
"""
Returns the name of the FS object with its extension
"""
return os.path.basename(self.path)
@property
def parent(self):
"""
The parent folder. Returns a `Folder` object.
"""
return Folder(os.path.dirname(self.path))
@property
def depth(self):
"""
Returns the number of ancestors of this directory.
"""
return len(self.path.rstrip(os.sep).split(os.sep))
def is_descendant_of(self, ancestor):
"""
Checks if this folder is inside the given ancestor.
"""
stop = Folder(ancestor)
for folder in self.ancestors():
if folder == stop:
return True
if stop.depth > folder.depth:
return False
return False
def get_relative_path(self, root):
"""
Gets the fragment of the current path starting at root.
"""
if self.path == root:
return ''
ancestors = self.ancestors(stop=root)
return functools.reduce(lambda f, p: Folder(p.name).child(f),
ancestors,
self.name)
def get_mirror(self, target_root, source_root=None):
"""
Returns a File or Folder object that reperesents if the entire
fragment of this directory starting with `source_root` were copied
to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
"""
fragment = self.get_relative_path(
source_root if source_root else self.parent)
return Folder(target_root).child(fragment)
@staticmethod
def file_or_folder(path):
"""
Returns a File or Folder object that would represent the given path.
"""
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target)
def __get_destination__(self, destination):
"""
Returns a File or Folder object that would represent this entity
if it were copied or moved to `destination`.
"""
if isinstance(destination,
File) or os.path.isfile(unicode(destination)):
return destination
else:
return FS.file_or_folder(Folder(destination).child(self.name))
|
hyde/fswrap | fswrap.py | FS.is_descendant_of | python | def is_descendant_of(self, ancestor):
stop = Folder(ancestor)
for folder in self.ancestors():
if folder == stop:
return True
if stop.depth > folder.depth:
return False
return False | Checks if this folder is inside the given ancestor. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L119-L129 | [
"def ancestors(self, stop=None):\n \"\"\"\n Generates the parents until stop or the absolute\n root directory is reached.\n \"\"\"\n folder = self\n while folder.parent != stop:\n if folder.parent == folder:\n return\n yield folder.parent\n folder = folder.parent\n"... | class FS(object):
"""
The base file system object
"""
def __init__(self, path):
super(FS, self).__init__()
if path == os.sep:
self.path = path
else:
self.path = os.path.expandvars(os.path.expanduser(
unicode(path).strip().rstrip(os.sep)))
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
return unicode(self) == unicode(other)
def __ne__(self, other):
return unicode(self) != unicode(other)
@property
def fully_expanded_path(self):
"""
Returns the absolutely absolute path. Calls os.(
normpath, normcase, expandvars and expanduser).
"""
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path)))))
@property
def exists(self):
"""
Does the file system object exist?
"""
return os.path.exists(self.path)
@property
def name(self):
"""
Returns the name of the FS object with its extension
"""
return os.path.basename(self.path)
@property
def parent(self):
"""
The parent folder. Returns a `Folder` object.
"""
return Folder(os.path.dirname(self.path))
@property
def depth(self):
"""
Returns the number of ancestors of this directory.
"""
return len(self.path.rstrip(os.sep).split(os.sep))
def ancestors(self, stop=None):
"""
Generates the parents until stop or the absolute
root directory is reached.
"""
folder = self
while folder.parent != stop:
if folder.parent == folder:
return
yield folder.parent
folder = folder.parent
def get_relative_path(self, root):
"""
Gets the fragment of the current path starting at root.
"""
if self.path == root:
return ''
ancestors = self.ancestors(stop=root)
return functools.reduce(lambda f, p: Folder(p.name).child(f),
ancestors,
self.name)
def get_mirror(self, target_root, source_root=None):
"""
Returns a File or Folder object that reperesents if the entire
fragment of this directory starting with `source_root` were copied
to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
"""
fragment = self.get_relative_path(
source_root if source_root else self.parent)
return Folder(target_root).child(fragment)
@staticmethod
def file_or_folder(path):
"""
Returns a File or Folder object that would represent the given path.
"""
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target)
def __get_destination__(self, destination):
"""
Returns a File or Folder object that would represent this entity
if it were copied or moved to `destination`.
"""
if isinstance(destination,
File) or os.path.isfile(unicode(destination)):
return destination
else:
return FS.file_or_folder(Folder(destination).child(self.name))
|
hyde/fswrap | fswrap.py | FS.get_relative_path | python | def get_relative_path(self, root):
if self.path == root:
return ''
ancestors = self.ancestors(stop=root)
return functools.reduce(lambda f, p: Folder(p.name).child(f),
ancestors,
self.name) | Gets the fragment of the current path starting at root. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L131-L140 | [
"def ancestors(self, stop=None):\n \"\"\"\n Generates the parents until stop or the absolute\n root directory is reached.\n \"\"\"\n folder = self\n while folder.parent != stop:\n if folder.parent == folder:\n return\n yield folder.parent\n folder = folder.parent\n"... | class FS(object):
"""
The base file system object
"""
def __init__(self, path):
super(FS, self).__init__()
if path == os.sep:
self.path = path
else:
self.path = os.path.expandvars(os.path.expanduser(
unicode(path).strip().rstrip(os.sep)))
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
return unicode(self) == unicode(other)
def __ne__(self, other):
return unicode(self) != unicode(other)
@property
def fully_expanded_path(self):
"""
Returns the absolutely absolute path. Calls os.(
normpath, normcase, expandvars and expanduser).
"""
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path)))))
@property
def exists(self):
"""
Does the file system object exist?
"""
return os.path.exists(self.path)
@property
def name(self):
"""
Returns the name of the FS object with its extension
"""
return os.path.basename(self.path)
@property
def parent(self):
"""
The parent folder. Returns a `Folder` object.
"""
return Folder(os.path.dirname(self.path))
@property
def depth(self):
"""
Returns the number of ancestors of this directory.
"""
return len(self.path.rstrip(os.sep).split(os.sep))
def ancestors(self, stop=None):
"""
Generates the parents until stop or the absolute
root directory is reached.
"""
folder = self
while folder.parent != stop:
if folder.parent == folder:
return
yield folder.parent
folder = folder.parent
def is_descendant_of(self, ancestor):
"""
Checks if this folder is inside the given ancestor.
"""
stop = Folder(ancestor)
for folder in self.ancestors():
if folder == stop:
return True
if stop.depth > folder.depth:
return False
return False
def get_mirror(self, target_root, source_root=None):
"""
Returns a File or Folder object that reperesents if the entire
fragment of this directory starting with `source_root` were copied
to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
"""
fragment = self.get_relative_path(
source_root if source_root else self.parent)
return Folder(target_root).child(fragment)
@staticmethod
def file_or_folder(path):
"""
Returns a File or Folder object that would represent the given path.
"""
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target)
def __get_destination__(self, destination):
"""
Returns a File or Folder object that would represent this entity
if it were copied or moved to `destination`.
"""
if isinstance(destination,
File) or os.path.isfile(unicode(destination)):
return destination
else:
return FS.file_or_folder(Folder(destination).child(self.name))
|
hyde/fswrap | fswrap.py | FS.get_mirror | python | def get_mirror(self, target_root, source_root=None):
fragment = self.get_relative_path(
source_root if source_root else self.parent)
return Folder(target_root).child(fragment) | Returns a File or Folder object that reperesents if the entire
fragment of this directory starting with `source_root` were copied
to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff') | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L142-L154 | [
"def get_relative_path(self, root):\n \"\"\"\n Gets the fragment of the current path starting at root.\n \"\"\"\n if self.path == root:\n return ''\n ancestors = self.ancestors(stop=root)\n return functools.reduce(lambda f, p: Folder(p.name).child(f),\n ancestors,... | class FS(object):
"""
The base file system object
"""
def __init__(self, path):
super(FS, self).__init__()
if path == os.sep:
self.path = path
else:
self.path = os.path.expandvars(os.path.expanduser(
unicode(path).strip().rstrip(os.sep)))
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
return unicode(self) == unicode(other)
def __ne__(self, other):
return unicode(self) != unicode(other)
@property
def fully_expanded_path(self):
"""
Returns the absolutely absolute path. Calls os.(
normpath, normcase, expandvars and expanduser).
"""
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path)))))
@property
def exists(self):
"""
Does the file system object exist?
"""
return os.path.exists(self.path)
@property
def name(self):
"""
Returns the name of the FS object with its extension
"""
return os.path.basename(self.path)
@property
def parent(self):
"""
The parent folder. Returns a `Folder` object.
"""
return Folder(os.path.dirname(self.path))
@property
def depth(self):
"""
Returns the number of ancestors of this directory.
"""
return len(self.path.rstrip(os.sep).split(os.sep))
def ancestors(self, stop=None):
"""
Generates the parents until stop or the absolute
root directory is reached.
"""
folder = self
while folder.parent != stop:
if folder.parent == folder:
return
yield folder.parent
folder = folder.parent
def is_descendant_of(self, ancestor):
"""
Checks if this folder is inside the given ancestor.
"""
stop = Folder(ancestor)
for folder in self.ancestors():
if folder == stop:
return True
if stop.depth > folder.depth:
return False
return False
def get_relative_path(self, root):
"""
Gets the fragment of the current path starting at root.
"""
if self.path == root:
return ''
ancestors = self.ancestors(stop=root)
return functools.reduce(lambda f, p: Folder(p.name).child(f),
ancestors,
self.name)
@staticmethod
def file_or_folder(path):
"""
Returns a File or Folder object that would represent the given path.
"""
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target)
def __get_destination__(self, destination):
"""
Returns a File or Folder object that would represent this entity
if it were copied or moved to `destination`.
"""
if isinstance(destination,
File) or os.path.isfile(unicode(destination)):
return destination
else:
return FS.file_or_folder(Folder(destination).child(self.name))
|
hyde/fswrap | fswrap.py | FS.file_or_folder | python | def file_or_folder(path):
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target) | Returns a File or Folder object that would represent the given path. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L157-L162 | null | class FS(object):
"""
The base file system object
"""
def __init__(self, path):
super(FS, self).__init__()
if path == os.sep:
self.path = path
else:
self.path = os.path.expandvars(os.path.expanduser(
unicode(path).strip().rstrip(os.sep)))
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
return unicode(self) == unicode(other)
def __ne__(self, other):
return unicode(self) != unicode(other)
@property
def fully_expanded_path(self):
"""
Returns the absolutely absolute path. Calls os.(
normpath, normcase, expandvars and expanduser).
"""
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path)))))
@property
def exists(self):
"""
Does the file system object exist?
"""
return os.path.exists(self.path)
@property
def name(self):
"""
Returns the name of the FS object with its extension
"""
return os.path.basename(self.path)
@property
def parent(self):
"""
The parent folder. Returns a `Folder` object.
"""
return Folder(os.path.dirname(self.path))
@property
def depth(self):
"""
Returns the number of ancestors of this directory.
"""
return len(self.path.rstrip(os.sep).split(os.sep))
def ancestors(self, stop=None):
"""
Generates the parents until stop or the absolute
root directory is reached.
"""
folder = self
while folder.parent != stop:
if folder.parent == folder:
return
yield folder.parent
folder = folder.parent
def is_descendant_of(self, ancestor):
"""
Checks if this folder is inside the given ancestor.
"""
stop = Folder(ancestor)
for folder in self.ancestors():
if folder == stop:
return True
if stop.depth > folder.depth:
return False
return False
def get_relative_path(self, root):
"""
Gets the fragment of the current path starting at root.
"""
if self.path == root:
return ''
ancestors = self.ancestors(stop=root)
return functools.reduce(lambda f, p: Folder(p.name).child(f),
ancestors,
self.name)
def get_mirror(self, target_root, source_root=None):
"""
Returns a File or Folder object that reperesents if the entire
fragment of this directory starting with `source_root` were copied
to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
"""
fragment = self.get_relative_path(
source_root if source_root else self.parent)
return Folder(target_root).child(fragment)
@staticmethod
def __get_destination__(self, destination):
"""
Returns a File or Folder object that would represent this entity
if it were copied or moved to `destination`.
"""
if isinstance(destination,
File) or os.path.isfile(unicode(destination)):
return destination
else:
return FS.file_or_folder(Folder(destination).child(self.name))
|
hyde/fswrap | fswrap.py | File.is_binary | python | def is_binary(self):
with open(self.path, 'rb') as fin:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if b'\0' in chunk:
return True
if len(chunk) < CHUNKSIZE:
break
return False | Return true if this is a binary file. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L224-L234 | null | class File(FS):
"""
The File object.
"""
def __init__(self, path):
super(File, self).__init__(path)
@property
def name_without_extension(self):
"""
Returns the name of the FS object without its extension
"""
return os.path.splitext(self.name)[0]
@property
def extension(self):
"""
File extension prefixed with a dot.
"""
return os.path.splitext(self.path)[1]
@property
def kind(self):
"""
File extension without dot prefix.
"""
return self.extension.lstrip(".")
@property
def size(self):
"""
Size of this file.
"""
if not self.exists:
return -1
return os.path.getsize(self.path)
@property
def mimetype(self):
"""
Gets the mimetype of this file.
"""
(mime, _) = mimetypes.guess_type(self.path)
return mime
@property
@property
def is_text(self):
"""Return true if this is a text file."""
return (not self.is_binary)
@property
def is_image(self):
"""Return true if this is an image file."""
return self.mimetype.split("/")[0] == "image"
@property
def last_modified(self):
"""
Returns a datetime object representing the last modified time.
Calls os.path.getmtime.
"""
return datetime.fromtimestamp(os.path.getmtime(self.path))
def has_changed_since(self, basetime):
"""
Returns True if the file has been changed since the given time.
"""
return self.last_modified > basetime
def older_than(self, another_file):
"""
Checks if this file is older than the given file. Uses last_modified to
determine age.
"""
return self.last_modified < File(unicode(another_file)).last_modified
@staticmethod
def make_temp(text):
"""
Creates a temprorary file and writes the `text` into it
"""
import tempfile
(handle, path) = tempfile.mkstemp(text=True)
os.close(handle)
afile = File(path)
afile.write(text)
return afile
def read_all(self, encoding='utf-8'):
"""
Reads from the file and returns the content as a string.
"""
logger.info("Reading everything from %s" % self)
with codecs.open(self.path, 'r', encoding) as fin:
read_text = fin.read()
return read_text
def write(self, text, encoding="utf-8"):
"""
Writes the given text to the file using the given encoding.
"""
logger.info("Writing to %s" % self)
with codecs.open(self.path, 'w', encoding) as fout:
fout.write(text)
def copy_to(self, destination):
"""
Copies the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copy(self.path, unicode(destination))
return target
def delete(self):
"""
Delete the file if it exists.
"""
if self.exists:
os.remove(self.path)
@property
def etag(self):
"""
Generates etag from file contents.
"""
CHUNKSIZE = 1024 * 64
from hashlib import md5
hash = md5()
with open(self.path) as fin:
chunk = fin.read(CHUNKSIZE)
while chunk:
hash_update(hash, chunk)
chunk = fin.read(CHUNKSIZE)
return hash.hexdigest()
|
hyde/fswrap | fswrap.py | File.make_temp | python | def make_temp(text):
import tempfile
(handle, path) = tempfile.mkstemp(text=True)
os.close(handle)
afile = File(path)
afile.write(text)
return afile | Creates a temprorary file and writes the `text` into it | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L271-L280 | [
"def write(self, text, encoding=\"utf-8\"):\n \"\"\"\n Writes the given text to the file using the given encoding.\n \"\"\"\n logger.info(\"Writing to %s\" % self)\n with codecs.open(self.path, 'w', encoding) as fout:\n fout.write(text)\n"
] | class File(FS):
"""
The File object.
"""
def __init__(self, path):
super(File, self).__init__(path)
@property
def name_without_extension(self):
"""
Returns the name of the FS object without its extension
"""
return os.path.splitext(self.name)[0]
@property
def extension(self):
"""
File extension prefixed with a dot.
"""
return os.path.splitext(self.path)[1]
@property
def kind(self):
"""
File extension without dot prefix.
"""
return self.extension.lstrip(".")
@property
def size(self):
"""
Size of this file.
"""
if not self.exists:
return -1
return os.path.getsize(self.path)
@property
def mimetype(self):
"""
Gets the mimetype of this file.
"""
(mime, _) = mimetypes.guess_type(self.path)
return mime
@property
def is_binary(self):
"""Return true if this is a binary file."""
with open(self.path, 'rb') as fin:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if b'\0' in chunk:
return True
if len(chunk) < CHUNKSIZE:
break
return False
@property
def is_text(self):
"""Return true if this is a text file."""
return (not self.is_binary)
@property
def is_image(self):
"""Return true if this is an image file."""
return self.mimetype.split("/")[0] == "image"
@property
def last_modified(self):
"""
Returns a datetime object representing the last modified time.
Calls os.path.getmtime.
"""
return datetime.fromtimestamp(os.path.getmtime(self.path))
def has_changed_since(self, basetime):
"""
Returns True if the file has been changed since the given time.
"""
return self.last_modified > basetime
def older_than(self, another_file):
"""
Checks if this file is older than the given file. Uses last_modified to
determine age.
"""
return self.last_modified < File(unicode(another_file)).last_modified
@staticmethod
def read_all(self, encoding='utf-8'):
"""
Reads from the file and returns the content as a string.
"""
logger.info("Reading everything from %s" % self)
with codecs.open(self.path, 'r', encoding) as fin:
read_text = fin.read()
return read_text
def write(self, text, encoding="utf-8"):
"""
Writes the given text to the file using the given encoding.
"""
logger.info("Writing to %s" % self)
with codecs.open(self.path, 'w', encoding) as fout:
fout.write(text)
def copy_to(self, destination):
"""
Copies the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copy(self.path, unicode(destination))
return target
def delete(self):
"""
Delete the file if it exists.
"""
if self.exists:
os.remove(self.path)
@property
def etag(self):
"""
Generates etag from file contents.
"""
CHUNKSIZE = 1024 * 64
from hashlib import md5
hash = md5()
with open(self.path) as fin:
chunk = fin.read(CHUNKSIZE)
while chunk:
hash_update(hash, chunk)
chunk = fin.read(CHUNKSIZE)
return hash.hexdigest()
|
hyde/fswrap | fswrap.py | File.read_all | python | def read_all(self, encoding='utf-8'):
logger.info("Reading everything from %s" % self)
with codecs.open(self.path, 'r', encoding) as fin:
read_text = fin.read()
return read_text | Reads from the file and returns the content as a string. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L282-L289 | null | class File(FS):
"""
The File object.
"""
def __init__(self, path):
super(File, self).__init__(path)
@property
def name_without_extension(self):
"""
Returns the name of the FS object without its extension
"""
return os.path.splitext(self.name)[0]
@property
def extension(self):
"""
File extension prefixed with a dot.
"""
return os.path.splitext(self.path)[1]
@property
def kind(self):
"""
File extension without dot prefix.
"""
return self.extension.lstrip(".")
@property
def size(self):
"""
Size of this file.
"""
if not self.exists:
return -1
return os.path.getsize(self.path)
@property
def mimetype(self):
"""
Gets the mimetype of this file.
"""
(mime, _) = mimetypes.guess_type(self.path)
return mime
@property
def is_binary(self):
"""Return true if this is a binary file."""
with open(self.path, 'rb') as fin:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if b'\0' in chunk:
return True
if len(chunk) < CHUNKSIZE:
break
return False
@property
def is_text(self):
"""Return true if this is a text file."""
return (not self.is_binary)
@property
def is_image(self):
"""Return true if this is an image file."""
return self.mimetype.split("/")[0] == "image"
@property
def last_modified(self):
"""
Returns a datetime object representing the last modified time.
Calls os.path.getmtime.
"""
return datetime.fromtimestamp(os.path.getmtime(self.path))
def has_changed_since(self, basetime):
"""
Returns True if the file has been changed since the given time.
"""
return self.last_modified > basetime
def older_than(self, another_file):
"""
Checks if this file is older than the given file. Uses last_modified to
determine age.
"""
return self.last_modified < File(unicode(another_file)).last_modified
@staticmethod
def make_temp(text):
"""
Creates a temprorary file and writes the `text` into it
"""
import tempfile
(handle, path) = tempfile.mkstemp(text=True)
os.close(handle)
afile = File(path)
afile.write(text)
return afile
def write(self, text, encoding="utf-8"):
"""
Writes the given text to the file using the given encoding.
"""
logger.info("Writing to %s" % self)
with codecs.open(self.path, 'w', encoding) as fout:
fout.write(text)
def copy_to(self, destination):
"""
Copies the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copy(self.path, unicode(destination))
return target
def delete(self):
"""
Delete the file if it exists.
"""
if self.exists:
os.remove(self.path)
@property
def etag(self):
"""
Generates etag from file contents.
"""
CHUNKSIZE = 1024 * 64
from hashlib import md5
hash = md5()
with open(self.path) as fin:
chunk = fin.read(CHUNKSIZE)
while chunk:
hash_update(hash, chunk)
chunk = fin.read(CHUNKSIZE)
return hash.hexdigest()
|
hyde/fswrap | fswrap.py | File.write | python | def write(self, text, encoding="utf-8"):
logger.info("Writing to %s" % self)
with codecs.open(self.path, 'w', encoding) as fout:
fout.write(text) | Writes the given text to the file using the given encoding. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L291-L297 | null | class File(FS):
"""
The File object.
"""
def __init__(self, path):
super(File, self).__init__(path)
@property
def name_without_extension(self):
"""
Returns the name of the FS object without its extension
"""
return os.path.splitext(self.name)[0]
@property
def extension(self):
"""
File extension prefixed with a dot.
"""
return os.path.splitext(self.path)[1]
@property
def kind(self):
"""
File extension without dot prefix.
"""
return self.extension.lstrip(".")
@property
def size(self):
"""
Size of this file.
"""
if not self.exists:
return -1
return os.path.getsize(self.path)
@property
def mimetype(self):
"""
Gets the mimetype of this file.
"""
(mime, _) = mimetypes.guess_type(self.path)
return mime
@property
def is_binary(self):
"""Return true if this is a binary file."""
with open(self.path, 'rb') as fin:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if b'\0' in chunk:
return True
if len(chunk) < CHUNKSIZE:
break
return False
@property
def is_text(self):
"""Return true if this is a text file."""
return (not self.is_binary)
@property
def is_image(self):
"""Return true if this is an image file."""
return self.mimetype.split("/")[0] == "image"
@property
def last_modified(self):
"""
Returns a datetime object representing the last modified time.
Calls os.path.getmtime.
"""
return datetime.fromtimestamp(os.path.getmtime(self.path))
def has_changed_since(self, basetime):
"""
Returns True if the file has been changed since the given time.
"""
return self.last_modified > basetime
def older_than(self, another_file):
"""
Checks if this file is older than the given file. Uses last_modified to
determine age.
"""
return self.last_modified < File(unicode(another_file)).last_modified
@staticmethod
def make_temp(text):
"""
Creates a temprorary file and writes the `text` into it
"""
import tempfile
(handle, path) = tempfile.mkstemp(text=True)
os.close(handle)
afile = File(path)
afile.write(text)
return afile
def read_all(self, encoding='utf-8'):
"""
Reads from the file and returns the content as a string.
"""
logger.info("Reading everything from %s" % self)
with codecs.open(self.path, 'r', encoding) as fin:
read_text = fin.read()
return read_text
def copy_to(self, destination):
"""
Copies the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copy(self.path, unicode(destination))
return target
def delete(self):
"""
Delete the file if it exists.
"""
if self.exists:
os.remove(self.path)
@property
def etag(self):
"""
Generates etag from file contents.
"""
CHUNKSIZE = 1024 * 64
from hashlib import md5
hash = md5()
with open(self.path) as fin:
chunk = fin.read(CHUNKSIZE)
while chunk:
hash_update(hash, chunk)
chunk = fin.read(CHUNKSIZE)
return hash.hexdigest()
|
hyde/fswrap | fswrap.py | File.copy_to | python | def copy_to(self, destination):
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copy(self.path, unicode(destination))
return target | Copies the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L299-L308 | [
"def __get_destination__(self, destination):\n \"\"\"\n Returns a File or Folder object that would represent this entity\n if it were copied or moved to `destination`.\n \"\"\"\n if isinstance(destination,\n File) or os.path.isfile(unicode(destination)):\n return destination\n... | class File(FS):
"""
The File object.
"""
def __init__(self, path):
super(File, self).__init__(path)
@property
def name_without_extension(self):
"""
Returns the name of the FS object without its extension
"""
return os.path.splitext(self.name)[0]
@property
def extension(self):
"""
File extension prefixed with a dot.
"""
return os.path.splitext(self.path)[1]
@property
def kind(self):
"""
File extension without dot prefix.
"""
return self.extension.lstrip(".")
@property
def size(self):
"""
Size of this file.
"""
if not self.exists:
return -1
return os.path.getsize(self.path)
@property
def mimetype(self):
"""
Gets the mimetype of this file.
"""
(mime, _) = mimetypes.guess_type(self.path)
return mime
@property
def is_binary(self):
"""Return true if this is a binary file."""
with open(self.path, 'rb') as fin:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if b'\0' in chunk:
return True
if len(chunk) < CHUNKSIZE:
break
return False
@property
def is_text(self):
"""Return true if this is a text file."""
return (not self.is_binary)
@property
def is_image(self):
"""Return true if this is an image file."""
return self.mimetype.split("/")[0] == "image"
@property
def last_modified(self):
"""
Returns a datetime object representing the last modified time.
Calls os.path.getmtime.
"""
return datetime.fromtimestamp(os.path.getmtime(self.path))
def has_changed_since(self, basetime):
"""
Returns True if the file has been changed since the given time.
"""
return self.last_modified > basetime
def older_than(self, another_file):
"""
Checks if this file is older than the given file. Uses last_modified to
determine age.
"""
return self.last_modified < File(unicode(another_file)).last_modified
@staticmethod
def make_temp(text):
"""
Creates a temprorary file and writes the `text` into it
"""
import tempfile
(handle, path) = tempfile.mkstemp(text=True)
os.close(handle)
afile = File(path)
afile.write(text)
return afile
def read_all(self, encoding='utf-8'):
"""
Reads from the file and returns the content as a string.
"""
logger.info("Reading everything from %s" % self)
with codecs.open(self.path, 'r', encoding) as fin:
read_text = fin.read()
return read_text
def write(self, text, encoding="utf-8"):
"""
Writes the given text to the file using the given encoding.
"""
logger.info("Writing to %s" % self)
with codecs.open(self.path, 'w', encoding) as fout:
fout.write(text)
def delete(self):
"""
Delete the file if it exists.
"""
if self.exists:
os.remove(self.path)
@property
def etag(self):
"""
Generates etag from file contents.
"""
CHUNKSIZE = 1024 * 64
from hashlib import md5
hash = md5()
with open(self.path) as fin:
chunk = fin.read(CHUNKSIZE)
while chunk:
hash_update(hash, chunk)
chunk = fin.read(CHUNKSIZE)
return hash.hexdigest()
|
hyde/fswrap | fswrap.py | File.etag | python | def etag(self):
CHUNKSIZE = 1024 * 64
from hashlib import md5
hash = md5()
with open(self.path) as fin:
chunk = fin.read(CHUNKSIZE)
while chunk:
hash_update(hash, chunk)
chunk = fin.read(CHUNKSIZE)
return hash.hexdigest() | Generates etag from file contents. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L318-L330 | null | class File(FS):
"""
The File object.
"""
def __init__(self, path):
super(File, self).__init__(path)
@property
def name_without_extension(self):
"""
Returns the name of the FS object without its extension
"""
return os.path.splitext(self.name)[0]
@property
def extension(self):
"""
File extension prefixed with a dot.
"""
return os.path.splitext(self.path)[1]
@property
def kind(self):
"""
File extension without dot prefix.
"""
return self.extension.lstrip(".")
@property
def size(self):
"""
Size of this file.
"""
if not self.exists:
return -1
return os.path.getsize(self.path)
@property
def mimetype(self):
"""
Gets the mimetype of this file.
"""
(mime, _) = mimetypes.guess_type(self.path)
return mime
@property
def is_binary(self):
"""Return true if this is a binary file."""
with open(self.path, 'rb') as fin:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if b'\0' in chunk:
return True
if len(chunk) < CHUNKSIZE:
break
return False
@property
def is_text(self):
"""Return true if this is a text file."""
return (not self.is_binary)
@property
def is_image(self):
"""Return true if this is an image file."""
return self.mimetype.split("/")[0] == "image"
@property
def last_modified(self):
"""
Returns a datetime object representing the last modified time.
Calls os.path.getmtime.
"""
return datetime.fromtimestamp(os.path.getmtime(self.path))
def has_changed_since(self, basetime):
"""
Returns True if the file has been changed since the given time.
"""
return self.last_modified > basetime
def older_than(self, another_file):
"""
Checks if this file is older than the given file. Uses last_modified to
determine age.
"""
return self.last_modified < File(unicode(another_file)).last_modified
@staticmethod
def make_temp(text):
"""
Creates a temprorary file and writes the `text` into it
"""
import tempfile
(handle, path) = tempfile.mkstemp(text=True)
os.close(handle)
afile = File(path)
afile.write(text)
return afile
def read_all(self, encoding='utf-8'):
"""
Reads from the file and returns the content as a string.
"""
logger.info("Reading everything from %s" % self)
with codecs.open(self.path, 'r', encoding) as fin:
read_text = fin.read()
return read_text
def write(self, text, encoding="utf-8"):
"""
Writes the given text to the file using the given encoding.
"""
logger.info("Writing to %s" % self)
with codecs.open(self.path, 'w', encoding) as fout:
fout.write(text)
def copy_to(self, destination):
"""
Copies the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copy(self.path, unicode(destination))
return target
def delete(self):
"""
Delete the file if it exists.
"""
if self.exists:
os.remove(self.path)
@property
|
hyde/fswrap | fswrap.py | FolderWalker.walk | python | def walk(self, walk_folders=False, walk_files=False):
if not walk_files and not walk_folders:
return
for root, _, a_files in os.walk(self.folder.path, followlinks=True):
folder = Folder(root)
if walk_folders:
yield folder
if walk_files:
for a_file in a_files:
if (not self.pattern or fnmatch.fnmatch(a_file,
self.pattern)):
yield File(folder.child(a_file)) | A simple generator that yields a File or Folder object based on
the arguments. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L379-L396 | [
"def child(self, fragment):\n \"\"\"\n Returns a path of a child item represented by `fragment`.\n \"\"\"\n return os.path.join(self.path, FS(fragment).path)\n"
] | class FolderWalker(FSVisitor):
"""
Walks the entire hirearchy of this directory starting with itself.
If a pattern is provided, only the files that match the pattern are
processed.
"""
def walk_all(self):
"""
Yield both Files and Folders as the tree is walked.
"""
return self.walk(walk_folders=True, walk_files=True)
def walk_files(self):
"""
Yield only Files.
"""
return self.walk(walk_folders=False, walk_files=True)
def walk_folders(self):
"""
Yield only Folders.
"""
return self.walk(walk_folders=True, walk_files=False)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Automatically walk the folder when the context manager is exited.
Calls self.visit_folder first and then calls self.visit_file for
any files found. After all files and folders have been exhausted
self.visit_complete is called.
If visitor.visit_folder returns False, the files in the folder are not
processed.
"""
def __visit_folder__(folder):
process_folder = True
if hasattr(self, 'visit_folder'):
process_folder = self.visit_folder(folder)
# If there is no return value assume true
#
if process_folder is None:
process_folder = True
return process_folder
def __visit_file__(a_file):
if hasattr(self, 'visit_file'):
self.visit_file(a_file)
def __visit_complete__():
if hasattr(self, 'visit_complete'):
self.visit_complete()
for root, dirs, a_files in os.walk(self.folder.path, followlinks=True):
folder = Folder(root)
if not __visit_folder__(folder):
dirs[:] = []
continue
for a_file in a_files:
if not self.pattern or fnmatch.fnmatch(a_file, self.pattern):
__visit_file__(File(folder.child(a_file)))
__visit_complete__()
|
hyde/fswrap | fswrap.py | FolderLister.list | python | def list(self, list_folders=False, list_files=False):
a_files = os.listdir(self.folder.path)
for a_file in a_files:
path = self.folder.child(a_file)
if os.path.isdir(path):
if list_folders:
yield Folder(path)
elif list_files:
if not self.pattern or fnmatch.fnmatch(a_file, self.pattern):
yield File(path) | A simple generator that yields a File or Folder object based on
the arguments. | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L466-L480 | null | class FolderLister(FSVisitor):
"""
Lists the contents of this directory.
If a pattern is provided, only the files that match the pattern are
processed.
"""
def list_all(self):
"""
Yield both Files and Folders as the folder is listed.
"""
return self.list(list_folders=True, list_files=True)
def list_files(self):
"""
Yield only Files.
"""
return self.list(list_folders=False, list_files=True)
def list_folders(self):
"""
Yield only Folders.
"""
return self.list(list_folders=True, list_files=False)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Automatically list the folder contents when the context manager
is exited.
Calls self.visit_folder first and then calls self.visit_file for
any files found. After all files and folders have been exhausted
self.visit_complete is called.
"""
a_files = os.listdir(self.folder.path)
for a_file in a_files:
path = self.folder.child(a_file)
if os.path.isdir(path) and hasattr(self, 'visit_folder'):
self.visit_folder(Folder(path))
elif hasattr(self, 'visit_file'):
if not self.pattern or fnmatch.fnmatch(a_file, self.pattern):
self.visit_file(File(path))
if hasattr(self, 'visit_complete'):
self.visit_complete()
|
hyde/fswrap | fswrap.py | Folder.child_folder | python | def child_folder(self, fragment):
return Folder(os.path.join(self.path, Folder(fragment).path)) | Returns a folder object by combining the fragment to this folder's path | train | https://github.com/hyde/fswrap/blob/41e4ad6f7e9ba73eabe61bd97847cd284e3edbd2/fswrap.py#L531-L535 | null | class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path)
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self
def zip(self, target=None, basepath=None):
"""
Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`.
"""
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath))
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def rename_to(self, destination_name):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def _create_target_tree(self, target):
"""
There is a bug in dir_util that makes `copy_tree` crash if a folder in
the tree has been deleted before and readded now. To workaround the
bug, we first walk the tree and create directories that are needed.
"""
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make()
def copy_contents_to(self, destination):
"""
Copies the contents of this directory to the given destination.
Returns a Folder object that represents the moved directory.
"""
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.