repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
JonathanRaiman/pytreebank
|
pytreebank/treelstm.py
|
assign_texts
|
python
|
def assign_texts(node, words, next_idx=0):
    """Walk the tree depth-first and attach words to its leaves.

    Leaves receive words in left-to-right order; internal nodes simply
    thread the running word index through their children.

    Arguments:
    ----------
        node : tree node exposing a ``children`` list (leaves get ``text`` set).
        words : list<str>, words to distribute over the leaves.
        next_idx : int, index of the next unassigned word.

    Returns:
    --------
        int : index just past the last word consumed.
    """
    if node.children:
        for subtree in node.children:
            next_idx = assign_texts(subtree, words, next_idx)
        return next_idx
    # Leaf: take the next word and advance the cursor.
    node.text = words[next_idx]
    return next_idx + 1
|
Recursively assign the words to nodes by finding and
assigning strings to the leaves of a tree in left
to right order.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/treelstm.py#L44-L56
|
[
"def assign_texts(node, words, next_idx=0):\n \"\"\"\n Recursively assign the words to nodes by finding and\n assigning strings to the leaves of a tree in left\n to right order.\n \"\"\"\n if len(node.children) == 0:\n node.text = words[next_idx]\n return next_idx + 1\n else:\n for child in node.children:\n next_idx = assign_texts(child, words, next_idx)\n return next_idx\n"
] |
"""
Special loading methods for importing dataset as processed
by the TreeLSTM code from https://github.com/stanfordnlp/treelstm
"""
from .labeled_trees import LabeledTree
import codecs
def import_tree_corpus(labels_path, parents_path, texts_path):
    """Load a treebank exported by the TreeLSTM data generation scripts.

    Arguments:
    ----------
        labels_path : str, path to the per-node labels file (should be in
            data/sst/labels.txt).
        parents_path : str, path to the parent-pointer file (should be in
            data/sst/parents.txt).
        texts_path : str, path to the tokenized sentences file (should be
            in data/sst/sents.txt).

    Returns:
    --------
        list<LabeledTree> : loaded example trees.
    """
    with codecs.open(labels_path, "r", "UTF-8") as handle:
        label_lines = handle.readlines()
    with codecs.open(parents_path, "r", "UTF-8") as handle:
        parent_lines = handle.readlines()
    with codecs.open(texts_path, "r", "UTF-8") as handle:
        word_lines = handle.readlines()
    # The three files are parallel: one line per example in each.
    assert len(label_lines) == len(parent_lines)
    assert len(label_lines) == len(word_lines)
    trees = []
    for label_line, parent_line, word_line in zip(label_lines, parent_lines, word_lines):
        # Offset labels by +2; the export presumably stores sentiment in
        # [-2, 2] — TODO confirm against the TreeLSTM data scripts.
        labels = [int(token) + 2 for token in label_line.strip().split(" ")]
        parents = [int(token) for token in parent_line.strip().split(" ")]
        words = word_line.strip().split(" ")
        assert len(labels) == len(parents)
        trees.append(read_tree(parents, labels, words))
    return trees
def read_tree(parents, labels, words):
    """
    Take as input a list of integers for parents
    and labels, along with a list of words, and
    reconstruct a LabeledTree.

    Arguments:
    ----------
        parents : list<int>, 1-based parent index for each node
            (0 marks the root's parent, -1 marks entries to skip).
        labels : list<int>, per-node labels, parallel to ``parents``.
        words : list<str>, leaf strings assigned left to right.

    Returns:
    --------
        LabeledTree : root of the reconstructed tree.
    """
    trees = {}
    root = None
    for i in range(1, len(parents) + 1):
        # Start a new chain only from nodes not yet built and not marked -1.
        if not i in trees and parents[i - 1] != - 1:
            idx = i
            prev = None
            # Climb towards the root, creating a node per step, until we hit
            # either an already-built ancestor or the root marker (0).
            while True:
                parent = parents[idx - 1]
                if parent == -1:
                    break
                tree = LabeledTree()
                if prev is not None:
                    # Link the node built on the previous step as a child.
                    tree.add_child(prev)
                trees[idx] = tree
                tree.label = labels[idx - 1]
                if trees.get(parent) is not None:
                    # Ancestor already exists: attach and stop climbing.
                    trees[parent].add_child(tree)
                    break
                elif parent == 0:
                    # Parent index 0 means this node is the root.
                    root = tree
                    break
                else:
                    prev = tree
                    idx = parent
    # Every word must land on exactly one leaf, in order.
    assert assign_texts(root, words) == len(words)
    return root
|
JonathanRaiman/pytreebank
|
pytreebank/treelstm.py
|
read_tree
|
python
|
def read_tree(parents, labels, words):
    """Reconstruct a LabeledTree from parent pointers, labels, and words.

    Arguments:
    ----------
        parents : list<int>, 1-based parent index for each node
            (0 marks the root's parent, -1 marks entries to skip).
        labels : list<int>, per-node labels, parallel to ``parents``.
        words : list<str>, leaf strings assigned left to right.

    Returns:
    --------
        LabeledTree : root of the reconstructed tree.
    """
    trees = {}
    root = None
    for i in range(1, len(parents) + 1):
        # Start a new chain only from nodes not yet built and not marked -1.
        if not i in trees and parents[i - 1] != - 1:
            idx = i
            prev = None
            # Climb towards the root, creating a node per step, until we hit
            # either an already-built ancestor or the root marker (0).
            while True:
                parent = parents[idx - 1]
                if parent == -1:
                    break
                tree = LabeledTree()
                if prev is not None:
                    # Link the node built on the previous step as a child.
                    tree.add_child(prev)
                trees[idx] = tree
                tree.label = labels[idx - 1]
                if trees.get(parent) is not None:
                    # Ancestor already exists: attach and stop climbing.
                    trees[parent].add_child(tree)
                    break
                elif parent == 0:
                    # Parent index 0 means this node is the root.
                    root = tree
                    break
                else:
                    prev = tree
                    idx = parent
    # Every word must land on exactly one leaf, in order.
    assert assign_texts(root, words) == len(words)
    return root
|
Take as input a list of integers for parents
and labels, along with a list of words, and
reconstruct a LabeledTree.
|
train
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/treelstm.py#L58-L89
|
[
"def assign_texts(node, words, next_idx=0):\n \"\"\"\n Recursively assign the words to nodes by finding and\n assigning strings to the leaves of a tree in left\n to right order.\n \"\"\"\n if len(node.children) == 0:\n node.text = words[next_idx]\n return next_idx + 1\n else:\n for child in node.children:\n next_idx = assign_texts(child, words, next_idx)\n return next_idx\n"
] |
"""
Special loading methods for importing dataset as processed
by the TreeLSTM code from https://github.com/stanfordnlp/treelstm
"""
from .labeled_trees import LabeledTree
import codecs
def import_tree_corpus(labels_path, parents_path, texts_path):
    """Load a treebank exported by the TreeLSTM data generation scripts.

    Arguments:
    ----------
        labels_path : str, path to the per-node labels file (should be in
            data/sst/labels.txt).
        parents_path : str, path to the parent-pointer file (should be in
            data/sst/parents.txt).
        texts_path : str, path to the tokenized sentences file (should be
            in data/sst/sents.txt).

    Returns:
    --------
        list<LabeledTree> : loaded example trees.
    """
    with codecs.open(labels_path, "r", "UTF-8") as handle:
        label_lines = handle.readlines()
    with codecs.open(parents_path, "r", "UTF-8") as handle:
        parent_lines = handle.readlines()
    with codecs.open(texts_path, "r", "UTF-8") as handle:
        word_lines = handle.readlines()
    # The three files are parallel: one line per example in each.
    assert len(label_lines) == len(parent_lines)
    assert len(label_lines) == len(word_lines)
    trees = []
    for label_line, parent_line, word_line in zip(label_lines, parent_lines, word_lines):
        # Offset labels by +2; the export presumably stores sentiment in
        # [-2, 2] — TODO confirm against the TreeLSTM data scripts.
        labels = [int(token) + 2 for token in label_line.strip().split(" ")]
        parents = [int(token) for token in parent_line.strip().split(" ")]
        words = word_line.strip().split(" ")
        assert len(labels) == len(parents)
        trees.append(read_tree(parents, labels, words))
    return trees
def assign_texts(node, words, next_idx=0):
    """Walk the tree depth-first and attach words to its leaves.

    Leaves receive words in left-to-right order; internal nodes simply
    thread the running word index through their children.

    Arguments:
    ----------
        node : tree node exposing a ``children`` list (leaves get ``text`` set).
        words : list<str>, words to distribute over the leaves.
        next_idx : int, index of the next unassigned word.

    Returns:
    --------
        int : index just past the last word consumed.
    """
    if node.children:
        for subtree in node.children:
            next_idx = assign_texts(subtree, words, next_idx)
        return next_idx
    # Leaf: take the next word and advance the cursor.
    node.text = words[next_idx]
    return next_idx + 1
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.check_relations
|
python
|
def check_relations(self, relations):
    """Recursive function which checks if a relation is valid.

    Each relation is a dotted path (e.g. ``"author.comments"``); the first
    segment must name a relationship field on this schema, and the remainder
    is validated recursively against that field's schema.
    """
    for rel in relations:
        if not rel:
            continue
        head, sep, rest = rel.partition('.')
        if head not in self.fields:
            raise ValueError('Unknown field "{}"'.format(head))
        field = self.fields[head]
        if not isinstance(field, BaseRelationship):
            raise ValueError('Can only include relationships. "{}" is a "{}"'
                             .format(field.name, field.__class__.__name__))
        # Mark the relationship so its data is included when dumping.
        field.include_data = True
        if sep:
            field.schema.check_relations([rest])
|
Recursive function which checks if a relation is valid.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L102-L120
| null |
class Schema(ma.Schema):
    """Schema class that formats data according to JSON API 1.0.
    Must define the ``type_`` `class Meta` option.
    Example: ::
        from marshmallow_jsonapi import Schema, fields
        def dasherize(text):
            return text.replace('_', '-')
        class PostSchema(Schema):
            id = fields.Str(dump_only=True)  # Required
            title = fields.Str()
            author = fields.HyperlinkRelated(
                '/authors/{author_id}',
                url_kwargs={'author_id': '<author.id>'},
            )
            comments = fields.HyperlinkRelated(
                '/posts/{post_id}/comments',
                url_kwargs={'post_id': '<id>'},
                # Include resource linkage
                many=True, include_resource_linkage=True,
                type_='comments'
            )
            class Meta:
                type_ = 'posts'  # Required
                inflect = dasherize
    """
    class Meta:
        """Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
        the addition of:
        * ``type_`` - required, the JSON API resource type as a string.
        * ``inflect`` - optional, an inflection function to modify attribute names.
        * ``self_url`` - optional, URL to use to `self` in links
        * ``self_url_kwargs`` - optional, replacement fields for `self_url`.
          String arguments enclosed in ``< >`` will be interpreted as attributes
          to pull from the schema data.
        * ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
          when a collection of resources is returned.
        """
        pass

    def __init__(self, *args, **kwargs):
        # ``include_data`` is a schema-level option, not a marshmallow kwarg,
        # so pop it before delegating to the base constructor.
        self.include_data = kwargs.pop('include_data', ())
        super(Schema, self).__init__(*args, **kwargs)
        if self.include_data:
            # NOTE(review): check_relations is not defined in this excerpt of
            # the class — it is expected on the class at runtime; confirm.
            self.check_relations(self.include_data)
        if not self.opts.type_:
            raise ValueError('Must specify type_ class Meta option')
        if 'id' not in self.fields:
            raise ValueError('Must have an `id` field')
        if self.opts.self_url_kwargs and not self.opts.self_url:
            raise ValueError(
                'Must specify `self_url` Meta option when '
                '`self_url_kwargs` is specified',
            )
        # Per-instance scratch state populated during load/dump.
        self.included_data = {}
        self.document_meta = {}

    OPTIONS_CLASS = SchemaOpts

    @ma.post_dump(pass_many=True)
    def format_json_api_response(self, data, many):
        """Post-dump hook that formats serialized data as a top-level JSON API object.
        See: http://jsonapi.org/format/#document-top-level
        """
        ret = self.format_items(data, many)
        ret = self.wrap_response(ret, many)
        ret = self.render_included_data(ret)
        ret = self.render_meta_document(ret)
        return ret

    def render_included_data(self, data):
        """Attach the top-level ``included`` section when included data exists."""
        if not self.included_data:
            return data
        data['included'] = list(self.included_data.values())
        return data

    def render_meta_document(self, data):
        """Attach the top-level ``meta`` section when document meta exists."""
        if not self.document_meta:
            return data
        data['meta'] = self.document_meta
        return data

    def unwrap_item(self, item):
        """Flatten one JSON API resource object into a loadable dict.

        Validates the ``type`` member against ``opts.type_`` and folds
        ``attributes``, ``relationships``, and meta members into one payload.
        """
        if 'type' not in item:
            raise ma.ValidationError([
                {
                    'detail': '`data` object must include `type` key.',
                    'source': {
                        'pointer': '/data',
                    },
                },
            ])
        if item['type'] != self.opts.type_:
            raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
        payload = self.dict_class()
        if 'id' in item:
            payload['id'] = item['id']
        if 'meta' in item:
            payload[_RESOURCE_META_LOAD_FROM] = item['meta']
        if self.document_meta:
            payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
        for key, value in iteritems(item.get('attributes', {})):
            payload[key] = value
        for key, value in iteritems(item.get('relationships', {})):
            # Fold included data related to this relationship into the item, so
            # that we can deserialize the whole objects instead of just IDs.
            if self.included_data:
                included_data = []
                inner_data = value.get('data', [])
                # Data may be ``None`` (for empty relationships), but we only
                # need to process it when it's present.
                if inner_data:
                    if not is_collection(inner_data):
                        included_data = next(
                            self._extract_from_included(inner_data),
                            None,
                        )
                    else:
                        for data in inner_data:
                            included_data.extend(
                                self._extract_from_included(data),
                            )
                if included_data:
                    value['data'] = included_data
            payload[key] = value
        return payload

    @ma.pre_load(pass_many=True)
    def unwrap_request(self, data, many):
        """Pre-load hook: strip the top-level ``data`` envelope and unwrap
        each resource object before normal field loading runs.
        """
        if 'data' not in data:
            raise ma.ValidationError([{
                'detail': 'Object must include `data` key.',
                'source': {
                    'pointer': '/',
                },
            }])
        data = data['data']
        if many:
            if not is_collection(data):
                raise ma.ValidationError([{
                    'detail': '`data` expected to be a collection.',
                    'source': {
                        'pointer': '/data',
                    },
                }])
            return [self.unwrap_item(each) for each in data]
        return self.unwrap_item(data)

    def on_bind_field(self, field_name, field_obj):
        """Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
        load_from (on marshmallow 2) to the inflected form of field_name.
        """
        if _MARSHMALLOW_VERSION_INFO[0] < 3:
            if not field_obj.load_from:
                field_obj.load_from = self.inflect(field_name)
        else:
            if not field_obj.data_key:
                field_obj.data_key = self.inflect(field_name)
        return None

    def _do_load(self, data, many=None, **kwargs):
        """Override `marshmallow.Schema._do_load` for custom JSON API handling.
        Specifically, we do this to format errors as JSON API Error objects,
        and to support loading of included data.
        """
        many = self.many if many is None else bool(many)
        # Store this on the instance so we have access to the included data
        # when processing relationships (``included`` is outside of the
        # ``data``).
        self.included_data = data.get('included', {})
        self.document_meta = data.get('meta', {})
        try:
            result = super(Schema, self)._do_load(data, many, **kwargs)
        except ValidationError as err:  # strict mode
            error_messages = err.messages
            if '_schema' in error_messages:
                error_messages = error_messages['_schema']
            formatted_messages = self.format_errors(error_messages, many=many)
            err.messages = formatted_messages
            raise err
        else:
            # On marshmallow 2, _do_load returns a tuple (load_data, errors)
            if _MARSHMALLOW_VERSION_INFO[0] < 3:
                data, error_messages = result
                if '_schema' in error_messages:
                    error_messages = error_messages['_schema']
                formatted_messages = self.format_errors(error_messages, many=many)
                return data, formatted_messages
            return result

    def _extract_from_included(self, data):
        """Extract included data matching the items in ``data``.
        For each item in ``data``, extract the full data from the included
        data.
        """
        return (item for item in self.included_data
                if item['type'] == data['type'] and
                str(item['id']) == str(data['id']))

    def inflect(self, text):
        """Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
        do nothing.
        """
        return self.opts.inflect(text) if self.opts.inflect else text

    ### Overridable hooks ###
    def format_errors(self, errors, many):
        """Format validation errors as JSON Error objects."""
        if not errors:
            return {}
        if isinstance(errors, (list, tuple)):
            return {'errors': errors}
        formatted_errors = []
        if many:
            # When many, errors are keyed by item index first.
            for index, errors in iteritems(errors):
                for field_name, field_errors in iteritems(errors):
                    formatted_errors.extend([
                        self.format_error(field_name, message, index=index)
                        for message in field_errors
                    ])
        else:
            for field_name, field_errors in iteritems(errors):
                formatted_errors.extend([
                    self.format_error(field_name, message)
                    for message in field_errors
                ])
        return {'errors': formatted_errors}

    def format_error(self, field_name, message, index=None):
        """Override-able hook to format a single error message as an Error object.
        See: http://jsonapi.org/format/#error-objects
        """
        pointer = ['/data']
        if index is not None:
            pointer.append(str(index))
        relationship = isinstance(
            self.declared_fields.get(field_name), BaseRelationship,
        )
        if relationship:
            pointer.append('relationships')
        elif field_name != 'id':
            # JSONAPI identifier is a special field that exists above the attribute object.
            pointer.append('attributes')
        pointer.append(self.inflect(field_name))
        if relationship:
            pointer.append('data')
        return {
            'detail': message,
            'source': {
                'pointer': '/'.join(pointer),
            },
        }

    def format_item(self, item):
        """Format a single datum as a Resource object.
        See: http://jsonapi.org/format/#document-resource-objects
        """
        # http://jsonapi.org/format/#document-top-level
        # Primary data MUST be either... a single resource object, a single resource
        # identifier object, or null, for requests that target single resources
        if not item:
            return None
        ret = self.dict_class()
        ret[TYPE] = self.opts.type_
        # Get the schema attributes so we can confirm `dump-to` values exist
        attributes = {
            (get_dump_key(self.fields[field]) or field): field
            for field in self.fields
        }
        for field_name, value in iteritems(item):
            attribute = attributes[field_name]
            if attribute == ID:
                ret[ID] = value
            elif isinstance(self.fields[attribute], DocumentMeta):
                if not self.document_meta:
                    self.document_meta = self.dict_class()
                self.document_meta.update(value)
            elif isinstance(self.fields[attribute], ResourceMeta):
                if 'meta' not in ret:
                    ret['meta'] = self.dict_class()
                ret['meta'].update(value)
            elif isinstance(self.fields[attribute], BaseRelationship):
                if value:
                    if 'relationships' not in ret:
                        ret['relationships'] = self.dict_class()
                    ret['relationships'][self.inflect(field_name)] = value
            else:
                if 'attributes' not in ret:
                    ret['attributes'] = self.dict_class()
                ret['attributes'][self.inflect(field_name)] = value
        links = self.get_resource_links(item)
        if links:
            ret['links'] = links
        return ret

    def format_items(self, data, many):
        """Format data as a Resource object or list of Resource objects.
        See: http://jsonapi.org/format/#document-resource-objects
        """
        if many:
            return [self.format_item(item) for item in data]
        else:
            return self.format_item(data)

    def get_top_level_links(self, data, many):
        """Hook for adding links to the root of the response data."""
        self_link = None
        if many:
            if self.opts.self_url_many:
                self_link = self.generate_url(self.opts.self_url_many)
        else:
            if self.opts.self_url:
                self_link = data.get('links', {}).get('self', None)
        return {'self': self_link}

    def get_resource_links(self, item):
        """Hook for adding links to a resource object."""
        if self.opts.self_url:
            ret = self.dict_class()
            kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
            ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
            return ret
        return None

    def wrap_response(self, data, many):
        """Wrap data and links according to the JSON API """
        ret = {'data': data}
        # self_url_many is still valid when there isn't any data, but self_url
        # may only be included if there is data in the ret
        if many or data:
            top_level_links = self.get_top_level_links(data, many)
            if top_level_links['self']:
                ret['links'] = top_level_links
        return ret

    def generate_url(self, link, **kwargs):
        """Generate URL with any kwargs interpolated."""
        return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.format_json_api_response
|
python
|
def format_json_api_response(self, data, many):
    """Post-dump hook that formats serialized data as a top-level JSON API object.
    See: http://jsonapi.org/format/#document-top-level
    """
    response = self.format_items(data, many)
    response = self.wrap_response(response, many)
    # Attach the top-level "included" and "meta" sections when present.
    for render in (self.render_included_data, self.render_meta_document):
        response = render(response)
    return response
|
Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L123-L132
| null |
class Schema(ma.Schema):
    """Schema class that formats data according to JSON API 1.0.
    Must define the ``type_`` `class Meta` option.
    Example: ::
        from marshmallow_jsonapi import Schema, fields
        def dasherize(text):
            return text.replace('_', '-')
        class PostSchema(Schema):
            id = fields.Str(dump_only=True)  # Required
            title = fields.Str()
            author = fields.HyperlinkRelated(
                '/authors/{author_id}',
                url_kwargs={'author_id': '<author.id>'},
            )
            comments = fields.HyperlinkRelated(
                '/posts/{post_id}/comments',
                url_kwargs={'post_id': '<id>'},
                # Include resource linkage
                many=True, include_resource_linkage=True,
                type_='comments'
            )
            class Meta:
                type_ = 'posts'  # Required
                inflect = dasherize
    """
    class Meta:
        """Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
        the addition of:
        * ``type_`` - required, the JSON API resource type as a string.
        * ``inflect`` - optional, an inflection function to modify attribute names.
        * ``self_url`` - optional, URL to use to `self` in links
        * ``self_url_kwargs`` - optional, replacement fields for `self_url`.
          String arguments enclosed in ``< >`` will be interpreted as attributes
          to pull from the schema data.
        * ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
          when a collection of resources is returned.
        """
        pass

    def __init__(self, *args, **kwargs):
        # ``include_data`` is a schema-level option, not a marshmallow kwarg,
        # so pop it before delegating to the base constructor.
        self.include_data = kwargs.pop('include_data', ())
        super(Schema, self).__init__(*args, **kwargs)
        if self.include_data:
            self.check_relations(self.include_data)
        if not self.opts.type_:
            raise ValueError('Must specify type_ class Meta option')
        if 'id' not in self.fields:
            raise ValueError('Must have an `id` field')
        if self.opts.self_url_kwargs and not self.opts.self_url:
            raise ValueError(
                'Must specify `self_url` Meta option when '
                '`self_url_kwargs` is specified',
            )
        # Per-instance scratch state populated during load/dump.
        self.included_data = {}
        self.document_meta = {}

    OPTIONS_CLASS = SchemaOpts

    def check_relations(self, relations):
        """Recursive function which checks if a relation is valid."""
        for rel in relations:
            if not rel:
                continue
            fields = rel.split('.', 1)
            local_field = fields[0]
            if local_field not in self.fields:
                raise ValueError('Unknown field "{}"'.format(local_field))
            field = self.fields[local_field]
            if not isinstance(field, BaseRelationship):
                raise ValueError('Can only include relationships. "{}" is a "{}"'
                                 .format(field.name, field.__class__.__name__))
            # Mark the relationship so its data is included when dumping.
            field.include_data = True
            if len(fields) > 1:
                field.schema.check_relations(fields[1:])

    # NOTE(review): in this copy the decorator sits above
    # render_included_data; upstream it decorates
    # format_json_api_response — confirm against the repository.
    @ma.post_dump(pass_many=True)
    def render_included_data(self, data):
        """Attach the top-level ``included`` section when included data exists."""
        if not self.included_data:
            return data
        data['included'] = list(self.included_data.values())
        return data

    def render_meta_document(self, data):
        """Attach the top-level ``meta`` section when document meta exists."""
        if not self.document_meta:
            return data
        data['meta'] = self.document_meta
        return data

    def unwrap_item(self, item):
        """Flatten one JSON API resource object into a loadable dict.

        Validates the ``type`` member against ``opts.type_`` and folds
        ``attributes``, ``relationships``, and meta members into one payload.
        """
        if 'type' not in item:
            raise ma.ValidationError([
                {
                    'detail': '`data` object must include `type` key.',
                    'source': {
                        'pointer': '/data',
                    },
                },
            ])
        if item['type'] != self.opts.type_:
            raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
        payload = self.dict_class()
        if 'id' in item:
            payload['id'] = item['id']
        if 'meta' in item:
            payload[_RESOURCE_META_LOAD_FROM] = item['meta']
        if self.document_meta:
            payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
        for key, value in iteritems(item.get('attributes', {})):
            payload[key] = value
        for key, value in iteritems(item.get('relationships', {})):
            # Fold included data related to this relationship into the item, so
            # that we can deserialize the whole objects instead of just IDs.
            if self.included_data:
                included_data = []
                inner_data = value.get('data', [])
                # Data may be ``None`` (for empty relationships), but we only
                # need to process it when it's present.
                if inner_data:
                    if not is_collection(inner_data):
                        included_data = next(
                            self._extract_from_included(inner_data),
                            None,
                        )
                    else:
                        for data in inner_data:
                            included_data.extend(
                                self._extract_from_included(data),
                            )
                if included_data:
                    value['data'] = included_data
            payload[key] = value
        return payload

    @ma.pre_load(pass_many=True)
    def unwrap_request(self, data, many):
        """Pre-load hook: strip the top-level ``data`` envelope and unwrap
        each resource object before normal field loading runs.
        """
        if 'data' not in data:
            raise ma.ValidationError([{
                'detail': 'Object must include `data` key.',
                'source': {
                    'pointer': '/',
                },
            }])
        data = data['data']
        if many:
            if not is_collection(data):
                raise ma.ValidationError([{
                    'detail': '`data` expected to be a collection.',
                    'source': {
                        'pointer': '/data',
                    },
                }])
            return [self.unwrap_item(each) for each in data]
        return self.unwrap_item(data)

    def on_bind_field(self, field_name, field_obj):
        """Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
        load_from (on marshmallow 2) to the inflected form of field_name.
        """
        if _MARSHMALLOW_VERSION_INFO[0] < 3:
            if not field_obj.load_from:
                field_obj.load_from = self.inflect(field_name)
        else:
            if not field_obj.data_key:
                field_obj.data_key = self.inflect(field_name)
        return None

    def _do_load(self, data, many=None, **kwargs):
        """Override `marshmallow.Schema._do_load` for custom JSON API handling.
        Specifically, we do this to format errors as JSON API Error objects,
        and to support loading of included data.
        """
        many = self.many if many is None else bool(many)
        # Store this on the instance so we have access to the included data
        # when processing relationships (``included`` is outside of the
        # ``data``).
        self.included_data = data.get('included', {})
        self.document_meta = data.get('meta', {})
        try:
            result = super(Schema, self)._do_load(data, many, **kwargs)
        except ValidationError as err:  # strict mode
            error_messages = err.messages
            if '_schema' in error_messages:
                error_messages = error_messages['_schema']
            formatted_messages = self.format_errors(error_messages, many=many)
            err.messages = formatted_messages
            raise err
        else:
            # On marshmallow 2, _do_load returns a tuple (load_data, errors)
            if _MARSHMALLOW_VERSION_INFO[0] < 3:
                data, error_messages = result
                if '_schema' in error_messages:
                    error_messages = error_messages['_schema']
                formatted_messages = self.format_errors(error_messages, many=many)
                return data, formatted_messages
            return result

    def _extract_from_included(self, data):
        """Extract included data matching the items in ``data``.
        For each item in ``data``, extract the full data from the included
        data.
        """
        return (item for item in self.included_data
                if item['type'] == data['type'] and
                str(item['id']) == str(data['id']))

    def inflect(self, text):
        """Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
        do nothing.
        """
        return self.opts.inflect(text) if self.opts.inflect else text

    ### Overridable hooks ###
    def format_errors(self, errors, many):
        """Format validation errors as JSON Error objects."""
        if not errors:
            return {}
        if isinstance(errors, (list, tuple)):
            return {'errors': errors}
        formatted_errors = []
        if many:
            # When many, errors are keyed by item index first.
            for index, errors in iteritems(errors):
                for field_name, field_errors in iteritems(errors):
                    formatted_errors.extend([
                        self.format_error(field_name, message, index=index)
                        for message in field_errors
                    ])
        else:
            for field_name, field_errors in iteritems(errors):
                formatted_errors.extend([
                    self.format_error(field_name, message)
                    for message in field_errors
                ])
        return {'errors': formatted_errors}

    def format_error(self, field_name, message, index=None):
        """Override-able hook to format a single error message as an Error object.
        See: http://jsonapi.org/format/#error-objects
        """
        pointer = ['/data']
        if index is not None:
            pointer.append(str(index))
        relationship = isinstance(
            self.declared_fields.get(field_name), BaseRelationship,
        )
        if relationship:
            pointer.append('relationships')
        elif field_name != 'id':
            # JSONAPI identifier is a special field that exists above the attribute object.
            pointer.append('attributes')
        pointer.append(self.inflect(field_name))
        if relationship:
            pointer.append('data')
        return {
            'detail': message,
            'source': {
                'pointer': '/'.join(pointer),
            },
        }

    def format_item(self, item):
        """Format a single datum as a Resource object.
        See: http://jsonapi.org/format/#document-resource-objects
        """
        # http://jsonapi.org/format/#document-top-level
        # Primary data MUST be either... a single resource object, a single resource
        # identifier object, or null, for requests that target single resources
        if not item:
            return None
        ret = self.dict_class()
        ret[TYPE] = self.opts.type_
        # Get the schema attributes so we can confirm `dump-to` values exist
        attributes = {
            (get_dump_key(self.fields[field]) or field): field
            for field in self.fields
        }
        for field_name, value in iteritems(item):
            attribute = attributes[field_name]
            if attribute == ID:
                ret[ID] = value
            elif isinstance(self.fields[attribute], DocumentMeta):
                if not self.document_meta:
                    self.document_meta = self.dict_class()
                self.document_meta.update(value)
            elif isinstance(self.fields[attribute], ResourceMeta):
                if 'meta' not in ret:
                    ret['meta'] = self.dict_class()
                ret['meta'].update(value)
            elif isinstance(self.fields[attribute], BaseRelationship):
                if value:
                    if 'relationships' not in ret:
                        ret['relationships'] = self.dict_class()
                    ret['relationships'][self.inflect(field_name)] = value
            else:
                if 'attributes' not in ret:
                    ret['attributes'] = self.dict_class()
                ret['attributes'][self.inflect(field_name)] = value
        links = self.get_resource_links(item)
        if links:
            ret['links'] = links
        return ret

    def format_items(self, data, many):
        """Format data as a Resource object or list of Resource objects.
        See: http://jsonapi.org/format/#document-resource-objects
        """
        if many:
            return [self.format_item(item) for item in data]
        else:
            return self.format_item(data)

    def get_top_level_links(self, data, many):
        """Hook for adding links to the root of the response data."""
        self_link = None
        if many:
            if self.opts.self_url_many:
                self_link = self.generate_url(self.opts.self_url_many)
        else:
            if self.opts.self_url:
                self_link = data.get('links', {}).get('self', None)
        return {'self': self_link}

    def get_resource_links(self, item):
        """Hook for adding links to a resource object."""
        if self.opts.self_url:
            ret = self.dict_class()
            kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
            ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
            return ret
        return None

    def wrap_response(self, data, many):
        """Wrap data and links according to the JSON API """
        ret = {'data': data}
        # self_url_many is still valid when there isn't any data, but self_url
        # may only be included if there is data in the ret
        if many or data:
            top_level_links = self.get_top_level_links(data, many)
            if top_level_links['self']:
                ret['links'] = top_level_links
        return ret

    def generate_url(self, link, **kwargs):
        """Generate URL with any kwargs interpolated."""
        return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.on_bind_field
|
python
|
def on_bind_field(self, field_name, field_obj):
    """Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
    load_from (on marshmallow 2) to the inflected form of field_name.
    """
    # marshmallow 2 uses ``load_from``; marshmallow 3 renamed it ``data_key``.
    target_attr = 'load_from' if _MARSHMALLOW_VERSION_INFO[0] < 3 else 'data_key'
    # Only fill the attribute in when the user has not set it explicitly.
    if not getattr(field_obj, target_attr):
        setattr(field_obj, target_attr, self.inflect(field_name))
    return None
|
Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L217-L227
| null |
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
'Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified',
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split('.', 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError('Unknown field "{}"'.format(local_field))
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field.name, field.__class__.__name__))
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data['meta'] = self.document_meta
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data',
},
},
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_RESOURCE_META_LOAD_FROM] = item['meta']
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get('data', [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data),
None,
)
else:
for data in inner_data:
included_data.extend(
self._extract_from_included(data),
)
if included_data:
value['data'] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
if not is_collection(data):
raise ma.ValidationError([{
'detail': '`data` expected to be a collection.',
'source': {
'pointer': '/data',
},
}])
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get('included', {})
self.document_meta = data.get('meta', {})
try:
result = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id']))
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
pointer = ['/data']
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship,
)
if relationship:
pointer.append('relationships')
elif field_name != 'id':
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append('attributes')
pointer.append(self.inflect(field_name))
if relationship:
pointer.append('data')
return {
'detail': message,
'source': {
'pointer': '/'.join(pointer),
},
}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema._do_load
|
python
|
def _do_load(self, data, many=None, **kwargs):
    """Override `marshmallow.Schema._do_load` for custom JSON API handling.

    Specifically, we do this to format errors as JSON API Error objects,
    and to support loading of included (compound-document) data.
    """
    many = self.many if many is None else bool(many)
    # Store this on the instance so we have access to the included data
    # when processing relationships (``included`` is outside of the
    # ``data``).
    self.included_data = data.get('included', {})
    self.document_meta = data.get('meta', {})
    try:
        result = super(Schema, self)._do_load(data, many, **kwargs)
    except ValidationError as err:  # strict mode
        # Re-shape marshmallow's message dict into JSON API Error objects
        # before propagating the same exception instance.
        error_messages = err.messages
        if '_schema' in error_messages:
            error_messages = error_messages['_schema']
        formatted_messages = self.format_errors(error_messages, many=many)
        err.messages = formatted_messages
        raise err
    else:
        # On marshmallow 2, _do_load returns a tuple (load_data, errors)
        if _MARSHMALLOW_VERSION_INFO[0] < 3:
            data, error_messages = result
            if '_schema' in error_messages:
                error_messages = error_messages['_schema']
            formatted_messages = self.format_errors(error_messages, many=many)
            return data, formatted_messages
        return result
|
Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L229-L260
| null |
class Schema(ma.Schema):
    """Schema class that formats data according to JSON API 1.0.

    Must define the ``type_`` `class Meta` option.

    Example: ::

        from marshmallow_jsonapi import Schema, fields

        def dasherize(text):
            return text.replace('_', '-')

        class PostSchema(Schema):
            id = fields.Str(dump_only=True)  # Required
            title = fields.Str()
            author = fields.HyperlinkRelated(
                '/authors/{author_id}',
                url_kwargs={'author_id': '<author.id>'},
            )
            comments = fields.HyperlinkRelated(
                '/posts/{post_id}/comments',
                url_kwargs={'post_id': '<id>'},
                # Include resource linkage
                many=True, include_resource_linkage=True,
                type_='comments'
            )

            class Meta:
                type_ = 'posts'  # Required
                inflect = dasherize
    """

    class Meta:
        """Options object for `Schema`. Takes the same options as
        `marshmallow.Schema.Meta` with the addition of:

        * ``type_`` - required, the JSON API resource type as a string.
        * ``inflect`` - optional, an inflection function to modify attribute names.
        * ``self_url`` - optional, URL to use to `self` in links
        * ``self_url_kwargs`` - optional, replacement fields for `self_url`.
          String arguments enclosed in ``< >`` will be interpreted as attributes
          to pull from the schema data.
        * ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
          when a collection of resources is returned.
        """
        pass

    def __init__(self, *args, **kwargs):
        # ``include_data`` is a tuple of relationship paths (dotted for nesting)
        # whose full objects should be embedded in the top-level ``included``
        # section of the rendered document.
        self.include_data = kwargs.pop('include_data', ())
        super(Schema, self).__init__(*args, **kwargs)
        if self.include_data:
            self.check_relations(self.include_data)
        # A resource type and an ``id`` field are both mandated by the
        # JSON API specification.
        if not self.opts.type_:
            raise ValueError('Must specify type_ class Meta option')
        if 'id' not in self.fields:
            raise ValueError('Must have an `id` field')
        if self.opts.self_url_kwargs and not self.opts.self_url:
            raise ValueError(
                'Must specify `self_url` Meta option when '
                '`self_url_kwargs` is specified',
            )
        # Per-operation scratch state, populated during load/dump passes.
        self.included_data = {}
        self.document_meta = {}

    OPTIONS_CLASS = SchemaOpts

    def check_relations(self, relations):
        """Recursive function which checks if a relation is valid."""
        for rel in relations:
            if not rel:
                continue
            # Only split off the first path segment; the remainder is handed
            # to the related schema below.
            fields = rel.split('.', 1)
            local_field = fields[0]
            if local_field not in self.fields:
                raise ValueError('Unknown field "{}"'.format(local_field))
            field = self.fields[local_field]
            if not isinstance(field, BaseRelationship):
                raise ValueError('Can only include relationships. "{}" is a "{}"'
                                 .format(field.name, field.__class__.__name__))
            # Mark the relationship field so it emits full included data.
            field.include_data = True
            if len(fields) > 1:
                field.schema.check_relations(fields[1:])

    @ma.post_dump(pass_many=True)
    def format_json_api_response(self, data, many):
        """Post-dump hook that formats serialized data as a top-level JSON API object.

        See: http://jsonapi.org/format/#document-top-level
        """
        ret = self.format_items(data, many)
        ret = self.wrap_response(ret, many)
        ret = self.render_included_data(ret)
        ret = self.render_meta_document(ret)
        return ret

    def render_included_data(self, data):
        # Attach compound documents collected during dump, if any.
        if not self.included_data:
            return data
        data['included'] = list(self.included_data.values())
        return data

    def render_meta_document(self, data):
        # Attach top-level document meta collected during dump, if any.
        if not self.document_meta:
            return data
        data['meta'] = self.document_meta
        return data

    def unwrap_item(self, item):
        """Unwrap a single JSON API resource object into a flat payload dict.

        Raises a `marshmallow.ValidationError` when the ``type`` key is
        missing, and `IncorrectTypeError` when it does not match this
        schema's ``type_``.
        """
        if 'type' not in item:
            raise ma.ValidationError([
                {
                    'detail': '`data` object must include `type` key.',
                    'source': {
                        'pointer': '/data',
                    },
                },
            ])
        if item['type'] != self.opts.type_:
            raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
        payload = self.dict_class()
        if 'id' in item:
            payload['id'] = item['id']
        if 'meta' in item:
            payload[_RESOURCE_META_LOAD_FROM] = item['meta']
        if self.document_meta:
            payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
        for key, value in iteritems(item.get('attributes', {})):
            payload[key] = value
        for key, value in iteritems(item.get('relationships', {})):
            # Fold included data related to this relationship into the item, so
            # that we can deserialize the whole objects instead of just IDs.
            if self.included_data:
                included_data = []
                inner_data = value.get('data', [])
                # Data may be ``None`` (for empty relationships), but we only
                # need to process it when it's present.
                if inner_data:
                    if not is_collection(inner_data):
                        included_data = next(
                            self._extract_from_included(inner_data),
                            None,
                        )
                    else:
                        for data in inner_data:
                            included_data.extend(
                                self._extract_from_included(data),
                            )
                if included_data:
                    value['data'] = included_data
            payload[key] = value
        return payload

    @ma.pre_load(pass_many=True)
    def unwrap_request(self, data, many):
        """Pre-load hook that strips the JSON API envelope from incoming data."""
        if 'data' not in data:
            raise ma.ValidationError([{
                'detail': 'Object must include `data` key.',
                'source': {
                    'pointer': '/',
                },
            }])
        data = data['data']
        if many:
            if not is_collection(data):
                raise ma.ValidationError([{
                    'detail': '`data` expected to be a collection.',
                    'source': {
                        'pointer': '/data',
                    },
                }])
            return [self.unwrap_item(each) for each in data]
        return self.unwrap_item(data)

    def on_bind_field(self, field_name, field_obj):
        """Schema hook override. When binding fields, set ``data_key`` (on
        marshmallow 3) or ``load_from`` (on marshmallow 2) to the inflected
        form of field_name.
        """
        if _MARSHMALLOW_VERSION_INFO[0] < 3:
            if not field_obj.load_from:
                field_obj.load_from = self.inflect(field_name)
        else:
            if not field_obj.data_key:
                field_obj.data_key = self.inflect(field_name)
        return None

    def _extract_from_included(self, data):
        """Extract included data matching the items in ``data``.

        For each item in ``data``, extract the full data from the included
        data.
        """
        # id values are stringified before comparing so that int and str ids
        # match each other.
        return (item for item in self.included_data
                if item['type'] == data['type'] and
                str(item['id']) == str(data['id']))

    def inflect(self, text):
        """Inflect ``text`` if the ``inflect`` class Meta option is defined,
        otherwise do nothing.
        """
        return self.opts.inflect(text) if self.opts.inflect else text

    ### Overridable hooks ###

    def format_errors(self, errors, many):
        """Format validation errors as JSON Error objects."""
        if not errors:
            return {}
        # Already-formatted error lists pass through untouched.
        if isinstance(errors, (list, tuple)):
            return {'errors': errors}
        formatted_errors = []
        if many:
            # With many=True the messages are keyed by item index first.
            for index, errors in iteritems(errors):
                for field_name, field_errors in iteritems(errors):
                    formatted_errors.extend([
                        self.format_error(field_name, message, index=index)
                        for message in field_errors
                    ])
        else:
            for field_name, field_errors in iteritems(errors):
                formatted_errors.extend([
                    self.format_error(field_name, message)
                    for message in field_errors
                ])
        return {'errors': formatted_errors}

    def format_error(self, field_name, message, index=None):
        """Override-able hook to format a single error message as an Error object.

        See: http://jsonapi.org/format/#error-objects
        """
        pointer = ['/data']
        if index is not None:
            pointer.append(str(index))
        relationship = isinstance(
            self.declared_fields.get(field_name), BaseRelationship,
        )
        if relationship:
            pointer.append('relationships')
        elif field_name != 'id':
            # JSONAPI identifier is a special field that exists above the attribute object.
            pointer.append('attributes')
        pointer.append(self.inflect(field_name))
        if relationship:
            pointer.append('data')
        return {
            'detail': message,
            'source': {
                'pointer': '/'.join(pointer),
            },
        }

    def format_item(self, item):
        """Format a single datum as a Resource object.

        See: http://jsonapi.org/format/#document-resource-objects
        """
        # http://jsonapi.org/format/#document-top-level
        # Primary data MUST be either... a single resource object, a single resource
        # identifier object, or null, for requests that target single resources
        if not item:
            return None
        ret = self.dict_class()
        ret[TYPE] = self.opts.type_
        # Get the schema attributes so we can confirm `dump-to` values exist
        attributes = {
            (get_dump_key(self.fields[field]) or field): field
            for field in self.fields
        }
        for field_name, value in iteritems(item):
            attribute = attributes[field_name]
            if attribute == ID:
                ret[ID] = value
            elif isinstance(self.fields[attribute], DocumentMeta):
                # Document-level meta accumulates on the instance and is
                # rendered at the document top level, not on the resource.
                if not self.document_meta:
                    self.document_meta = self.dict_class()
                self.document_meta.update(value)
            elif isinstance(self.fields[attribute], ResourceMeta):
                if 'meta' not in ret:
                    ret['meta'] = self.dict_class()
                ret['meta'].update(value)
            elif isinstance(self.fields[attribute], BaseRelationship):
                if value:
                    if 'relationships' not in ret:
                        ret['relationships'] = self.dict_class()
                    ret['relationships'][self.inflect(field_name)] = value
            else:
                if 'attributes' not in ret:
                    ret['attributes'] = self.dict_class()
                ret['attributes'][self.inflect(field_name)] = value
        links = self.get_resource_links(item)
        if links:
            ret['links'] = links
        return ret

    def format_items(self, data, many):
        """Format data as a Resource object or list of Resource objects.

        See: http://jsonapi.org/format/#document-resource-objects
        """
        if many:
            return [self.format_item(item) for item in data]
        else:
            return self.format_item(data)

    def get_top_level_links(self, data, many):
        """Hook for adding links to the root of the response data."""
        self_link = None
        if many:
            if self.opts.self_url_many:
                self_link = self.generate_url(self.opts.self_url_many)
        else:
            if self.opts.self_url:
                self_link = data.get('links', {}).get('self', None)
        return {'self': self_link}

    def get_resource_links(self, item):
        """Hook for adding links to a resource object."""
        if self.opts.self_url:
            ret = self.dict_class()
            # Resolve ``<attr>`` placeholders in self_url_kwargs from the item.
            kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
            ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
            return ret
        return None

    def wrap_response(self, data, many):
        """Wrap data and links according to the JSON API """
        ret = {'data': data}
        # self_url_many is still valid when there isn't any data, but self_url
        # may only be included if there is data in the ret
        if many or data:
            top_level_links = self.get_top_level_links(data, many)
            if top_level_links['self']:
                ret['links'] = top_level_links
        return ret

    def generate_url(self, link, **kwargs):
        """Generate URL with any kwargs interpolated."""
        return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema._extract_from_included
|
python
|
def _extract_from_included(self, data):
    """Extract included data matching the items in ``data``.

    For each item in ``data``, extract the full data from the included
    data.
    """
    # id values are stringified before comparing so int and str ids match.
    return (item for item in self.included_data
            if item['type'] == data['type'] and
            str(item['id']) == str(data['id']))
|
Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L262-L270
| null |
class Schema(ma.Schema):
    """Schema class that formats data according to JSON API 1.0.

    Must define the ``type_`` `class Meta` option.

    Example: ::

        from marshmallow_jsonapi import Schema, fields

        def dasherize(text):
            return text.replace('_', '-')

        class PostSchema(Schema):
            id = fields.Str(dump_only=True)  # Required
            title = fields.Str()
            author = fields.HyperlinkRelated(
                '/authors/{author_id}',
                url_kwargs={'author_id': '<author.id>'},
            )
            comments = fields.HyperlinkRelated(
                '/posts/{post_id}/comments',
                url_kwargs={'post_id': '<id>'},
                # Include resource linkage
                many=True, include_resource_linkage=True,
                type_='comments'
            )

            class Meta:
                type_ = 'posts'  # Required
                inflect = dasherize
    """

    class Meta:
        """Options object for `Schema`. Takes the same options as
        `marshmallow.Schema.Meta` with the addition of:

        * ``type_`` - required, the JSON API resource type as a string.
        * ``inflect`` - optional, an inflection function to modify attribute names.
        * ``self_url`` - optional, URL to use to `self` in links
        * ``self_url_kwargs`` - optional, replacement fields for `self_url`.
          String arguments enclosed in ``< >`` will be interpreted as attributes
          to pull from the schema data.
        * ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
          when a collection of resources is returned.
        """
        pass

    def __init__(self, *args, **kwargs):
        # ``include_data`` is a tuple of relationship paths (dotted for nesting)
        # whose full objects should be embedded in the top-level ``included``
        # section of the rendered document.
        self.include_data = kwargs.pop('include_data', ())
        super(Schema, self).__init__(*args, **kwargs)
        if self.include_data:
            self.check_relations(self.include_data)
        # A resource type and an ``id`` field are both mandated by the
        # JSON API specification.
        if not self.opts.type_:
            raise ValueError('Must specify type_ class Meta option')
        if 'id' not in self.fields:
            raise ValueError('Must have an `id` field')
        if self.opts.self_url_kwargs and not self.opts.self_url:
            raise ValueError(
                'Must specify `self_url` Meta option when '
                '`self_url_kwargs` is specified',
            )
        # Per-operation scratch state, populated during load/dump passes.
        self.included_data = {}
        self.document_meta = {}

    OPTIONS_CLASS = SchemaOpts

    def check_relations(self, relations):
        """Recursive function which checks if a relation is valid."""
        for rel in relations:
            if not rel:
                continue
            # Only split off the first path segment; the remainder is handed
            # to the related schema below.
            fields = rel.split('.', 1)
            local_field = fields[0]
            if local_field not in self.fields:
                raise ValueError('Unknown field "{}"'.format(local_field))
            field = self.fields[local_field]
            if not isinstance(field, BaseRelationship):
                raise ValueError('Can only include relationships. "{}" is a "{}"'
                                 .format(field.name, field.__class__.__name__))
            # Mark the relationship field so it emits full included data.
            field.include_data = True
            if len(fields) > 1:
                field.schema.check_relations(fields[1:])

    @ma.post_dump(pass_many=True)
    def format_json_api_response(self, data, many):
        """Post-dump hook that formats serialized data as a top-level JSON API object.

        See: http://jsonapi.org/format/#document-top-level
        """
        ret = self.format_items(data, many)
        ret = self.wrap_response(ret, many)
        ret = self.render_included_data(ret)
        ret = self.render_meta_document(ret)
        return ret

    def render_included_data(self, data):
        # Attach compound documents collected during dump, if any.
        if not self.included_data:
            return data
        data['included'] = list(self.included_data.values())
        return data

    def render_meta_document(self, data):
        # Attach top-level document meta collected during dump, if any.
        if not self.document_meta:
            return data
        data['meta'] = self.document_meta
        return data

    def unwrap_item(self, item):
        """Unwrap a single JSON API resource object into a flat payload dict.

        Raises a `marshmallow.ValidationError` when the ``type`` key is
        missing, and `IncorrectTypeError` when it does not match this
        schema's ``type_``.
        """
        if 'type' not in item:
            raise ma.ValidationError([
                {
                    'detail': '`data` object must include `type` key.',
                    'source': {
                        'pointer': '/data',
                    },
                },
            ])
        if item['type'] != self.opts.type_:
            raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
        payload = self.dict_class()
        if 'id' in item:
            payload['id'] = item['id']
        if 'meta' in item:
            payload[_RESOURCE_META_LOAD_FROM] = item['meta']
        if self.document_meta:
            payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
        for key, value in iteritems(item.get('attributes', {})):
            payload[key] = value
        for key, value in iteritems(item.get('relationships', {})):
            # Fold included data related to this relationship into the item, so
            # that we can deserialize the whole objects instead of just IDs.
            # NOTE(review): relies on self._extract_from_included, which is not
            # defined in this snippet — presumably provided by the full class.
            if self.included_data:
                included_data = []
                inner_data = value.get('data', [])
                # Data may be ``None`` (for empty relationships), but we only
                # need to process it when it's present.
                if inner_data:
                    if not is_collection(inner_data):
                        included_data = next(
                            self._extract_from_included(inner_data),
                            None,
                        )
                    else:
                        for data in inner_data:
                            included_data.extend(
                                self._extract_from_included(data),
                            )
                if included_data:
                    value['data'] = included_data
            payload[key] = value
        return payload

    @ma.pre_load(pass_many=True)
    def unwrap_request(self, data, many):
        """Pre-load hook that strips the JSON API envelope from incoming data."""
        if 'data' not in data:
            raise ma.ValidationError([{
                'detail': 'Object must include `data` key.',
                'source': {
                    'pointer': '/',
                },
            }])
        data = data['data']
        if many:
            if not is_collection(data):
                raise ma.ValidationError([{
                    'detail': '`data` expected to be a collection.',
                    'source': {
                        'pointer': '/data',
                    },
                }])
            return [self.unwrap_item(each) for each in data]
        return self.unwrap_item(data)

    def on_bind_field(self, field_name, field_obj):
        """Schema hook override. When binding fields, set ``data_key`` (on
        marshmallow 3) or ``load_from`` (on marshmallow 2) to the inflected
        form of field_name.
        """
        if _MARSHMALLOW_VERSION_INFO[0] < 3:
            if not field_obj.load_from:
                field_obj.load_from = self.inflect(field_name)
        else:
            if not field_obj.data_key:
                field_obj.data_key = self.inflect(field_name)
        return None

    def _do_load(self, data, many=None, **kwargs):
        """Override `marshmallow.Schema._do_load` for custom JSON API handling.

        Specifically, we do this to format errors as JSON API Error objects,
        and to support loading of included data.
        """
        many = self.many if many is None else bool(many)
        # Store this on the instance so we have access to the included data
        # when processing relationships (``included`` is outside of the
        # ``data``).
        self.included_data = data.get('included', {})
        self.document_meta = data.get('meta', {})
        try:
            result = super(Schema, self)._do_load(data, many, **kwargs)
        except ValidationError as err:  # strict mode
            error_messages = err.messages
            if '_schema' in error_messages:
                error_messages = error_messages['_schema']
            formatted_messages = self.format_errors(error_messages, many=many)
            err.messages = formatted_messages
            raise err
        else:
            # On marshmallow 2, _do_load returns a tuple (load_data, errors)
            if _MARSHMALLOW_VERSION_INFO[0] < 3:
                data, error_messages = result
                if '_schema' in error_messages:
                    error_messages = error_messages['_schema']
                formatted_messages = self.format_errors(error_messages, many=many)
                return data, formatted_messages
            return result

    def inflect(self, text):
        """Inflect ``text`` if the ``inflect`` class Meta option is defined,
        otherwise do nothing.
        """
        return self.opts.inflect(text) if self.opts.inflect else text

    ### Overridable hooks ###

    def format_errors(self, errors, many):
        """Format validation errors as JSON Error objects."""
        if not errors:
            return {}
        # Already-formatted error lists pass through untouched.
        if isinstance(errors, (list, tuple)):
            return {'errors': errors}
        formatted_errors = []
        if many:
            # With many=True the messages are keyed by item index first.
            for index, errors in iteritems(errors):
                for field_name, field_errors in iteritems(errors):
                    formatted_errors.extend([
                        self.format_error(field_name, message, index=index)
                        for message in field_errors
                    ])
        else:
            for field_name, field_errors in iteritems(errors):
                formatted_errors.extend([
                    self.format_error(field_name, message)
                    for message in field_errors
                ])
        return {'errors': formatted_errors}

    def format_error(self, field_name, message, index=None):
        """Override-able hook to format a single error message as an Error object.

        See: http://jsonapi.org/format/#error-objects
        """
        pointer = ['/data']
        if index is not None:
            pointer.append(str(index))
        relationship = isinstance(
            self.declared_fields.get(field_name), BaseRelationship,
        )
        if relationship:
            pointer.append('relationships')
        elif field_name != 'id':
            # JSONAPI identifier is a special field that exists above the attribute object.
            pointer.append('attributes')
        pointer.append(self.inflect(field_name))
        if relationship:
            pointer.append('data')
        return {
            'detail': message,
            'source': {
                'pointer': '/'.join(pointer),
            },
        }

    def format_item(self, item):
        """Format a single datum as a Resource object.

        See: http://jsonapi.org/format/#document-resource-objects
        """
        # http://jsonapi.org/format/#document-top-level
        # Primary data MUST be either... a single resource object, a single resource
        # identifier object, or null, for requests that target single resources
        if not item:
            return None
        ret = self.dict_class()
        ret[TYPE] = self.opts.type_
        # Get the schema attributes so we can confirm `dump-to` values exist
        attributes = {
            (get_dump_key(self.fields[field]) or field): field
            for field in self.fields
        }
        for field_name, value in iteritems(item):
            attribute = attributes[field_name]
            if attribute == ID:
                ret[ID] = value
            elif isinstance(self.fields[attribute], DocumentMeta):
                # Document-level meta accumulates on the instance and is
                # rendered at the document top level, not on the resource.
                if not self.document_meta:
                    self.document_meta = self.dict_class()
                self.document_meta.update(value)
            elif isinstance(self.fields[attribute], ResourceMeta):
                if 'meta' not in ret:
                    ret['meta'] = self.dict_class()
                ret['meta'].update(value)
            elif isinstance(self.fields[attribute], BaseRelationship):
                if value:
                    if 'relationships' not in ret:
                        ret['relationships'] = self.dict_class()
                    ret['relationships'][self.inflect(field_name)] = value
            else:
                if 'attributes' not in ret:
                    ret['attributes'] = self.dict_class()
                ret['attributes'][self.inflect(field_name)] = value
        links = self.get_resource_links(item)
        if links:
            ret['links'] = links
        return ret

    def format_items(self, data, many):
        """Format data as a Resource object or list of Resource objects.

        See: http://jsonapi.org/format/#document-resource-objects
        """
        if many:
            return [self.format_item(item) for item in data]
        else:
            return self.format_item(data)

    def get_top_level_links(self, data, many):
        """Hook for adding links to the root of the response data."""
        self_link = None
        if many:
            if self.opts.self_url_many:
                self_link = self.generate_url(self.opts.self_url_many)
        else:
            if self.opts.self_url:
                self_link = data.get('links', {}).get('self', None)
        return {'self': self_link}

    def get_resource_links(self, item):
        """Hook for adding links to a resource object."""
        if self.opts.self_url:
            ret = self.dict_class()
            # Resolve ``<attr>`` placeholders in self_url_kwargs from the item.
            kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
            ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
            return ret
        return None

    def wrap_response(self, data, many):
        """Wrap data and links according to the JSON API """
        ret = {'data': data}
        # self_url_many is still valid when there isn't any data, but self_url
        # may only be included if there is data in the ret
        if many or data:
            top_level_links = self.get_top_level_links(data, many)
            if top_level_links['self']:
                ret['links'] = top_level_links
        return ret

    def generate_url(self, link, **kwargs):
        """Generate URL with any kwargs interpolated."""
        return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.inflect
|
python
|
def inflect(self, text):
    """Inflect ``text`` if the ``inflect`` class Meta option is defined,
    otherwise do nothing.
    """
    return self.opts.inflect(text) if self.opts.inflect else text
|
Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L272-L276
| null |
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
'Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified',
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split('.', 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError('Unknown field "{}"'.format(local_field))
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field.name, field.__class__.__name__))
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data['meta'] = self.document_meta
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data',
},
},
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_RESOURCE_META_LOAD_FROM] = item['meta']
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get('data', [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data),
None,
)
else:
for data in inner_data:
included_data.extend(
self._extract_from_included(data),
)
if included_data:
value['data'] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
if not is_collection(data):
raise ma.ValidationError([{
'detail': '`data` expected to be a collection.',
'source': {
'pointer': '/data',
},
}])
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
"""
if _MARSHMALLOW_VERSION_INFO[0] < 3:
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
else:
if not field_obj.data_key:
field_obj.data_key = self.inflect(field_name)
return None
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get('included', {})
self.document_meta = data.get('meta', {})
try:
result = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id']))
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
pointer = ['/data']
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship,
)
if relationship:
pointer.append('relationships')
elif field_name != 'id':
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append('attributes')
pointer.append(self.inflect(field_name))
if relationship:
pointer.append('data')
return {
'detail': message,
'source': {
'pointer': '/'.join(pointer),
},
}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.format_errors
|
python
|
def format_errors(self, errors, many):
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
|
Format validation errors as JSON Error objects.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L280-L301
| null |
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
'Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified',
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split('.', 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError('Unknown field "{}"'.format(local_field))
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field.name, field.__class__.__name__))
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data['meta'] = self.document_meta
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data',
},
},
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_RESOURCE_META_LOAD_FROM] = item['meta']
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get('data', [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data),
None,
)
else:
for data in inner_data:
included_data.extend(
self._extract_from_included(data),
)
if included_data:
value['data'] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
if not is_collection(data):
raise ma.ValidationError([{
'detail': '`data` expected to be a collection.',
'source': {
'pointer': '/data',
},
}])
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
"""
if _MARSHMALLOW_VERSION_INFO[0] < 3:
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
else:
if not field_obj.data_key:
field_obj.data_key = self.inflect(field_name)
return None
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get('included', {})
self.document_meta = data.get('meta', {})
try:
result = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id']))
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
pointer = ['/data']
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship,
)
if relationship:
pointer.append('relationships')
elif field_name != 'id':
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append('attributes')
pointer.append(self.inflect(field_name))
if relationship:
pointer.append('data')
return {
'detail': message,
'source': {
'pointer': '/'.join(pointer),
},
}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.format_error
|
python
|
def format_error(self, field_name, message, index=None):
pointer = ['/data']
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship,
)
if relationship:
pointer.append('relationships')
elif field_name != 'id':
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append('attributes')
pointer.append(self.inflect(field_name))
if relationship:
pointer.append('data')
return {
'detail': message,
'source': {
'pointer': '/'.join(pointer),
},
}
|
Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L303-L332
| null |
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
'Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified',
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split('.', 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError('Unknown field "{}"'.format(local_field))
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field.name, field.__class__.__name__))
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data['meta'] = self.document_meta
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data',
},
},
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_RESOURCE_META_LOAD_FROM] = item['meta']
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get('data', [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data),
None,
)
else:
for data in inner_data:
included_data.extend(
self._extract_from_included(data),
)
if included_data:
value['data'] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
if not is_collection(data):
raise ma.ValidationError([{
'detail': '`data` expected to be a collection.',
'source': {
'pointer': '/data',
},
}])
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
"""
if _MARSHMALLOW_VERSION_INFO[0] < 3:
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
else:
if not field_obj.data_key:
field_obj.data_key = self.inflect(field_name)
return None
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get('included', {})
self.document_meta = data.get('meta', {})
try:
result = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id']))
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.format_item
|
python
|
def format_item(self, item):
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
|
Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L334-L379
| null |
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
'Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified',
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split('.', 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError('Unknown field "{}"'.format(local_field))
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field.name, field.__class__.__name__))
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data['meta'] = self.document_meta
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data',
},
},
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_RESOURCE_META_LOAD_FROM] = item['meta']
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get('data', [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data),
None,
)
else:
for data in inner_data:
included_data.extend(
self._extract_from_included(data),
)
if included_data:
value['data'] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
if not is_collection(data):
raise ma.ValidationError([{
'detail': '`data` expected to be a collection.',
'source': {
'pointer': '/data',
},
}])
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
"""
if _MARSHMALLOW_VERSION_INFO[0] < 3:
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
else:
if not field_obj.data_key:
field_obj.data_key = self.inflect(field_name)
return None
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get('included', {})
self.document_meta = data.get('meta', {})
try:
result = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id']))
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
pointer = ['/data']
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship,
)
if relationship:
pointer.append('relationships')
elif field_name != 'id':
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append('attributes')
pointer.append(self.inflect(field_name))
if relationship:
pointer.append('data')
return {
'detail': message,
'source': {
'pointer': '/'.join(pointer),
},
}
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.format_items
|
python
|
def format_items(self, data, many):
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
|
Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L381-L389
| null |
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
'Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified',
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split('.', 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError('Unknown field "{}"'.format(local_field))
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field.name, field.__class__.__name__))
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data['meta'] = self.document_meta
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data',
},
},
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_RESOURCE_META_LOAD_FROM] = item['meta']
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get('data', [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data),
None,
)
else:
for data in inner_data:
included_data.extend(
self._extract_from_included(data),
)
if included_data:
value['data'] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
if not is_collection(data):
raise ma.ValidationError([{
'detail': '`data` expected to be a collection.',
'source': {
'pointer': '/data',
},
}])
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
"""
if _MARSHMALLOW_VERSION_INFO[0] < 3:
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
else:
if not field_obj.data_key:
field_obj.data_key = self.inflect(field_name)
return None
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get('included', {})
self.document_meta = data.get('meta', {})
try:
result = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id']))
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
pointer = ['/data']
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship,
)
if relationship:
pointer.append('relationships')
elif field_name != 'id':
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append('attributes')
pointer.append(self.inflect(field_name))
if relationship:
pointer.append('data')
return {
'detail': message,
'source': {
'pointer': '/'.join(pointer),
},
}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.get_top_level_links
|
python
|
def get_top_level_links(self, data, many):
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
|
Hook for adding links to the root of the response data.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L391-L402
| null |
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
'Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified',
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split('.', 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError('Unknown field "{}"'.format(local_field))
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field.name, field.__class__.__name__))
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data['meta'] = self.document_meta
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data',
},
},
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_RESOURCE_META_LOAD_FROM] = item['meta']
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get('data', [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data),
None,
)
else:
for data in inner_data:
included_data.extend(
self._extract_from_included(data),
)
if included_data:
value['data'] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
if not is_collection(data):
raise ma.ValidationError([{
'detail': '`data` expected to be a collection.',
'source': {
'pointer': '/data',
},
}])
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
"""
if _MARSHMALLOW_VERSION_INFO[0] < 3:
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
else:
if not field_obj.data_key:
field_obj.data_key = self.inflect(field_name)
return None
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get('included', {})
self.document_meta = data.get('meta', {})
try:
result = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id']))
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
pointer = ['/data']
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship,
)
if relationship:
pointer.append('relationships')
elif field_name != 'id':
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append('attributes')
pointer.append(self.inflect(field_name))
if relationship:
pointer.append('data')
return {
'detail': message,
'source': {
'pointer': '/'.join(pointer),
},
}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.get_resource_links
|
python
|
def get_resource_links(self, item):
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
|
Hook for adding links to a resource object.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L404-L411
|
[
"def resolve_params(obj, params, default=missing):\n \"\"\"Given a dictionary of keyword arguments, return the same dictionary except with\n values enclosed in `< >` resolved to attributes on `obj`.\n \"\"\"\n param_values = {}\n for name, attr_tpl in iteritems(params):\n attr_name = tpl(str(attr_tpl))\n if attr_name:\n attribute_value = get_value(obj, attr_name, default=default)\n if attribute_value is not missing:\n param_values[name] = attribute_value\n else:\n raise AttributeError(\n '{attr_name!r} is not a valid '\n 'attribute of {obj!r}'.format(attr_name=attr_name, obj=obj),\n )\n else:\n param_values[name] = attr_tpl\n return param_values\n"
] |
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
'Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified',
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split('.', 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError('Unknown field "{}"'.format(local_field))
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field.name, field.__class__.__name__))
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data['meta'] = self.document_meta
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data',
},
},
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_RESOURCE_META_LOAD_FROM] = item['meta']
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get('data', [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data),
None,
)
else:
for data in inner_data:
included_data.extend(
self._extract_from_included(data),
)
if included_data:
value['data'] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
if not is_collection(data):
raise ma.ValidationError([{
'detail': '`data` expected to be a collection.',
'source': {
'pointer': '/data',
},
}])
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
"""
if _MARSHMALLOW_VERSION_INFO[0] < 3:
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
else:
if not field_obj.data_key:
field_obj.data_key = self.inflect(field_name)
return None
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get('included', {})
self.document_meta = data.get('meta', {})
try:
result = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id']))
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
pointer = ['/data']
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship,
)
if relationship:
pointer.append('relationships')
elif field_name != 'id':
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append('attributes')
pointer.append(self.inflect(field_name))
if relationship:
pointer.append('data')
return {
'detail': message,
'source': {
'pointer': '/'.join(pointer),
},
}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/schema.py
|
Schema.wrap_response
|
python
|
def wrap_response(self, data, many):
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
|
Wrap data and links according to the JSON API
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/schema.py#L413-L422
| null |
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
'Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified',
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split('.', 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError('Unknown field "{}"'.format(local_field))
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field.name, field.__class__.__name__))
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data['meta'] = self.document_meta
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data',
},
},
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_RESOURCE_META_LOAD_FROM] = item['meta']
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get('data', [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data),
None,
)
else:
for data in inner_data:
included_data.extend(
self._extract_from_included(data),
)
if included_data:
value['data'] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
if not is_collection(data):
raise ma.ValidationError([{
'detail': '`data` expected to be a collection.',
'source': {
'pointer': '/data',
},
}])
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
"""
if _MARSHMALLOW_VERSION_INFO[0] < 3:
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
else:
if not field_obj.data_key:
field_obj.data_key = self.inflect(field_name)
return None
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get('included', {})
self.document_meta = data.get('meta', {})
try:
result = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (item for item in self.included_data
if item['type'] == data['type'] and
str(item['id']) == str(data['id']))
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
pointer = ['/data']
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship,
)
if relationship:
pointer.append('relationships')
elif field_name != 'id':
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append('attributes')
pointer.append(self.inflect(field_name))
if relationship:
pointer.append('data')
return {
'detail': message,
'source': {
'pointer': '/'.join(pointer),
},
}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/fields.py
|
Relationship.extract_value
|
python
|
def extract_value(self, data):
errors = []
if 'id' not in data:
errors.append('Must have an `id` field')
if 'type' not in data:
errors.append('Must have a `type` field')
elif data['type'] != self.type_:
errors.append('Invalid `type` specified')
if errors:
raise ValidationError(errors)
# If ``attributes`` is set, we've folded included data into this
# relationship. Unserialize it if we have a schema set; otherwise we
# fall back below to old behaviour of only IDs.
if 'attributes' in data and self.__schema:
result = self.schema.load({'data': data, 'included': self.root.included_data})
return result.data if _MARSHMALLOW_VERSION_INFO[0] < 3 else result
id_value = data.get('id')
if self.__schema:
id_value = self.schema.fields['id'].deserialize(id_value)
return id_value
|
Extract the id key and validate the request structure.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/fields.py#L183-L208
| null |
class Relationship(BaseRelationship):
"""Framework-independent field which serializes to a "relationship object".
See: http://jsonapi.org/format/#document-resource-object-relationships
Examples: ::
author = Relationship(
related_url='/authors/{author_id}',
related_url_kwargs={'author_id': '<author.id>'},
)
comments = Relationship(
related_url='/posts/{post_id}/comments/',
related_url_kwargs={'post_id': '<id>'},
many=True, include_resource_linkage=True,
type_='comments'
)
This field is read-only by default.
:param str related_url: Format string for related resource links.
:param dict related_url_kwargs: Replacement fields for `related_url`. String arguments
enclosed in `< >` will be interpreted as attributes to pull from the target object.
:param str self_url: Format string for self relationship links.
:param dict self_url_kwargs: Replacement fields for `self_url`. String arguments
enclosed in `< >` will be interpreted as attributes to pull from the target object.
:param bool include_resource_linkage: Whether to include a resource linkage
(http://jsonapi.org/format/#document-resource-object-linkage) in the serialized result.
:param marshmallow_jsonapi.Schema schema: The schema to render the included data with.
:param bool many: Whether the relationship represents a many-to-one or many-to-many
relationship. Only affects serialization of the resource linkage.
:param str type_: The type of resource.
:param str id_field: Attribute name to pull ids from if a resource linkage is included.
"""
default_id_field = 'id'
def __init__(
self,
related_url='', related_url_kwargs=None,
self_url='', self_url_kwargs=None,
include_resource_linkage=False, schema=None,
many=False, type_=None, id_field=None, **kwargs
):
self.related_url = related_url
self.related_url_kwargs = related_url_kwargs or {}
self.self_url = self_url
self.self_url_kwargs = self_url_kwargs or {}
if include_resource_linkage and not type_:
raise ValueError('include_resource_linkage=True requires the type_ argument.')
self.many = many
self.include_resource_linkage = include_resource_linkage
self.include_data = False
self.type_ = type_
self.__id_field = id_field
self.__schema = schema
super(Relationship, self).__init__(**kwargs)
@property
def id_field(self):
if self.__id_field:
return self.__id_field
if self.__schema:
field = self.schema.fields['id']
return field.attribute or self.default_id_field
else:
return self.default_id_field
@property
def schema(self):
only = getattr(self, 'only', None)
exclude = getattr(self, 'exclude', ())
context = getattr(self, 'context', {})
if isinstance(self.__schema, SchemaABC):
return self.__schema
if isinstance(self.__schema, type) and issubclass(self.__schema, SchemaABC):
self.__schema = self.__schema(only=only, exclude=exclude, context=context)
return self.__schema
if isinstance(self.__schema, basestring):
if self.__schema == _RECURSIVE_NESTED:
parent_class = self.parent.__class__
self.__schema = parent_class(
only=only, exclude=exclude, context=context,
include_data=self.parent.include_data,
)
else:
schema_class = class_registry.get_class(self.__schema)
self.__schema = schema_class(
only=only, exclude=exclude,
context=context,
)
return self.__schema
else:
raise ValueError((
'A Schema is required to serialize a nested '
'relationship with include_data'
))
def get_related_url(self, obj):
if self.related_url:
params = resolve_params(obj, self.related_url_kwargs, default=self.default)
non_null_params = {
key: value for key, value in params.items()
if value is not None
}
if non_null_params:
return self.related_url.format(**non_null_params)
return None
def get_self_url(self, obj):
if self.self_url:
params = resolve_params(obj, self.self_url_kwargs, default=self.default)
non_null_params = {
key: value for key, value in params.items()
if value is not None
}
if non_null_params:
return self.self_url.format(**non_null_params)
return None
def get_resource_linkage(self, value):
if self.many:
resource_object = [
{
'type': self.type_,
'id': _stringify(self._get_id(each)),
} for each in value
]
else:
resource_object = {
'type': self.type_,
'id': _stringify(self._get_id(value)),
}
return resource_object
def deserialize(self, value, attr=None, data=None):
"""Deserialize ``value``.
:raise ValidationError: If the value is not type `dict`, if the
value does not contain a `data` key, and if the value is
required but unspecified.
"""
if value is missing_:
return super(Relationship, self).deserialize(value, attr, data)
if not isinstance(value, dict) or 'data' not in value:
# a relationships object does not need 'data' if 'links' is present
if value and 'links' in value:
return missing_
else:
raise ValidationError('Must include a `data` key')
return super(Relationship, self).deserialize(value['data'], attr, data)
def _deserialize(self, value, attr, obj):
if self.many:
if not is_collection(value):
raise ValidationError('Relationship is list-like')
return [self.extract_value(item) for item in value]
if is_collection(value):
raise ValidationError('Relationship is not list-like')
return self.extract_value(value)
def _serialize(self, value, attr, obj):
dict_class = self.parent.dict_class if self.parent else dict
ret = dict_class()
self_url = self.get_self_url(obj)
related_url = self.get_related_url(obj)
if self_url or related_url:
ret['links'] = dict_class()
if self_url:
ret['links']['self'] = self_url
if related_url:
ret['links']['related'] = related_url
# resource linkage is required when including the data
if self.include_resource_linkage or self.include_data:
if value is None:
ret['data'] = [] if self.many else None
else:
ret['data'] = self.get_resource_linkage(value)
if self.include_data and value is not None:
if self.many:
for item in value:
self._serialize_included(item)
else:
self._serialize_included(value)
return ret
def _serialize_included(self, value):
result = self.schema.dump(value)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data = result.data
else:
data = result
item = data['data']
self.root.included_data[(item['type'], item['id'])] = item
for key, value in iteritems(self.schema.included_data):
self.root.included_data[key] = value
def _get_id(self, value):
if _MARSHMALLOW_VERSION_INFO[0] >= 3:
if self.__schema:
return self.schema.get_attribute(value, self.id_field, value)
else:
return get_value(value, self.id_field, value)
else:
if self.__schema:
return self.schema.get_attribute(self.id_field, value, value)
else:
return get_value(value, self.id_field, value)
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/fields.py
|
Relationship.deserialize
|
python
|
def deserialize(self, value, attr=None, data=None):
if value is missing_:
return super(Relationship, self).deserialize(value, attr, data)
if not isinstance(value, dict) or 'data' not in value:
# a relationships object does not need 'data' if 'links' is present
if value and 'links' in value:
return missing_
else:
raise ValidationError('Must include a `data` key')
return super(Relationship, self).deserialize(value['data'], attr, data)
|
Deserialize ``value``.
:raise ValidationError: If the value is not type `dict`, if the
value does not contain a `data` key, and if the value is
required but unspecified.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/fields.py#L210-L225
| null |
class Relationship(BaseRelationship):
    """Framework-independent field which serializes to a "relationship object".
    See: http://jsonapi.org/format/#document-resource-object-relationships
    Examples: ::
        author = Relationship(
            related_url='/authors/{author_id}',
            related_url_kwargs={'author_id': '<author.id>'},
        )
        comments = Relationship(
            related_url='/posts/{post_id}/comments/',
            related_url_kwargs={'post_id': '<id>'},
            many=True, include_resource_linkage=True,
            type_='comments'
        )
    This field is read-only by default.
    :param str related_url: Format string for related resource links.
    :param dict related_url_kwargs: Replacement fields for `related_url`. String arguments
        enclosed in `< >` will be interpreted as attributes to pull from the target object.
    :param str self_url: Format string for self relationship links.
    :param dict self_url_kwargs: Replacement fields for `self_url`. String arguments
        enclosed in `< >` will be interpreted as attributes to pull from the target object.
    :param bool include_resource_linkage: Whether to include a resource linkage
        (http://jsonapi.org/format/#document-resource-object-linkage) in the serialized result.
    :param marshmallow_jsonapi.Schema schema: The schema to render the included data with.
    :param bool many: Whether the relationship represents a many-to-one or many-to-many
        relationship. Only affects serialization of the resource linkage.
    :param str type_: The type of resource.
    :param str id_field: Attribute name to pull ids from if a resource linkage is included.
    """
    # Fallback attribute to pull ids from when neither ``id_field`` nor a
    # schema with an ``id`` field attribute is provided.
    default_id_field = 'id'
    def __init__(
        self,
        related_url='', related_url_kwargs=None,
        self_url='', self_url_kwargs=None,
        include_resource_linkage=False, schema=None,
        many=False, type_=None, id_field=None, **kwargs
    ):
        self.related_url = related_url
        self.related_url_kwargs = related_url_kwargs or {}
        self.self_url = self_url
        self.self_url_kwargs = self_url_kwargs or {}
        # A resource linkage is meaningless without a resource type.
        if include_resource_linkage and not type_:
            raise ValueError('include_resource_linkage=True requires the type_ argument.')
        self.many = many
        self.include_resource_linkage = include_resource_linkage
        # Flipped on externally (by the schema's include_data machinery);
        # off by default so relationships serialize as linkage/links only.
        self.include_data = False
        self.type_ = type_
        self.__id_field = id_field
        self.__schema = schema
        super(Relationship, self).__init__(**kwargs)
    @property
    def id_field(self):
        """Attribute name used to pull ids off related objects.

        Precedence: explicit ``id_field`` argument, then the nested schema's
        ``id`` field attribute, then :attr:`default_id_field`.
        """
        if self.__id_field:
            return self.__id_field
        if self.__schema:
            field = self.schema.fields['id']
            return field.attribute or self.default_id_field
        else:
            return self.default_id_field
    @property
    def schema(self):
        """Resolve (and cache) the nested schema used for included data.

        Accepts a schema instance, a schema class, a registered class name,
        or the recursive-nested marker; raises ``ValueError`` otherwise.
        """
        only = getattr(self, 'only', None)
        exclude = getattr(self, 'exclude', ())
        context = getattr(self, 'context', {})
        if isinstance(self.__schema, SchemaABC):
            return self.__schema
        if isinstance(self.__schema, type) and issubclass(self.__schema, SchemaABC):
            # Instantiate lazily and cache the instance in place of the class.
            self.__schema = self.__schema(only=only, exclude=exclude, context=context)
            return self.__schema
        if isinstance(self.__schema, basestring):
            if self.__schema == _RECURSIVE_NESTED:
                # Self-referential nesting: reuse the parent schema's class.
                parent_class = self.parent.__class__
                self.__schema = parent_class(
                    only=only, exclude=exclude, context=context,
                    include_data=self.parent.include_data,
                )
            else:
                # Look the schema class up by its registered name.
                schema_class = class_registry.get_class(self.__schema)
                self.__schema = schema_class(
                    only=only, exclude=exclude,
                    context=context,
                )
            return self.__schema
        else:
            raise ValueError((
                'A Schema is required to serialize a nested '
                'relationship with include_data'
            ))
    def get_related_url(self, obj):
        """Render the ``related`` link for *obj*; ``None`` when no template is
        set or every template parameter resolved to ``None``."""
        if self.related_url:
            params = resolve_params(obj, self.related_url_kwargs, default=self.default)
            non_null_params = {
                key: value for key, value in params.items()
                if value is not None
            }
            if non_null_params:
                return self.related_url.format(**non_null_params)
        return None
    def get_self_url(self, obj):
        """Render the ``self`` link for *obj*; ``None`` when no template is
        set or every template parameter resolved to ``None``."""
        if self.self_url:
            params = resolve_params(obj, self.self_url_kwargs, default=self.default)
            non_null_params = {
                key: value for key, value in params.items()
                if value is not None
            }
            if non_null_params:
                return self.self_url.format(**non_null_params)
        return None
    def get_resource_linkage(self, value):
        """Build the resource linkage: a type/id object, or a list of them
        when ``many`` is true."""
        if self.many:
            resource_object = [
                {
                    'type': self.type_,
                    'id': _stringify(self._get_id(each)),
                } for each in value
            ]
        else:
            resource_object = {
                'type': self.type_,
                'id': _stringify(self._get_id(value)),
            }
        return resource_object
    def extract_value(self, data):
        """Extract the id key and validate the request structure."""
        errors = []
        if 'id' not in data:
            errors.append('Must have an `id` field')
        if 'type' not in data:
            errors.append('Must have a `type` field')
        elif data['type'] != self.type_:
            errors.append('Invalid `type` specified')
        if errors:
            raise ValidationError(errors)
        # If ``attributes`` is set, we've folded included data into this
        # relationship. Unserialize it if we have a schema set; otherwise we
        # fall back below to old behaviour of only IDs.
        if 'attributes' in data and self.__schema:
            result = self.schema.load({'data': data, 'included': self.root.included_data})
            return result.data if _MARSHMALLOW_VERSION_INFO[0] < 3 else result
        id_value = data.get('id')
        if self.__schema:
            id_value = self.schema.fields['id'].deserialize(id_value)
        return id_value
    def _deserialize(self, value, attr, obj):
        """Deserialize a resource linkage into id value(s), honoring ``many``."""
        if self.many:
            if not is_collection(value):
                raise ValidationError('Relationship is list-like')
            return [self.extract_value(item) for item in value]
        if is_collection(value):
            raise ValidationError('Relationship is not list-like')
        return self.extract_value(value)
    def _serialize(self, value, attr, obj):
        """Serialize to a relationship object: links, resource linkage, and
        (side effect) any included data stashed on the root schema."""
        dict_class = self.parent.dict_class if self.parent else dict
        ret = dict_class()
        self_url = self.get_self_url(obj)
        related_url = self.get_related_url(obj)
        if self_url or related_url:
            ret['links'] = dict_class()
            if self_url:
                ret['links']['self'] = self_url
            if related_url:
                ret['links']['related'] = related_url
        # resource linkage is required when including the data
        if self.include_resource_linkage or self.include_data:
            if value is None:
                ret['data'] = [] if self.many else None
            else:
                ret['data'] = self.get_resource_linkage(value)
        if self.include_data and value is not None:
            if self.many:
                for item in value:
                    self._serialize_included(item)
            else:
                self._serialize_included(value)
        return ret
    def _serialize_included(self, value):
        """Dump *value* with the nested schema and record it (plus whatever it
        itself included) in the root schema's ``included_data``."""
        result = self.schema.dump(value)
        # marshmallow 2 returns a MarshalResult namedtuple; 3 returns the dict.
        if _MARSHMALLOW_VERSION_INFO[0] < 3:
            data = result.data
        else:
            data = result
        item = data['data']
        self.root.included_data[(item['type'], item['id'])] = item
        for key, value in iteritems(self.schema.included_data):
            self.root.included_data[key] = value
    def _get_id(self, value):
        """Pull the id off *value*, accounting for the marshmallow 2/3
        ``get_attribute``/``get_value`` argument-order change."""
        if _MARSHMALLOW_VERSION_INFO[0] >= 3:
            if self.__schema:
                return self.schema.get_attribute(value, self.id_field, value)
            else:
                return get_value(value, self.id_field, value)
        else:
            # marshmallow 2: (attr, obj) argument order.
            if self.__schema:
                return self.schema.get_attribute(self.id_field, value, value)
            else:
                return get_value(value, self.id_field, value)
|
marshmallow-code/marshmallow-jsonapi
|
examples/flask_example.py
|
J
|
python
|
def J(*args, **kwargs):
    """Wrap :func:`flask.jsonify` and stamp the JSON API media type on the response."""
    resp = jsonify(*args, **kwargs)
    resp.mimetype = 'application/vnd.api+json'
    return resp
|
Wrapper around jsonify that sets the Content-Type of the response to
application/vnd.api+json.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/examples/flask_example.py#L102-L108
| null |
from flask import Flask, request, jsonify
### MODELS ###
class Model:
    """Tiny in-memory record type: every keyword argument becomes an attribute."""
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
class Comment(Model):
    # A comment on a post; instances carry ``id`` and ``body``.
    pass
class Author(Model):
    # A post author; instances carry ``id``, ``first_name``, ``last_name``, ``twitter``.
    pass
class Post(Model):
    # A blog post; instances carry ``id``, ``title``, ``author``, ``comments``.
    pass
### MOCK DATABASE ###
comment1 = Comment(id=1, body='First!')
comment2 = Comment(id=2, body='I like XML better!')
author1 = Author(id=1, first_name='Dan', last_name='Gebhardt', twitter='dgeb')
post1 = Post(
id=1, title='JSON API paints my bikeshed!',
author=author1, comments=[comment1, comment2],
)
db = {
'comments': [
comment1,
comment2,
],
'authors': [author1],
'posts': [post1],
}
### SCHEMAS ###
from marshmallow import validate, ValidationError # noqa: E402
from marshmallow_jsonapi import fields # noqa: E402
from marshmallow_jsonapi.flask import Relationship, Schema # noqa: E402
class CommentSchema(Schema):
    """JSON API schema for comment resources."""
    id = fields.Str(dump_only=True)
    body = fields.Str()
    class Meta:
        # Resource type plus Flask view names used to build self links.
        type_ = 'comments'
        self_view = 'comment_detail'
        self_view_kwargs = {'comment_id': '<id>', '_external': True}
        self_view_many = 'comments_list'
class AuthorSchema(Schema):
    """JSON API schema for author ("people") resources."""
    id = fields.Str(dump_only=True)
    first_name = fields.Str(required=True)
    last_name = fields.Str(required=True)
    # Write-only; minimum length of 6 enforced on load.
    password = fields.Str(load_only=True, validate=validate.Length(6))
    twitter = fields.Str()
    class Meta:
        type_ = 'people'
        self_view = 'author_detail'
        self_view_kwargs = {'author_id': '<id>'}
        self_view_many = 'authors_list'
class PostSchema(Schema):
    """JSON API schema for posts, with relationships to author and comments."""
    id = fields.Str(dump_only=True)
    title = fields.Str()
    # to-one relationship; the related document is folded into ``included``.
    author = Relationship(
        related_view='author_detail',
        related_view_kwargs={'author_id': '<author.id>', '_external': True},
        include_data=True,
        type_='people',
    )
    # to-many relationship, likewise included.
    comments = Relationship(
        related_view='posts_comments',
        related_view_kwargs={'post_id': '<id>', '_external': True},
        many=True,
        include_data=True,
        type_='comments',
    )
    class Meta:
        type_ = 'posts'
        self_view = 'posts_detail'
        self_view_kwargs = {'post_id': '<id>'}
        self_view_many = 'posts_list'
### VIEWS ###
app = Flask(__name__)
# Example application only -- never enable debug mode in production.
app.config['DEBUG'] = True
@app.route('/posts/', methods=['GET'])
def posts_list():
    """List all posts."""
    serialized = PostSchema(many=True).dump(db['posts'])
    return J(serialized)
@app.route('/posts/<int:post_id>')
def posts_detail(post_id):
    """Show a single post."""
    serialized = PostSchema().dump(db['posts'][post_id - 1])
    return J(serialized)
@app.route('/posts/<int:post_id>/comments/')
def posts_comments(post_id):
    """List the comments attached to a post."""
    post = db['posts'][post_id - 1]
    serialized = CommentSchema(many=True).dump(post.comments)
    return J(serialized)
@app.route('/authors/')
def authors_list():
    """List all authors."""
    serialized = AuthorSchema(many=True).dump(db['authors'])
    return J(serialized)
@app.route('/authors/<int:author_id>')
def author_detail(author_id):
    """Show a single author."""
    serialized = AuthorSchema().dump(db['authors'][author_id - 1])
    return J(serialized)
@app.route('/authors/', methods=['POST'])
def author_create():
    """Create an author from a JSON API request body."""
    schema = AuthorSchema()
    input_data = request.get_json() or {}
    try:
        data = schema.load(input_data)
    except ValidationError as err:
        # 422 Unprocessable Entity with the schema's error messages.
        return J(err.messages), 422
    # New id is the next list index (ids are assumed dense and 0/1-based
    # per the mock db above -- TODO confirm intended off-by-one vs. other views).
    id_ = len(db['authors'])
    author = Author(id=id_, **data)
    db['authors'].append(author)
    data = schema.dump(author)
    return J(data)
@app.route('/comments/')
def comments_list():
    """List all comments."""
    serialized = CommentSchema(many=True).dump(db['comments'])
    return J(serialized)
@app.route('/comments/<int:comment_id>')
def comment_detail(comment_id):
    """Show a single comment."""
    serialized = CommentSchema().dump(db['comments'][comment_id - 1])
    return J(serialized)
# Run the example development server when executed directly.
if __name__ == '__main__':
    app.run()
|
marshmallow-code/marshmallow-jsonapi
|
marshmallow_jsonapi/utils.py
|
resolve_params
|
python
|
def resolve_params(obj, params, default=missing):
    """Resolve ``< >``-templated values in *params* against *obj*.

    Values enclosed in ``< >`` are looked up as attributes on *obj*;
    anything else is passed through unchanged. Raises ``AttributeError``
    when a templated attribute cannot be found on *obj*.
    """
    resolved = {}
    for name, attr_tpl in iteritems(params):
        attr_name = tpl(str(attr_tpl))
        if not attr_name:
            # Not a template -- pass the literal value through.
            resolved[name] = attr_tpl
            continue
        attribute_value = get_value(obj, attr_name, default=default)
        if attribute_value is missing:
            raise AttributeError(
                '{attr_name!r} is not a valid '
                'attribute of {obj!r}'.format(attr_name=attr_name, obj=obj),
            )
        resolved[name] = attribute_value
    return resolved
|
Given a dictionary of keyword arguments, return the same dictionary except with
values enclosed in `< >` resolved to attributes on `obj`.
|
train
|
https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/utils.py#L38-L56
|
[
"def tpl(val):\n \"\"\"Return value within ``< >`` if possible, else return ``None``.\"\"\"\n match = _tpl_pattern.match(val)\n if match:\n return match.groups()[0]\n return None\n"
] |
# -*- coding: utf-8 -*-
"""Utility functions.
This module should be considered private API.
"""
import re
import marshmallow
from marshmallow.compat import iteritems
from marshmallow.utils import get_value as _get_value, missing
# Parsed marshmallow version, e.g. (2, 19, 5); non-numeric parts are dropped.
_MARSHMALLOW_VERSION_INFO = tuple(
    [int(part) for part in marshmallow.__version__.split('.') if part.isdigit()],
)
def get_dump_key(field):
    """Return the key *field* serializes to, across marshmallow versions."""
    # marshmallow 3 renamed ``dump_to`` to ``data_key``.
    if _MARSHMALLOW_VERSION_INFO[0] >= 3:
        return field.data_key
    return field.dump_to
# marshmallow 2's get_value takes (attr, obj); normalize to the 3.x
# (obj, attr) argument order so the rest of the package uses one signature.
if _MARSHMALLOW_VERSION_INFO[0] >= 3:
    get_value = _get_value
else:
    def get_value(obj, attr, *args, **kwargs):
        return _get_value(attr, obj, *args, **kwargs)
_tpl_pattern = re.compile(r'\s*<\s*(\S*)\s*>\s*')
def tpl(val):
    """Return the text enclosed in ``< >`` if *val* matches, else ``None``."""
    found = _tpl_pattern.match(val)
    return found.group(1) if found else None
|
dnouri/nolearn
|
nolearn/lasagne/visualize.py
|
plot_conv_weights
|
python
|
def plot_conv_weights(layer, figsize=(6, 6)):
    """Plot the weights of a convolutional layer: one figure per input
    feature map, filters laid out on a near-square grid of subplots.

    Returns the ``matplotlib.pyplot`` module.
    """
    W = layer.W.get_value()
    shape = W.shape
    # Grid side length: smallest square that fits all output filters.
    nrows = np.ceil(np.sqrt(shape[0])).astype(int)
    ncols = nrows
    for feature_map in range(shape[1]):
        figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
        for ax in axes.flatten():
            ax.set_xticks([])
            ax.set_yticks([])
            ax.axis('off')
        for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
            if i >= shape[0]:
                break
            axes[r, c].imshow(W[i, feature_map], cmap='gray',
                              interpolation='none')
    return plt
|
Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L26-L54
| null |
from itertools import product
from lasagne.layers import get_output
from lasagne.layers import get_output_shape
from lasagne.objectives import binary_crossentropy
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import io
import lasagne
def plot_loss(net):
    """Plot training and validation loss per epoch from a net's history."""
    losses_train = [entry['train_loss'] for entry in net.train_history_]
    losses_valid = [entry['valid_loss'] for entry in net.train_history_]
    plt.plot(losses_train, label='train loss')
    plt.plot(losses_valid, label='valid loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(loc='best')
    return plt
def plot_conv_activity(layer, x, figsize=(6, 8)):
    """Plot the activities of a specific layer.
    Only really makes sense with layers that work 2D data (2D
    convolutional layers, 2D pooling layers ...).
    Parameters
    ----------
    layer : lasagne.layers.Layer
    x : numpy.ndarray
        Only takes one sample at a time, i.e. x.shape[0] == 1.
    """
    if x.shape[0] != 1:
        raise ValueError("Only one sample can be plotted at a time.")
    # compile theano function
    xs = T.tensor4('xs').astype(theano.config.floatX)
    get_activity = theano.function([xs], get_output(layer, xs))
    activity = get_activity(x)
    shape = activity.shape
    # Grid side length: smallest square that fits all feature maps.
    nrows = np.ceil(np.sqrt(shape[1])).astype(int)
    ncols = nrows
    figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
    # Top row: the (inverted) original image, centered above the grid.
    # NOTE(review): assumes x is (1, c, h, w) and plots channel 0 -- confirm.
    axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
                               interpolation='none')
    axes[0, ncols // 2].set_title('original')
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
        if i >= shape[1]:
            break
        ndim = activity[0][i].ndim
        if ndim != 2:
            raise ValueError("Wrong number of dimensions, image data should "
                             "have 2, instead got {}".format(ndim))
        axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
                              interpolation='none')
    return plt
def occlusion_heatmap(net, x, target, square_length=7):
    """An occlusion test that checks an image for its critical parts.
    In this function, a square part of the image is occluded (i.e. set
    to 0) and then the net is tested for its propensity to predict the
    correct label. One should expect that this propensity shrinks if
    critical parts of the image are occluded. If not, this indicates
    overfitting.
    Depending on the depth of the net and the size of the image, this
    function may take a while to finish, since one prediction for each
    pixel of the image is made.
    Currently, all color channels are occluded at the same time. Also,
    this does not really work if images are randomly distorted by the
    batch iterator.
    See paper: Zeiler, Fergus 2013
    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    x : np.array
        The input data, should be of shape (1, c, x, y). Only makes
        sense with image data.
    target : int
        The true value of the image. If the net makes several
        predictions, say 10 classes, this indicates which one to look
        at.
    square_length : int (default=7)
        The length of the side of the square that occludes the image.
        Must be an odd number.
    Results
    -------
    heat_array : np.array (with same size as image)
        An 2D np.array that at each point (i, j) contains the predicted
        probability of the correct class if the image is occluded by a
        square with center (i, j).
    """
    if (x.ndim != 4) or x.shape[0] != 1:
        raise ValueError("This function requires the input data to be of "
                         "shape (1, c, x, y), instead got {}".format(x.shape))
    if square_length % 2 == 0:
        raise ValueError("Square length has to be an odd number, instead "
                         "got {}.".format(square_length))
    num_classes = get_output_shape(net.layers_[-1])[1]
    img = x[0].copy()
    bs, col, s0, s1 = x.shape
    heat_array = np.zeros((s0, s1))
    # Pad so the occluding square can be centered on border pixels too.
    pad = square_length // 2 + 1
    x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
    probs = np.zeros((s0, s1, num_classes))
    # generate occluded images
    for i in range(s0):
        # batch s1 occluded images for faster prediction
        for j in range(s1):
            x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
            x_pad[:, i:i + square_length, j:j + square_length] = 0.
            x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
        y_proba = net.predict_proba(x_occluded)
        probs[i] = y_proba.reshape(s1, num_classes)
    # from predicted probabilities, pick only those of target class
    for i in range(s0):
        for j in range(s1):
            heat_array[i, j] = probs[i, j, target]
    return heat_array
def _plot_heat_map(net, X, figsize, get_heat_image):
    """Shared plotting helper: for each sample in X, draw the raw image,
    the heat map produced by ``get_heat_image(net, sample, index)``, and
    both superimposed. Returns the ``matplotlib.pyplot`` module."""
    if (X.ndim != 4):
        raise ValueError("This function requires the input data to be of "
                         "shape (b, c, x, y), instead got {}".format(X.shape))
    num_images = X.shape[0]
    if figsize[1] is None:
        # Scale figure height with the number of rows (3 panels per sample).
        figsize = (figsize[0], num_images * figsize[0] / 3)
    figs, axes = plt.subplots(num_images, 3, figsize=figsize)
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for n in range(num_images):
        heat_img = get_heat_image(net, X[n:n + 1, :, :, :], n)
        # With a single sample, plt.subplots returns a 1-D axes array.
        ax = axes if num_images == 1 else axes[n]
        img = X[n, :, :, :].mean(0)
        ax[0].imshow(-img, interpolation='nearest', cmap='gray')
        ax[0].set_title('image')
        ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
        ax[1].set_title('critical parts')
        ax[2].imshow(-img, interpolation='nearest', cmap='gray')
        ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
                     alpha=0.6)
        ax[2].set_title('super-imposed')
    return plt
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
    """Plot which parts of an image are particularly important for the
    net to classify the image correctly.
    See paper: Zeiler, Fergus 2013
    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    X : numpy.array
        The input data, should be of shape (b, c, 0, 1). Only makes
        sense with image data.
    target : list or numpy.array of ints
        The true values of the image. If the net makes several
        predictions, say 10 classes, this indicates which one to look
        at. If more than one sample is passed to X, each of them needs
        its own target.
    square_length : int (default=7)
        The length of the side of the square that occludes the image.
        Must be an odd number.
    figsize : tuple (int, int)
        Size of the figure.
    Plots
    -----
    Figure with 3 subplots: the original image, the occlusion heatmap,
    and both images super-imposed.
    """
    def heat_for_sample(net_, sample, idx):
        return occlusion_heatmap(net_, sample, target[idx], square_length)
    return _plot_heat_map(net, X, figsize, heat_for_sample)
def saliency_map(input, output, pred, X):
    """Absolute gradient of the predicted-class score with respect to the input."""
    score = -binary_crossentropy(output[:, pred], np.array([1])).sum()
    gradient = T.grad(score, input)
    return np.abs(gradient.eval({input: X}))
def saliency_map_net(net, X):
    """Saliency map for a fitted net, squeezed to image layout."""
    input_var = net.layers_[0].input_var
    output = get_output(net.layers_[-1])
    pred = output.eval({input_var: X}).argmax(axis=1)
    smap = saliency_map(input_var, output, pred, X)
    return smap[0].transpose(1, 2, 0).squeeze()
def plot_saliency(net, X, figsize=(9, None)):
    """Plot saliency heat maps for each sample in X."""
    def heat_for_sample(net_, sample, idx):
        return -saliency_map_net(net_, sample)
    return _plot_heat_map(net, X, figsize, heat_for_sample)
def get_hex_color(layer_type):
    """
    Determines the hex color for a layer.
    :parameters:
        - layer_type : string
            Class name of the layer
    :returns:
        - color : string containing a hex color for filling block.
    """
    COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
              '#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
              '#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
              '#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
    # Five shades per color family; hash picks one (stable within a run).
    shade = int(hash(layer_type)) % 5
    if "conv" in layer_type.lower():
        family = COLORS[:5]
    elif layer_type in lasagne.layers.pool.__all__:
        family = COLORS[5:10]
    elif layer_type in lasagne.layers.recurrent.__all__:
        family = COLORS[10:15]
    else:
        family = COLORS[15:20]
    return family[shade]
def make_pydot_graph(layers, output_shape=True, verbose=False):
    """
    :parameters:
        - layers : list
            List of the layers, as obtained from lasagne.layers.get_all_layers
        - output_shape: (default `True`)
            If `True`, the output shape of each layer will be displayed.
        - verbose: (default `False`)
            If `True`, layer attributes like filter shape, stride, etc.
            will be displayed.
    :returns:
        - pydot_graph : PyDot object containing the graph
    """
    import pydotplus as pydot
    pydot_graph = pydot.Dot('Network', graph_type='digraph')
    pydot_nodes = {}
    pydot_edges = []
    for i, layer in enumerate(layers):
        # Prefer the user-assigned layer name; fall back to the class name.
        layer_name = getattr(layer, 'name', None)
        if layer_name is None:
            layer_name = layer.__class__.__name__
        layer_type = '{0}'.format(layer_name)
        # repr() is unique per layer instance, so it serves as the node key.
        key = repr(layer)
        label = layer_type
        color = get_hex_color(layer_type)
        if verbose:
            for attr in ['num_filters', 'num_units', 'ds',
                         'filter_shape', 'stride', 'strides', 'p']:
                if hasattr(layer, attr):
                    label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
            if hasattr(layer, 'nonlinearity'):
                try:
                    # Plain functions have __name__; callable objects do not.
                    nonlinearity = layer.nonlinearity.__name__
                except AttributeError:
                    nonlinearity = layer.nonlinearity.__class__.__name__
                label += '\nnonlinearity: {0}'.format(nonlinearity)
        if output_shape:
            label += '\nOutput shape: {0}'.format(layer.output_shape)
        pydot_nodes[key] = pydot.Node(
            key, label=label, shape='record', fillcolor=color, style='filled')
        # Multi-input layers expose ``input_layers``; single-input ``input_layer``.
        if hasattr(layer, 'input_layers'):
            for input_layer in layer.input_layers:
                pydot_edges.append([repr(input_layer), key])
        if hasattr(layer, 'input_layer'):
            pydot_edges.append([repr(layer.input_layer), key])
    for node in pydot_nodes.values():
        pydot_graph.add_node(node)
    for edges in pydot_edges:
        pydot_graph.add_edge(
            pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
    return pydot_graph
def draw_to_file(layers, filename, **kwargs):
    """
    Draws a network diagram to a file
    :parameters:
        - layers : list or NeuralNet instance
            List of layers or the neural net to draw.
        - filename : string
            The filename to save output to
        - **kwargs: see docstring of make_pydot_graph for other options
    """
    if hasattr(layers, 'get_all_layers'):
        layers = layers.get_all_layers()
    dot = make_pydot_graph(layers, **kwargs)
    # Output format is taken from the filename extension.
    ext = filename[filename.rfind('.') + 1:]
    with io.open(filename, 'wb') as fid:
        fid.write(dot.create(format=ext))
def draw_to_notebook(layers, **kwargs):
    """
    Draws a network diagram in an IPython notebook
    :parameters:
        - layers : list or NeuralNet instance
            List of layers or the neural net to draw.
        - **kwargs : see the docstring of make_pydot_graph for other options
    """
    from IPython.display import Image
    if hasattr(layers, 'get_all_layers'):
        layers = layers.get_all_layers()
    dot = make_pydot_graph(layers, **kwargs)
    return Image(dot.create_png())
|
dnouri/nolearn
|
nolearn/lasagne/visualize.py
|
plot_conv_activity
|
python
|
def plot_conv_activity(layer, x, figsize=(6, 8)):
    """Plot the activities of a specific layer for a single input sample.

    Only really makes sense with layers that operate on 2D data (2D
    convolutional layers, 2D pooling layers, ...). Returns the
    ``matplotlib.pyplot`` module.

    :param layer: lasagne.layers.Layer whose output to visualize.
    :param x: numpy.ndarray batch of exactly one sample (``x.shape[0] == 1``).
    """
    if x.shape[0] != 1:
        raise ValueError("Only one sample can be plotted at a time.")
    # compile theano function
    xs = T.tensor4('xs').astype(theano.config.floatX)
    get_activity = theano.function([xs], get_output(layer, xs))
    activity = get_activity(x)
    shape = activity.shape
    # Grid side length: smallest square that fits all feature maps.
    nrows = np.ceil(np.sqrt(shape[1])).astype(int)
    ncols = nrows
    figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
    axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
                               interpolation='none')
    axes[0, ncols // 2].set_title('original')
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
        if i >= shape[1]:
            break
        ndim = activity[0][i].ndim
        if ndim != 2:
            raise ValueError("Wrong number of dimensions, image data should "
                             "have 2, instead got {}".format(ndim))
        axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
                              interpolation='none')
    return plt
|
Plot the activities of a specific layer.
Only really makes sense with layers that work 2D data (2D
convolutional layers, 2D pooling layers ...).
Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
Only takes one sample at a time, i.e. x.shape[0] == 1.
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L57-L102
| null |
from itertools import product
from lasagne.layers import get_output
from lasagne.layers import get_output_shape
from lasagne.objectives import binary_crossentropy
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import io
import lasagne
def plot_loss(net):
    """Plot training and validation loss per epoch from a net's history."""
    losses_train = [entry['train_loss'] for entry in net.train_history_]
    losses_valid = [entry['valid_loss'] for entry in net.train_history_]
    plt.plot(losses_train, label='train loss')
    plt.plot(losses_valid, label='valid loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(loc='best')
    return plt
def plot_conv_weights(layer, figsize=(6, 6)):
    """Plot the weights of a specific layer.
    Only really makes sense with convolutional layers.
    Parameters
    ----------
    layer : lasagne.layers.Layer
    """
    weights = layer.W.get_value()
    n_filters = weights.shape[0]
    # Smallest square grid that fits all filters.
    grid = int(np.ceil(np.sqrt(n_filters)))
    for channel in range(weights.shape[1]):
        figs, axes = plt.subplots(grid, grid, figsize=figsize, squeeze=False)
        for ax in axes.flatten():
            ax.set_xticks([])
            ax.set_yticks([])
            ax.axis('off')
        for idx, (row, col) in enumerate(product(range(grid), range(grid))):
            if idx >= n_filters:
                break
            axes[row, col].imshow(weights[idx, channel], cmap='gray',
                                  interpolation='none')
    return plt
def occlusion_heatmap(net, x, target, square_length=7):
    """An occlusion test that checks an image for its critical parts.
    In this function, a square part of the image is occluded (i.e. set
    to 0) and then the net is tested for its propensity to predict the
    correct label. One should expect that this propensity shrinks if
    critical parts of the image are occluded. If not, this indicates
    overfitting.
    Depending on the depth of the net and the size of the image, this
    function may take a while to finish, since one prediction for each
    pixel of the image is made.
    Currently, all color channels are occluded at the same time. Also,
    this does not really work if images are randomly distorted by the
    batch iterator.
    See paper: Zeiler, Fergus 2013
    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    x : np.array
        The input data, should be of shape (1, c, x, y). Only makes
        sense with image data.
    target : int
        The true value of the image. If the net makes several
        predictions, say 10 classes, this indicates which one to look
        at.
    square_length : int (default=7)
        The length of the side of the square that occludes the image.
        Must be an odd number.
    Results
    -------
    heat_array : np.array (with same size as image)
        An 2D np.array that at each point (i, j) contains the predicted
        probability of the correct class if the image is occluded by a
        square with center (i, j).
    """
    if (x.ndim != 4) or x.shape[0] != 1:
        raise ValueError("This function requires the input data to be of "
                         "shape (1, c, x, y), instead got {}".format(x.shape))
    if square_length % 2 == 0:
        raise ValueError("Square length has to be an odd number, instead "
                         "got {}.".format(square_length))
    num_classes = get_output_shape(net.layers_[-1])[1]
    img = x[0].copy()
    bs, col, s0, s1 = x.shape
    heat_array = np.zeros((s0, s1))
    # Pad so the occluding square can be centered on border pixels too.
    pad = square_length // 2 + 1
    x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
    probs = np.zeros((s0, s1, num_classes))
    # generate occluded images
    for i in range(s0):
        # batch s1 occluded images for faster prediction
        for j in range(s1):
            x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
            x_pad[:, i:i + square_length, j:j + square_length] = 0.
            x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
        y_proba = net.predict_proba(x_occluded)
        probs[i] = y_proba.reshape(s1, num_classes)
    # from predicted probabilities, pick only those of target class
    for i in range(s0):
        for j in range(s1):
            heat_array[i, j] = probs[i, j, target]
    return heat_array
def _plot_heat_map(net, X, figsize, get_heat_image):
    """Shared plotting helper: for each sample in X, draw the raw image,
    the heat map produced by ``get_heat_image(net, sample, index)``, and
    both superimposed. Returns the ``matplotlib.pyplot`` module."""
    if (X.ndim != 4):
        raise ValueError("This function requires the input data to be of "
                         "shape (b, c, x, y), instead got {}".format(X.shape))
    num_images = X.shape[0]
    if figsize[1] is None:
        # Scale figure height with the number of rows (3 panels per sample).
        figsize = (figsize[0], num_images * figsize[0] / 3)
    figs, axes = plt.subplots(num_images, 3, figsize=figsize)
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for n in range(num_images):
        heat_img = get_heat_image(net, X[n:n + 1, :, :, :], n)
        # With a single sample, plt.subplots returns a 1-D axes array.
        ax = axes if num_images == 1 else axes[n]
        img = X[n, :, :, :].mean(0)
        ax[0].imshow(-img, interpolation='nearest', cmap='gray')
        ax[0].set_title('image')
        ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
        ax[1].set_title('critical parts')
        ax[2].imshow(-img, interpolation='nearest', cmap='gray')
        ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
                     alpha=0.6)
        ax[2].set_title('super-imposed')
    return plt
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
    """Plot which parts of an image are particularly important for the
    net to classify the image correctly.
    See paper: Zeiler, Fergus 2013
    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    X : numpy.array
        The input data, should be of shape (b, c, 0, 1). Only makes
        sense with image data.
    target : list or numpy.array of ints
        The true values of the image. If the net makes several
        predictions, say 10 classes, this indicates which one to look
        at. If more than one sample is passed to X, each of them needs
        its own target.
    square_length : int (default=7)
        The length of the side of the square that occludes the image.
        Must be an odd number.
    figsize : tuple (int, int)
        Size of the figure.
    Plots
    -----
    Figure with 3 subplots: the original image, the occlusion heatmap,
    and both images super-imposed.
    """
    def heat_for_sample(net_, sample, idx):
        return occlusion_heatmap(net_, sample, target[idx], square_length)
    return _plot_heat_map(net, X, figsize, heat_for_sample)
def saliency_map(input, output, pred, X):
    """Absolute gradient of the predicted-class score with respect to the input."""
    score = -binary_crossentropy(output[:, pred], np.array([1])).sum()
    gradient = T.grad(score, input)
    return np.abs(gradient.eval({input: X}))
def saliency_map_net(net, X):
    """Saliency map for a fitted net, squeezed to image layout."""
    input_var = net.layers_[0].input_var
    output = get_output(net.layers_[-1])
    pred = output.eval({input_var: X}).argmax(axis=1)
    smap = saliency_map(input_var, output, pred, X)
    return smap[0].transpose(1, 2, 0).squeeze()
def plot_saliency(net, X, figsize=(9, None)):
    """Plot saliency heat maps for each sample in X."""
    def heat_for_sample(net_, sample, idx):
        return -saliency_map_net(net_, sample)
    return _plot_heat_map(net, X, figsize, heat_for_sample)
def get_hex_color(layer_type):
    """
    Determines the hex color for a layer.
    :parameters:
        - layer_type : string
            Class name of the layer
    :returns:
        - color : string containing a hex color for filling block.
    """
    COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
              '#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
              '#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
              '#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
    # Five shades per color family; hash picks one (stable within a run).
    shade = int(hash(layer_type)) % 5
    if "conv" in layer_type.lower():
        family = COLORS[:5]
    elif layer_type in lasagne.layers.pool.__all__:
        family = COLORS[5:10]
    elif layer_type in lasagne.layers.recurrent.__all__:
        family = COLORS[10:15]
    else:
        family = COLORS[15:20]
    return family[shade]
def make_pydot_graph(layers, output_shape=True, verbose=False):
    """
    :parameters:
        - layers : list
            List of the layers, as obtained from lasagne.layers.get_all_layers
        - output_shape: (default `True`)
            If `True`, the output shape of each layer will be displayed.
        - verbose: (default `False`)
            If `True`, layer attributes like filter shape, stride, etc.
            will be displayed.
    :returns:
        - pydot_graph : PyDot object containing the graph
    """
    import pydotplus as pydot
    pydot_graph = pydot.Dot('Network', graph_type='digraph')
    pydot_nodes = {}
    pydot_edges = []
    for i, layer in enumerate(layers):
        # Prefer the user-assigned layer name; fall back to the class name.
        layer_name = getattr(layer, 'name', None)
        if layer_name is None:
            layer_name = layer.__class__.__name__
        layer_type = '{0}'.format(layer_name)
        # repr() is unique per layer instance, so it serves as the node key.
        key = repr(layer)
        label = layer_type
        color = get_hex_color(layer_type)
        if verbose:
            for attr in ['num_filters', 'num_units', 'ds',
                         'filter_shape', 'stride', 'strides', 'p']:
                if hasattr(layer, attr):
                    label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
            if hasattr(layer, 'nonlinearity'):
                try:
                    # Plain functions have __name__; callable objects do not.
                    nonlinearity = layer.nonlinearity.__name__
                except AttributeError:
                    nonlinearity = layer.nonlinearity.__class__.__name__
                label += '\nnonlinearity: {0}'.format(nonlinearity)
        if output_shape:
            label += '\nOutput shape: {0}'.format(layer.output_shape)
        pydot_nodes[key] = pydot.Node(
            key, label=label, shape='record', fillcolor=color, style='filled')
        # Multi-input layers expose ``input_layers``; single-input ``input_layer``.
        if hasattr(layer, 'input_layers'):
            for input_layer in layer.input_layers:
                pydot_edges.append([repr(input_layer), key])
        if hasattr(layer, 'input_layer'):
            pydot_edges.append([repr(layer.input_layer), key])
    for node in pydot_nodes.values():
        pydot_graph.add_node(node)
    for edges in pydot_edges:
        pydot_graph.add_edge(
            pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
    return pydot_graph
def draw_to_file(layers, filename, **kwargs):
"""
Draws a network diagram to a file
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- filename : string
The filename to save output to
- **kwargs: see docstring of make_pydot_graph for other options
"""
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
ext = filename[filename.rfind('.') + 1:]
with io.open(filename, 'wb') as fid:
fid.write(dot.create(format=ext))
def draw_to_notebook(layers, **kwargs):
"""
Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options
"""
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png())
|
dnouri/nolearn
|
nolearn/lasagne/visualize.py
|
occlusion_heatmap
|
python
|
def occlusion_heatmap(net, x, target, square_length=7):
if (x.ndim != 4) or x.shape[0] != 1:
raise ValueError("This function requires the input data to be of "
"shape (1, c, x, y), instead got {}".format(x.shape))
if square_length % 2 == 0:
raise ValueError("Square length has to be an odd number, instead "
"got {}.".format(square_length))
num_classes = get_output_shape(net.layers_[-1])[1]
img = x[0].copy()
bs, col, s0, s1 = x.shape
heat_array = np.zeros((s0, s1))
pad = square_length // 2 + 1
x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
probs = np.zeros((s0, s1, num_classes))
# generate occluded images
for i in range(s0):
# batch s1 occluded images for faster prediction
for j in range(s1):
x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
x_pad[:, i:i + square_length, j:j + square_length] = 0.
x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
y_proba = net.predict_proba(x_occluded)
probs[i] = y_proba.reshape(s1, num_classes)
# from predicted probabilities, pick only those of target class
for i in range(s0):
for j in range(s1):
heat_array[i, j] = probs[i, j, target]
return heat_array
|
An occlusion test that checks an image for its critical parts.
In this function, a square part of the image is occluded (i.e. set
to 0) and then the net is tested for its propensity to predict the
correct label. One should expect that this propensity shrinks of
critical parts of the image are occluded. If not, this indicates
overfitting.
Depending on the depth of the net and the size of the image, this
function may take awhile to finish, since one prediction for each
pixel of the image is made.
Currently, all color channels are occluded at the same time. Also,
this does not really work if images are randomly distorted by the
batch iterator.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
x : np.array
The input data, should be of shape (1, c, x, y). Only makes
sense with image data.
target : int
The true value of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
Results
-------
heat_array : np.array (with same size as image)
An 2D np.array that at each point (i, j) contains the predicted
probability of the correct class if the image is occluded by a
square with center (i, j).
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L105-L180
| null |
from itertools import product
from lasagne.layers import get_output
from lasagne.layers import get_output_shape
from lasagne.objectives import binary_crossentropy
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import io
import lasagne
def plot_loss(net):
train_loss = [row['train_loss'] for row in net.train_history_]
valid_loss = [row['valid_loss'] for row in net.train_history_]
plt.plot(train_loss, label='train loss')
plt.plot(valid_loss, label='valid loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
return plt
def plot_conv_weights(layer, figsize=(6, 6)):
"""Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer
"""
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows
for feature_map in range(shape[1]):
figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[0]:
break
axes[r, c].imshow(W[i, feature_map], cmap='gray',
interpolation='none')
return plt
def plot_conv_activity(layer, x, figsize=(6, 8)):
"""Plot the acitivities of a specific layer.
Only really makes sense with layers that work 2D data (2D
convolutional layers, 2D pooling layers ...).
Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
Only takes one sample at a time, i.e. x.shape[0] == 1.
"""
if x.shape[0] != 1:
raise ValueError("Only one sample can be plotted at a time.")
# compile theano function
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))
activity = get_activity(x)
shape = activity.shape
nrows = np.ceil(np.sqrt(shape[1])).astype(int)
ncols = nrows
figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
interpolation='none')
axes[0, ncols // 2].set_title('original')
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[1]:
break
ndim = activity[0][i].ndim
if ndim != 2:
raise ValueError("Wrong number of dimensions, image data should "
"have 2, instead got {}".format(ndim))
axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
interpolation='none')
return plt
def _plot_heat_map(net, X, figsize, get_heat_image):
if (X.ndim != 4):
raise ValueError("This function requires the input data to be of "
"shape (b, c, x, y), instead got {}".format(X.shape))
num_images = X.shape[0]
if figsize[1] is None:
figsize = (figsize[0], num_images * figsize[0] / 3)
figs, axes = plt.subplots(num_images, 3, figsize=figsize)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for n in range(num_images):
heat_img = get_heat_image(net, X[n:n + 1, :, :, :], n)
ax = axes if num_images == 1 else axes[n]
img = X[n, :, :, :].mean(0)
ax[0].imshow(-img, interpolation='nearest', cmap='gray')
ax[0].set_title('image')
ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
ax[1].set_title('critical parts')
ax[2].imshow(-img, interpolation='nearest', cmap='gray')
ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
alpha=0.6)
ax[2].set_title('super-imposed')
return plt
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
"""Plot which parts of an image are particularly import for the
net to classify the image correctly.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
X : numpy.array
The input data, should be of shape (b, c, 0, 1). Only makes
sense with image data.
target : list or numpy.array of ints
The true values of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at. If more than one sample is passed to X, each of them needs
its own target.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
figsize : tuple (int, int)
Size of the figure.
Plots
-----
Figure with 3 subplots: the original image, the occlusion heatmap,
and both images super-imposed.
"""
return _plot_heat_map(
net, X, figsize, lambda net, X, n: occlusion_heatmap(
net, X, target[n], square_length))
def saliency_map(input, output, pred, X):
score = -binary_crossentropy(output[:, pred], np.array([1])).sum()
return np.abs(T.grad(score, input).eval({input: X}))
def saliency_map_net(net, X):
input = net.layers_[0].input_var
output = get_output(net.layers_[-1])
pred = output.eval({input: X}).argmax(axis=1)
return saliency_map(input, output, pred, X)[0].transpose(1, 2, 0).squeeze()
def plot_saliency(net, X, figsize=(9, None)):
return _plot_heat_map(
net, X, figsize, lambda net, X, n: -saliency_map_net(net, X))
def get_hex_color(layer_type):
"""
Determines the hex color for a layer.
:parameters:
- layer_type : string
Class name of the layer
:returns:
- color : string containing a hex color for filling block.
"""
COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
'#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
'#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
'#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
hashed = int(hash(layer_type)) % 5
if "conv" in layer_type.lower():
return COLORS[:5][hashed]
if layer_type in lasagne.layers.pool.__all__:
return COLORS[5:10][hashed]
if layer_type in lasagne.layers.recurrent.__all__:
return COLORS[10:15][hashed]
else:
return COLORS[15:20][hashed]
def make_pydot_graph(layers, output_shape=True, verbose=False):
"""
:parameters:
- layers : list
List of the layers, as obtained from lasagne.layers.get_all_layers
- output_shape: (default `True`)
If `True`, the output shape of each layer will be displayed.
- verbose: (default `False`)
If `True`, layer attributes like filter shape, stride, etc.
will be displayed.
:returns:
- pydot_graph : PyDot object containing the graph
"""
import pydotplus as pydot
pydot_graph = pydot.Dot('Network', graph_type='digraph')
pydot_nodes = {}
pydot_edges = []
for i, layer in enumerate(layers):
layer_name = getattr(layer, 'name', None)
if layer_name is None:
layer_name = layer.__class__.__name__
layer_type = '{0}'.format(layer_name)
key = repr(layer)
label = layer_type
color = get_hex_color(layer_type)
if verbose:
for attr in ['num_filters', 'num_units', 'ds',
'filter_shape', 'stride', 'strides', 'p']:
if hasattr(layer, attr):
label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
if hasattr(layer, 'nonlinearity'):
try:
nonlinearity = layer.nonlinearity.__name__
except AttributeError:
nonlinearity = layer.nonlinearity.__class__.__name__
label += '\nnonlinearity: {0}'.format(nonlinearity)
if output_shape:
label += '\nOutput shape: {0}'.format(layer.output_shape)
pydot_nodes[key] = pydot.Node(
key, label=label, shape='record', fillcolor=color, style='filled')
if hasattr(layer, 'input_layers'):
for input_layer in layer.input_layers:
pydot_edges.append([repr(input_layer), key])
if hasattr(layer, 'input_layer'):
pydot_edges.append([repr(layer.input_layer), key])
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edges in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
return pydot_graph
def draw_to_file(layers, filename, **kwargs):
"""
Draws a network diagram to a file
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- filename : string
The filename to save output to
- **kwargs: see docstring of make_pydot_graph for other options
"""
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
ext = filename[filename.rfind('.') + 1:]
with io.open(filename, 'wb') as fid:
fid.write(dot.create(format=ext))
def draw_to_notebook(layers, **kwargs):
"""
Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options
"""
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png())
|
dnouri/nolearn
|
nolearn/lasagne/visualize.py
|
plot_occlusion
|
python
|
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
return _plot_heat_map(
net, X, figsize, lambda net, X, n: occlusion_heatmap(
net, X, target[n], square_length))
|
Plot which parts of an image are particularly import for the
net to classify the image correctly.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
X : numpy.array
The input data, should be of shape (b, c, 0, 1). Only makes
sense with image data.
target : list or numpy.array of ints
The true values of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at. If more than one sample is passed to X, each of them needs
its own target.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
figsize : tuple (int, int)
Size of the figure.
Plots
-----
Figure with 3 subplots: the original image, the occlusion heatmap,
and both images super-imposed.
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L214-L250
|
[
"def _plot_heat_map(net, X, figsize, get_heat_image):\n if (X.ndim != 4):\n raise ValueError(\"This function requires the input data to be of \"\n \"shape (b, c, x, y), instead got {}\".format(X.shape))\n\n num_images = X.shape[0]\n if figsize[1] is None:\n figsize = (figsize[0], num_images * figsize[0] / 3)\n figs, axes = plt.subplots(num_images, 3, figsize=figsize)\n\n for ax in axes.flatten():\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis('off')\n\n for n in range(num_images):\n heat_img = get_heat_image(net, X[n:n + 1, :, :, :], n)\n\n ax = axes if num_images == 1 else axes[n]\n img = X[n, :, :, :].mean(0)\n ax[0].imshow(-img, interpolation='nearest', cmap='gray')\n ax[0].set_title('image')\n ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')\n ax[1].set_title('critical parts')\n ax[2].imshow(-img, interpolation='nearest', cmap='gray')\n ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',\n alpha=0.6)\n ax[2].set_title('super-imposed')\n return plt\n"
] |
from itertools import product
from lasagne.layers import get_output
from lasagne.layers import get_output_shape
from lasagne.objectives import binary_crossentropy
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import io
import lasagne
def plot_loss(net):
train_loss = [row['train_loss'] for row in net.train_history_]
valid_loss = [row['valid_loss'] for row in net.train_history_]
plt.plot(train_loss, label='train loss')
plt.plot(valid_loss, label='valid loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
return plt
def plot_conv_weights(layer, figsize=(6, 6)):
"""Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer
"""
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows
for feature_map in range(shape[1]):
figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[0]:
break
axes[r, c].imshow(W[i, feature_map], cmap='gray',
interpolation='none')
return plt
def plot_conv_activity(layer, x, figsize=(6, 8)):
"""Plot the acitivities of a specific layer.
Only really makes sense with layers that work 2D data (2D
convolutional layers, 2D pooling layers ...).
Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
Only takes one sample at a time, i.e. x.shape[0] == 1.
"""
if x.shape[0] != 1:
raise ValueError("Only one sample can be plotted at a time.")
# compile theano function
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))
activity = get_activity(x)
shape = activity.shape
nrows = np.ceil(np.sqrt(shape[1])).astype(int)
ncols = nrows
figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
interpolation='none')
axes[0, ncols // 2].set_title('original')
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[1]:
break
ndim = activity[0][i].ndim
if ndim != 2:
raise ValueError("Wrong number of dimensions, image data should "
"have 2, instead got {}".format(ndim))
axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
interpolation='none')
return plt
def occlusion_heatmap(net, x, target, square_length=7):
"""An occlusion test that checks an image for its critical parts.
In this function, a square part of the image is occluded (i.e. set
to 0) and then the net is tested for its propensity to predict the
correct label. One should expect that this propensity shrinks of
critical parts of the image are occluded. If not, this indicates
overfitting.
Depending on the depth of the net and the size of the image, this
function may take awhile to finish, since one prediction for each
pixel of the image is made.
Currently, all color channels are occluded at the same time. Also,
this does not really work if images are randomly distorted by the
batch iterator.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
x : np.array
The input data, should be of shape (1, c, x, y). Only makes
sense with image data.
target : int
The true value of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
Results
-------
heat_array : np.array (with same size as image)
An 2D np.array that at each point (i, j) contains the predicted
probability of the correct class if the image is occluded by a
square with center (i, j).
"""
if (x.ndim != 4) or x.shape[0] != 1:
raise ValueError("This function requires the input data to be of "
"shape (1, c, x, y), instead got {}".format(x.shape))
if square_length % 2 == 0:
raise ValueError("Square length has to be an odd number, instead "
"got {}.".format(square_length))
num_classes = get_output_shape(net.layers_[-1])[1]
img = x[0].copy()
bs, col, s0, s1 = x.shape
heat_array = np.zeros((s0, s1))
pad = square_length // 2 + 1
x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
probs = np.zeros((s0, s1, num_classes))
# generate occluded images
for i in range(s0):
# batch s1 occluded images for faster prediction
for j in range(s1):
x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
x_pad[:, i:i + square_length, j:j + square_length] = 0.
x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
y_proba = net.predict_proba(x_occluded)
probs[i] = y_proba.reshape(s1, num_classes)
# from predicted probabilities, pick only those of target class
for i in range(s0):
for j in range(s1):
heat_array[i, j] = probs[i, j, target]
return heat_array
def _plot_heat_map(net, X, figsize, get_heat_image):
if (X.ndim != 4):
raise ValueError("This function requires the input data to be of "
"shape (b, c, x, y), instead got {}".format(X.shape))
num_images = X.shape[0]
if figsize[1] is None:
figsize = (figsize[0], num_images * figsize[0] / 3)
figs, axes = plt.subplots(num_images, 3, figsize=figsize)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for n in range(num_images):
heat_img = get_heat_image(net, X[n:n + 1, :, :, :], n)
ax = axes if num_images == 1 else axes[n]
img = X[n, :, :, :].mean(0)
ax[0].imshow(-img, interpolation='nearest', cmap='gray')
ax[0].set_title('image')
ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
ax[1].set_title('critical parts')
ax[2].imshow(-img, interpolation='nearest', cmap='gray')
ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
alpha=0.6)
ax[2].set_title('super-imposed')
return plt
def saliency_map(input, output, pred, X):
score = -binary_crossentropy(output[:, pred], np.array([1])).sum()
return np.abs(T.grad(score, input).eval({input: X}))
def saliency_map_net(net, X):
input = net.layers_[0].input_var
output = get_output(net.layers_[-1])
pred = output.eval({input: X}).argmax(axis=1)
return saliency_map(input, output, pred, X)[0].transpose(1, 2, 0).squeeze()
def plot_saliency(net, X, figsize=(9, None)):
return _plot_heat_map(
net, X, figsize, lambda net, X, n: -saliency_map_net(net, X))
def get_hex_color(layer_type):
"""
Determines the hex color for a layer.
:parameters:
- layer_type : string
Class name of the layer
:returns:
- color : string containing a hex color for filling block.
"""
COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
'#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
'#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
'#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
hashed = int(hash(layer_type)) % 5
if "conv" in layer_type.lower():
return COLORS[:5][hashed]
if layer_type in lasagne.layers.pool.__all__:
return COLORS[5:10][hashed]
if layer_type in lasagne.layers.recurrent.__all__:
return COLORS[10:15][hashed]
else:
return COLORS[15:20][hashed]
def make_pydot_graph(layers, output_shape=True, verbose=False):
"""
:parameters:
- layers : list
List of the layers, as obtained from lasagne.layers.get_all_layers
- output_shape: (default `True`)
If `True`, the output shape of each layer will be displayed.
- verbose: (default `False`)
If `True`, layer attributes like filter shape, stride, etc.
will be displayed.
:returns:
- pydot_graph : PyDot object containing the graph
"""
import pydotplus as pydot
pydot_graph = pydot.Dot('Network', graph_type='digraph')
pydot_nodes = {}
pydot_edges = []
for i, layer in enumerate(layers):
layer_name = getattr(layer, 'name', None)
if layer_name is None:
layer_name = layer.__class__.__name__
layer_type = '{0}'.format(layer_name)
key = repr(layer)
label = layer_type
color = get_hex_color(layer_type)
if verbose:
for attr in ['num_filters', 'num_units', 'ds',
'filter_shape', 'stride', 'strides', 'p']:
if hasattr(layer, attr):
label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
if hasattr(layer, 'nonlinearity'):
try:
nonlinearity = layer.nonlinearity.__name__
except AttributeError:
nonlinearity = layer.nonlinearity.__class__.__name__
label += '\nnonlinearity: {0}'.format(nonlinearity)
if output_shape:
label += '\nOutput shape: {0}'.format(layer.output_shape)
pydot_nodes[key] = pydot.Node(
key, label=label, shape='record', fillcolor=color, style='filled')
if hasattr(layer, 'input_layers'):
for input_layer in layer.input_layers:
pydot_edges.append([repr(input_layer), key])
if hasattr(layer, 'input_layer'):
pydot_edges.append([repr(layer.input_layer), key])
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edges in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
return pydot_graph
def draw_to_file(layers, filename, **kwargs):
"""
Draws a network diagram to a file
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- filename : string
The filename to save output to
- **kwargs: see docstring of make_pydot_graph for other options
"""
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
ext = filename[filename.rfind('.') + 1:]
with io.open(filename, 'wb') as fid:
fid.write(dot.create(format=ext))
def draw_to_notebook(layers, **kwargs):
"""
Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options
"""
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png())
|
dnouri/nolearn
|
nolearn/lasagne/visualize.py
|
get_hex_color
|
python
|
def get_hex_color(layer_type):
COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
'#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
'#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
'#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
hashed = int(hash(layer_type)) % 5
if "conv" in layer_type.lower():
return COLORS[:5][hashed]
if layer_type in lasagne.layers.pool.__all__:
return COLORS[5:10][hashed]
if layer_type in lasagne.layers.recurrent.__all__:
return COLORS[10:15][hashed]
else:
return COLORS[15:20][hashed]
|
Determines the hex color for a layer.
:parameters:
- layer_type : string
Class name of the layer
:returns:
- color : string containing a hex color for filling block.
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L270-L293
| null |
from itertools import product
from lasagne.layers import get_output
from lasagne.layers import get_output_shape
from lasagne.objectives import binary_crossentropy
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import io
import lasagne
def plot_loss(net):
train_loss = [row['train_loss'] for row in net.train_history_]
valid_loss = [row['valid_loss'] for row in net.train_history_]
plt.plot(train_loss, label='train loss')
plt.plot(valid_loss, label='valid loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
return plt
def plot_conv_weights(layer, figsize=(6, 6)):
"""Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer
"""
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows
for feature_map in range(shape[1]):
figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[0]:
break
axes[r, c].imshow(W[i, feature_map], cmap='gray',
interpolation='none')
return plt
def plot_conv_activity(layer, x, figsize=(6, 8)):
"""Plot the acitivities of a specific layer.
Only really makes sense with layers that work 2D data (2D
convolutional layers, 2D pooling layers ...).
Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
Only takes one sample at a time, i.e. x.shape[0] == 1.
"""
if x.shape[0] != 1:
raise ValueError("Only one sample can be plotted at a time.")
# compile theano function
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))
activity = get_activity(x)
shape = activity.shape
nrows = np.ceil(np.sqrt(shape[1])).astype(int)
ncols = nrows
figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
interpolation='none')
axes[0, ncols // 2].set_title('original')
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[1]:
break
ndim = activity[0][i].ndim
if ndim != 2:
raise ValueError("Wrong number of dimensions, image data should "
"have 2, instead got {}".format(ndim))
axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
interpolation='none')
return plt
def occlusion_heatmap(net, x, target, square_length=7):
"""An occlusion test that checks an image for its critical parts.
In this function, a square part of the image is occluded (i.e. set
to 0) and then the net is tested for its propensity to predict the
correct label. One should expect that this propensity shrinks of
critical parts of the image are occluded. If not, this indicates
overfitting.
Depending on the depth of the net and the size of the image, this
function may take awhile to finish, since one prediction for each
pixel of the image is made.
Currently, all color channels are occluded at the same time. Also,
this does not really work if images are randomly distorted by the
batch iterator.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
x : np.array
The input data, should be of shape (1, c, x, y). Only makes
sense with image data.
target : int
The true value of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
Results
-------
heat_array : np.array (with same size as image)
An 2D np.array that at each point (i, j) contains the predicted
probability of the correct class if the image is occluded by a
square with center (i, j).
"""
if (x.ndim != 4) or x.shape[0] != 1:
raise ValueError("This function requires the input data to be of "
"shape (1, c, x, y), instead got {}".format(x.shape))
if square_length % 2 == 0:
raise ValueError("Square length has to be an odd number, instead "
"got {}.".format(square_length))
num_classes = get_output_shape(net.layers_[-1])[1]
img = x[0].copy()
bs, col, s0, s1 = x.shape
heat_array = np.zeros((s0, s1))
pad = square_length // 2 + 1
x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
probs = np.zeros((s0, s1, num_classes))
# generate occluded images
for i in range(s0):
# batch s1 occluded images for faster prediction
for j in range(s1):
x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
x_pad[:, i:i + square_length, j:j + square_length] = 0.
x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
y_proba = net.predict_proba(x_occluded)
probs[i] = y_proba.reshape(s1, num_classes)
# from predicted probabilities, pick only those of target class
for i in range(s0):
for j in range(s1):
heat_array[i, j] = probs[i, j, target]
return heat_array
def _plot_heat_map(net, X, figsize, get_heat_image):
if (X.ndim != 4):
raise ValueError("This function requires the input data to be of "
"shape (b, c, x, y), instead got {}".format(X.shape))
num_images = X.shape[0]
if figsize[1] is None:
figsize = (figsize[0], num_images * figsize[0] / 3)
figs, axes = plt.subplots(num_images, 3, figsize=figsize)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for n in range(num_images):
heat_img = get_heat_image(net, X[n:n + 1, :, :, :], n)
ax = axes if num_images == 1 else axes[n]
img = X[n, :, :, :].mean(0)
ax[0].imshow(-img, interpolation='nearest', cmap='gray')
ax[0].set_title('image')
ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
ax[1].set_title('critical parts')
ax[2].imshow(-img, interpolation='nearest', cmap='gray')
ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
alpha=0.6)
ax[2].set_title('super-imposed')
return plt
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
"""Plot which parts of an image are particularly import for the
net to classify the image correctly.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
X : numpy.array
The input data, should be of shape (b, c, 0, 1). Only makes
sense with image data.
target : list or numpy.array of ints
The true values of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at. If more than one sample is passed to X, each of them needs
its own target.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
figsize : tuple (int, int)
Size of the figure.
Plots
-----
Figure with 3 subplots: the original image, the occlusion heatmap,
and both images super-imposed.
"""
return _plot_heat_map(
net, X, figsize, lambda net, X, n: occlusion_heatmap(
net, X, target[n], square_length))
def saliency_map(input, output, pred, X):
score = -binary_crossentropy(output[:, pred], np.array([1])).sum()
return np.abs(T.grad(score, input).eval({input: X}))
def saliency_map_net(net, X):
input = net.layers_[0].input_var
output = get_output(net.layers_[-1])
pred = output.eval({input: X}).argmax(axis=1)
return saliency_map(input, output, pred, X)[0].transpose(1, 2, 0).squeeze()
def plot_saliency(net, X, figsize=(9, None)):
return _plot_heat_map(
net, X, figsize, lambda net, X, n: -saliency_map_net(net, X))
def make_pydot_graph(layers, output_shape=True, verbose=False):
"""
:parameters:
- layers : list
List of the layers, as obtained from lasagne.layers.get_all_layers
- output_shape: (default `True`)
If `True`, the output shape of each layer will be displayed.
- verbose: (default `False`)
If `True`, layer attributes like filter shape, stride, etc.
will be displayed.
:returns:
- pydot_graph : PyDot object containing the graph
"""
import pydotplus as pydot
pydot_graph = pydot.Dot('Network', graph_type='digraph')
pydot_nodes = {}
pydot_edges = []
for i, layer in enumerate(layers):
layer_name = getattr(layer, 'name', None)
if layer_name is None:
layer_name = layer.__class__.__name__
layer_type = '{0}'.format(layer_name)
key = repr(layer)
label = layer_type
color = get_hex_color(layer_type)
if verbose:
for attr in ['num_filters', 'num_units', 'ds',
'filter_shape', 'stride', 'strides', 'p']:
if hasattr(layer, attr):
label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
if hasattr(layer, 'nonlinearity'):
try:
nonlinearity = layer.nonlinearity.__name__
except AttributeError:
nonlinearity = layer.nonlinearity.__class__.__name__
label += '\nnonlinearity: {0}'.format(nonlinearity)
if output_shape:
label += '\nOutput shape: {0}'.format(layer.output_shape)
pydot_nodes[key] = pydot.Node(
key, label=label, shape='record', fillcolor=color, style='filled')
if hasattr(layer, 'input_layers'):
for input_layer in layer.input_layers:
pydot_edges.append([repr(input_layer), key])
if hasattr(layer, 'input_layer'):
pydot_edges.append([repr(layer.input_layer), key])
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edges in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
return pydot_graph
def draw_to_file(layers, filename, **kwargs):
"""
Draws a network diagram to a file
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- filename : string
The filename to save output to
- **kwargs: see docstring of make_pydot_graph for other options
"""
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
ext = filename[filename.rfind('.') + 1:]
with io.open(filename, 'wb') as fid:
fid.write(dot.create(format=ext))
def draw_to_notebook(layers, **kwargs):
"""
Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options
"""
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png())
|
dnouri/nolearn
|
nolearn/lasagne/visualize.py
|
make_pydot_graph
|
python
|
def make_pydot_graph(layers, output_shape=True, verbose=False):
import pydotplus as pydot
pydot_graph = pydot.Dot('Network', graph_type='digraph')
pydot_nodes = {}
pydot_edges = []
for i, layer in enumerate(layers):
layer_name = getattr(layer, 'name', None)
if layer_name is None:
layer_name = layer.__class__.__name__
layer_type = '{0}'.format(layer_name)
key = repr(layer)
label = layer_type
color = get_hex_color(layer_type)
if verbose:
for attr in ['num_filters', 'num_units', 'ds',
'filter_shape', 'stride', 'strides', 'p']:
if hasattr(layer, attr):
label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
if hasattr(layer, 'nonlinearity'):
try:
nonlinearity = layer.nonlinearity.__name__
except AttributeError:
nonlinearity = layer.nonlinearity.__class__.__name__
label += '\nnonlinearity: {0}'.format(nonlinearity)
if output_shape:
label += '\nOutput shape: {0}'.format(layer.output_shape)
pydot_nodes[key] = pydot.Node(
key, label=label, shape='record', fillcolor=color, style='filled')
if hasattr(layer, 'input_layers'):
for input_layer in layer.input_layers:
pydot_edges.append([repr(input_layer), key])
if hasattr(layer, 'input_layer'):
pydot_edges.append([repr(layer.input_layer), key])
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edges in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
return pydot_graph
|
:parameters:
- layers : list
List of the layers, as obtained from lasagne.layers.get_all_layers
- output_shape: (default `True`)
If `True`, the output shape of each layer will be displayed.
- verbose: (default `False`)
If `True`, layer attributes like filter shape, stride, etc.
will be displayed.
:returns:
- pydot_graph : PyDot object containing the graph
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L296-L352
|
[
"def get_hex_color(layer_type):\n \"\"\"\n Determines the hex color for a layer.\n :parameters:\n - layer_type : string\n Class name of the layer\n :returns:\n - color : string containing a hex color for filling block.\n \"\"\"\n COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',\n '#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',\n '#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',\n '#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']\n\n hashed = int(hash(layer_type)) % 5\n\n if \"conv\" in layer_type.lower():\n return COLORS[:5][hashed]\n if layer_type in lasagne.layers.pool.__all__:\n return COLORS[5:10][hashed]\n if layer_type in lasagne.layers.recurrent.__all__:\n return COLORS[10:15][hashed]\n else:\n return COLORS[15:20][hashed]\n"
] |
from itertools import product
from lasagne.layers import get_output
from lasagne.layers import get_output_shape
from lasagne.objectives import binary_crossentropy
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import io
import lasagne
def plot_loss(net):
train_loss = [row['train_loss'] for row in net.train_history_]
valid_loss = [row['valid_loss'] for row in net.train_history_]
plt.plot(train_loss, label='train loss')
plt.plot(valid_loss, label='valid loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
return plt
def plot_conv_weights(layer, figsize=(6, 6)):
"""Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer
"""
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows
for feature_map in range(shape[1]):
figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[0]:
break
axes[r, c].imshow(W[i, feature_map], cmap='gray',
interpolation='none')
return plt
def plot_conv_activity(layer, x, figsize=(6, 8)):
"""Plot the acitivities of a specific layer.
Only really makes sense with layers that work 2D data (2D
convolutional layers, 2D pooling layers ...).
Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
Only takes one sample at a time, i.e. x.shape[0] == 1.
"""
if x.shape[0] != 1:
raise ValueError("Only one sample can be plotted at a time.")
# compile theano function
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))
activity = get_activity(x)
shape = activity.shape
nrows = np.ceil(np.sqrt(shape[1])).astype(int)
ncols = nrows
figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
interpolation='none')
axes[0, ncols // 2].set_title('original')
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[1]:
break
ndim = activity[0][i].ndim
if ndim != 2:
raise ValueError("Wrong number of dimensions, image data should "
"have 2, instead got {}".format(ndim))
axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
interpolation='none')
return plt
def occlusion_heatmap(net, x, target, square_length=7):
"""An occlusion test that checks an image for its critical parts.
In this function, a square part of the image is occluded (i.e. set
to 0) and then the net is tested for its propensity to predict the
correct label. One should expect that this propensity shrinks of
critical parts of the image are occluded. If not, this indicates
overfitting.
Depending on the depth of the net and the size of the image, this
function may take awhile to finish, since one prediction for each
pixel of the image is made.
Currently, all color channels are occluded at the same time. Also,
this does not really work if images are randomly distorted by the
batch iterator.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
x : np.array
The input data, should be of shape (1, c, x, y). Only makes
sense with image data.
target : int
The true value of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
Results
-------
heat_array : np.array (with same size as image)
An 2D np.array that at each point (i, j) contains the predicted
probability of the correct class if the image is occluded by a
square with center (i, j).
"""
if (x.ndim != 4) or x.shape[0] != 1:
raise ValueError("This function requires the input data to be of "
"shape (1, c, x, y), instead got {}".format(x.shape))
if square_length % 2 == 0:
raise ValueError("Square length has to be an odd number, instead "
"got {}.".format(square_length))
num_classes = get_output_shape(net.layers_[-1])[1]
img = x[0].copy()
bs, col, s0, s1 = x.shape
heat_array = np.zeros((s0, s1))
pad = square_length // 2 + 1
x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
probs = np.zeros((s0, s1, num_classes))
# generate occluded images
for i in range(s0):
# batch s1 occluded images for faster prediction
for j in range(s1):
x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
x_pad[:, i:i + square_length, j:j + square_length] = 0.
x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
y_proba = net.predict_proba(x_occluded)
probs[i] = y_proba.reshape(s1, num_classes)
# from predicted probabilities, pick only those of target class
for i in range(s0):
for j in range(s1):
heat_array[i, j] = probs[i, j, target]
return heat_array
def _plot_heat_map(net, X, figsize, get_heat_image):
if (X.ndim != 4):
raise ValueError("This function requires the input data to be of "
"shape (b, c, x, y), instead got {}".format(X.shape))
num_images = X.shape[0]
if figsize[1] is None:
figsize = (figsize[0], num_images * figsize[0] / 3)
figs, axes = plt.subplots(num_images, 3, figsize=figsize)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for n in range(num_images):
heat_img = get_heat_image(net, X[n:n + 1, :, :, :], n)
ax = axes if num_images == 1 else axes[n]
img = X[n, :, :, :].mean(0)
ax[0].imshow(-img, interpolation='nearest', cmap='gray')
ax[0].set_title('image')
ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
ax[1].set_title('critical parts')
ax[2].imshow(-img, interpolation='nearest', cmap='gray')
ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
alpha=0.6)
ax[2].set_title('super-imposed')
return plt
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
"""Plot which parts of an image are particularly import for the
net to classify the image correctly.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
X : numpy.array
The input data, should be of shape (b, c, 0, 1). Only makes
sense with image data.
target : list or numpy.array of ints
The true values of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at. If more than one sample is passed to X, each of them needs
its own target.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
figsize : tuple (int, int)
Size of the figure.
Plots
-----
Figure with 3 subplots: the original image, the occlusion heatmap,
and both images super-imposed.
"""
return _plot_heat_map(
net, X, figsize, lambda net, X, n: occlusion_heatmap(
net, X, target[n], square_length))
def saliency_map(input, output, pred, X):
score = -binary_crossentropy(output[:, pred], np.array([1])).sum()
return np.abs(T.grad(score, input).eval({input: X}))
def saliency_map_net(net, X):
input = net.layers_[0].input_var
output = get_output(net.layers_[-1])
pred = output.eval({input: X}).argmax(axis=1)
return saliency_map(input, output, pred, X)[0].transpose(1, 2, 0).squeeze()
def plot_saliency(net, X, figsize=(9, None)):
return _plot_heat_map(
net, X, figsize, lambda net, X, n: -saliency_map_net(net, X))
def get_hex_color(layer_type):
"""
Determines the hex color for a layer.
:parameters:
- layer_type : string
Class name of the layer
:returns:
- color : string containing a hex color for filling block.
"""
COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
'#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
'#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
'#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
hashed = int(hash(layer_type)) % 5
if "conv" in layer_type.lower():
return COLORS[:5][hashed]
if layer_type in lasagne.layers.pool.__all__:
return COLORS[5:10][hashed]
if layer_type in lasagne.layers.recurrent.__all__:
return COLORS[10:15][hashed]
else:
return COLORS[15:20][hashed]
def draw_to_file(layers, filename, **kwargs):
"""
Draws a network diagram to a file
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- filename : string
The filename to save output to
- **kwargs: see docstring of make_pydot_graph for other options
"""
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
ext = filename[filename.rfind('.') + 1:]
with io.open(filename, 'wb') as fid:
fid.write(dot.create(format=ext))
def draw_to_notebook(layers, **kwargs):
"""
Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options
"""
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png())
|
dnouri/nolearn
|
nolearn/lasagne/visualize.py
|
draw_to_file
|
python
|
def draw_to_file(layers, filename, **kwargs):
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
ext = filename[filename.rfind('.') + 1:]
with io.open(filename, 'wb') as fid:
fid.write(dot.create(format=ext))
|
Draws a network diagram to a file
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- filename : string
The filename to save output to
- **kwargs: see docstring of make_pydot_graph for other options
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L355-L370
|
[
"def make_pydot_graph(layers, output_shape=True, verbose=False):\n \"\"\"\n :parameters:\n - layers : list\n List of the layers, as obtained from lasagne.layers.get_all_layers\n - output_shape: (default `True`)\n If `True`, the output shape of each layer will be displayed.\n - verbose: (default `False`)\n If `True`, layer attributes like filter shape, stride, etc.\n will be displayed.\n :returns:\n - pydot_graph : PyDot object containing the graph\n \"\"\"\n import pydotplus as pydot\n pydot_graph = pydot.Dot('Network', graph_type='digraph')\n pydot_nodes = {}\n pydot_edges = []\n for i, layer in enumerate(layers):\n layer_name = getattr(layer, 'name', None)\n if layer_name is None:\n layer_name = layer.__class__.__name__\n layer_type = '{0}'.format(layer_name)\n key = repr(layer)\n label = layer_type\n color = get_hex_color(layer_type)\n if verbose:\n for attr in ['num_filters', 'num_units', 'ds',\n 'filter_shape', 'stride', 'strides', 'p']:\n if hasattr(layer, attr):\n label += '\\n{0}: {1}'.format(attr, getattr(layer, attr))\n if hasattr(layer, 'nonlinearity'):\n try:\n nonlinearity = layer.nonlinearity.__name__\n except AttributeError:\n nonlinearity = layer.nonlinearity.__class__.__name__\n label += '\\nnonlinearity: {0}'.format(nonlinearity)\n\n if output_shape:\n label += '\\nOutput shape: {0}'.format(layer.output_shape)\n\n pydot_nodes[key] = pydot.Node(\n key, label=label, shape='record', fillcolor=color, style='filled')\n\n if hasattr(layer, 'input_layers'):\n for input_layer in layer.input_layers:\n pydot_edges.append([repr(input_layer), key])\n\n if hasattr(layer, 'input_layer'):\n pydot_edges.append([repr(layer.input_layer), key])\n\n for node in pydot_nodes.values():\n pydot_graph.add_node(node)\n\n for edges in pydot_edges:\n pydot_graph.add_edge(\n pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))\n return pydot_graph\n"
] |
from itertools import product
from lasagne.layers import get_output
from lasagne.layers import get_output_shape
from lasagne.objectives import binary_crossentropy
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import io
import lasagne
def plot_loss(net):
train_loss = [row['train_loss'] for row in net.train_history_]
valid_loss = [row['valid_loss'] for row in net.train_history_]
plt.plot(train_loss, label='train loss')
plt.plot(valid_loss, label='valid loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
return plt
def plot_conv_weights(layer, figsize=(6, 6)):
"""Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer
"""
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows
for feature_map in range(shape[1]):
figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[0]:
break
axes[r, c].imshow(W[i, feature_map], cmap='gray',
interpolation='none')
return plt
def plot_conv_activity(layer, x, figsize=(6, 8)):
"""Plot the acitivities of a specific layer.
Only really makes sense with layers that work 2D data (2D
convolutional layers, 2D pooling layers ...).
Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
Only takes one sample at a time, i.e. x.shape[0] == 1.
"""
if x.shape[0] != 1:
raise ValueError("Only one sample can be plotted at a time.")
# compile theano function
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))
activity = get_activity(x)
shape = activity.shape
nrows = np.ceil(np.sqrt(shape[1])).astype(int)
ncols = nrows
figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
interpolation='none')
axes[0, ncols // 2].set_title('original')
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[1]:
break
ndim = activity[0][i].ndim
if ndim != 2:
raise ValueError("Wrong number of dimensions, image data should "
"have 2, instead got {}".format(ndim))
axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
interpolation='none')
return plt
def occlusion_heatmap(net, x, target, square_length=7):
"""An occlusion test that checks an image for its critical parts.
In this function, a square part of the image is occluded (i.e. set
to 0) and then the net is tested for its propensity to predict the
correct label. One should expect that this propensity shrinks of
critical parts of the image are occluded. If not, this indicates
overfitting.
Depending on the depth of the net and the size of the image, this
function may take awhile to finish, since one prediction for each
pixel of the image is made.
Currently, all color channels are occluded at the same time. Also,
this does not really work if images are randomly distorted by the
batch iterator.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
x : np.array
The input data, should be of shape (1, c, x, y). Only makes
sense with image data.
target : int
The true value of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
Results
-------
heat_array : np.array (with same size as image)
An 2D np.array that at each point (i, j) contains the predicted
probability of the correct class if the image is occluded by a
square with center (i, j).
"""
if (x.ndim != 4) or x.shape[0] != 1:
raise ValueError("This function requires the input data to be of "
"shape (1, c, x, y), instead got {}".format(x.shape))
if square_length % 2 == 0:
raise ValueError("Square length has to be an odd number, instead "
"got {}.".format(square_length))
num_classes = get_output_shape(net.layers_[-1])[1]
img = x[0].copy()
bs, col, s0, s1 = x.shape
heat_array = np.zeros((s0, s1))
pad = square_length // 2 + 1
x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
probs = np.zeros((s0, s1, num_classes))
# generate occluded images
for i in range(s0):
# batch s1 occluded images for faster prediction
for j in range(s1):
x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
x_pad[:, i:i + square_length, j:j + square_length] = 0.
x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
y_proba = net.predict_proba(x_occluded)
probs[i] = y_proba.reshape(s1, num_classes)
# from predicted probabilities, pick only those of target class
for i in range(s0):
for j in range(s1):
heat_array[i, j] = probs[i, j, target]
return heat_array
def _plot_heat_map(net, X, figsize, get_heat_image):
if (X.ndim != 4):
raise ValueError("This function requires the input data to be of "
"shape (b, c, x, y), instead got {}".format(X.shape))
num_images = X.shape[0]
if figsize[1] is None:
figsize = (figsize[0], num_images * figsize[0] / 3)
figs, axes = plt.subplots(num_images, 3, figsize=figsize)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for n in range(num_images):
heat_img = get_heat_image(net, X[n:n + 1, :, :, :], n)
ax = axes if num_images == 1 else axes[n]
img = X[n, :, :, :].mean(0)
ax[0].imshow(-img, interpolation='nearest', cmap='gray')
ax[0].set_title('image')
ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
ax[1].set_title('critical parts')
ax[2].imshow(-img, interpolation='nearest', cmap='gray')
ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
alpha=0.6)
ax[2].set_title('super-imposed')
return plt
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
"""Plot which parts of an image are particularly import for the
net to classify the image correctly.
See paper: Zeiler, Fergus 2013
Parameters
----------
net : NeuralNet instance
The neural net to test.
X : numpy.array
The input data, should be of shape (b, c, 0, 1). Only makes
sense with image data.
target : list or numpy.array of ints
The true values of the image. If the net makes several
predictions, say 10 classes, this indicates which one to look
at. If more than one sample is passed to X, each of them needs
its own target.
square_length : int (default=7)
The length of the side of the square that occludes the image.
Must be an odd number.
figsize : tuple (int, int)
Size of the figure.
Plots
-----
Figure with 3 subplots: the original image, the occlusion heatmap,
and both images super-imposed.
"""
return _plot_heat_map(
net, X, figsize, lambda net, X, n: occlusion_heatmap(
net, X, target[n], square_length))
def saliency_map(input, output, pred, X):
score = -binary_crossentropy(output[:, pred], np.array([1])).sum()
return np.abs(T.grad(score, input).eval({input: X}))
def saliency_map_net(net, X):
input = net.layers_[0].input_var
output = get_output(net.layers_[-1])
pred = output.eval({input: X}).argmax(axis=1)
return saliency_map(input, output, pred, X)[0].transpose(1, 2, 0).squeeze()
def plot_saliency(net, X, figsize=(9, None)):
return _plot_heat_map(
net, X, figsize, lambda net, X, n: -saliency_map_net(net, X))
def get_hex_color(layer_type):
"""
Determines the hex color for a layer.
:parameters:
- layer_type : string
Class name of the layer
:returns:
- color : string containing a hex color for filling block.
"""
COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
'#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
'#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
'#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
hashed = int(hash(layer_type)) % 5
if "conv" in layer_type.lower():
return COLORS[:5][hashed]
if layer_type in lasagne.layers.pool.__all__:
return COLORS[5:10][hashed]
if layer_type in lasagne.layers.recurrent.__all__:
return COLORS[10:15][hashed]
else:
return COLORS[15:20][hashed]
def make_pydot_graph(layers, output_shape=True, verbose=False):
"""
:parameters:
- layers : list
List of the layers, as obtained from lasagne.layers.get_all_layers
- output_shape: (default `True`)
If `True`, the output shape of each layer will be displayed.
- verbose: (default `False`)
If `True`, layer attributes like filter shape, stride, etc.
will be displayed.
:returns:
- pydot_graph : PyDot object containing the graph
"""
import pydotplus as pydot
pydot_graph = pydot.Dot('Network', graph_type='digraph')
pydot_nodes = {}
pydot_edges = []
for i, layer in enumerate(layers):
layer_name = getattr(layer, 'name', None)
if layer_name is None:
layer_name = layer.__class__.__name__
layer_type = '{0}'.format(layer_name)
key = repr(layer)
label = layer_type
color = get_hex_color(layer_type)
if verbose:
for attr in ['num_filters', 'num_units', 'ds',
'filter_shape', 'stride', 'strides', 'p']:
if hasattr(layer, attr):
label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
if hasattr(layer, 'nonlinearity'):
try:
nonlinearity = layer.nonlinearity.__name__
except AttributeError:
nonlinearity = layer.nonlinearity.__class__.__name__
label += '\nnonlinearity: {0}'.format(nonlinearity)
if output_shape:
label += '\nOutput shape: {0}'.format(layer.output_shape)
pydot_nodes[key] = pydot.Node(
key, label=label, shape='record', fillcolor=color, style='filled')
if hasattr(layer, 'input_layers'):
for input_layer in layer.input_layers:
pydot_edges.append([repr(input_layer), key])
if hasattr(layer, 'input_layer'):
pydot_edges.append([repr(layer.input_layer), key])
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edges in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
return pydot_graph
def draw_to_notebook(layers, **kwargs):
"""
Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options
"""
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png())
|
dnouri/nolearn
|
nolearn/lasagne/visualize.py
|
draw_to_notebook
|
python
|
def draw_to_notebook(layers, **kwargs):
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png())
|
Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/visualize.py#L373-L385
|
[
"def make_pydot_graph(layers, output_shape=True, verbose=False):\n \"\"\"\n :parameters:\n - layers : list\n List of the layers, as obtained from lasagne.layers.get_all_layers\n - output_shape: (default `True`)\n If `True`, the output shape of each layer will be displayed.\n - verbose: (default `False`)\n If `True`, layer attributes like filter shape, stride, etc.\n will be displayed.\n :returns:\n - pydot_graph : PyDot object containing the graph\n \"\"\"\n import pydotplus as pydot\n pydot_graph = pydot.Dot('Network', graph_type='digraph')\n pydot_nodes = {}\n pydot_edges = []\n for i, layer in enumerate(layers):\n layer_name = getattr(layer, 'name', None)\n if layer_name is None:\n layer_name = layer.__class__.__name__\n layer_type = '{0}'.format(layer_name)\n key = repr(layer)\n label = layer_type\n color = get_hex_color(layer_type)\n if verbose:\n for attr in ['num_filters', 'num_units', 'ds',\n 'filter_shape', 'stride', 'strides', 'p']:\n if hasattr(layer, attr):\n label += '\\n{0}: {1}'.format(attr, getattr(layer, attr))\n if hasattr(layer, 'nonlinearity'):\n try:\n nonlinearity = layer.nonlinearity.__name__\n except AttributeError:\n nonlinearity = layer.nonlinearity.__class__.__name__\n label += '\\nnonlinearity: {0}'.format(nonlinearity)\n\n if output_shape:\n label += '\\nOutput shape: {0}'.format(layer.output_shape)\n\n pydot_nodes[key] = pydot.Node(\n key, label=label, shape='record', fillcolor=color, style='filled')\n\n if hasattr(layer, 'input_layers'):\n for input_layer in layer.input_layers:\n pydot_edges.append([repr(input_layer), key])\n\n if hasattr(layer, 'input_layer'):\n pydot_edges.append([repr(layer.input_layer), key])\n\n for node in pydot_nodes.values():\n pydot_graph.add_node(node)\n\n for edges in pydot_edges:\n pydot_graph.add_edge(\n pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))\n return pydot_graph\n"
] |
from itertools import product
from lasagne.layers import get_output
from lasagne.layers import get_output_shape
from lasagne.objectives import binary_crossentropy
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import io
import lasagne
def plot_loss(net):
train_loss = [row['train_loss'] for row in net.train_history_]
valid_loss = [row['valid_loss'] for row in net.train_history_]
plt.plot(train_loss, label='train loss')
plt.plot(valid_loss, label='valid loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
return plt
def plot_conv_weights(layer, figsize=(6, 6)):
"""Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer
"""
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows
for feature_map in range(shape[1]):
figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[0]:
break
axes[r, c].imshow(W[i, feature_map], cmap='gray',
interpolation='none')
return plt
def plot_conv_activity(layer, x, figsize=(6, 8)):
    """Plot the activities of a specific layer.
    Only really makes sense with layers that work 2D data (2D
    convolutional layers, 2D pooling layers ...).
    Parameters
    ----------
    layer : lasagne.layers.Layer
    x : numpy.ndarray
        Only takes one sample at a time, i.e. x.shape[0] == 1.
    """
    if x.shape[0] != 1:
        raise ValueError("Only one sample can be plotted at a time.")
    # compile theano function
    xs = T.tensor4('xs').astype(theano.config.floatX)
    get_activity = theano.function([xs], get_output(layer, xs))
    activity = get_activity(x)
    shape = activity.shape
    # Square grid of per-channel activations, plus one extra row on top
    # for the original input image.
    nrows = np.ceil(np.sqrt(shape[1])).astype(int)
    ncols = nrows
    figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
    axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
                               interpolation='none')
    axes[0, ncols // 2].set_title('original')
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
        if i >= shape[1]:
            break
        ndim = activity[0][i].ndim
        if ndim != 2:
            raise ValueError("Wrong number of dimensions, image data should "
                             "have 2, instead got {}".format(ndim))
        # negate so high activations render dark on the 'gray' colormap
        axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
                              interpolation='none')
    return plt
def occlusion_heatmap(net, x, target, square_length=7):
    """An occlusion test that checks an image for its critical parts.
    In this function, a square part of the image is occluded (i.e. set
    to 0) and then the net is tested for its propensity to predict the
    correct label. One should expect that this propensity shrinks of
    critical parts of the image are occluded. If not, this indicates
    overfitting.
    Depending on the depth of the net and the size of the image, this
    function may take awhile to finish, since one prediction for each
    pixel of the image is made.
    Currently, all color channels are occluded at the same time. Also,
    this does not really work if images are randomly distorted by the
    batch iterator.
    See paper: Zeiler, Fergus 2013
    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    x : np.array
        The input data, should be of shape (1, c, x, y). Only makes
        sense with image data.
    target : int
        The true value of the image. If the net makes several
        predictions, say 10 classes, this indicates which one to look
        at.
    square_length : int (default=7)
        The length of the side of the square that occludes the image.
        Must be an odd number.
    Results
    -------
    heat_array : np.array (with same size as image)
        An 2D np.array that at each point (i, j) contains the predicted
        probability of the correct class if the image is occluded by a
        square with center (i, j).
    """
    if (x.ndim != 4) or x.shape[0] != 1:
        raise ValueError("This function requires the input data to be of "
                         "shape (1, c, x, y), instead got {}".format(x.shape))
    if square_length % 2 == 0:
        raise ValueError("Square length has to be an odd number, instead "
                         "got {}.".format(square_length))
    num_classes = get_output_shape(net.layers_[-1])[1]
    img = x[0].copy()
    # bs is 1 (enforced above); s0 x s1 is the spatial size of the image
    bs, col, s0, s1 = x.shape
    heat_array = np.zeros((s0, s1))
    # pad by half the square (plus 1) so the occluder can be centered on
    # border pixels without slicing out of bounds
    pad = square_length // 2 + 1
    x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
    probs = np.zeros((s0, s1, num_classes))
    # generate occluded images
    for i in range(s0):
        # batch s1 occluded images for faster prediction
        for j in range(s1):
            x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
            x_pad[:, i:i + square_length, j:j + square_length] = 0.
            x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
        y_proba = net.predict_proba(x_occluded)
        probs[i] = y_proba.reshape(s1, num_classes)
    # from predicted probabilities, pick only those of target class
    for i in range(s0):
        for j in range(s1):
            heat_array[i, j] = probs[i, j, target]
    return heat_array
def _plot_heat_map(net, X, figsize, get_heat_image):
    """Shared plotting helper: for every sample in `X`, draw three
    panels — the original image, the heat map returned by
    ``get_heat_image(net, sample, n)``, and the two super-imposed —
    and return the pyplot module."""
    if (X.ndim != 4):
        raise ValueError("This function requires the input data to be of "
                         "shape (b, c, x, y), instead got {}".format(X.shape))
    num_images = X.shape[0]
    if figsize[1] is None:
        # default height: keep each 3-panel row roughly square
        figsize = (figsize[0], num_images * figsize[0] / 3)
    figs, axes = plt.subplots(num_images, 3, figsize=figsize)
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for n in range(num_images):
        heat_img = get_heat_image(net, X[n:n + 1, :, :, :], n)
        # subplots() returns a 1-D axes array when num_images == 1
        ax = axes if num_images == 1 else axes[n]
        img = X[n, :, :, :].mean(0)
        ax[0].imshow(-img, interpolation='nearest', cmap='gray')
        ax[0].set_title('image')
        ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
        ax[1].set_title('critical parts')
        ax[2].imshow(-img, interpolation='nearest', cmap='gray')
        ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
                     alpha=0.6)
        ax[2].set_title('super-imposed')
    return plt
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
    """Visualize which parts of each image are important for the net's
    classification, using the occlusion test of Zeiler & Fergus (2013).

    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    X : numpy.array
        Input data of shape (b, c, 0, 1). Only makes sense with image
        data.
    target : list or numpy.array of ints
        The true class of each sample in X (one per sample).
    square_length : int (default=7)
        Odd side length of the occluding square.
    figsize : tuple (int, int)
        Size of the figure.

    Plots
    -----
    Per sample: the original image, the occlusion heat map, and both
    super-imposed.
    """
    def heat_image(net_, x_single, n):
        # one heat map per sample, scored against that sample's target
        return occlusion_heatmap(net_, x_single, target[n], square_length)

    return _plot_heat_map(net, X, figsize, heat_image)
def saliency_map(input, output, pred, X):
    """Gradient-based saliency: absolute gradient of the (negated)
    cross-entropy score of class `pred` with respect to `input`,
    evaluated at the concrete batch `X`.

    NOTE(review): `input`/`output` are theano variables; `pred` is the
    predicted class index — confirm against saliency_map_net's usage.
    """
    score = -binary_crossentropy(output[:, pred], np.array([1])).sum()
    return np.abs(T.grad(score, input).eval({input: X}))
def saliency_map_net(net, X):
    """Saliency map of `X` with respect to the net's own argmax
    prediction; returns the first sample's map transposed to
    (height, width, channels) with singleton axes squeezed out."""
    input = net.layers_[0].input_var
    output = get_output(net.layers_[-1])
    pred = output.eval({input: X}).argmax(axis=1)
    return saliency_map(input, output, pred, X)[0].transpose(1, 2, 0).squeeze()
def plot_saliency(net, X, figsize=(9, None)):
    """Plot saliency maps for the samples in `X` (original image, heat
    map, and both super-imposed; see `_plot_heat_map`)."""
    return _plot_heat_map(
        net, X, figsize, lambda net, X, n: -saliency_map_net(net, X))
def get_hex_color(layer_type):
    """
    Determines the hex color for a layer.
    :parameters:
        - layer_type : string
            Class name of the layer
    :returns:
        - color : string containing a hex color for filling block.

    Layers are grouped into four 5-color palettes (conv / pool /
    recurrent / other); the concrete color within a palette is picked
    deterministically from the layer-type name.
    """
    import hashlib
    COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
              '#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
              '#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
              '#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
    # BUGFIX: builtin hash() of a str is salted per interpreter run
    # (PYTHONHASHSEED), so the original `hash(layer_type) % 5` assigned
    # a *different* color to the same layer type on every program run.
    # Use a stable digest so diagrams are reproducible.
    digest = hashlib.md5(layer_type.encode('utf-8')).hexdigest()
    hashed = int(digest, 16) % 5
    if "conv" in layer_type.lower():
        return COLORS[:5][hashed]
    if layer_type in lasagne.layers.pool.__all__:
        return COLORS[5:10][hashed]
    if layer_type in lasagne.layers.recurrent.__all__:
        return COLORS[10:15][hashed]
    else:
        return COLORS[15:20][hashed]
def make_pydot_graph(layers, output_shape=True, verbose=False):
    """
    :parameters:
        - layers : list
            List of the layers, as obtained from lasagne.layers.get_all_layers
        - output_shape: (default `True`)
            If `True`, the output shape of each layer will be displayed.
        - verbose: (default `False`)
            If `True`, layer attributes like filter shape, stride, etc.
            will be displayed.
    :returns:
        - pydot_graph : PyDot object containing the graph
    """
    import pydotplus as pydot
    pydot_graph = pydot.Dot('Network', graph_type='digraph')
    pydot_nodes = {}
    pydot_edges = []
    for i, layer in enumerate(layers):
        layer_name = getattr(layer, 'name', None)
        if layer_name is None:
            layer_name = layer.__class__.__name__
        layer_type = '{0}'.format(layer_name)
        # repr(layer) is unique per layer instance and serves as node id
        key = repr(layer)
        label = layer_type
        color = get_hex_color(layer_type)
        if verbose:
            # append any of the common hyper-parameters the layer has
            for attr in ['num_filters', 'num_units', 'ds',
                         'filter_shape', 'stride', 'strides', 'p']:
                if hasattr(layer, attr):
                    label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
            if hasattr(layer, 'nonlinearity'):
                try:
                    nonlinearity = layer.nonlinearity.__name__
                except AttributeError:
                    nonlinearity = layer.nonlinearity.__class__.__name__
                label += '\nnonlinearity: {0}'.format(nonlinearity)
        if output_shape:
            label += '\nOutput shape: {0}'.format(layer.output_shape)
        pydot_nodes[key] = pydot.Node(
            key, label=label, shape='record', fillcolor=color, style='filled')
        # record edges from each input layer; multi-input layers expose
        # `input_layers`, single-input layers expose `input_layer`
        if hasattr(layer, 'input_layers'):
            for input_layer in layer.input_layers:
                pydot_edges.append([repr(input_layer), key])
        if hasattr(layer, 'input_layer'):
            pydot_edges.append([repr(layer.input_layer), key])
    for node in pydot_nodes.values():
        pydot_graph.add_node(node)
    for edges in pydot_edges:
        pydot_graph.add_edge(
            pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
    return pydot_graph
def draw_to_file(layers, filename, **kwargs):
    """
    Draws a network diagram to a file
    :parameters:
        - layers : list or NeuralNet instance
            List of layers or the neural net to draw.
        - filename : string
            The filename to save output to; the extension determines
            the output format (e.g. ``.png``, ``.pdf``).
        - **kwargs: see docstring of make_pydot_graph for other options
    """
    import os
    layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
              else layers)
    dot = make_pydot_graph(layers, **kwargs)
    # BUGFIX: the original `filename[filename.rfind('.') + 1:]` returned
    # the *whole* filename when there was no dot (rfind gives -1),
    # handing pydot a nonsense format string.  Fail loudly instead.
    ext = os.path.splitext(filename)[1].lstrip('.')
    if not ext:
        raise ValueError(
            "filename must carry an extension naming the output format, "
            "got {!r}".format(filename))
    with io.open(filename, 'wb') as fid:
        fid.write(dot.create(format=ext))
|
dnouri/nolearn
|
nolearn/lasagne/util.py
|
get_real_filter
|
python
|
def get_real_filter(layers, img_size):
    """Compute each layer's effective ("real") filter size w.r.t. the
    input image, following Xudong Cao's capacity bookkeeping.  Pooling
    layers scale the filters of later convolutions; the first non-conv,
    non-pool layer ends the convolutional part, after which every layer
    is assigned the full image size.
    """
    real_filter = np.zeros((len(layers), 2))
    growth = np.ones((1, 2))   # product of pool sizes seen so far
    past_conv_part = False     # becomes True after a non-conv/pool layer
    for idx, layer in enumerate(layers[1:], start=1):
        if past_conv_part:
            real_filter[idx] = img_size
        elif is_conv2d(layer):
            # filter size scaled by all preceding poolings (the original
            # had two byte-identical branches here; merged)
            real_filter[idx] = np.array(layer.filter_size) * growth
        elif is_maxpool2d(layer):
            real_filter[idx] = real_filter[idx - 1]
            growth *= np.array(layer.pool_size)
        else:
            past_conv_part = True
            real_filter[idx] = img_size
    real_filter[0] = img_size
    return real_filter
|
Get the real filter sizes of each layer involved in
convoluation. See Xudong Cao:
https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code
This does not yet take into consideration feature pooling,
padding, striding and similar gimmicks.
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/util.py#L57-L91
|
[
"def is_conv2d(layers):\n if isinstance(layers, Layer):\n return isinstance(layers, tuple(convlayers))\n return any([isinstance(layer, tuple(convlayers))\n for layer in layers])\n",
"def is_maxpool2d(layers):\n if isinstance(layers, Layer):\n return isinstance(layers, tuple(maxpoollayers))\n return any([isinstance(layer, tuple(maxpoollayers))\n for layer in layers])\n"
] |
from functools import reduce
from operator import mul
from lasagne.layers import Layer
from lasagne.layers import Conv2DLayer
from lasagne.layers import MaxPool2DLayer
import numpy as np
from tabulate import tabulate
convlayers = [Conv2DLayer]
maxpoollayers = [MaxPool2DLayer]
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
from lasagne.layers.cuda_convnet import MaxPool2DCCLayer
convlayers.append(Conv2DCCLayer)
maxpoollayers.append(MaxPool2DCCLayer)
except ImportError:
pass
try:
from lasagne.layers.dnn import Conv2DDNNLayer
from lasagne.layers.dnn import MaxPool2DDNNLayer
convlayers.append(Conv2DDNNLayer)
maxpoollayers.append(MaxPool2DDNNLayer)
except ImportError:
pass
try:
from lasagne.layers.corrmm import Conv2DMMLayer
convlayers.append(Conv2DMMLayer)
except ImportError:
pass
class ansi:
    """ANSI terminal escape sequences used to colorize console output
    (ENDC resets to the default style)."""
    BLUE = '\033[94m'
    CYAN = '\033[36m'
    GREEN = '\033[32m'
    MAGENTA = '\033[35m'
    RED = '\033[31m'
    ENDC = '\033[0m'
def is_conv2d(layers):
    """Return True if `layers` — a single Layer or an iterable of
    layers — is (or contains) one of the known 2D conv layer types."""
    conv_types = tuple(convlayers)
    if isinstance(layers, Layer):
        return isinstance(layers, conv_types)
    return any(isinstance(layer, conv_types) for layer in layers)
def is_maxpool2d(layers):
    """Return True if `layers` — a single Layer or an iterable of
    layers — is (or contains) one of the known 2D max-pool types."""
    pool_types = tuple(maxpoollayers)
    if isinstance(layers, Layer):
        return isinstance(layers, pool_types)
    return any(isinstance(layer, pool_types) for layer in layers)
def get_receptive_field(layers, img_size):
    """Get the real filter sizes of each layer involved in
    convoluation. See Xudong Cao:
    https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code
    This does not yet take into consideration feature pooling,
    padding, striding and similar gimmicks.
    """
    receptive_field = np.zeros((len(layers), 2))
    conv_mode = True
    first_conv_layer = True
    expon = np.ones((1, 2))
    for i, layer in enumerate(layers[1:]):
        j = i + 1
        if not conv_mode:
            # past the convolutional part: field spans the whole image
            receptive_field[j] = img_size
            continue
        if is_conv2d(layer):
            if not first_conv_layer:
                # field grows by (filter - 1), scaled by pooling so far
                last_field = receptive_field[i]
                new_field = (last_field + expon *
                             (np.array(layer.filter_size) - 1))
                receptive_field[j] = new_field
            else:
                receptive_field[j] = layer.filter_size
                first_conv_layer = False
        elif is_maxpool2d(layer):
            # pooling keeps the field; later filters' growth is scaled
            receptive_field[j] = receptive_field[i]
            expon *= np.array(layer.pool_size)
        else:
            conv_mode = False
            receptive_field[j] = img_size
    receptive_field[0] = img_size
    return receptive_field
def get_conv_infos(net, min_capacity=100. / 6, detailed=False):
    """Build a ``tabulate``d table summarizing each layer's output
    shape, capacity and image coverage.

    Layer names are ANSI-colorized: red when coverage exceeds 100%
    *and* capacity falls below `min_capacity`, cyan for excessive
    coverage only, magenta for low capacity only.
    """
    CYA = ansi.CYAN
    END = ansi.ENDC
    MAG = ansi.MAGENTA
    RED = ansi.RED
    # BUGFIX: dict.values() is a lazy, non-indexable view on Python 3;
    # materialize it so `layers[0]` and the helpers below can index it.
    layers = list(net.layers_.values())
    # assume that first layer is input layer
    img_size = layers[0].output_shape[2:]
    header = ['name', 'size', 'total', 'cap.Y', 'cap.X',
              'cov.Y', 'cov.X']
    if detailed:
        header += ['filter Y', 'filter X', 'field Y', 'field X']
    shapes = [layer.output_shape[1:] for layer in layers]
    totals = [str(reduce(mul, shape)) if shape else '0' for shape in shapes]
    shapes = ['x'.join(map(str, shape)) for shape in shapes]
    shapes = np.array(shapes).reshape(-1, 1)
    totals = np.array(totals).reshape(-1, 1)
    real_filters = get_real_filter(layers, img_size)
    receptive_fields = get_receptive_field(layers, img_size)
    capacity = 100. * real_filters / receptive_fields
    # input layer divides by zero; normalize those entries
    capacity[np.logical_not(np.isfinite(capacity))] = 1
    img_coverage = 100. * receptive_fields / img_size
    layer_names = [layer.name if layer.name
                   else str(layer).rsplit('.')[-1].split(' ')[0]
                   for layer in layers]
    colored_names = []
    for name, (covy, covx), (capy, capx) in zip(
            layer_names, img_coverage, capacity):
        if (
                ((covy > 100) or (covx > 100)) and
                ((capy < min_capacity) or (capx < min_capacity))
        ):
            name = "{}{}{}".format(RED, name, END)
        elif (covy > 100) or (covx > 100):
            name = "{}{}{}".format(CYA, name, END)
        elif (capy < min_capacity) or (capx < min_capacity):
            name = "{}{}{}".format(MAG, name, END)
        colored_names.append(name)
    colored_names = np.array(colored_names).reshape(-1, 1)
    table = np.hstack((colored_names, shapes, totals, capacity, img_coverage))
    if detailed:
        table = np.hstack((table, real_filters.astype(int),
                           receptive_fields.astype(int)))
    return tabulate(table, header, floatfmt='.2f')
|
dnouri/nolearn
|
nolearn/lasagne/util.py
|
get_receptive_field
|
python
|
def get_receptive_field(layers, img_size):
    """Compute each layer's receptive field with respect to the input
    image (Xudong Cao's bookkeeping; see the module's sibling
    `get_real_filter`).  Pooling does not enlarge the field directly
    but scales the growth contributed by later convolutions; the first
    non-conv/non-pool layer is assumed to see the whole image.
    """
    receptive_field = np.zeros((len(layers), 2))
    conv_mode = True
    first_conv_layer = True
    expon = np.ones((1, 2))
    for i, layer in enumerate(layers[1:]):
        j = i + 1
        if not conv_mode:
            # past the convolutional part: field spans the whole image
            receptive_field[j] = img_size
            continue
        if is_conv2d(layer):
            if not first_conv_layer:
                # field grows by (filter - 1), scaled by pooling so far
                last_field = receptive_field[i]
                new_field = (last_field + expon *
                             (np.array(layer.filter_size) - 1))
                receptive_field[j] = new_field
            else:
                receptive_field[j] = layer.filter_size
                first_conv_layer = False
        elif is_maxpool2d(layer):
            receptive_field[j] = receptive_field[i]
            expon *= np.array(layer.pool_size)
        else:
            conv_mode = False
            receptive_field[j] = img_size
    receptive_field[0] = img_size
    return receptive_field
|
Get the real filter sizes of each layer involved in
convoluation. See Xudong Cao:
https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code
This does not yet take into consideration feature pooling,
padding, striding and similar gimmicks.
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/util.py#L94-L129
|
[
"def is_conv2d(layers):\n if isinstance(layers, Layer):\n return isinstance(layers, tuple(convlayers))\n return any([isinstance(layer, tuple(convlayers))\n for layer in layers])\n",
"def is_maxpool2d(layers):\n if isinstance(layers, Layer):\n return isinstance(layers, tuple(maxpoollayers))\n return any([isinstance(layer, tuple(maxpoollayers))\n for layer in layers])\n"
] |
from functools import reduce
from operator import mul
from lasagne.layers import Layer
from lasagne.layers import Conv2DLayer
from lasagne.layers import MaxPool2DLayer
import numpy as np
from tabulate import tabulate
convlayers = [Conv2DLayer]
maxpoollayers = [MaxPool2DLayer]
try:
from lasagne.layers.cuda_convnet import Conv2DCCLayer
from lasagne.layers.cuda_convnet import MaxPool2DCCLayer
convlayers.append(Conv2DCCLayer)
maxpoollayers.append(MaxPool2DCCLayer)
except ImportError:
pass
try:
from lasagne.layers.dnn import Conv2DDNNLayer
from lasagne.layers.dnn import MaxPool2DDNNLayer
convlayers.append(Conv2DDNNLayer)
maxpoollayers.append(MaxPool2DDNNLayer)
except ImportError:
pass
try:
from lasagne.layers.corrmm import Conv2DMMLayer
convlayers.append(Conv2DMMLayer)
except ImportError:
pass
class ansi:
    """ANSI terminal escape sequences used to colorize console output
    (ENDC resets to the default style)."""
    BLUE = '\033[94m'
    CYAN = '\033[36m'
    GREEN = '\033[32m'
    MAGENTA = '\033[35m'
    RED = '\033[31m'
    ENDC = '\033[0m'
def is_conv2d(layers):
    """Return True if `layers` — a single Layer or an iterable of
    layers — is (or contains) one of the known 2D conv layer types."""
    conv_types = tuple(convlayers)
    if isinstance(layers, Layer):
        return isinstance(layers, conv_types)
    return any(isinstance(layer, conv_types) for layer in layers)
def is_maxpool2d(layers):
    """Return True if `layers` — a single Layer or an iterable of
    layers — is (or contains) one of the known 2D max-pool types."""
    pool_types = tuple(maxpoollayers)
    if isinstance(layers, Layer):
        return isinstance(layers, pool_types)
    return any(isinstance(layer, pool_types) for layer in layers)
def get_real_filter(layers, img_size):
    """Get the real filter sizes of each layer involved in
    convolution, following Xudong Cao's capacity bookkeeping
    (https://www.kaggle.com/c/datasciencebowl/forums/t/13166).
    Feature pooling, padding, striding and similar gimmicks are not
    yet taken into account.
    """
    real_filter = np.zeros((len(layers), 2))
    growth = np.ones((1, 2))   # product of pool sizes seen so far
    past_conv_part = False     # becomes True after a non-conv/pool layer
    for idx, layer in enumerate(layers[1:], start=1):
        if past_conv_part:
            real_filter[idx] = img_size
        elif is_conv2d(layer):
            # filter size scaled by all preceding poolings (the original
            # had two byte-identical branches here; merged)
            real_filter[idx] = np.array(layer.filter_size) * growth
        elif is_maxpool2d(layer):
            real_filter[idx] = real_filter[idx - 1]
            growth *= np.array(layer.pool_size)
        else:
            past_conv_part = True
            real_filter[idx] = img_size
    real_filter[0] = img_size
    return real_filter
def get_conv_infos(net, min_capacity=100. / 6, detailed=False):
    """Build a ``tabulate``d table summarizing each layer's output
    shape, capacity and image coverage.

    Layer names are ANSI-colorized: red when coverage exceeds 100%
    *and* capacity falls below `min_capacity`, cyan for excessive
    coverage only, magenta for low capacity only.
    """
    CYA = ansi.CYAN
    END = ansi.ENDC
    MAG = ansi.MAGENTA
    RED = ansi.RED
    # BUGFIX: dict.values() is a lazy, non-indexable view on Python 3;
    # materialize it so `layers[0]` and the helpers below can index it.
    layers = list(net.layers_.values())
    # assume that first layer is input layer
    img_size = layers[0].output_shape[2:]
    header = ['name', 'size', 'total', 'cap.Y', 'cap.X',
              'cov.Y', 'cov.X']
    if detailed:
        header += ['filter Y', 'filter X', 'field Y', 'field X']
    shapes = [layer.output_shape[1:] for layer in layers]
    totals = [str(reduce(mul, shape)) if shape else '0' for shape in shapes]
    shapes = ['x'.join(map(str, shape)) for shape in shapes]
    shapes = np.array(shapes).reshape(-1, 1)
    totals = np.array(totals).reshape(-1, 1)
    real_filters = get_real_filter(layers, img_size)
    receptive_fields = get_receptive_field(layers, img_size)
    capacity = 100. * real_filters / receptive_fields
    # input layer divides by zero; normalize those entries
    capacity[np.logical_not(np.isfinite(capacity))] = 1
    img_coverage = 100. * receptive_fields / img_size
    layer_names = [layer.name if layer.name
                   else str(layer).rsplit('.')[-1].split(' ')[0]
                   for layer in layers]
    colored_names = []
    for name, (covy, covx), (capy, capx) in zip(
            layer_names, img_coverage, capacity):
        if (
                ((covy > 100) or (covx > 100)) and
                ((capy < min_capacity) or (capx < min_capacity))
        ):
            name = "{}{}{}".format(RED, name, END)
        elif (covy > 100) or (covx > 100):
            name = "{}{}{}".format(CYA, name, END)
        elif (capy < min_capacity) or (capx < min_capacity):
            name = "{}{}{}".format(MAG, name, END)
        colored_names.append(name)
    colored_names = np.array(colored_names).reshape(-1, 1)
    table = np.hstack((colored_names, shapes, totals, capacity, img_coverage))
    if detailed:
        table = np.hstack((table, real_filters.astype(int),
                           receptive_fields.astype(int)))
    return tabulate(table, header, floatfmt='.2f')
|
dnouri/nolearn
|
nolearn/decaf.py
|
ConvNetFeatures.prepare_image
|
python
|
def prepare_image(self, image):
    """Scale/crop `image` to the 256x256 RGB float32 input DecafNet
    expects and subtract the network's stored data mean.

    Returns image of shape ``(256, 256, 3)``, as expected by
    `transform` when ``classify_direct = True``.
    """
    from decaf.util import transform # soft dep
    _JEFFNET_FLIP = True
    # first, extract the 256x256 center.
    image = transform.scale_and_extract(transform.as_rgb(image), 256)
    # convert to [0,255] float32
    image = image.astype(np.float32) * 255.
    if _JEFFNET_FLIP:
        # Flip the image if necessary, maintaining the c_contiguous order
        image = image[::-1, :].copy()
    # subtract the mean
    image -= self.net_._data_mean
    return image
|
Returns image of shape `(256, 256, 3)`, as expected by
`transform` when `classify_direct = True`.
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/decaf.py#L138-L154
| null |
class ConvNetFeatures(BaseEstimator):
    """Extract features from images using a pretrained ConvNet.
    Based on Yangqing Jia and Jeff Donahue's `DeCAF
    <https://github.com/UCB-ICSI-Vision-Group/decaf-release/wiki>`_.
    Please make sure you read and accept DeCAF's license before you
    use this class.
    If ``classify_direct=False``, expects its input X to be a list of
    image filenames or arrays as produced by
    `np.array(Image.open(filename))`.
    """
    verbose = 0
    def __init__(
        self,
        feature_layer='fc7_cudanet_out',
        pretrained_params='imagenet.decafnet.epoch90',
        pretrained_meta='imagenet.decafnet.meta',
        center_only=True,
        classify_direct=False,
        verbose=0,
        ):
        """
        :param feature_layer: The ConvNet layer that's used for
                              feature extraction.  Defaults to
                              `fc7_cudanet_out`.  A description of all
                              available layers for the
                              ImageNet-1k-pretrained ConvNet is found
                              in the DeCAF wiki.  They are:
                                - `pool5_cudanet_out`
                                - `fc6_cudanet_out`
                                - `fc6_neuron_cudanet_out`
                                - `fc7_cudanet_out`
                                - `fc7_neuron_cudanet_out`
                                - `probs_cudanet_out`
        :param pretrained_params: This must point to the file with the
                                  pretrained parameters.  Defaults to
                                  `imagenet.decafnet.epoch90`.  For
                                  the ImageNet-1k-pretrained ConvNet
                                  this file can be obtained from here:
                                  http://www.eecs.berkeley.edu/~jiayq/decaf_pretrained/
        :param pretrained_meta: Similar to `pretrained_params`, this
                                must file to the file with the
                                pretrained parameters' metadata.
                                Defaults to `imagenet.decafnet.meta`.
        :param center_only: Use the center patch of the image only
                            when extracting features.  If `False`, use
                            four corners, the image center and flipped
                            variants and average a total of 10 feature
                            vectors, which will usually yield better
                            results.  Defaults to `True`.
        :param classify_direct: When `True`, assume that input X is an
                                array of shape (num x 256 x 256 x 3)
                                as returned by `prepare_image`.
        """
        self.feature_layer = feature_layer
        self.pretrained_params = pretrained_params
        self.pretrained_meta = pretrained_meta
        self.center_only = center_only
        self.classify_direct = classify_direct
        self.net_ = None
        # Fail fast at construction time if the pretrained files are
        # missing, with a pointer to where they can be downloaded.
        if (not os.path.exists(pretrained_params) or
            not os.path.exists(pretrained_meta)):
            raise ValueError(
                "Pre-trained ConvNet parameters not found.  You may"
                "need to download the files from "
                "http://www.eecs.berkeley.edu/~jiayq/decaf_pretrained/ and "
                "pass the path to the two files as `pretrained_params` and "
                "`pretrained_meta` to the `{}` estimator.".format(
                    self.__class__.__name__))
    def fit(self, X=None, y=None):
        from decaf.scripts.imagenet import DecafNet # soft dep
        # Lazily instantiate the (expensive) DecafNet exactly once;
        # repeated fit() calls reuse the loaded network.
        if self.net_ is None:
            self.net_ = DecafNet(
                self.pretrained_params,
                self.pretrained_meta,
                )
        return self
    @cache.cached(_transform_cache_key)
    def transform(self, X):
        features = []
        for img in X:
            if self.classify_direct:
                images = self.net_.oversample(
                    img, center_only=self.center_only)
                self.net_.classify_direct(images)
            else:
                if isinstance(img, str):
                    import Image # soft dep
                    img = np.array(Image.open(img))
                self.net_.classify(img, center_only=self.center_only)
            # `feature_layer` may name several layers (comma-separated);
            # their feature vectors are concatenated horizontally.
            feat = None
            for layer in self.feature_layer.split(','):
                val = self.net_.feature(layer)
                if feat is None:
                    feat = val
                else:
                    feat = np.hstack([feat, val])
            if not self.center_only:
                feat = feat.flatten()
            features.append(feat)
            if self.verbose:
                # lightweight progress indicator
                sys.stdout.write(
                    "\r[ConvNet] %d%%" % (100. * len(features) / len(X)))
                sys.stdout.flush()
        if self.verbose:
            sys.stdout.write('\n')
        return np.vstack(features)
|
dnouri/nolearn
|
nolearn/metrics.py
|
multiclass_logloss
|
python
|
def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.

    :param actual: Array containing the actual target classes; either a
        1-d array of class indices or an already-binarized matrix.
    :param predicted: Matrix with class predictions, one probability
        per class.  Probabilities are clipped to [eps, 1 - eps] before
        taking logarithms.
    """
    if actual.ndim == 1:
        # binarize class indices into a one-hot matrix
        one_hot = np.zeros((actual.shape[0], predicted.shape[1]))
        one_hot[np.arange(actual.shape[0]), actual] = 1
        actual = one_hot
    clipped = np.clip(predicted, eps, 1 - eps)
    return -np.sum(actual * np.log(clipped)) / actual.shape[0]
|
Multi class version of Logarithmic Loss metric.
:param actual: Array containing the actual target classes
:param predicted: Matrix with class predictions, one probability per class
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/metrics.py#L8-L24
| null |
from __future__ import print_function
import numpy as np
from sklearn.base import clone
from sklearn.metrics import f1_score
class LearningCurve(object):
    """Compute train/test scores while fitting a classifier on
    progressively larger fractions of the training set."""
    # default scoring function; can be overridden per instance
    score_func = staticmethod(f1_score)
    def __init__(self, score_func=None):
        if score_func is None:
            score_func = self.score_func
        self.score_func = score_func
    def predict(self, clf, X):
        # hook point: subclasses may return probabilities instead
        return clf.predict(X)
    def __call__(self, dataset, classifier, steps=10,
                 verbose=0, random_state=42):
        """Create a learning curve that uses more training cases with
        each step.
        :param dataset: Dataset to work with
        :type dataset: :class:`~nolearn.dataset.Dataset`
        :param classifier: Classifier for fitting and making predictions.
        :type classifier: :class:`~sklearn.base.BaseEstimator`
        :param steps: Number of steps in the learning curve.
        :type steps: int
        :result: 3-tuple with lists `scores_train`, `scores_test`, `sizes`
        Drawing the resulting learning curve can be done like this:
        .. code-block:: python
            dataset = Dataset()
            clf = LogisticRegression()
            scores_train, scores_test, sizes = learning_curve(dataset, clf)
            pl.plot(sizes, scores_train, 'b', label='training set')
            pl.plot(sizes, scores_test, 'r', label='test set')
            pl.legend(loc='lower right')
            pl.show()
        """
        X_train, X_test, y_train, y_test = dataset.train_test_split()
        scores_train = []
        scores_test = []
        sizes = []
        if verbose:
            print("      n     train     test")
        # train on 10%, 20%, ..., 100% of the training set
        for frac in np.linspace(0.1, 1.0, num=steps):
            frac_size = int(X_train.shape[0] * frac)
            sizes.append(frac_size)
            X_train1 = X_train[:frac_size]
            y_train1 = y_train[:frac_size]
            # fresh, unfitted copy of the classifier for each step
            clf = clone(classifier)
            clf.fit(X_train1, y_train1)
            predict_train = self.predict(clf, X_train1)
            predict_test = self.predict(clf, X_test)
            score_train = self.score_func(y_train1, predict_train)
            score_test = self.score_func(y_test, predict_test)
            scores_train.append(score_train)
            scores_test.append(score_test)
            if verbose:
                print("  %8d  %0.4f  %0.4f" % (
                    frac_size, score_train, score_test))
        return scores_train, scores_test, sizes
class LearningCurveProbas(LearningCurve):
    """Learning-curve variant that scores predicted *probabilities*
    with multi-class log loss."""
    score_func = staticmethod(multiclass_logloss)
    def predict(self, clf, X):
        return clf.predict_proba(X)
# module-level convenience callables
learning_curve = LearningCurve().__call__
#: Same as :func:`learning_curve` but uses :func:`multiclass_logloss`
#: as the loss function.
learning_curve_logloss = LearningCurveProbas().__call__
|
dnouri/nolearn
|
nolearn/lasagne/base.py
|
objective
|
python
|
def objective(layers,
              loss_function,
              target,
              aggregate=aggregate,
              deterministic=False,
              l1=0,
              l2=0,
              get_output_kw=None):
    """Default implementation of the NeuralNet objective.

    Computes ``aggregate(loss_function(network_output, target))`` for
    the network's last layer, optionally adding L1/L2 penalties over
    all layers' parameters.

    :param layers: The underlying layers of the NeuralNetwork
    :param loss_function: The callable loss function to use
    :param target: the expected output
    :param aggregate: the aggregation function to use
    :param deterministic: Whether or not to get a deterministic output
        (passed through to ``get_output``)
    :param l1: Optional l1 regularization parameter
    :param l2: Optional l2 regularization parameter
    :param get_output_kw: optional kwargs to pass to ``get_output``
    :return: The total calculated loss
    """
    if get_output_kw is None:
        get_output_kw = {}
    output_layer = layers[-1]
    network_output = get_output(
        output_layer, deterministic=deterministic, **get_output_kw)
    loss = aggregate(loss_function(network_output, target))
    if l1:
        loss += regularization.regularize_layer_params(
            layers.values(), regularization.l1) * l1
    if l2:
        loss += regularization.regularize_layer_params(
            layers.values(), regularization.l2) * l2
    return loss
|
Default implementation of the NeuralNet objective.
:param layers: The underlying layers of the NeuralNetwork
:param loss_function: The callable loss function to use
:param target: the expected output
:param aggregate: the aggregation function to use
:param deterministic: Whether or not to get a deterministic output
:param l1: Optional l1 regularization parameter
:param l2: Optional l2 regularization parameter
:param get_output_kw: optional kwargs to pass to
:meth:`NeuralNetwork.get_output`
:return: The total calculated loss
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/base.py#L166-L202
| null |
from __future__ import absolute_import
from .._compat import basestring
from .._compat import chain_exception
from .._compat import pickle
from collections import OrderedDict, Iterable
import itertools
from pydoc import locate
from warnings import warn
from time import time
from lasagne.layers import get_all_layers
from lasagne.layers import get_output
from lasagne.layers import InputLayer
from lasagne.layers import Layer
from lasagne import regularization
from lasagne.objectives import aggregate
from lasagne.objectives import categorical_crossentropy
from lasagne.objectives import squared_error
from lasagne.updates import nesterov_momentum
from lasagne.utils import floatX
from lasagne.utils import unique
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.cross_validation import KFold
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.preprocessing import LabelEncoder
import theano
from theano import tensor as T
from . import PrintLog
from . import PrintLayerInfo
class _list(list):
    """``list`` subclass; unlike the builtin, instances support
    attribute assignment (used internally as a distinct list type)."""
    pass
class _dict(dict):
    """``dict`` whose membership test (``key in d``) always reports
    True, regardless of the actual stored keys."""
    def __contains__(self, key):
        return True
def _sldict(arr, sl):
if isinstance(arr, dict):
return {k: v[sl] for k, v in arr.items()}
else:
return arr[sl]
def _shuffle_arrays(arrays, random):
rstate = random.get_state()
for array in arrays:
if isinstance(array, dict):
for v in list(array.values()):
random.set_state(rstate)
random.shuffle(v)
else:
random.set_state(rstate)
random.shuffle(array)
class Layers(OrderedDict):
    """``OrderedDict`` of layers with extra positional access: integer
    keys index by insertion order, slices return a new ``Layers``, and
    ``keys()``/``values()`` always return plain lists."""

    def __getitem__(self, key):
        if isinstance(key, slice):
            return Layers(list(self.items())[key])
        if isinstance(key, int):
            return self.values()[key]
        return super(Layers, self).__getitem__(key)

    def keys(self):
        return list(super(Layers, self).keys())

    def values(self):
        return list(super(Layers, self).values())
class BatchIterator(object):
    """Iterate over (X, y) in mini-batches of `batch_size`; when
    `shuffle` is set, X and y are shuffled in unison once per call.
    X/y may be arrays or dicts of arrays (sliced value-wise)."""
    def __init__(self, batch_size, shuffle=False, seed=42):
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.random = np.random.RandomState(seed)
    def __call__(self, X, y=None):
        if self.shuffle:
            _shuffle_arrays([X, y] if y is not None else [X], self.random)
        self.X, self.y = X, y
        return self
    def __iter__(self):
        bs = self.batch_size
        # ceil(n_samples / bs) batches; the last one may be smaller
        for i in range((self.n_samples + bs - 1) // bs):
            sl = slice(i * bs, (i + 1) * bs)
            Xb = _sldict(self.X, sl)
            if self.y is not None:
                yb = _sldict(self.y, sl)
            else:
                yb = None
            yield self.transform(Xb, yb)
    @property
    def n_samples(self):
        # for dict inputs, take the length of any one value
        X = self.X
        if isinstance(X, dict):
            return len(list(X.values())[0])
        else:
            return len(X)
    def transform(self, Xb, yb):
        # hook point: subclasses may augment/transform each batch
        return Xb, yb
    def __getstate__(self):
        # don't pickle the (potentially large) data arrays
        state = dict(self.__dict__)
        for attr in ('X', 'y',):
            if attr in state:
                del state[attr]
        return state
def grad_scale(layer, scale):
    """Tag every trainable parameter of `layer` with a gradient scale
    factor and return the layer (usable inline in a layer definition)."""
    for param in layer.get_params(trainable=True):
        param.tag.grad_scale = floatX(scale)
    return layer
class TrainSplit(object):
    """Split (X, y) into train and validation parts, holding out
    roughly `eval_size` of the data (stratified by default for
    classification), optionally shuffling first."""
    def __init__(self, eval_size, stratify=True, shuffle=False, seed=42):
        self.eval_size = eval_size
        self.stratify = stratify
        self.shuffle = shuffle
        self.random = np.random.RandomState(seed)
    def __call__(self, X, y, net):
        if self.shuffle:
            _shuffle_arrays([X, y] if y is not None else [X], self.random)
        if self.eval_size:
            # use the first fold of a K-fold split, K = 1 / eval_size
            if net.regression or not self.stratify:
                kf = KFold(y.shape[0], round(1. / self.eval_size))
            else:
                kf = StratifiedKFold(y, round(1. / self.eval_size))
            train_indices, valid_indices = next(iter(kf))
            X_train = _sldict(X, train_indices)
            y_train = _sldict(y, train_indices)
            X_valid = _sldict(X, valid_indices)
            y_valid = _sldict(y, valid_indices)
        else:
            # no hold-out requested: validation parts are empty slices
            X_train, y_train = X, y
            X_valid, y_valid = _sldict(X, slice(0,0)), _sldict(y, slice(0,0))
        return X_train, X_valid, y_train, y_valid
class LegacyTrainTestSplit(object):  # BBB
    """Backwards-compatibility shim that delegates splitting to the
    deprecated ``train_test_split`` method defined on the net itself."""

    def __init__(self, eval_size=0.2):
        self.eval_size = eval_size

    def __call__(self, X, y, net):
        split = net.train_test_split
        return split(X, y, self.eval_size)
class NeuralNet(BaseEstimator):
"""A configurable Neural Network estimator based on Lasagne.
Compatible with scikit-learn estimators.
Attributes
----------
train_history_:
A list of network training info for each epoch.
Each index contains a dictionary with the following keys
* epoch - The epoch number
* train_loss_best - True if this epoch had the best training loss so far
* valid_loss_best - True if this epoch had the best validation loss so far
* train_loss - The training loss for this epoch
* valid_loss - The validation loss for this epoch
* valid_accuracy - The validation accuracy for this epoch
layers_: A dictionary of lasagne layers keyed by the layer's name, or the layer's index
layer_reference_params:
A list of Lasagne layer parameter names that may reference
other layers, excluding 'incoming' and 'incomings'.
"""
layer_reference_params = ['mask_input']
def __init__(
        self,
        layers,
        update=nesterov_momentum,
        loss=None,  # BBB
        objective=objective,
        objective_loss_function=None,
        batch_iterator_train=BatchIterator(batch_size=128),
        batch_iterator_test=BatchIterator(batch_size=128),
        regression=False,
        max_epochs=100,
        train_split=TrainSplit(eval_size=0.2),
        custom_scores=None,
        scores_train=None,
        scores_valid=None,
        X_tensor_type=None,
        y_tensor_type=None,
        use_label_encoder=False,
        on_batch_finished=None,
        on_epoch_finished=None,
        on_training_started=None,
        on_training_finished=None,
        more_params=None,
        check_input=True,
        verbose=0,
        **kwargs
        ):
    """
    Initialize a Neural Network
    Parameters
    ----------
    layers:
        A list of lasagne layers to compose into the final neural net.
        See :ref:`layer-def`
    update:
        The update function to use when training. Uses the form
        provided by the :mod:`lasagne.updates` implementations.
    objective:
        The objective function to use when training. The callable
        will be passed the NeuralNetwork's :attr:`.layers_`
        attribute as the first argument, and the output target as
        the second argument.
    max_epochs:
        The number of epochs to train. This is used as the
        default when calling the :meth:`.fit` method without an
        epochs argument.
    Other Parameters
    ----------------
    batch_iterator_train:
        The sample iterator to use while training the network.
    batch_iterator_test:
        The sample Iterator to use while testing and validating
        the network.
    regression:
        Whether or not this is a regressor network. Determines
        the default objective and scoring functions.
    train_split:
        The method used to separate training and validation
        samples. See :class:`TrainSplit` for the default
        implementation.
    y_tensor_type:
        The type of tensor to use to hold the network's output.
        Typically ``T.ivector`` (the default) for classification
        tasks.
    on_training_started, on_batch_finished, on_epoch_finished,
    on_training_finished:
        A list of functions which are called during training at
        the corresponding times.
        The functions will be passed the NeuralNet as the first
        parameter and its :attr:`.train_history_` attribute as the
        second parameter.
    custom_scores:
        A list of callable custom scoring functions.
        The functions will be passed the expected y values as the
        first argument, and the predicted y_values as the second
        argument.
    use_label_encoder:
        If true, all y_values will be encoded using a
        :class:`sklearn.preprocessing.LabelEncoder` instance.
    verbose:
        The verbosity level of the network.
        Any non-zero value will cause the network to print the
        layer info at the start of training, as well as print a
        log of the training history after each epoch. Larger
        values will increase the amount of info shown.
    more_params:
        A set of more parameters to use when initializing layers
        defined using the dictionary method.
    Note
    ----
    * Extra arguments can be passed to the call to the *update*
      function by prepending the string ``update_`` to the
      corresponding argument name,
      e.g. ``update_learning_rate=0.01`` will define the
      ``learning_rate`` parameter of the update function.
    * Extra arguments can be provided to the objective call
      through the Neural Network by prepending the string
      ``objective_`` to the corresponding argument name.
    """
    # --- Backwards-compatibility (BBB) checks for removed/renamed args ---
    if loss is not None:
        raise ValueError(
            "The 'loss' parameter was removed, please use "
            "'objective_loss_function' instead.")  # BBB
    if hasattr(objective, 'get_loss'):
        raise ValueError(
            "The 'Objective' class is no longer supported, please "
            "use 'nolearn.lasagne.objective' or similar.")  # BBB
    if objective_loss_function is None:
        # Sensible default loss depending on the task type.
        objective_loss_function = (
            squared_error if regression else categorical_crossentropy)
    if hasattr(self, 'train_test_split'):  # BBB
        warn("The 'train_test_split' method has been deprecated, please "
             "use the 'train_split' parameter instead.")
        train_split = LegacyTrainTestSplit(
            eval_size=kwargs.pop('eval_size', 0.2))
    if 'eval_size' in kwargs:  # BBB
        warn("The 'eval_size' argument has been deprecated, please use "
             "the 'train_split' parameter instead, e.g.\n"
             "train_split=TrainSplit(eval_size=0.4)")
        train_split.eval_size = kwargs.pop('eval_size')
    if y_tensor_type is None:
        # Regression targets are float matrices; classification targets
        # are integer class-index vectors.
        if regression:
            y_tensor_type = T.TensorType(
                theano.config.floatX, (False, False))
        else:
            y_tensor_type = T.ivector
    if X_tensor_type is not None:
        raise ValueError(
            "The 'X_tensor_type' parameter has been removed. "
            "It's unnecessary.")  # BBB
    if 'custom_score' in kwargs:
        warn("The 'custom_score' argument has been deprecated, please use "
             "the 'custom_scores' parameter instead, which is just "
             "a list of custom scores e.g.\n"
             "custom_scores=[('first output', lambda y1, y2: abs(y1[0,0]-y2[0,0])), ('second output', lambda y1,y2: abs(y1[0,1]-y2[0,1]))]")
        # add it to custom_scores
        if custom_scores is None:
            custom_scores = [kwargs.pop('custom_score')]
        else:
            custom_scores.append(kwargs.pop('custom_score'))
    # Wrap layer lists in a clone-friendly list subclass (see get_params).
    if isinstance(layers, Layer):
        layers = _list([layers])
    elif isinstance(layers, Iterable):
        layers = _list(layers)
    self.layers = layers
    self.update = update
    self.objective = objective
    self.objective_loss_function = objective_loss_function
    self.batch_iterator_train = batch_iterator_train
    self.batch_iterator_test = batch_iterator_test
    self.regression = regression
    self.max_epochs = max_epochs
    self.train_split = train_split
    self.custom_scores = custom_scores
    self.scores_train = scores_train or []
    self.scores_valid = scores_valid or []
    self.y_tensor_type = y_tensor_type
    self.use_label_encoder = use_label_encoder
    self.on_batch_finished = on_batch_finished or []
    self.on_epoch_finished = on_epoch_finished or []
    self.on_training_started = on_training_started or []
    self.on_training_finished = on_training_finished or []
    self.more_params = more_params or {}
    self.check_input = check_input
    self.verbose = verbose
    if self.verbose:
        # XXX: PrintLog should come before any other handlers,
        # because early stopping will otherwise cause the last
        # line not to be printed
        self.on_epoch_finished.append(PrintLog())
        self.on_training_started.append(PrintLayerInfo())
    # Remaining kwargs become attributes ('<layer>_<param>' style);
    # they are validated later by _check_for_unused_kwargs().
    for key in kwargs.keys():
        assert not hasattr(self, key)
    vars(self).update(kwargs)
    self._kwarg_keys = list(kwargs.keys())
    self.train_history_ = []
    if 'batch_iterator' in kwargs:  # BBB
        raise ValueError(
            "The 'batch_iterator' argument has been replaced. "
            "Use 'batch_iterator_train' and 'batch_iterator_test' instead."
            )
def _check_for_unused_kwargs(self):
    # Every extra constructor kwarg must be prefixed by the name of a
    # known layer, or by 'update'/'objective'; anything else is a typo.
    known = self.layers_.keys() + ['update', 'objective']
    prefixes = tuple('{}_'.format(name) for name in known)
    for key in self._kwarg_keys:
        if not key.startswith(prefixes):
            raise ValueError("Unused kwarg: {}".format(key))
def _check_good_input(self, X, y=None):
    """Validate that X (array or dict of arrays) and y have consistent
    lengths; reshape 1-d regression targets into a column vector."""
    if isinstance(X, dict):
        lengths = set(len(value) for value in X.values())
        if len(lengths) > 1:
            raise ValueError("Not all values of X are of equal length.")
        x_len = lengths.pop()
    else:
        x_len = len(X)
    if y is not None and len(y) != x_len:
        raise ValueError("X and y are not of equal length.")
    if self.regression and y is not None and y.ndim == 1:
        # Theano expects a (n_samples, 1) matrix for regression targets.
        y = y.reshape(-1, 1)
    return X, y
def initialize(self):
    """Initializes the network. Checks that no extra kwargs were
    passed to the constructor, and compiles the train, predict,
    and evaluation functions.
    Subsequent calls to this function will return without any action.
    """
    # Already compiled once: nothing to do.
    if getattr(self, '_initialized', False):
        return
    out = getattr(self, '_output_layers', None)
    if out is None:
        # Lazily build the Lasagne layer graph on first use.
        self.initialize_layers()
    self._check_for_unused_kwargs()
    # Compile the Theano train/eval/predict functions.
    iter_funcs = self._create_iter_funcs(
        self.layers_, self.objective, self.update,
        self.y_tensor_type,
        )
    self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs
    self._initialized = True
def _get_params_for(self, name):
    """Collect kwargs whose names start with ``'<name>_'``, returned
    with the prefix stripped; ``more_params`` entries take precedence
    over attributes set on the instance."""
    prefix = '{}_'.format(name)
    cut = len(prefix)
    merged = itertools.chain(
        vars(self).items(), self.more_params.items())
    return dict(
        (key[cut:], value)
        for key, value in merged
        if key.startswith(prefix))
def _layer_name(self, layer_class, index):
    # Derive a default name from the class, e.g. DenseLayer at
    # position 2 -> 'dense2'.
    base = layer_class.__name__.lower().replace("layer", "")
    return "{}{}".format(base, index)
def initialize_layers(self, layers=None):
    """Sets up the Lasagne layers
    :param layers: The dictionary of layers, or a
    :class:`lasagne.Layers` instance, describing the underlying
    network
    :return: the output layer of the underlying lasagne network.
    :seealso: :ref:`layer-def`
    """
    if layers is not None:
        self.layers = layers
    self.layers_ = Layers()
    #If a Layer, or a list of Layers was passed in
    if isinstance(self.layers[0], Layer):
        # Pre-built Lasagne graph: just register every layer by name.
        for out_layer in self.layers:
            for i, layer in enumerate(get_all_layers(out_layer)):
                if layer not in self.layers_.values():
                    name = layer.name or self._layer_name(layer.__class__, i)
                    self.layers_[name] = layer
                    if self._get_params_for(name) != {}:
                        raise ValueError(
                            "You can't use keyword params when passing a Lasagne "
                            "instance object as the 'layers' parameter of "
                            "'NeuralNet'."
                            )
        self._output_layers = self.layers
        return self.layers
    # 'self.layers' are a list of '(Layer class, kwargs)', so
    # we'll have to actually instantiate the layers given the
    # arguments:
    layer = None
    for i, layer_def in enumerate(self.layers):
        if isinstance(layer_def[1], dict):
            # Newer format: (Layer, {'layer': 'kwargs'})
            layer_factory, layer_kw = layer_def
            layer_kw = layer_kw.copy()
        else:
            # The legacy format: ('name', Layer)
            layer_name, layer_factory = layer_def
            layer_kw = {'name': layer_name}
        if isinstance(layer_factory, str):
            # Allow dotted-path strings, resolved via pydoc.locate.
            layer_factory = locate(layer_factory)
            assert layer_factory is not None
        if 'name' not in layer_kw:
            layer_kw['name'] = self._layer_name(layer_factory, i)
        # Merge in '<layername>_<param>' kwargs given to the constructor.
        more_params = self._get_params_for(layer_kw['name'])
        layer_kw.update(more_params)
        if layer_kw['name'] in self.layers_:
            raise ValueError(
                "Two layers with name {}.".format(layer_kw['name']))
        # Any layers that aren't subclasses of InputLayer are
        # assumed to require an 'incoming' paramter. By default,
        # we'll use the previous layer as input:
        try:
            is_input_layer = issubclass(layer_factory, InputLayer)
        except TypeError:
            is_input_layer = False
        if not is_input_layer:
            if 'incoming' in layer_kw:
                layer_kw['incoming'] = self.layers_[
                    layer_kw['incoming']]
            elif 'incomings' in layer_kw:
                layer_kw['incomings'] = [
                    self.layers_[name] for name in layer_kw['incomings']]
            else:
                layer_kw['incoming'] = layer
        # Deal with additional string parameters that may
        # reference other layers; currently only 'mask_input'.
        for param in self.layer_reference_params:
            if param in layer_kw:
                val = layer_kw[param]
                if isinstance(val, basestring):
                    layer_kw[param] = self.layers_[val]
        # String-valued W/b mean "share weights with that layer".
        for attr in ('W', 'b'):
            if isinstance(layer_kw.get(attr), str):
                name = layer_kw[attr]
                layer_kw[attr] = getattr(self.layers_[name], attr, None)
        try:
            layer_wrapper = layer_kw.pop('layer_wrapper', None)
            layer = layer_factory(**layer_kw)
        except TypeError as e:
            msg = ("Failed to instantiate {} with args {}.\n"
                   "Maybe parameter names have changed?".format(
                       layer_factory, layer_kw))
            chain_exception(TypeError(msg), e)
        self.layers_[layer_kw['name']] = layer
        if layer_wrapper is not None:
            layer = layer_wrapper(layer)
            self.layers_["LW_%s" % layer_kw['name']] = layer
    self._output_layers = [layer]
    return [layer]
def _create_iter_funcs(self, layers, objective, update, output_type):
    """Build the Theano computation graph and compile the train,
    eval and predict functions; returns them as a 3-tuple."""
    y_batch = output_type('y_batch')
    objective_kw = self._get_params_for('objective')
    loss_train = objective(
        layers, target=y_batch, **objective_kw)
    # Deterministic pass (e.g. dropout disabled) for evaluation.
    loss_eval = objective(
        layers, target=y_batch, deterministic=True, **objective_kw)
    output_layer = self._output_layers
    predict_proba = get_output(output_layer, None, deterministic=True)
    if not self.regression:
        predict = predict_proba[0].argmax(axis=1)
        accuracy = T.mean(T.eq(predict, y_batch))
    else:
        # No notion of accuracy for regression; reuse the eval loss.
        accuracy = loss_eval
    scores_train = [
        s[1](predict_proba, y_batch) for s in self.scores_train]
    scores_valid = [
        s[1](predict_proba, y_batch) for s in self.scores_valid]
    all_params = self.get_all_params(trainable=True)
    grads = theano.grad(loss_train, all_params)
    for idx, param in enumerate(all_params):
        # Honor per-parameter gradient scaling set via grad_scale().
        grad_scale = getattr(param.tag, 'grad_scale', 1)
        if grad_scale != 1:
            grads[idx] *= grad_scale
    update_params = self._get_params_for('update')
    updates = update(grads, all_params, **update_params)
    # One Theano input per InputLayer, plus the target for train/eval.
    input_layers = [layer for layer in layers.values()
                    if isinstance(layer, InputLayer)]
    X_inputs = [theano.In(input_layer.input_var, name=input_layer.name)
                for input_layer in input_layers]
    inputs = X_inputs + [theano.In(y_batch, name="y")]
    train_iter = theano.function(
        inputs=inputs,
        outputs=[loss_train] + scores_train,
        updates=updates,
        allow_input_downcast=True,
        on_unused_input='ignore',
        )
    eval_iter = theano.function(
        inputs=inputs,
        outputs=[loss_eval, accuracy] + scores_valid,
        allow_input_downcast=True,
        on_unused_input='ignore',
        )
    predict_iter = theano.function(
        inputs=X_inputs,
        outputs=predict_proba,
        allow_input_downcast=True,
        on_unused_input='ignore',
        )
    return train_iter, eval_iter, predict_iter
def fit(self, X, y, epochs=None):
    """
    Runs the training loop for a given number of epochs
    :param X: The input data
    :param y: The ground truth
    :param epochs: The number of epochs to run, if `None` runs for the
        network's :attr:`max_epochs`
    :return: This instance
    """
    if self.check_input:
        X, y = self._check_good_input(X, y)
    if self.use_label_encoder:
        # Encode arbitrary labels to contiguous int32 indices for Theano.
        self.enc_ = LabelEncoder()
        y = self.enc_.fit_transform(y).astype(np.int32)
        self.classes_ = self.enc_.classes_
    self.initialize()
    try:
        self.train_loop(X, y, epochs=epochs)
    except KeyboardInterrupt:
        # Let the user interrupt training and keep the partial fit.
        pass
    return self
def partial_fit(self, X, y, classes=None):
    """
    Runs a single epoch using the provided data
    :return: This instance
    """
    # 'classes' is accepted for sklearn API compatibility but unused.
    return self.fit(X, y, epochs=1)
def train_loop(self, X, y, epochs=None):
    """Run the main epoch/batch training loop, recording per-epoch
    statistics into ``self.train_history_`` and invoking callbacks."""
    epochs = epochs or self.max_epochs
    X_train, X_valid, y_train, y_valid = self.train_split(X, y, self)
    # Normalize all callback hooks to lists.
    on_batch_finished = self.on_batch_finished
    if not isinstance(on_batch_finished, (list, tuple)):
        on_batch_finished = [on_batch_finished]
    on_epoch_finished = self.on_epoch_finished
    if not isinstance(on_epoch_finished, (list, tuple)):
        on_epoch_finished = [on_epoch_finished]
    on_training_started = self.on_training_started
    if not isinstance(on_training_started, (list, tuple)):
        on_training_started = [on_training_started]
    on_training_finished = self.on_training_finished
    if not isinstance(on_training_finished, (list, tuple)):
        on_training_finished = [on_training_finished]
    epoch = 0
    # Seed the "best loss" trackers from any previous training history.
    best_valid_loss = (
        min([row['valid_loss'] for row in self.train_history_]) if
        self.train_history_ else np.inf
        )
    best_train_loss = (
        min([row['train_loss'] for row in self.train_history_]) if
        self.train_history_ else np.inf
        )
    for func in on_training_started:
        func(self, self.train_history_)
    num_epochs_past = len(self.train_history_)
    while epoch < epochs:
        epoch += 1
        train_outputs = []
        valid_outputs = []
        if self.custom_scores:
            custom_scores = [[] for _ in self.custom_scores]
        else:
            custom_scores = []
        t0 = time()
        # --- training pass ---
        batch_train_sizes = []
        for Xb, yb in self.batch_iterator_train(X_train, y_train):
            train_outputs.append(
                self.apply_batch_func(self.train_iter_, Xb, yb))
            batch_train_sizes.append(len(Xb))
            for func in on_batch_finished:
                func(self, self.train_history_)
        # --- validation pass ---
        batch_valid_sizes = []
        for Xb, yb in self.batch_iterator_test(X_valid, y_valid):
            valid_outputs.append(
                self.apply_batch_func(self.eval_iter_, Xb, yb))
            batch_valid_sizes.append(len(Xb))
            if self.custom_scores:
                y_prob = self.apply_batch_func(self.predict_iter_, Xb)
                y_prob = y_prob[0] if len(y_prob) == 1 else y_prob
                for custom_scorer, custom_score in zip(
                        self.custom_scores, custom_scores):
                    custom_score.append(custom_scorer[1](yb, y_prob))
        # Average each output column over batches, weighted by batch size.
        train_outputs = np.array(train_outputs, dtype=object).T
        train_outputs = [
            np.average(
                [np.mean(row) for row in col],
                weights=batch_train_sizes,
                )
            for col in train_outputs
            ]
        if valid_outputs:
            valid_outputs = np.array(valid_outputs, dtype=object).T
            valid_outputs = [
                np.average(
                    [np.mean(row) for row in col],
                    weights=batch_valid_sizes,
                    )
                for col in valid_outputs
                ]
        if custom_scores:
            avg_custom_scores = np.average(
                custom_scores, weights=batch_valid_sizes, axis=1)
        if train_outputs[0] < best_train_loss:
            best_train_loss = train_outputs[0]
        if valid_outputs and valid_outputs[0] < best_valid_loss:
            best_valid_loss = valid_outputs[0]
        # valid_outputs layout: [loss, accuracy, *scores_valid];
        # train_outputs layout: [loss, *scores_train].
        info = {
            'epoch': num_epochs_past + epoch,
            'train_loss': train_outputs[0],
            'train_loss_best': best_train_loss == train_outputs[0],
            'valid_loss': valid_outputs[0]
            if valid_outputs else np.nan,
            'valid_loss_best': best_valid_loss == valid_outputs[0]
            if valid_outputs else np.nan,
            'valid_accuracy': valid_outputs[1]
            if valid_outputs else np.nan,
            'dur': time() - t0,
            }
        if self.custom_scores:
            for index, custom_score in enumerate(self.custom_scores):
                info[custom_score[0]] = avg_custom_scores[index]
        if self.scores_train:
            for index, (name, func) in enumerate(self.scores_train):
                info[name] = train_outputs[index + 1]
        if self.scores_valid:
            for index, (name, func) in enumerate(self.scores_valid):
                info[name] = valid_outputs[index + 2]
        self.train_history_.append(info)
        try:
            for func in on_epoch_finished:
                func(self, self.train_history_)
        except StopIteration:
            # Callbacks (e.g. early stopping) may stop training.
            break
    for func in on_training_finished:
        func(self, self.train_history_)
@staticmethod
def apply_batch_func(func, Xb, yb=None):
    """Call a compiled Theano function with one batch; dict inputs are
    expanded to keyword args (one per named input layer)."""
    if isinstance(Xb, dict):
        kwargs = dict(Xb)
        if yb is not None:
            kwargs['y'] = yb
        return func(**kwargs)
    args = (Xb,) if yb is None else (Xb, yb)
    return func(*args)
def predict_proba(self, X):
    """Return raw network outputs (class probabilities for
    classification) for X, computed batch-wise and stacked."""
    per_batch = []
    for Xb, yb in self.batch_iterator_test(X):
        per_batch.append(self.apply_batch_func(self.predict_iter_, Xb))
    stacked = tuple(np.vstack(chunks) for chunks in zip(*per_batch))
    # Unwrap single-output networks for convenience.
    return stacked if len(stacked) > 1 else stacked[0]
def predict(self, X):
    """Predict values (regression) or class labels (classification)."""
    if self.regression:
        return self.predict_proba(X)
    labels = np.argmax(self.predict_proba(X), axis=1)
    if self.use_label_encoder:
        # Map encoded integer indices back to the original labels.
        labels = self.enc_.inverse_transform(labels)
    return labels
def get_output(self, layer, X):
    """Compute the activations of *layer* (name or Layer instance) for
    input X; compiles and caches one Theano function per layer."""
    if isinstance(layer, basestring):
        layer = self.layers_[layer]
    fn_cache = getattr(self, '_get_output_fn_cache', None)
    if fn_cache is None:
        fn_cache = {}
        self._get_output_fn_cache = fn_cache
    if layer not in fn_cache:
        # NOTE(review): uses only the first layer's input_var — looks
        # like multi-input networks aren't supported here; confirm.
        xs = self.layers_[0].input_var.type()
        get_activity = theano.function([xs], get_output(layer, xs))
        fn_cache[layer] = get_activity
    else:
        get_activity = fn_cache[layer]
    outputs = []
    for Xb, yb in self.batch_iterator_test(X):
        outputs.append(get_activity(Xb))
    return np.vstack(outputs)
def score(self, X, y):
    """Return the accuracy (classification) or R^2 (regression) of the
    network's predictions on (X, y).

    :param X: input samples
    :param y: ground-truth targets
    """
    score = r2_score if self.regression else accuracy_score
    # Bug fix: sklearn metrics take (y_true, y_pred) in that order.
    # r2_score is NOT symmetric, so passing predictions first produced
    # wrong R^2 values for regression nets (accuracy was unaffected).
    return float(score(y, self.predict(X)))
def get_all_layers(self):
    # All Lasagne layers of the network, in registration order.
    return self.layers_.values()
def get_all_params(self, **kwargs):
    # Deduplicated parameters of all layers; kwargs filter by tag,
    # e.g. trainable=True.
    layers = self.get_all_layers()
    params = sum([l.get_params(**kwargs) for l in layers], [])
    return unique(params)
def get_all_params_values(self):
    """Return an OrderedDict mapping each layer name to the list of
    its parameter values (as numpy arrays)."""
    values = OrderedDict()
    for name, layer in self.layers_.items():
        values[name] = [param.get_value() for param in layer.get_params()]
    return values
def load_params_from(self, source):
    """Load parameter values from *source*: a pickle filename, another
    NeuralNet instance, or a dict as returned by
    :meth:`get_all_params_values`.  Layers whose parameter shapes don't
    match are skipped (with a message when verbose)."""
    self.initialize()
    if isinstance(source, basestring):
        with open(source, 'rb') as f:
            source = pickle.load(f)
    if isinstance(source, NeuralNet):
        source = source.get_all_params_values()
    success = "Loaded parameters to layer '{}' (shape {})."
    failure = ("Could not load parameters to layer '{}' because "
               "shapes did not match: {} vs {}.")
    for key, values in source.items():
        layer = self.layers_.get(key)
        if layer is not None:
            for p1, p2v in zip(layer.get_params(), values):
                shape1 = p1.get_value().shape
                shape2 = p2v.shape
                shape1s = 'x'.join(map(str, shape1))
                shape2s = 'x'.join(map(str, shape2))
                if shape1 == shape2:
                    p1.set_value(p2v)
                    if self.verbose:
                        print(success.format(
                            key, shape1s, shape2s))
                else:
                    if self.verbose:
                        print(failure.format(
                            key, shape1s, shape2s))
def save_params_to(self, fname):
    """Pickle all layer parameter values to the file ``fname``."""
    values = self.get_all_params_values()
    with open(fname, 'wb') as f:
        # -1 selects the highest available pickle protocol.
        pickle.dump(values, f, -1)
def load_weights_from(self, source):
    # Deprecated alias for load_params_from().
    warn("The 'load_weights_from' method will be removed in nolearn 0.6. "
         "Please use 'load_params_from' instead.")
    if isinstance(source, list):
        raise ValueError(
            "Loading weights from a list of parameter values is no "
            "longer supported. Please send me something like the "
            "return value of 'net.get_all_params_values()' instead.")
    return self.load_params_from(source)
def save_weights_to(self, fname):
    # Deprecated alias for save_params_to().
    warn("The 'save_weights_to' method will be removed in nolearn 0.6. "
         "Please use 'save_params_to' instead.")
    return self.save_params_to(fname)
def __setstate__(self, state):  # BBB for pickles that don't have the graph
    # Rebuild the compiled Theano functions after unpickling.
    self.__dict__.update(state)
    self.initialize()
def get_params(self, deep=True):
    """Return estimator params, with 'layers' wrapped so that
    sklearn's clone() copies it as an opaque value."""
    params = super(NeuralNet, self).get_params(deep=deep)
    # Incidentally, Lasagne layers have a 'get_params' too, which
    # for sklearn's 'clone' means it would treat it in a special
    # way when cloning.  Wrapping the list of layers in a custom
    # list type does the trick here, but of course it's crazy:
    params['layers'] = _list(params['layers'])
    return _dict(params)
def _get_param_names(self):
    """Extend sklearn's parameter names with the extra **kwargs keys."""
    # This allows us to have **kwargs in __init__ (woot!):
    param_names = super(NeuralNet, self)._get_param_names()
    return param_names + self._kwarg_keys
|
dnouri/nolearn
|
nolearn/lasagne/base.py
|
NeuralNet.initialize
|
python
|
def initialize(self):
if getattr(self, '_initialized', False):
return
out = getattr(self, '_output_layers', None)
if out is None:
self.initialize_layers()
self._check_for_unused_kwargs()
iter_funcs = self._create_iter_funcs(
self.layers_, self.objective, self.update,
self.y_tensor_type,
)
self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs
self._initialized = True
|
Initializes the network. Checks that no extra kwargs were
passed to the constructor, and compiles the train, predict,
and evaluation functions.
Subsequent calls to this function will return without any action.
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/base.py#L473-L493
| null |
class NeuralNet(BaseEstimator):
"""A configurable Neural Network estimator based on Lasagne.
Compatible with scikit-learn estimators.
Attributes
----------
train_history_:
A list of network training info for each epoch.
Each index contains a dictionary with the following keys
* epoch - The epoch number
* train_loss_best - True if this epoch had the best training loss so far
* valid_loss_best - True if this epoch had the best validation loss so far
* train_loss - The training loss for this epoch
* valid_loss - The validation loss for this epoch
* valid_accuracy - The validation accuracy for this epoch
layers_: A dictionary of lasagne layers keyed by the layer's name, or the layer's index
layer_reference_params:
A list of Lasagne layer parameter names that may reference
other layers, excluding 'incoming' and 'incomings'.
"""
layer_reference_params = ['mask_input']
def __init__(
self,
layers,
update=nesterov_momentum,
loss=None, # BBB
objective=objective,
objective_loss_function=None,
batch_iterator_train=BatchIterator(batch_size=128),
batch_iterator_test=BatchIterator(batch_size=128),
regression=False,
max_epochs=100,
train_split=TrainSplit(eval_size=0.2),
custom_scores=None,
scores_train=None,
scores_valid=None,
X_tensor_type=None,
y_tensor_type=None,
use_label_encoder=False,
on_batch_finished=None,
on_epoch_finished=None,
on_training_started=None,
on_training_finished=None,
more_params=None,
check_input=True,
verbose=0,
**kwargs
):
"""
Initialize a Neural Network
Parameters
----------
layers:
A list of lasagne layers to compose into the final neural net.
See :ref:`layer-def`
update:
The update function to use when training. Uses the form
provided by the :mod:`lasagne.updates` implementations.
objective:
The objective function to use when training. The callable
will be passed the NeuralNetwork's :attr:`.layers_`
attribute as the first argument, and the output target as
the second argument.
max_epochs:
The number of epochs to train. This is used as the
default when calling the :meth:`.fit` method without an
epochs argument.
Other Parameters
----------------
batch_iterator_train:
The sample iterator to use while training the network.
batch_iterator_test:
The sample Iterator to use while testing and validating
the network.
regression:
Whether or not this is a regressor network. Determines
the default objective and scoring functions.
train_split:
The method used to separate training and validation
samples. See :class:`TrainSplit` for the default
implementation.
y_tensor_type:
The type of tensor to use to hold the network's output.
Typically ``T.ivector`` (the default) for classification
tasks.
on_training_started, on_batch_finished, on_epoch_finished,
on_training_finished:
A list of functions which are called during training at
the corresponding times.
The functions will be passed the NeuralNet as the first
parameter and its :attr:`.train_history_` attribute as the
second parameter.
custom_scores:
A list of callable custom scoring functions.
The functions will be passed the expected y values as the
first argument, and the predicted y_values as the second
argument.
use_label_encoder:
If true, all y_values will be encoded using a
:class:`sklearn.preprocessing.LabelEncoder` instance.
verbose:
The verbosity level of the network.
Any non-zero value will cause the network to print the
layer info at the start of training, as well as print a
log of the training history after each epoch. Larger
values will increase the amount of info shown.
more_params:
A set of more parameters to use when initializing layers
defined using the dictionary method.
Note
----
* Extra arguments can be passed to the call to the *update*
function by prepending the string ``update_`` to the
corresponding argument name,
e.g. ``update_learning_rate=0.01`` will define the
``learning_rate`` parameter of the update function.
* Extra arguments can be provided to the objective call
through the Neural Network by prepending the string
``objective_`` to the corresponding argument name.
"""
if loss is not None:
raise ValueError(
"The 'loss' parameter was removed, please use "
"'objective_loss_function' instead.") # BBB
if hasattr(objective, 'get_loss'):
raise ValueError(
"The 'Objective' class is no longer supported, please "
"use 'nolearn.lasagne.objective' or similar.") # BBB
if objective_loss_function is None:
objective_loss_function = (
squared_error if regression else categorical_crossentropy)
if hasattr(self, 'train_test_split'): # BBB
warn("The 'train_test_split' method has been deprecated, please "
"use the 'train_split' parameter instead.")
train_split = LegacyTrainTestSplit(
eval_size=kwargs.pop('eval_size', 0.2))
if 'eval_size' in kwargs: # BBB
warn("The 'eval_size' argument has been deprecated, please use "
"the 'train_split' parameter instead, e.g.\n"
"train_split=TrainSplit(eval_size=0.4)")
train_split.eval_size = kwargs.pop('eval_size')
if y_tensor_type is None:
if regression:
y_tensor_type = T.TensorType(
theano.config.floatX, (False, False))
else:
y_tensor_type = T.ivector
if X_tensor_type is not None:
raise ValueError(
"The 'X_tensor_type' parameter has been removed. "
"It's unnecessary.") # BBB
if 'custom_score' in kwargs:
warn("The 'custom_score' argument has been deprecated, please use "
"the 'custom_scores' parameter instead, which is just "
"a list of custom scores e.g.\n"
"custom_scores=[('first output', lambda y1, y2: abs(y1[0,0]-y2[0,0])), ('second output', lambda y1,y2: abs(y1[0,1]-y2[0,1]))]")
# add it to custom_scores
if custom_scores is None:
custom_scores = [kwargs.pop('custom_score')]
else:
custom_scores.append(kwargs.pop('custom_score'))
if isinstance(layers, Layer):
layers = _list([layers])
elif isinstance(layers, Iterable):
layers = _list(layers)
self.layers = layers
self.update = update
self.objective = objective
self.objective_loss_function = objective_loss_function
self.batch_iterator_train = batch_iterator_train
self.batch_iterator_test = batch_iterator_test
self.regression = regression
self.max_epochs = max_epochs
self.train_split = train_split
self.custom_scores = custom_scores
self.scores_train = scores_train or []
self.scores_valid = scores_valid or []
self.y_tensor_type = y_tensor_type
self.use_label_encoder = use_label_encoder
self.on_batch_finished = on_batch_finished or []
self.on_epoch_finished = on_epoch_finished or []
self.on_training_started = on_training_started or []
self.on_training_finished = on_training_finished or []
self.more_params = more_params or {}
self.check_input = check_input
self.verbose = verbose
if self.verbose:
# XXX: PrintLog should come before any other handlers,
# because early stopping will otherwise cause the last
# line not to be printed
self.on_epoch_finished.append(PrintLog())
self.on_training_started.append(PrintLayerInfo())
for key in kwargs.keys():
assert not hasattr(self, key)
vars(self).update(kwargs)
self._kwarg_keys = list(kwargs.keys())
self.train_history_ = []
if 'batch_iterator' in kwargs: # BBB
raise ValueError(
"The 'batch_iterator' argument has been replaced. "
"Use 'batch_iterator_train' and 'batch_iterator_test' instead."
)
def _check_for_unused_kwargs(self):
names = self.layers_.keys() + ['update', 'objective']
for k in self._kwarg_keys:
for n in names:
prefix = '{}_'.format(n)
if k.startswith(prefix):
break
else:
raise ValueError("Unused kwarg: {}".format(k))
def _check_good_input(self, X, y=None):
if isinstance(X, dict):
lengths = [len(X1) for X1 in X.values()]
if len(set(lengths)) > 1:
raise ValueError("Not all values of X are of equal length.")
x_len = lengths[0]
else:
x_len = len(X)
if y is not None:
if len(y) != x_len:
raise ValueError("X and y are not of equal length.")
if self.regression and y is not None and y.ndim == 1:
y = y.reshape(-1, 1)
return X, y
def _get_params_for(self, name):
collected = {}
prefix = '{}_'.format(name)
params = vars(self)
more_params = self.more_params
for key, value in itertools.chain(params.items(), more_params.items()):
if key.startswith(prefix):
collected[key[len(prefix):]] = value
return collected
def _layer_name(self, layer_class, index):
return "{}{}".format(
layer_class.__name__.lower().replace("layer", ""), index)
def initialize_layers(self, layers=None):
    """Sets up the Lasagne layers

    :param layers: The dictionary of layers, or a
        :class:`lasagne.Layers` instance, describing the underlying
        network
    :return: the output layer of the underlying lasagne network.
    :seealso: :ref:`layer-def`
    """
    if layers is not None:
        self.layers = layers
    self.layers_ = Layers()

    # Case 1: the caller passed already-instantiated Lasagne Layer
    # objects (one or more output layers).  Register every layer in
    # each graph by name; no per-layer kwargs are allowed here.
    #If a Layer, or a list of Layers was passed in
    if isinstance(self.layers[0], Layer):
        for out_layer in self.layers:
            for i, layer in enumerate(get_all_layers(out_layer)):
                if layer not in self.layers_.values():
                    name = layer.name or self._layer_name(layer.__class__, i)
                    self.layers_[name] = layer
                    if self._get_params_for(name) != {}:
                        raise ValueError(
                            "You can't use keyword params when passing a Lasagne "
                            "instance object as the 'layers' parameter of "
                            "'NeuralNet'."
                            )
        self._output_layers = self.layers
        return self.layers

    # 'self.layers' are a list of '(Layer class, kwargs)', so
    # we'll have to actually instantiate the layers given the
    # arguments:
    layer = None
    for i, layer_def in enumerate(self.layers):
        if isinstance(layer_def[1], dict):
            # Newer format: (Layer, {'layer': 'kwargs'})
            layer_factory, layer_kw = layer_def
            layer_kw = layer_kw.copy()
        else:
            # The legacy format: ('name', Layer)
            layer_name, layer_factory = layer_def
            layer_kw = {'name': layer_name}

        if isinstance(layer_factory, str):
            # Layer given as a dotted path string; resolve to a class.
            layer_factory = locate(layer_factory)
            assert layer_factory is not None

        if 'name' not in layer_kw:
            layer_kw['name'] = self._layer_name(layer_factory, i)

        # Merge in any '<layername>_<param>' kwargs given to __init__.
        more_params = self._get_params_for(layer_kw['name'])
        layer_kw.update(more_params)

        if layer_kw['name'] in self.layers_:
            raise ValueError(
                "Two layers with name {}.".format(layer_kw['name']))

        # Any layers that aren't subclasses of InputLayer are
        # assumed to require an 'incoming' parameter.  By default,
        # we'll use the previous layer as input:
        try:
            is_input_layer = issubclass(layer_factory, InputLayer)
        except TypeError:
            # layer_factory may be a factory function, not a class.
            is_input_layer = False
        if not is_input_layer:
            if 'incoming' in layer_kw:
                # A layer name was given; resolve it to the instance.
                layer_kw['incoming'] = self.layers_[
                    layer_kw['incoming']]
            elif 'incomings' in layer_kw:
                layer_kw['incomings'] = [
                    self.layers_[name] for name in layer_kw['incomings']]
            else:
                layer_kw['incoming'] = layer

        # Deal with additional string parameters that may
        # reference other layers; currently only 'mask_input'.
        for param in self.layer_reference_params:
            if param in layer_kw:
                val = layer_kw[param]
                if isinstance(val, basestring):
                    layer_kw[param] = self.layers_[val]

        # 'W' / 'b' given as a layer name means: share that layer's
        # corresponding parameter with this layer (weight tying).
        for attr in ('W', 'b'):
            if isinstance(layer_kw.get(attr), str):
                name = layer_kw[attr]
                layer_kw[attr] = getattr(self.layers_[name], attr, None)

        try:
            layer_wrapper = layer_kw.pop('layer_wrapper', None)
            layer = layer_factory(**layer_kw)
        except TypeError as e:
            msg = ("Failed to instantiate {} with args {}.\n"
                   "Maybe parameter names have changed?".format(
                       layer_factory, layer_kw))
            chain_exception(TypeError(msg), e)
        self.layers_[layer_kw['name']] = layer
        if layer_wrapper is not None:
            # Register the wrapped layer under an 'LW_'-prefixed name
            # and make it the input of the next layer.
            layer = layer_wrapper(layer)
            self.layers_["LW_%s" % layer_kw['name']] = layer

    # The last instantiated layer is the network's single output.
    self._output_layers = [layer]
    return [layer]
def _create_iter_funcs(self, layers, objective, update, output_type):
    """Compile the Theano training, evaluation and prediction functions.

    :param layers: ordered mapping of name -> lasagne layer
    :param objective: callable building the symbolic loss expression
    :param update: callable building the parameter updates
        (e.g. ``nesterov_momentum``)
    :param output_type: tensor type constructor for the target
        variable (e.g. ``T.ivector``)
    :return: the tuple ``(train_iter, eval_iter, predict_iter)`` of
        compiled Theano functions
    """
    y_batch = output_type('y_batch')

    objective_kw = self._get_params_for('objective')

    # Two symbolic losses: stochastic (training, e.g. with dropout
    # active) and deterministic (evaluation).
    loss_train = objective(
        layers, target=y_batch, **objective_kw)
    loss_eval = objective(
        layers, target=y_batch, deterministic=True, **objective_kw)

    output_layer = self._output_layers
    predict_proba = get_output(output_layer, None, deterministic=True)
    if not self.regression:
        # Classification: accuracy is the fraction of argmax hits
        # on the first output layer.
        predict = predict_proba[0].argmax(axis=1)
        accuracy = T.mean(T.eq(predict, y_batch))
    else:
        # Regression: report the evaluation loss in the 'accuracy'
        # output slot.
        accuracy = loss_eval

    # User-supplied symbolic scores, evaluated alongside the losses.
    scores_train = [
        s[1](predict_proba, y_batch) for s in self.scores_train]
    scores_valid = [
        s[1](predict_proba, y_batch) for s in self.scores_valid]

    all_params = self.get_all_params(trainable=True)
    grads = theano.grad(loss_train, all_params)
    for idx, param in enumerate(all_params):
        # Optional per-parameter gradient scaling via the
        # 'grad_scale' tag on a shared variable.
        grad_scale = getattr(param.tag, 'grad_scale', 1)
        if grad_scale != 1:
            grads[idx] *= grad_scale
    update_params = self._get_params_for('update')
    updates = update(grads, all_params, **update_params)

    # One named Theano input per InputLayer, plus the target 'y'.
    input_layers = [layer for layer in layers.values()
                    if isinstance(layer, InputLayer)]

    X_inputs = [theano.In(input_layer.input_var, name=input_layer.name)
                for input_layer in input_layers]
    inputs = X_inputs + [theano.In(y_batch, name="y")]

    train_iter = theano.function(
        inputs=inputs,
        outputs=[loss_train] + scores_train,
        updates=updates,
        allow_input_downcast=True,
        on_unused_input='ignore',
        )
    eval_iter = theano.function(
        inputs=inputs,
        outputs=[loss_eval, accuracy] + scores_valid,
        allow_input_downcast=True,
        on_unused_input='ignore',
        )
    predict_iter = theano.function(
        inputs=X_inputs,
        outputs=predict_proba,
        allow_input_downcast=True,
        on_unused_input='ignore',
        )

    return train_iter, eval_iter, predict_iter
def fit(self, X, y, epochs=None):
    """
    Runs the training loop for a given number of epochs

    :param X: The input data
    :param y: The ground truth
    :param epochs: The number of epochs to run, if `None` runs for the
        network's :attr:`max_epochs`

    :return: This instance
    """
    if self.check_input:
        X, y = self._check_good_input(X, y)

    if self.use_label_encoder:
        # Encode arbitrary labels as contiguous int32 class indices.
        self.enc_ = LabelEncoder()
        y = self.enc_.fit_transform(y).astype(np.int32)
        self.classes_ = self.enc_.classes_
    self.initialize()

    try:
        self.train_loop(X, y, epochs=epochs)
    except KeyboardInterrupt:
        # Ctrl-C stops training early but keeps what was learned
        # so far.
        pass
    return self
def partial_fit(self, X, y, classes=None):
    """
    Runs a single epoch using the provided data

    The *classes* argument is accepted for scikit-learn API
    compatibility only and is ignored.

    :return: This instance
    """
    return self.fit(X, y, epochs=1)
def train_loop(self, X, y, epochs=None):
    """Run the main training loop.

    Trains for *epochs* epochs (defaulting to ``self.max_epochs``),
    appending one info dict per epoch to :attr:`train_history_` and
    invoking the ``on_*`` callback hooks at the appropriate times.
    An epoch callback may raise ``StopIteration`` to end training
    early (this is how early stopping is implemented).
    """
    epochs = epochs or self.max_epochs
    X_train, X_valid, y_train, y_valid = self.train_split(X, y, self)

    # Normalize each callback hook to a list of callables.
    on_batch_finished = self.on_batch_finished
    if not isinstance(on_batch_finished, (list, tuple)):
        on_batch_finished = [on_batch_finished]

    on_epoch_finished = self.on_epoch_finished
    if not isinstance(on_epoch_finished, (list, tuple)):
        on_epoch_finished = [on_epoch_finished]

    on_training_started = self.on_training_started
    if not isinstance(on_training_started, (list, tuple)):
        on_training_started = [on_training_started]

    on_training_finished = self.on_training_finished
    if not isinstance(on_training_finished, (list, tuple)):
        on_training_finished = [on_training_finished]

    epoch = 0
    # Resume best-loss bookkeeping from any previous training run
    # (train_history_ persists across fit calls).
    best_valid_loss = (
        min([row['valid_loss'] for row in self.train_history_]) if
        self.train_history_ else np.inf
        )
    best_train_loss = (
        min([row['train_loss'] for row in self.train_history_]) if
        self.train_history_ else np.inf
        )
    for func in on_training_started:
        func(self, self.train_history_)

    num_epochs_past = len(self.train_history_)

    while epoch < epochs:
        epoch += 1

        train_outputs = []
        valid_outputs = []
        if self.custom_scores:
            custom_scores = [[] for _ in self.custom_scores]
        else:
            custom_scores = []

        t0 = time()

        # --- training pass over all mini-batches ---
        batch_train_sizes = []
        for Xb, yb in self.batch_iterator_train(X_train, y_train):
            train_outputs.append(
                self.apply_batch_func(self.train_iter_, Xb, yb))
            batch_train_sizes.append(len(Xb))
            for func in on_batch_finished:
                func(self, self.train_history_)

        # --- validation pass ---
        batch_valid_sizes = []
        for Xb, yb in self.batch_iterator_test(X_valid, y_valid):
            valid_outputs.append(
                self.apply_batch_func(self.eval_iter_, Xb, yb))
            batch_valid_sizes.append(len(Xb))

            if self.custom_scores:
                # Custom scorers get the raw predicted output.
                y_prob = self.apply_batch_func(self.predict_iter_, Xb)
                y_prob = y_prob[0] if len(y_prob) == 1 else y_prob
                for custom_scorer, custom_score in zip(
                        self.custom_scores, custom_scores):
                    custom_score.append(custom_scorer[1](yb, y_prob))

        # Average each output column over batches, weighted by batch
        # size (the last batch may be smaller than the rest).
        train_outputs = np.array(train_outputs, dtype=object).T
        train_outputs = [
            np.average(
                [np.mean(row) for row in col],
                weights=batch_train_sizes,
                )
            for col in train_outputs
            ]

        if valid_outputs:
            valid_outputs = np.array(valid_outputs, dtype=object).T
            valid_outputs = [
                np.average(
                    [np.mean(row) for row in col],
                    weights=batch_valid_sizes,
                    )
                for col in valid_outputs
                ]

        if custom_scores:
            avg_custom_scores = np.average(
                custom_scores, weights=batch_valid_sizes, axis=1)

        if train_outputs[0] < best_train_loss:
            best_train_loss = train_outputs[0]
        if valid_outputs and valid_outputs[0] < best_valid_loss:
            best_valid_loss = valid_outputs[0]

        info = {
            'epoch': num_epochs_past + epoch,
            'train_loss': train_outputs[0],
            'train_loss_best': best_train_loss == train_outputs[0],
            'valid_loss': valid_outputs[0]
            if valid_outputs else np.nan,
            'valid_loss_best': best_valid_loss == valid_outputs[0]
            if valid_outputs else np.nan,
            'valid_accuracy': valid_outputs[1]
            if valid_outputs else np.nan,
            'dur': time() - t0,
            }
        if self.custom_scores:
            for index, custom_score in enumerate(self.custom_scores):
                info[custom_score[0]] = avg_custom_scores[index]
        if self.scores_train:
            # train_iter_ outputs: [loss, score0, score1, ...]
            for index, (name, func) in enumerate(self.scores_train):
                info[name] = train_outputs[index + 1]
        if self.scores_valid:
            # eval_iter_ outputs: [loss, accuracy, score0, ...];
            # '+ 2' skips the loss and accuracy slots.
            for index, (name, func) in enumerate(self.scores_valid):
                info[name] = valid_outputs[index + 2]
        self.train_history_.append(info)

        try:
            for func in on_epoch_finished:
                func(self, self.train_history_)
        except StopIteration:
            # Early stopping: a callback decided training is done.
            break

    for func in on_training_finished:
        func(self, self.train_history_)
@staticmethod
def apply_batch_func(func, Xb, yb=None):
if isinstance(Xb, dict):
kwargs = dict(Xb)
if yb is not None:
kwargs['y'] = yb
return func(**kwargs)
else:
return func(Xb) if yb is None else func(Xb, yb)
def predict_proba(self, X):
    """Run the forward pass over ``X`` in mini-batches and return
    the vertically stacked network output(s).

    Returns a single array for single-output networks, or a tuple
    of arrays when the net has multiple output layers.
    """
    batch_outputs = [
        self.apply_batch_func(self.predict_iter_, Xb)
        for Xb, _ in self.batch_iterator_test(X)
    ]
    stacked = tuple(np.vstack(parts) for parts in zip(*batch_outputs))
    return stacked if len(stacked) > 1 else stacked[0]
def predict(self, X):
    """Predict targets for ``X``.

    Regression networks return the raw forward-pass output;
    classification networks return argmax class indices, decoded
    back into the original labels when a label encoder is in use.
    """
    probabilities = self.predict_proba(X)
    if self.regression:
        return probabilities
    labels = np.argmax(probabilities, axis=1)
    if self.use_label_encoder:
        labels = self.enc_.inverse_transform(labels)
    return labels
def get_output(self, layer, X):
    """Return the activations of *layer* for input ``X``.

    *layer* may be a layer instance or its name in :attr:`layers_`.
    Activations are computed batch-wise and stacked vertically.
    The compiled Theano function for each layer is memoized on the
    instance in ``_get_output_fn_cache``.

    NOTE(review): the input variable is taken from
    ``self.layers_[0]``, i.e. this appears to assume a single-input
    network — confirm behavior for multi-input nets.
    """
    if isinstance(layer, basestring):
        layer = self.layers_[layer]

    fn_cache = getattr(self, '_get_output_fn_cache', None)
    if fn_cache is None:
        fn_cache = {}
        self._get_output_fn_cache = fn_cache

    if layer not in fn_cache:
        # Compile (once) a function from the network input to this
        # layer's output.
        xs = self.layers_[0].input_var.type()
        get_activity = theano.function([xs], get_output(layer, xs))
        fn_cache[layer] = get_activity
    else:
        get_activity = fn_cache[layer]

    outputs = []
    for Xb, yb in self.batch_iterator_test(X):
        outputs.append(get_activity(Xb))
    return np.vstack(outputs)
def score(self, X, y):
    """Score the model on ``(X, y)``: R^2 for regression networks,
    accuracy for classification networks.

    The scikit-learn metrics take ``(y_true, y_pred)``; passing
    them reversed (as an earlier revision did) silently changes the
    R^2 value, since R^2 — unlike accuracy — is not symmetric in
    its arguments.
    """
    score = r2_score if self.regression else accuracy_score
    return float(score(y, self.predict(X)))
def get_all_layers(self):
    """Return all registered lasagne layers (the values of
    :attr:`layers_`, in insertion order)."""
    return self.layers_.values()
def get_all_params(self, **kwargs):
    """Return the de-duplicated parameters of all layers.

    Keyword arguments (e.g. ``trainable=True``) are forwarded to
    each layer's ``get_params``.
    """
    layers = self.get_all_layers()
    # chain.from_iterable flattens in O(n); the previous
    # sum(..., []) re-copied the accumulator list at every step,
    # which is quadratic in the number of layers.
    params = list(itertools.chain.from_iterable(
        l.get_params(**kwargs) for l in layers))
    return unique(params)
def get_all_params_values(self):
    """Return an ``OrderedDict`` mapping each layer name to the
    list of that layer's current parameter values."""
    return OrderedDict(
        (layer_name, [param.get_value() for param in layer.get_params()])
        for layer_name, layer in self.layers_.items())
def load_params_from(self, source):
    """Copy parameter values into this network's layers.

    *source* may be a filename of a pickled parameter dict, another
    :class:`NeuralNet`, or a dict as returned by
    :meth:`get_all_params_values`.  Parameters are matched by layer
    name; entries whose shapes do not match the target parameter are
    skipped (and reported when :attr:`verbose` is set).
    """
    self.initialize()

    if isinstance(source, basestring):
        # A path was given: unpickle the parameter dict from disk.
        with open(source, 'rb') as f:
            source = pickle.load(f)

    if isinstance(source, NeuralNet):
        source = source.get_all_params_values()

    success = "Loaded parameters to layer '{}' (shape {})."
    failure = ("Could not load parameters to layer '{}' because "
               "shapes did not match: {} vs {}.")

    for key, values in source.items():
        # Layers present in 'source' but absent here are ignored.
        layer = self.layers_.get(key)
        if layer is not None:
            for p1, p2v in zip(layer.get_params(), values):
                shape1 = p1.get_value().shape
                shape2 = p2v.shape
                shape1s = 'x'.join(map(str, shape1))
                shape2s = 'x'.join(map(str, shape2))
                if shape1 == shape2:
                    p1.set_value(p2v)
                    if self.verbose:
                        # NOTE(review): 'success' has only two
                        # placeholders; the third argument is
                        # silently ignored by str.format.
                        print(success.format(
                            key, shape1s, shape2s))
                else:
                    if self.verbose:
                        print(failure.format(
                            key, shape1s, shape2s))
def save_params_to(self, fname):
    """Pickle all current parameter values to *fname* using the
    highest available pickle protocol."""
    values = self.get_all_params_values()
    with open(fname, 'wb') as handle:
        pickle.dump(values, handle, -1)
def load_weights_from(self, source):
    """Deprecated alias for :meth:`load_params_from`.

    Emits a deprecation warning and rejects the old list-of-values
    input format before delegating.
    """
    warn("The 'load_weights_from' method will be removed in nolearn 0.6. "
         "Please use 'load_params_from' instead.")
    if isinstance(source, list):
        raise ValueError(
            "Loading weights from a list of parameter values is no "
            "longer supported. Please send me something like the "
            "return value of 'net.get_all_params_values()' instead.")
    return self.load_params_from(source)
def save_weights_to(self, fname):
    """Deprecated alias for :meth:`save_params_to`.

    Emits a deprecation warning, then delegates.
    """
    warn("The 'save_weights_to' method will be removed in nolearn 0.6. "
         "Please use 'save_params_to' instead.")
    return self.save_params_to(fname)
def __setstate__(self, state):  # BBB for pickles that don't have the graph
    """Restore pickled state, then rebuild the compiled Theano
    functions (which cannot themselves be pickled)."""
    self.__dict__.update(state)
    self.initialize()
def get_params(self, deep=True):
    """Return this estimator's parameters (scikit-learn API)."""
    params = super(NeuralNet, self).get_params(deep=deep)

    # Incidentally, Lasagne layers have a 'get_params' too, which
    # for sklearn's 'clone' means it would treat it in a special
    # way when cloning. Wrapping the list of layers in a custom
    # list type does the trick here, but of course it's crazy:
    params['layers'] = _list(params['layers'])

    return _dict(params)
def _get_param_names(self):
    """Extend sklearn's introspected constructor argument names with
    the extra ``**kwargs`` captured at ``__init__`` time, so that
    '<component>_<param>' kwargs survive clone() and grid search."""
    # This allows us to have **kwargs in __init__ (woot!):
    param_names = super(NeuralNet, self)._get_param_names()
    return param_names + self._kwarg_keys
|
dnouri/nolearn
|
nolearn/lasagne/base.py
|
NeuralNet.initialize_layers
|
python
|
def initialize_layers(self, layers=None):
if layers is not None:
self.layers = layers
self.layers_ = Layers()
#If a Layer, or a list of Layers was passed in
if isinstance(self.layers[0], Layer):
for out_layer in self.layers:
for i, layer in enumerate(get_all_layers(out_layer)):
if layer not in self.layers_.values():
name = layer.name or self._layer_name(layer.__class__, i)
self.layers_[name] = layer
if self._get_params_for(name) != {}:
raise ValueError(
"You can't use keyword params when passing a Lasagne "
"instance object as the 'layers' parameter of "
"'NeuralNet'."
)
self._output_layers = self.layers
return self.layers
# 'self.layers' are a list of '(Layer class, kwargs)', so
# we'll have to actually instantiate the layers given the
# arguments:
layer = None
for i, layer_def in enumerate(self.layers):
if isinstance(layer_def[1], dict):
# Newer format: (Layer, {'layer': 'kwargs'})
layer_factory, layer_kw = layer_def
layer_kw = layer_kw.copy()
else:
# The legacy format: ('name', Layer)
layer_name, layer_factory = layer_def
layer_kw = {'name': layer_name}
if isinstance(layer_factory, str):
layer_factory = locate(layer_factory)
assert layer_factory is not None
if 'name' not in layer_kw:
layer_kw['name'] = self._layer_name(layer_factory, i)
more_params = self._get_params_for(layer_kw['name'])
layer_kw.update(more_params)
if layer_kw['name'] in self.layers_:
raise ValueError(
"Two layers with name {}.".format(layer_kw['name']))
# Any layers that aren't subclasses of InputLayer are
# assumed to require an 'incoming' paramter. By default,
# we'll use the previous layer as input:
try:
is_input_layer = issubclass(layer_factory, InputLayer)
except TypeError:
is_input_layer = False
if not is_input_layer:
if 'incoming' in layer_kw:
layer_kw['incoming'] = self.layers_[
layer_kw['incoming']]
elif 'incomings' in layer_kw:
layer_kw['incomings'] = [
self.layers_[name] for name in layer_kw['incomings']]
else:
layer_kw['incoming'] = layer
# Deal with additional string parameters that may
# reference other layers; currently only 'mask_input'.
for param in self.layer_reference_params:
if param in layer_kw:
val = layer_kw[param]
if isinstance(val, basestring):
layer_kw[param] = self.layers_[val]
for attr in ('W', 'b'):
if isinstance(layer_kw.get(attr), str):
name = layer_kw[attr]
layer_kw[attr] = getattr(self.layers_[name], attr, None)
try:
layer_wrapper = layer_kw.pop('layer_wrapper', None)
layer = layer_factory(**layer_kw)
except TypeError as e:
msg = ("Failed to instantiate {} with args {}.\n"
"Maybe parameter names have changed?".format(
layer_factory, layer_kw))
chain_exception(TypeError(msg), e)
self.layers_[layer_kw['name']] = layer
if layer_wrapper is not None:
layer = layer_wrapper(layer)
self.layers_["LW_%s" % layer_kw['name']] = layer
self._output_layers = [layer]
return [layer]
|
Sets up the Lasagne layers
:param layers: The dictionary of layers, or a
:class:`lasagne.Layers` instance, describing the underlying
network
:return: the output layer of the underlying lasagne network.
:seealso: :ref:`layer-def`
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/base.py#L512-L616
|
[
"def chain_exception(exc1, exc2):\n exec(\"raise exc1 from exc2\")\n",
"def values(self):\n return list(super(Layers, self).values())\n"
] |
class NeuralNet(BaseEstimator):
"""A configurable Neural Network estimator based on Lasagne.
Compatible with scikit-learn estimators.
Attributes
----------
train_history_:
A list of network training info for each epoch.
Each index contains a dictionary with the following keys
* epoch - The epoch number
* train_loss_best - True if this epoch had the best training loss so far
* valid_loss_best - True if this epoch had the best validation loss so far
* train_loss - The training loss for this epoch
* valid_loss - The validation loss for this epoch
* valid_accuracy - The validation accuracy for this epoch
layers_: A dictionary of lasagne layers keyed by the layer's name, or the layer's index
layer_reference_params:
A list of Lasagne layer parameter names that may reference
other layers, excluding 'incoming' and 'incomings'.
"""
layer_reference_params = ['mask_input']
def __init__(
self,
layers,
update=nesterov_momentum,
loss=None, # BBB
objective=objective,
objective_loss_function=None,
batch_iterator_train=BatchIterator(batch_size=128),
batch_iterator_test=BatchIterator(batch_size=128),
regression=False,
max_epochs=100,
train_split=TrainSplit(eval_size=0.2),
custom_scores=None,
scores_train=None,
scores_valid=None,
X_tensor_type=None,
y_tensor_type=None,
use_label_encoder=False,
on_batch_finished=None,
on_epoch_finished=None,
on_training_started=None,
on_training_finished=None,
more_params=None,
check_input=True,
verbose=0,
**kwargs
):
"""
Initialize a Neural Network
Parameters
----------
layers:
A list of lasagne layers to compose into the final neural net.
See :ref:`layer-def`
update:
The update function to use when training. Uses the form
provided by the :mod:`lasagne.updates` implementations.
objective:
The objective function to use when training. The callable
will be passed the NeuralNetwork's :attr:`.layers_`
attribute as the first argument, and the output target as
the second argument.
max_epochs:
The number of epochs to train. This is used as the
default when calling the :meth:`.fit` method without an
epochs argument.
Other Parameters
----------------
batch_iterator_train:
The sample iterator to use while training the network.
batch_iterator_test:
The sample Iterator to use while testing and validating
the network.
regression:
Whether or not this is a regressor network. Determines
the default objective and scoring functions.
train_split:
The method used to separate training and validation
samples. See :class:`TrainSplit` for the default
implementation.
y_tensor_type:
The type of tensor to use to hold the network's output.
Typically ``T.ivector`` (the default) for classification
tasks.
on_training_started, on_batch_finished, on_epoch_finished,
on_training_finished:
A list of functions which are called during training at
the corresponding times.
The functions will be passed the NeuralNet as the first
parameter and its :attr:`.train_history_` attribute as the
second parameter.
custom_scores:
A list of callable custom scoring functions.
The functions will be passed the expected y values as the
first argument, and the predicted y_values as the second
argument.
use_label_encoder:
If true, all y_values will be encoded using a
:class:`sklearn.preprocessing.LabelEncoder` instance.
verbose:
The verbosity level of the network.
Any non-zero value will cause the network to print the
layer info at the start of training, as well as print a
log of the training history after each epoch. Larger
values will increase the amount of info shown.
more_params:
A set of more parameters to use when initializing layers
defined using the dictionary method.
Note
----
* Extra arguments can be passed to the call to the *update*
function by prepending the string ``update_`` to the
corresponding argument name,
e.g. ``update_learning_rate=0.01`` will define the
``learning_rate`` parameter of the update function.
* Extra arguments can be provided to the objective call
through the Neural Network by prepending the string
``objective_`` to the corresponding argument name.
"""
if loss is not None:
raise ValueError(
"The 'loss' parameter was removed, please use "
"'objective_loss_function' instead.") # BBB
if hasattr(objective, 'get_loss'):
raise ValueError(
"The 'Objective' class is no longer supported, please "
"use 'nolearn.lasagne.objective' or similar.") # BBB
if objective_loss_function is None:
objective_loss_function = (
squared_error if regression else categorical_crossentropy)
if hasattr(self, 'train_test_split'): # BBB
warn("The 'train_test_split' method has been deprecated, please "
"use the 'train_split' parameter instead.")
train_split = LegacyTrainTestSplit(
eval_size=kwargs.pop('eval_size', 0.2))
if 'eval_size' in kwargs: # BBB
warn("The 'eval_size' argument has been deprecated, please use "
"the 'train_split' parameter instead, e.g.\n"
"train_split=TrainSplit(eval_size=0.4)")
train_split.eval_size = kwargs.pop('eval_size')
if y_tensor_type is None:
if regression:
y_tensor_type = T.TensorType(
theano.config.floatX, (False, False))
else:
y_tensor_type = T.ivector
if X_tensor_type is not None:
raise ValueError(
"The 'X_tensor_type' parameter has been removed. "
"It's unnecessary.") # BBB
if 'custom_score' in kwargs:
warn("The 'custom_score' argument has been deprecated, please use "
"the 'custom_scores' parameter instead, which is just "
"a list of custom scores e.g.\n"
"custom_scores=[('first output', lambda y1, y2: abs(y1[0,0]-y2[0,0])), ('second output', lambda y1,y2: abs(y1[0,1]-y2[0,1]))]")
# add it to custom_scores
if custom_scores is None:
custom_scores = [kwargs.pop('custom_score')]
else:
custom_scores.append(kwargs.pop('custom_score'))
if isinstance(layers, Layer):
layers = _list([layers])
elif isinstance(layers, Iterable):
layers = _list(layers)
self.layers = layers
self.update = update
self.objective = objective
self.objective_loss_function = objective_loss_function
self.batch_iterator_train = batch_iterator_train
self.batch_iterator_test = batch_iterator_test
self.regression = regression
self.max_epochs = max_epochs
self.train_split = train_split
self.custom_scores = custom_scores
self.scores_train = scores_train or []
self.scores_valid = scores_valid or []
self.y_tensor_type = y_tensor_type
self.use_label_encoder = use_label_encoder
self.on_batch_finished = on_batch_finished or []
self.on_epoch_finished = on_epoch_finished or []
self.on_training_started = on_training_started or []
self.on_training_finished = on_training_finished or []
self.more_params = more_params or {}
self.check_input = check_input
self.verbose = verbose
if self.verbose:
# XXX: PrintLog should come before any other handlers,
# because early stopping will otherwise cause the last
# line not to be printed
self.on_epoch_finished.append(PrintLog())
self.on_training_started.append(PrintLayerInfo())
for key in kwargs.keys():
assert not hasattr(self, key)
vars(self).update(kwargs)
self._kwarg_keys = list(kwargs.keys())
self.train_history_ = []
if 'batch_iterator' in kwargs: # BBB
raise ValueError(
"The 'batch_iterator' argument has been replaced. "
"Use 'batch_iterator_train' and 'batch_iterator_test' instead."
)
def _check_for_unused_kwargs(self):
names = self.layers_.keys() + ['update', 'objective']
for k in self._kwarg_keys:
for n in names:
prefix = '{}_'.format(n)
if k.startswith(prefix):
break
else:
raise ValueError("Unused kwarg: {}".format(k))
def _check_good_input(self, X, y=None):
if isinstance(X, dict):
lengths = [len(X1) for X1 in X.values()]
if len(set(lengths)) > 1:
raise ValueError("Not all values of X are of equal length.")
x_len = lengths[0]
else:
x_len = len(X)
if y is not None:
if len(y) != x_len:
raise ValueError("X and y are not of equal length.")
if self.regression and y is not None and y.ndim == 1:
y = y.reshape(-1, 1)
return X, y
def initialize(self):
"""Initializes the network. Checks that no extra kwargs were
passed to the constructor, and compiles the train, predict,
and evaluation functions.
Subsequent calls to this function will return without any action.
"""
if getattr(self, '_initialized', False):
return
out = getattr(self, '_output_layers', None)
if out is None:
self.initialize_layers()
self._check_for_unused_kwargs()
iter_funcs = self._create_iter_funcs(
self.layers_, self.objective, self.update,
self.y_tensor_type,
)
self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs
self._initialized = True
def _get_params_for(self, name):
collected = {}
prefix = '{}_'.format(name)
params = vars(self)
more_params = self.more_params
for key, value in itertools.chain(params.items(), more_params.items()):
if key.startswith(prefix):
collected[key[len(prefix):]] = value
return collected
def _layer_name(self, layer_class, index):
return "{}{}".format(
layer_class.__name__.lower().replace("layer", ""), index)
def _create_iter_funcs(self, layers, objective, update, output_type):
y_batch = output_type('y_batch')
objective_kw = self._get_params_for('objective')
loss_train = objective(
layers, target=y_batch, **objective_kw)
loss_eval = objective(
layers, target=y_batch, deterministic=True, **objective_kw)
output_layer = self._output_layers
predict_proba = get_output(output_layer, None, deterministic=True)
if not self.regression:
predict = predict_proba[0].argmax(axis=1)
accuracy = T.mean(T.eq(predict, y_batch))
else:
accuracy = loss_eval
scores_train = [
s[1](predict_proba, y_batch) for s in self.scores_train]
scores_valid = [
s[1](predict_proba, y_batch) for s in self.scores_valid]
all_params = self.get_all_params(trainable=True)
grads = theano.grad(loss_train, all_params)
for idx, param in enumerate(all_params):
grad_scale = getattr(param.tag, 'grad_scale', 1)
if grad_scale != 1:
grads[idx] *= grad_scale
update_params = self._get_params_for('update')
updates = update(grads, all_params, **update_params)
input_layers = [layer for layer in layers.values()
if isinstance(layer, InputLayer)]
X_inputs = [theano.In(input_layer.input_var, name=input_layer.name)
for input_layer in input_layers]
inputs = X_inputs + [theano.In(y_batch, name="y")]
train_iter = theano.function(
inputs=inputs,
outputs=[loss_train] + scores_train,
updates=updates,
allow_input_downcast=True,
on_unused_input='ignore',
)
eval_iter = theano.function(
inputs=inputs,
outputs=[loss_eval, accuracy] + scores_valid,
allow_input_downcast=True,
on_unused_input='ignore',
)
predict_iter = theano.function(
inputs=X_inputs,
outputs=predict_proba,
allow_input_downcast=True,
on_unused_input='ignore',
)
return train_iter, eval_iter, predict_iter
def fit(self, X, y, epochs=None):
"""
Runs the training loop for a given number of epochs
:param X: The input data
:param y: The ground truth
:param epochs: The number of epochs to run, if `None` runs for the
network's :attr:`max_epochs`
:return: This instance
"""
if self.check_input:
X, y = self._check_good_input(X, y)
if self.use_label_encoder:
self.enc_ = LabelEncoder()
y = self.enc_.fit_transform(y).astype(np.int32)
self.classes_ = self.enc_.classes_
self.initialize()
try:
self.train_loop(X, y, epochs=epochs)
except KeyboardInterrupt:
pass
return self
def partial_fit(self, X, y, classes=None):
"""
Runs a single epoch using the provided data
:return: This instance
"""
return self.fit(X, y, epochs=1)
def train_loop(self, X, y, epochs=None):
epochs = epochs or self.max_epochs
X_train, X_valid, y_train, y_valid = self.train_split(X, y, self)
on_batch_finished = self.on_batch_finished
if not isinstance(on_batch_finished, (list, tuple)):
on_batch_finished = [on_batch_finished]
on_epoch_finished = self.on_epoch_finished
if not isinstance(on_epoch_finished, (list, tuple)):
on_epoch_finished = [on_epoch_finished]
on_training_started = self.on_training_started
if not isinstance(on_training_started, (list, tuple)):
on_training_started = [on_training_started]
on_training_finished = self.on_training_finished
if not isinstance(on_training_finished, (list, tuple)):
on_training_finished = [on_training_finished]
epoch = 0
best_valid_loss = (
min([row['valid_loss'] for row in self.train_history_]) if
self.train_history_ else np.inf
)
best_train_loss = (
min([row['train_loss'] for row in self.train_history_]) if
self.train_history_ else np.inf
)
for func in on_training_started:
func(self, self.train_history_)
num_epochs_past = len(self.train_history_)
while epoch < epochs:
epoch += 1
train_outputs = []
valid_outputs = []
if self.custom_scores:
custom_scores = [[] for _ in self.custom_scores]
else:
custom_scores = []
t0 = time()
batch_train_sizes = []
for Xb, yb in self.batch_iterator_train(X_train, y_train):
train_outputs.append(
self.apply_batch_func(self.train_iter_, Xb, yb))
batch_train_sizes.append(len(Xb))
for func in on_batch_finished:
func(self, self.train_history_)
batch_valid_sizes = []
for Xb, yb in self.batch_iterator_test(X_valid, y_valid):
valid_outputs.append(
self.apply_batch_func(self.eval_iter_, Xb, yb))
batch_valid_sizes.append(len(Xb))
if self.custom_scores:
y_prob = self.apply_batch_func(self.predict_iter_, Xb)
y_prob = y_prob[0] if len(y_prob) == 1 else y_prob
for custom_scorer, custom_score in zip(
self.custom_scores, custom_scores):
custom_score.append(custom_scorer[1](yb, y_prob))
train_outputs = np.array(train_outputs, dtype=object).T
train_outputs = [
np.average(
[np.mean(row) for row in col],
weights=batch_train_sizes,
)
for col in train_outputs
]
if valid_outputs:
valid_outputs = np.array(valid_outputs, dtype=object).T
valid_outputs = [
np.average(
[np.mean(row) for row in col],
weights=batch_valid_sizes,
)
for col in valid_outputs
]
if custom_scores:
avg_custom_scores = np.average(
custom_scores, weights=batch_valid_sizes, axis=1)
if train_outputs[0] < best_train_loss:
best_train_loss = train_outputs[0]
if valid_outputs and valid_outputs[0] < best_valid_loss:
best_valid_loss = valid_outputs[0]
info = {
'epoch': num_epochs_past + epoch,
'train_loss': train_outputs[0],
'train_loss_best': best_train_loss == train_outputs[0],
'valid_loss': valid_outputs[0]
if valid_outputs else np.nan,
'valid_loss_best': best_valid_loss == valid_outputs[0]
if valid_outputs else np.nan,
'valid_accuracy': valid_outputs[1]
if valid_outputs else np.nan,
'dur': time() - t0,
}
if self.custom_scores:
for index, custom_score in enumerate(self.custom_scores):
info[custom_score[0]] = avg_custom_scores[index]
if self.scores_train:
for index, (name, func) in enumerate(self.scores_train):
info[name] = train_outputs[index + 1]
if self.scores_valid:
for index, (name, func) in enumerate(self.scores_valid):
info[name] = valid_outputs[index + 2]
self.train_history_.append(info)
try:
for func in on_epoch_finished:
func(self, self.train_history_)
except StopIteration:
break
for func in on_training_finished:
func(self, self.train_history_)
@staticmethod
def apply_batch_func(func, Xb, yb=None):
if isinstance(Xb, dict):
kwargs = dict(Xb)
if yb is not None:
kwargs['y'] = yb
return func(**kwargs)
else:
return func(Xb) if yb is None else func(Xb, yb)
def predict_proba(self, X):
probas = []
for Xb, yb in self.batch_iterator_test(X):
probas.append(self.apply_batch_func(self.predict_iter_, Xb))
output = tuple(np.vstack(o) for o in zip(*probas))
return output if len(output) > 1 else output[0]
def predict(self, X):
if self.regression:
return self.predict_proba(X)
else:
y_pred = np.argmax(self.predict_proba(X), axis=1)
if self.use_label_encoder:
y_pred = self.enc_.inverse_transform(y_pred)
return y_pred
def get_output(self, layer, X):
if isinstance(layer, basestring):
layer = self.layers_[layer]
fn_cache = getattr(self, '_get_output_fn_cache', None)
if fn_cache is None:
fn_cache = {}
self._get_output_fn_cache = fn_cache
if layer not in fn_cache:
xs = self.layers_[0].input_var.type()
get_activity = theano.function([xs], get_output(layer, xs))
fn_cache[layer] = get_activity
else:
get_activity = fn_cache[layer]
outputs = []
for Xb, yb in self.batch_iterator_test(X):
outputs.append(get_activity(Xb))
return np.vstack(outputs)
def score(self, X, y):
score = r2_score if self.regression else accuracy_score
return float(score(self.predict(X), y))
def get_all_layers(self):
return self.layers_.values()
def get_all_params(self, **kwargs):
layers = self.get_all_layers()
params = sum([l.get_params(**kwargs) for l in layers], [])
return unique(params)
def get_all_params_values(self):
return_value = OrderedDict()
for name, layer in self.layers_.items():
return_value[name] = [p.get_value() for p in layer.get_params()]
return return_value
def load_params_from(self, source):
self.initialize()
if isinstance(source, basestring):
with open(source, 'rb') as f:
source = pickle.load(f)
if isinstance(source, NeuralNet):
source = source.get_all_params_values()
success = "Loaded parameters to layer '{}' (shape {})."
failure = ("Could not load parameters to layer '{}' because "
"shapes did not match: {} vs {}.")
for key, values in source.items():
layer = self.layers_.get(key)
if layer is not None:
for p1, p2v in zip(layer.get_params(), values):
shape1 = p1.get_value().shape
shape2 = p2v.shape
shape1s = 'x'.join(map(str, shape1))
shape2s = 'x'.join(map(str, shape2))
if shape1 == shape2:
p1.set_value(p2v)
if self.verbose:
print(success.format(
key, shape1s, shape2s))
else:
if self.verbose:
print(failure.format(
key, shape1s, shape2s))
def save_params_to(self, fname):
params = self.get_all_params_values()
with open(fname, 'wb') as f:
pickle.dump(params, f, -1)
def load_weights_from(self, source):
warn("The 'load_weights_from' method will be removed in nolearn 0.6. "
"Please use 'load_params_from' instead.")
if isinstance(source, list):
raise ValueError(
"Loading weights from a list of parameter values is no "
"longer supported. Please send me something like the "
"return value of 'net.get_all_params_values()' instead.")
return self.load_params_from(source)
def save_weights_to(self, fname):
warn("The 'save_weights_to' method will be removed in nolearn 0.6. "
"Please use 'save_params_to' instead.")
return self.save_params_to(fname)
def __setstate__(self, state): # BBB for pickles that don't have the graph
self.__dict__.update(state)
self.initialize()
def get_params(self, deep=True):
params = super(NeuralNet, self).get_params(deep=deep)
# Incidentally, Lasagne layers have a 'get_params' too, which
# for sklearn's 'clone' means it would treat it in a special
# way when cloning. Wrapping the list of layers in a custom
# list type does the trick here, but of course it's crazy:
params['layers'] = _list(params['layers'])
return _dict(params)
def _get_param_names(self):
# This allows us to have **kwargs in __init__ (woot!):
param_names = super(NeuralNet, self)._get_param_names()
return param_names + self._kwarg_keys
|
dnouri/nolearn
|
nolearn/lasagne/base.py
|
NeuralNet.fit
|
python
|
def fit(self, X, y, epochs=None):
if self.check_input:
X, y = self._check_good_input(X, y)
if self.use_label_encoder:
self.enc_ = LabelEncoder()
y = self.enc_.fit_transform(y).astype(np.int32)
self.classes_ = self.enc_.classes_
self.initialize()
try:
self.train_loop(X, y, epochs=epochs)
except KeyboardInterrupt:
pass
return self
|
Runs the training loop for a given number of epochs
:param X: The input data
:param y: The ground truth
:param epochs: The number of epochs to run, if `None` runs for the
network's :attr:`max_epochs`
:return: This instance
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/base.py#L680-L703
| null |
class NeuralNet(BaseEstimator):
"""A configurable Neural Network estimator based on Lasagne.
Compatible with scikit-learn estimators.
Attributes
----------
train_history_:
A list of network training info for each epoch.
Each index contains a dictionary with the following keys
* epoch - The epoch number
* train_loss_best - True if this epoch had the best training loss so far
* valid_loss_best - True if this epoch had the best validation loss so far
* train_loss - The training loss for this epoch
* valid_loss - The validation loss for this epoch
* valid_accuracy - The validation accuracy for this epoch
layers_: A dictionary of lasagne layers keyed by the layer's name, or the layer's index
layer_reference_params:
A list of Lasagne layer parameter names that may reference
other layers, excluding 'incoming' and 'incomings'.
"""
layer_reference_params = ['mask_input']
def __init__(
self,
layers,
update=nesterov_momentum,
loss=None, # BBB
objective=objective,
objective_loss_function=None,
batch_iterator_train=BatchIterator(batch_size=128),
batch_iterator_test=BatchIterator(batch_size=128),
regression=False,
max_epochs=100,
train_split=TrainSplit(eval_size=0.2),
custom_scores=None,
scores_train=None,
scores_valid=None,
X_tensor_type=None,
y_tensor_type=None,
use_label_encoder=False,
on_batch_finished=None,
on_epoch_finished=None,
on_training_started=None,
on_training_finished=None,
more_params=None,
check_input=True,
verbose=0,
**kwargs
):
"""
Initialize a Neural Network
Parameters
----------
layers:
A list of lasagne layers to compose into the final neural net.
See :ref:`layer-def`
update:
The update function to use when training. Uses the form
provided by the :mod:`lasagne.updates` implementations.
objective:
The objective function to use when training. The callable
will be passed the NeuralNetwork's :attr:`.layers_`
attribute as the first argument, and the output target as
the second argument.
max_epochs:
The number of epochs to train. This is used as the
default when calling the :meth:`.fit` method without an
epochs argument.
Other Parameters
----------------
batch_iterator_train:
The sample iterator to use while training the network.
batch_iterator_test:
The sample Iterator to use while testing and validating
the network.
regression:
Whether or not this is a regressor network. Determines
the default objective and scoring functions.
train_split:
The method used to separate training and validation
samples. See :class:`TrainSplit` for the default
implementation.
y_tensor_type:
The type of tensor to use to hold the network's output.
Typically ``T.ivector`` (the default) for classification
tasks.
on_training_started, on_batch_finished, on_epoch_finished,
on_training_finished:
A list of functions which are called during training at
the corresponding times.
The functions will be passed the NeuralNet as the first
parameter and its :attr:`.train_history_` attribute as the
second parameter.
custom_scores:
A list of callable custom scoring functions.
The functions will be passed the expected y values as the
first argument, and the predicted y_values as the second
argument.
use_label_encoder:
If true, all y_values will be encoded using a
:class:`sklearn.preprocessing.LabelEncoder` instance.
verbose:
The verbosity level of the network.
Any non-zero value will cause the network to print the
layer info at the start of training, as well as print a
log of the training history after each epoch. Larger
values will increase the amount of info shown.
more_params:
A set of more parameters to use when initializing layers
defined using the dictionary method.
Note
----
* Extra arguments can be passed to the call to the *update*
function by prepending the string ``update_`` to the
corresponding argument name,
e.g. ``update_learning_rate=0.01`` will define the
``learning_rate`` parameter of the update function.
* Extra arguments can be provided to the objective call
through the Neural Network by prepending the string
``objective_`` to the corresponding argument name.
"""
if loss is not None:
raise ValueError(
"The 'loss' parameter was removed, please use "
"'objective_loss_function' instead.") # BBB
if hasattr(objective, 'get_loss'):
raise ValueError(
"The 'Objective' class is no longer supported, please "
"use 'nolearn.lasagne.objective' or similar.") # BBB
if objective_loss_function is None:
objective_loss_function = (
squared_error if regression else categorical_crossentropy)
if hasattr(self, 'train_test_split'): # BBB
warn("The 'train_test_split' method has been deprecated, please "
"use the 'train_split' parameter instead.")
train_split = LegacyTrainTestSplit(
eval_size=kwargs.pop('eval_size', 0.2))
if 'eval_size' in kwargs: # BBB
warn("The 'eval_size' argument has been deprecated, please use "
"the 'train_split' parameter instead, e.g.\n"
"train_split=TrainSplit(eval_size=0.4)")
train_split.eval_size = kwargs.pop('eval_size')
if y_tensor_type is None:
if regression:
y_tensor_type = T.TensorType(
theano.config.floatX, (False, False))
else:
y_tensor_type = T.ivector
if X_tensor_type is not None:
raise ValueError(
"The 'X_tensor_type' parameter has been removed. "
"It's unnecessary.") # BBB
if 'custom_score' in kwargs:
warn("The 'custom_score' argument has been deprecated, please use "
"the 'custom_scores' parameter instead, which is just "
"a list of custom scores e.g.\n"
"custom_scores=[('first output', lambda y1, y2: abs(y1[0,0]-y2[0,0])), ('second output', lambda y1,y2: abs(y1[0,1]-y2[0,1]))]")
# add it to custom_scores
if custom_scores is None:
custom_scores = [kwargs.pop('custom_score')]
else:
custom_scores.append(kwargs.pop('custom_score'))
if isinstance(layers, Layer):
layers = _list([layers])
elif isinstance(layers, Iterable):
layers = _list(layers)
self.layers = layers
self.update = update
self.objective = objective
self.objective_loss_function = objective_loss_function
self.batch_iterator_train = batch_iterator_train
self.batch_iterator_test = batch_iterator_test
self.regression = regression
self.max_epochs = max_epochs
self.train_split = train_split
self.custom_scores = custom_scores
self.scores_train = scores_train or []
self.scores_valid = scores_valid or []
self.y_tensor_type = y_tensor_type
self.use_label_encoder = use_label_encoder
self.on_batch_finished = on_batch_finished or []
self.on_epoch_finished = on_epoch_finished or []
self.on_training_started = on_training_started or []
self.on_training_finished = on_training_finished or []
self.more_params = more_params or {}
self.check_input = check_input
self.verbose = verbose
if self.verbose:
# XXX: PrintLog should come before any other handlers,
# because early stopping will otherwise cause the last
# line not to be printed
self.on_epoch_finished.append(PrintLog())
self.on_training_started.append(PrintLayerInfo())
for key in kwargs.keys():
assert not hasattr(self, key)
vars(self).update(kwargs)
self._kwarg_keys = list(kwargs.keys())
self.train_history_ = []
if 'batch_iterator' in kwargs: # BBB
raise ValueError(
"The 'batch_iterator' argument has been replaced. "
"Use 'batch_iterator_train' and 'batch_iterator_test' instead."
)
def _check_for_unused_kwargs(self):
names = self.layers_.keys() + ['update', 'objective']
for k in self._kwarg_keys:
for n in names:
prefix = '{}_'.format(n)
if k.startswith(prefix):
break
else:
raise ValueError("Unused kwarg: {}".format(k))
def _check_good_input(self, X, y=None):
if isinstance(X, dict):
lengths = [len(X1) for X1 in X.values()]
if len(set(lengths)) > 1:
raise ValueError("Not all values of X are of equal length.")
x_len = lengths[0]
else:
x_len = len(X)
if y is not None:
if len(y) != x_len:
raise ValueError("X and y are not of equal length.")
if self.regression and y is not None and y.ndim == 1:
y = y.reshape(-1, 1)
return X, y
def initialize(self):
"""Initializes the network. Checks that no extra kwargs were
passed to the constructor, and compiles the train, predict,
and evaluation functions.
Subsequent calls to this function will return without any action.
"""
if getattr(self, '_initialized', False):
return
out = getattr(self, '_output_layers', None)
if out is None:
self.initialize_layers()
self._check_for_unused_kwargs()
iter_funcs = self._create_iter_funcs(
self.layers_, self.objective, self.update,
self.y_tensor_type,
)
self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs
self._initialized = True
def _get_params_for(self, name):
collected = {}
prefix = '{}_'.format(name)
params = vars(self)
more_params = self.more_params
for key, value in itertools.chain(params.items(), more_params.items()):
if key.startswith(prefix):
collected[key[len(prefix):]] = value
return collected
def _layer_name(self, layer_class, index):
return "{}{}".format(
layer_class.__name__.lower().replace("layer", ""), index)
def initialize_layers(self, layers=None):
"""Sets up the Lasagne layers
:param layers: The dictionary of layers, or a
:class:`lasagne.Layers` instance, describing the underlying
network
:return: the output layer of the underlying lasagne network.
:seealso: :ref:`layer-def`
"""
if layers is not None:
self.layers = layers
self.layers_ = Layers()
#If a Layer, or a list of Layers was passed in
if isinstance(self.layers[0], Layer):
for out_layer in self.layers:
for i, layer in enumerate(get_all_layers(out_layer)):
if layer not in self.layers_.values():
name = layer.name or self._layer_name(layer.__class__, i)
self.layers_[name] = layer
if self._get_params_for(name) != {}:
raise ValueError(
"You can't use keyword params when passing a Lasagne "
"instance object as the 'layers' parameter of "
"'NeuralNet'."
)
self._output_layers = self.layers
return self.layers
# 'self.layers' are a list of '(Layer class, kwargs)', so
# we'll have to actually instantiate the layers given the
# arguments:
layer = None
for i, layer_def in enumerate(self.layers):
if isinstance(layer_def[1], dict):
# Newer format: (Layer, {'layer': 'kwargs'})
layer_factory, layer_kw = layer_def
layer_kw = layer_kw.copy()
else:
# The legacy format: ('name', Layer)
layer_name, layer_factory = layer_def
layer_kw = {'name': layer_name}
if isinstance(layer_factory, str):
layer_factory = locate(layer_factory)
assert layer_factory is not None
if 'name' not in layer_kw:
layer_kw['name'] = self._layer_name(layer_factory, i)
more_params = self._get_params_for(layer_kw['name'])
layer_kw.update(more_params)
if layer_kw['name'] in self.layers_:
raise ValueError(
"Two layers with name {}.".format(layer_kw['name']))
# Any layers that aren't subclasses of InputLayer are
# assumed to require an 'incoming' paramter. By default,
# we'll use the previous layer as input:
try:
is_input_layer = issubclass(layer_factory, InputLayer)
except TypeError:
is_input_layer = False
if not is_input_layer:
if 'incoming' in layer_kw:
layer_kw['incoming'] = self.layers_[
layer_kw['incoming']]
elif 'incomings' in layer_kw:
layer_kw['incomings'] = [
self.layers_[name] for name in layer_kw['incomings']]
else:
layer_kw['incoming'] = layer
# Deal with additional string parameters that may
# reference other layers; currently only 'mask_input'.
for param in self.layer_reference_params:
if param in layer_kw:
val = layer_kw[param]
if isinstance(val, basestring):
layer_kw[param] = self.layers_[val]
for attr in ('W', 'b'):
if isinstance(layer_kw.get(attr), str):
name = layer_kw[attr]
layer_kw[attr] = getattr(self.layers_[name], attr, None)
try:
layer_wrapper = layer_kw.pop('layer_wrapper', None)
layer = layer_factory(**layer_kw)
except TypeError as e:
msg = ("Failed to instantiate {} with args {}.\n"
"Maybe parameter names have changed?".format(
layer_factory, layer_kw))
chain_exception(TypeError(msg), e)
self.layers_[layer_kw['name']] = layer
if layer_wrapper is not None:
layer = layer_wrapper(layer)
self.layers_["LW_%s" % layer_kw['name']] = layer
self._output_layers = [layer]
return [layer]
def _create_iter_funcs(self, layers, objective, update, output_type):
y_batch = output_type('y_batch')
objective_kw = self._get_params_for('objective')
loss_train = objective(
layers, target=y_batch, **objective_kw)
loss_eval = objective(
layers, target=y_batch, deterministic=True, **objective_kw)
output_layer = self._output_layers
predict_proba = get_output(output_layer, None, deterministic=True)
if not self.regression:
predict = predict_proba[0].argmax(axis=1)
accuracy = T.mean(T.eq(predict, y_batch))
else:
accuracy = loss_eval
scores_train = [
s[1](predict_proba, y_batch) for s in self.scores_train]
scores_valid = [
s[1](predict_proba, y_batch) for s in self.scores_valid]
all_params = self.get_all_params(trainable=True)
grads = theano.grad(loss_train, all_params)
for idx, param in enumerate(all_params):
grad_scale = getattr(param.tag, 'grad_scale', 1)
if grad_scale != 1:
grads[idx] *= grad_scale
update_params = self._get_params_for('update')
updates = update(grads, all_params, **update_params)
input_layers = [layer for layer in layers.values()
if isinstance(layer, InputLayer)]
X_inputs = [theano.In(input_layer.input_var, name=input_layer.name)
for input_layer in input_layers]
inputs = X_inputs + [theano.In(y_batch, name="y")]
train_iter = theano.function(
inputs=inputs,
outputs=[loss_train] + scores_train,
updates=updates,
allow_input_downcast=True,
on_unused_input='ignore',
)
eval_iter = theano.function(
inputs=inputs,
outputs=[loss_eval, accuracy] + scores_valid,
allow_input_downcast=True,
on_unused_input='ignore',
)
predict_iter = theano.function(
inputs=X_inputs,
outputs=predict_proba,
allow_input_downcast=True,
on_unused_input='ignore',
)
return train_iter, eval_iter, predict_iter
def partial_fit(self, X, y, classes=None):
"""
Runs a single epoch using the provided data
:return: This instance
"""
return self.fit(X, y, epochs=1)
def train_loop(self, X, y, epochs=None):
epochs = epochs or self.max_epochs
X_train, X_valid, y_train, y_valid = self.train_split(X, y, self)
on_batch_finished = self.on_batch_finished
if not isinstance(on_batch_finished, (list, tuple)):
on_batch_finished = [on_batch_finished]
on_epoch_finished = self.on_epoch_finished
if not isinstance(on_epoch_finished, (list, tuple)):
on_epoch_finished = [on_epoch_finished]
on_training_started = self.on_training_started
if not isinstance(on_training_started, (list, tuple)):
on_training_started = [on_training_started]
on_training_finished = self.on_training_finished
if not isinstance(on_training_finished, (list, tuple)):
on_training_finished = [on_training_finished]
epoch = 0
best_valid_loss = (
min([row['valid_loss'] for row in self.train_history_]) if
self.train_history_ else np.inf
)
best_train_loss = (
min([row['train_loss'] for row in self.train_history_]) if
self.train_history_ else np.inf
)
for func in on_training_started:
func(self, self.train_history_)
num_epochs_past = len(self.train_history_)
while epoch < epochs:
epoch += 1
train_outputs = []
valid_outputs = []
if self.custom_scores:
custom_scores = [[] for _ in self.custom_scores]
else:
custom_scores = []
t0 = time()
batch_train_sizes = []
for Xb, yb in self.batch_iterator_train(X_train, y_train):
train_outputs.append(
self.apply_batch_func(self.train_iter_, Xb, yb))
batch_train_sizes.append(len(Xb))
for func in on_batch_finished:
func(self, self.train_history_)
batch_valid_sizes = []
for Xb, yb in self.batch_iterator_test(X_valid, y_valid):
valid_outputs.append(
self.apply_batch_func(self.eval_iter_, Xb, yb))
batch_valid_sizes.append(len(Xb))
if self.custom_scores:
y_prob = self.apply_batch_func(self.predict_iter_, Xb)
y_prob = y_prob[0] if len(y_prob) == 1 else y_prob
for custom_scorer, custom_score in zip(
self.custom_scores, custom_scores):
custom_score.append(custom_scorer[1](yb, y_prob))
train_outputs = np.array(train_outputs, dtype=object).T
train_outputs = [
np.average(
[np.mean(row) for row in col],
weights=batch_train_sizes,
)
for col in train_outputs
]
if valid_outputs:
valid_outputs = np.array(valid_outputs, dtype=object).T
valid_outputs = [
np.average(
[np.mean(row) for row in col],
weights=batch_valid_sizes,
)
for col in valid_outputs
]
if custom_scores:
avg_custom_scores = np.average(
custom_scores, weights=batch_valid_sizes, axis=1)
if train_outputs[0] < best_train_loss:
best_train_loss = train_outputs[0]
if valid_outputs and valid_outputs[0] < best_valid_loss:
best_valid_loss = valid_outputs[0]
info = {
'epoch': num_epochs_past + epoch,
'train_loss': train_outputs[0],
'train_loss_best': best_train_loss == train_outputs[0],
'valid_loss': valid_outputs[0]
if valid_outputs else np.nan,
'valid_loss_best': best_valid_loss == valid_outputs[0]
if valid_outputs else np.nan,
'valid_accuracy': valid_outputs[1]
if valid_outputs else np.nan,
'dur': time() - t0,
}
if self.custom_scores:
for index, custom_score in enumerate(self.custom_scores):
info[custom_score[0]] = avg_custom_scores[index]
if self.scores_train:
for index, (name, func) in enumerate(self.scores_train):
info[name] = train_outputs[index + 1]
if self.scores_valid:
for index, (name, func) in enumerate(self.scores_valid):
info[name] = valid_outputs[index + 2]
self.train_history_.append(info)
try:
for func in on_epoch_finished:
func(self, self.train_history_)
except StopIteration:
break
for func in on_training_finished:
func(self, self.train_history_)
@staticmethod
def apply_batch_func(func, Xb, yb=None):
if isinstance(Xb, dict):
kwargs = dict(Xb)
if yb is not None:
kwargs['y'] = yb
return func(**kwargs)
else:
return func(Xb) if yb is None else func(Xb, yb)
def predict_proba(self, X):
probas = []
for Xb, yb in self.batch_iterator_test(X):
probas.append(self.apply_batch_func(self.predict_iter_, Xb))
output = tuple(np.vstack(o) for o in zip(*probas))
return output if len(output) > 1 else output[0]
def predict(self, X):
if self.regression:
return self.predict_proba(X)
else:
y_pred = np.argmax(self.predict_proba(X), axis=1)
if self.use_label_encoder:
y_pred = self.enc_.inverse_transform(y_pred)
return y_pred
def get_output(self, layer, X):
if isinstance(layer, basestring):
layer = self.layers_[layer]
fn_cache = getattr(self, '_get_output_fn_cache', None)
if fn_cache is None:
fn_cache = {}
self._get_output_fn_cache = fn_cache
if layer not in fn_cache:
xs = self.layers_[0].input_var.type()
get_activity = theano.function([xs], get_output(layer, xs))
fn_cache[layer] = get_activity
else:
get_activity = fn_cache[layer]
outputs = []
for Xb, yb in self.batch_iterator_test(X):
outputs.append(get_activity(Xb))
return np.vstack(outputs)
def score(self, X, y):
score = r2_score if self.regression else accuracy_score
return float(score(self.predict(X), y))
def get_all_layers(self):
return self.layers_.values()
def get_all_params(self, **kwargs):
layers = self.get_all_layers()
params = sum([l.get_params(**kwargs) for l in layers], [])
return unique(params)
def get_all_params_values(self):
return_value = OrderedDict()
for name, layer in self.layers_.items():
return_value[name] = [p.get_value() for p in layer.get_params()]
return return_value
def load_params_from(self, source):
self.initialize()
if isinstance(source, basestring):
with open(source, 'rb') as f:
source = pickle.load(f)
if isinstance(source, NeuralNet):
source = source.get_all_params_values()
success = "Loaded parameters to layer '{}' (shape {})."
failure = ("Could not load parameters to layer '{}' because "
"shapes did not match: {} vs {}.")
for key, values in source.items():
layer = self.layers_.get(key)
if layer is not None:
for p1, p2v in zip(layer.get_params(), values):
shape1 = p1.get_value().shape
shape2 = p2v.shape
shape1s = 'x'.join(map(str, shape1))
shape2s = 'x'.join(map(str, shape2))
if shape1 == shape2:
p1.set_value(p2v)
if self.verbose:
print(success.format(
key, shape1s, shape2s))
else:
if self.verbose:
print(failure.format(
key, shape1s, shape2s))
def save_params_to(self, fname):
params = self.get_all_params_values()
with open(fname, 'wb') as f:
pickle.dump(params, f, -1)
def load_weights_from(self, source):
warn("The 'load_weights_from' method will be removed in nolearn 0.6. "
"Please use 'load_params_from' instead.")
if isinstance(source, list):
raise ValueError(
"Loading weights from a list of parameter values is no "
"longer supported. Please send me something like the "
"return value of 'net.get_all_params_values()' instead.")
return self.load_params_from(source)
def save_weights_to(self, fname):
warn("The 'save_weights_to' method will be removed in nolearn 0.6. "
"Please use 'save_params_to' instead.")
return self.save_params_to(fname)
def __setstate__(self, state): # BBB for pickles that don't have the graph
self.__dict__.update(state)
self.initialize()
def get_params(self, deep=True):
params = super(NeuralNet, self).get_params(deep=deep)
# Incidentally, Lasagne layers have a 'get_params' too, which
# for sklearn's 'clone' means it would treat it in a special
# way when cloning. Wrapping the list of layers in a custom
# list type does the trick here, but of course it's crazy:
params['layers'] = _list(params['layers'])
return _dict(params)
def _get_param_names(self):
# This allows us to have **kwargs in __init__ (woot!):
param_names = super(NeuralNet, self)._get_param_names()
return param_names + self._kwarg_keys
|
dnouri/nolearn
|
nolearn/lasagne/base.py
|
NeuralNet.partial_fit
|
python
|
def partial_fit(self, X, y, classes=None):
return self.fit(X, y, epochs=1)
|
Runs a single epoch using the provided data
:return: This instance
|
train
|
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/base.py#L705-L711
| null |
class NeuralNet(BaseEstimator):
"""A configurable Neural Network estimator based on Lasagne.
Compatible with scikit-learn estimators.
Attributes
----------
train_history_:
A list of network training info for each epoch.
Each index contains a dictionary with the following keys
* epoch - The epoch number
* train_loss_best - True if this epoch had the best training loss so far
* valid_loss_best - True if this epoch had the best validation loss so far
* train_loss - The training loss for this epoch
* valid_loss - The validation loss for this epoch
* valid_accuracy - The validation accuracy for this epoch
layers_: A dictionary of lasagne layers keyed by the layer's name, or the layer's index
layer_reference_params:
A list of Lasagne layer parameter names that may reference
other layers, excluding 'incoming' and 'incomings'.
"""
layer_reference_params = ['mask_input']
def __init__(
self,
layers,
update=nesterov_momentum,
loss=None, # BBB
objective=objective,
objective_loss_function=None,
batch_iterator_train=BatchIterator(batch_size=128),
batch_iterator_test=BatchIterator(batch_size=128),
regression=False,
max_epochs=100,
train_split=TrainSplit(eval_size=0.2),
custom_scores=None,
scores_train=None,
scores_valid=None,
X_tensor_type=None,
y_tensor_type=None,
use_label_encoder=False,
on_batch_finished=None,
on_epoch_finished=None,
on_training_started=None,
on_training_finished=None,
more_params=None,
check_input=True,
verbose=0,
**kwargs
):
"""
Initialize a Neural Network
Parameters
----------
layers:
A list of lasagne layers to compose into the final neural net.
See :ref:`layer-def`
update:
The update function to use when training. Uses the form
provided by the :mod:`lasagne.updates` implementations.
objective:
The objective function to use when training. The callable
will be passed the NeuralNetwork's :attr:`.layers_`
attribute as the first argument, and the output target as
the second argument.
max_epochs:
The number of epochs to train. This is used as the
default when calling the :meth:`.fit` method without an
epochs argument.
Other Parameters
----------------
batch_iterator_train:
The sample iterator to use while training the network.
batch_iterator_test:
The sample Iterator to use while testing and validating
the network.
regression:
Whether or not this is a regressor network. Determines
the default objective and scoring functions.
train_split:
The method used to separate training and validation
samples. See :class:`TrainSplit` for the default
implementation.
y_tensor_type:
The type of tensor to use to hold the network's output.
Typically ``T.ivector`` (the default) for classification
tasks.
on_training_started, on_batch_finished, on_epoch_finished,
on_training_finished:
A list of functions which are called during training at
the corresponding times.
The functions will be passed the NeuralNet as the first
parameter and its :attr:`.train_history_` attribute as the
second parameter.
custom_scores:
A list of callable custom scoring functions.
The functions will be passed the expected y values as the
first argument, and the predicted y_values as the second
argument.
use_label_encoder:
If true, all y_values will be encoded using a
:class:`sklearn.preprocessing.LabelEncoder` instance.
verbose:
The verbosity level of the network.
Any non-zero value will cause the network to print the
layer info at the start of training, as well as print a
log of the training history after each epoch. Larger
values will increase the amount of info shown.
more_params:
A set of more parameters to use when initializing layers
defined using the dictionary method.
Note
----
* Extra arguments can be passed to the call to the *update*
function by prepending the string ``update_`` to the
corresponding argument name,
e.g. ``update_learning_rate=0.01`` will define the
``learning_rate`` parameter of the update function.
* Extra arguments can be provided to the objective call
through the Neural Network by prepending the string
``objective_`` to the corresponding argument name.
"""
if loss is not None:
raise ValueError(
"The 'loss' parameter was removed, please use "
"'objective_loss_function' instead.") # BBB
if hasattr(objective, 'get_loss'):
raise ValueError(
"The 'Objective' class is no longer supported, please "
"use 'nolearn.lasagne.objective' or similar.") # BBB
if objective_loss_function is None:
objective_loss_function = (
squared_error if regression else categorical_crossentropy)
if hasattr(self, 'train_test_split'): # BBB
warn("The 'train_test_split' method has been deprecated, please "
"use the 'train_split' parameter instead.")
train_split = LegacyTrainTestSplit(
eval_size=kwargs.pop('eval_size', 0.2))
if 'eval_size' in kwargs: # BBB
warn("The 'eval_size' argument has been deprecated, please use "
"the 'train_split' parameter instead, e.g.\n"
"train_split=TrainSplit(eval_size=0.4)")
train_split.eval_size = kwargs.pop('eval_size')
if y_tensor_type is None:
if regression:
y_tensor_type = T.TensorType(
theano.config.floatX, (False, False))
else:
y_tensor_type = T.ivector
if X_tensor_type is not None:
raise ValueError(
"The 'X_tensor_type' parameter has been removed. "
"It's unnecessary.") # BBB
if 'custom_score' in kwargs:
warn("The 'custom_score' argument has been deprecated, please use "
"the 'custom_scores' parameter instead, which is just "
"a list of custom scores e.g.\n"
"custom_scores=[('first output', lambda y1, y2: abs(y1[0,0]-y2[0,0])), ('second output', lambda y1,y2: abs(y1[0,1]-y2[0,1]))]")
# add it to custom_scores
if custom_scores is None:
custom_scores = [kwargs.pop('custom_score')]
else:
custom_scores.append(kwargs.pop('custom_score'))
if isinstance(layers, Layer):
layers = _list([layers])
elif isinstance(layers, Iterable):
layers = _list(layers)
self.layers = layers
self.update = update
self.objective = objective
self.objective_loss_function = objective_loss_function
self.batch_iterator_train = batch_iterator_train
self.batch_iterator_test = batch_iterator_test
self.regression = regression
self.max_epochs = max_epochs
self.train_split = train_split
self.custom_scores = custom_scores
self.scores_train = scores_train or []
self.scores_valid = scores_valid or []
self.y_tensor_type = y_tensor_type
self.use_label_encoder = use_label_encoder
self.on_batch_finished = on_batch_finished or []
self.on_epoch_finished = on_epoch_finished or []
self.on_training_started = on_training_started or []
self.on_training_finished = on_training_finished or []
self.more_params = more_params or {}
self.check_input = check_input
self.verbose = verbose
if self.verbose:
# XXX: PrintLog should come before any other handlers,
# because early stopping will otherwise cause the last
# line not to be printed
self.on_epoch_finished.append(PrintLog())
self.on_training_started.append(PrintLayerInfo())
for key in kwargs.keys():
assert not hasattr(self, key)
vars(self).update(kwargs)
self._kwarg_keys = list(kwargs.keys())
self.train_history_ = []
if 'batch_iterator' in kwargs: # BBB
raise ValueError(
"The 'batch_iterator' argument has been replaced. "
"Use 'batch_iterator_train' and 'batch_iterator_test' instead."
)
def _check_for_unused_kwargs(self):
names = self.layers_.keys() + ['update', 'objective']
for k in self._kwarg_keys:
for n in names:
prefix = '{}_'.format(n)
if k.startswith(prefix):
break
else:
raise ValueError("Unused kwarg: {}".format(k))
def _check_good_input(self, X, y=None):
if isinstance(X, dict):
lengths = [len(X1) for X1 in X.values()]
if len(set(lengths)) > 1:
raise ValueError("Not all values of X are of equal length.")
x_len = lengths[0]
else:
x_len = len(X)
if y is not None:
if len(y) != x_len:
raise ValueError("X and y are not of equal length.")
if self.regression and y is not None and y.ndim == 1:
y = y.reshape(-1, 1)
return X, y
def initialize(self):
"""Initializes the network. Checks that no extra kwargs were
passed to the constructor, and compiles the train, predict,
and evaluation functions.
Subsequent calls to this function will return without any action.
"""
if getattr(self, '_initialized', False):
return
out = getattr(self, '_output_layers', None)
if out is None:
self.initialize_layers()
self._check_for_unused_kwargs()
iter_funcs = self._create_iter_funcs(
self.layers_, self.objective, self.update,
self.y_tensor_type,
)
self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs
self._initialized = True
def _get_params_for(self, name):
collected = {}
prefix = '{}_'.format(name)
params = vars(self)
more_params = self.more_params
for key, value in itertools.chain(params.items(), more_params.items()):
if key.startswith(prefix):
collected[key[len(prefix):]] = value
return collected
def _layer_name(self, layer_class, index):
return "{}{}".format(
layer_class.__name__.lower().replace("layer", ""), index)
def initialize_layers(self, layers=None):
"""Sets up the Lasagne layers
:param layers: The dictionary of layers, or a
:class:`lasagne.Layers` instance, describing the underlying
network
:return: the output layer of the underlying lasagne network.
:seealso: :ref:`layer-def`
"""
if layers is not None:
self.layers = layers
self.layers_ = Layers()
#If a Layer, or a list of Layers was passed in
if isinstance(self.layers[0], Layer):
for out_layer in self.layers:
for i, layer in enumerate(get_all_layers(out_layer)):
if layer not in self.layers_.values():
name = layer.name or self._layer_name(layer.__class__, i)
self.layers_[name] = layer
if self._get_params_for(name) != {}:
raise ValueError(
"You can't use keyword params when passing a Lasagne "
"instance object as the 'layers' parameter of "
"'NeuralNet'."
)
self._output_layers = self.layers
return self.layers
# 'self.layers' are a list of '(Layer class, kwargs)', so
# we'll have to actually instantiate the layers given the
# arguments:
layer = None
for i, layer_def in enumerate(self.layers):
if isinstance(layer_def[1], dict):
# Newer format: (Layer, {'layer': 'kwargs'})
layer_factory, layer_kw = layer_def
layer_kw = layer_kw.copy()
else:
# The legacy format: ('name', Layer)
layer_name, layer_factory = layer_def
layer_kw = {'name': layer_name}
if isinstance(layer_factory, str):
layer_factory = locate(layer_factory)
assert layer_factory is not None
if 'name' not in layer_kw:
layer_kw['name'] = self._layer_name(layer_factory, i)
more_params = self._get_params_for(layer_kw['name'])
layer_kw.update(more_params)
if layer_kw['name'] in self.layers_:
raise ValueError(
"Two layers with name {}.".format(layer_kw['name']))
# Any layers that aren't subclasses of InputLayer are
# assumed to require an 'incoming' paramter. By default,
# we'll use the previous layer as input:
try:
is_input_layer = issubclass(layer_factory, InputLayer)
except TypeError:
is_input_layer = False
if not is_input_layer:
if 'incoming' in layer_kw:
layer_kw['incoming'] = self.layers_[
layer_kw['incoming']]
elif 'incomings' in layer_kw:
layer_kw['incomings'] = [
self.layers_[name] for name in layer_kw['incomings']]
else:
layer_kw['incoming'] = layer
# Deal with additional string parameters that may
# reference other layers; currently only 'mask_input'.
for param in self.layer_reference_params:
if param in layer_kw:
val = layer_kw[param]
if isinstance(val, basestring):
layer_kw[param] = self.layers_[val]
for attr in ('W', 'b'):
if isinstance(layer_kw.get(attr), str):
name = layer_kw[attr]
layer_kw[attr] = getattr(self.layers_[name], attr, None)
try:
layer_wrapper = layer_kw.pop('layer_wrapper', None)
layer = layer_factory(**layer_kw)
except TypeError as e:
msg = ("Failed to instantiate {} with args {}.\n"
"Maybe parameter names have changed?".format(
layer_factory, layer_kw))
chain_exception(TypeError(msg), e)
self.layers_[layer_kw['name']] = layer
if layer_wrapper is not None:
layer = layer_wrapper(layer)
self.layers_["LW_%s" % layer_kw['name']] = layer
self._output_layers = [layer]
return [layer]
def _create_iter_funcs(self, layers, objective, update, output_type):
y_batch = output_type('y_batch')
objective_kw = self._get_params_for('objective')
loss_train = objective(
layers, target=y_batch, **objective_kw)
loss_eval = objective(
layers, target=y_batch, deterministic=True, **objective_kw)
output_layer = self._output_layers
predict_proba = get_output(output_layer, None, deterministic=True)
if not self.regression:
predict = predict_proba[0].argmax(axis=1)
accuracy = T.mean(T.eq(predict, y_batch))
else:
accuracy = loss_eval
scores_train = [
s[1](predict_proba, y_batch) for s in self.scores_train]
scores_valid = [
s[1](predict_proba, y_batch) for s in self.scores_valid]
all_params = self.get_all_params(trainable=True)
grads = theano.grad(loss_train, all_params)
for idx, param in enumerate(all_params):
grad_scale = getattr(param.tag, 'grad_scale', 1)
if grad_scale != 1:
grads[idx] *= grad_scale
update_params = self._get_params_for('update')
updates = update(grads, all_params, **update_params)
input_layers = [layer for layer in layers.values()
if isinstance(layer, InputLayer)]
X_inputs = [theano.In(input_layer.input_var, name=input_layer.name)
for input_layer in input_layers]
inputs = X_inputs + [theano.In(y_batch, name="y")]
train_iter = theano.function(
inputs=inputs,
outputs=[loss_train] + scores_train,
updates=updates,
allow_input_downcast=True,
on_unused_input='ignore',
)
eval_iter = theano.function(
inputs=inputs,
outputs=[loss_eval, accuracy] + scores_valid,
allow_input_downcast=True,
on_unused_input='ignore',
)
predict_iter = theano.function(
inputs=X_inputs,
outputs=predict_proba,
allow_input_downcast=True,
on_unused_input='ignore',
)
return train_iter, eval_iter, predict_iter
def fit(self, X, y, epochs=None):
"""
Runs the training loop for a given number of epochs
:param X: The input data
:param y: The ground truth
:param epochs: The number of epochs to run, if `None` runs for the
network's :attr:`max_epochs`
:return: This instance
"""
if self.check_input:
X, y = self._check_good_input(X, y)
if self.use_label_encoder:
self.enc_ = LabelEncoder()
y = self.enc_.fit_transform(y).astype(np.int32)
self.classes_ = self.enc_.classes_
self.initialize()
try:
self.train_loop(X, y, epochs=epochs)
except KeyboardInterrupt:
pass
return self
def train_loop(self, X, y, epochs=None):
epochs = epochs or self.max_epochs
X_train, X_valid, y_train, y_valid = self.train_split(X, y, self)
on_batch_finished = self.on_batch_finished
if not isinstance(on_batch_finished, (list, tuple)):
on_batch_finished = [on_batch_finished]
on_epoch_finished = self.on_epoch_finished
if not isinstance(on_epoch_finished, (list, tuple)):
on_epoch_finished = [on_epoch_finished]
on_training_started = self.on_training_started
if not isinstance(on_training_started, (list, tuple)):
on_training_started = [on_training_started]
on_training_finished = self.on_training_finished
if not isinstance(on_training_finished, (list, tuple)):
on_training_finished = [on_training_finished]
epoch = 0
best_valid_loss = (
min([row['valid_loss'] for row in self.train_history_]) if
self.train_history_ else np.inf
)
best_train_loss = (
min([row['train_loss'] for row in self.train_history_]) if
self.train_history_ else np.inf
)
for func in on_training_started:
func(self, self.train_history_)
num_epochs_past = len(self.train_history_)
while epoch < epochs:
epoch += 1
train_outputs = []
valid_outputs = []
if self.custom_scores:
custom_scores = [[] for _ in self.custom_scores]
else:
custom_scores = []
t0 = time()
batch_train_sizes = []
for Xb, yb in self.batch_iterator_train(X_train, y_train):
train_outputs.append(
self.apply_batch_func(self.train_iter_, Xb, yb))
batch_train_sizes.append(len(Xb))
for func in on_batch_finished:
func(self, self.train_history_)
batch_valid_sizes = []
for Xb, yb in self.batch_iterator_test(X_valid, y_valid):
valid_outputs.append(
self.apply_batch_func(self.eval_iter_, Xb, yb))
batch_valid_sizes.append(len(Xb))
if self.custom_scores:
y_prob = self.apply_batch_func(self.predict_iter_, Xb)
y_prob = y_prob[0] if len(y_prob) == 1 else y_prob
for custom_scorer, custom_score in zip(
self.custom_scores, custom_scores):
custom_score.append(custom_scorer[1](yb, y_prob))
train_outputs = np.array(train_outputs, dtype=object).T
train_outputs = [
np.average(
[np.mean(row) for row in col],
weights=batch_train_sizes,
)
for col in train_outputs
]
if valid_outputs:
valid_outputs = np.array(valid_outputs, dtype=object).T
valid_outputs = [
np.average(
[np.mean(row) for row in col],
weights=batch_valid_sizes,
)
for col in valid_outputs
]
if custom_scores:
avg_custom_scores = np.average(
custom_scores, weights=batch_valid_sizes, axis=1)
if train_outputs[0] < best_train_loss:
best_train_loss = train_outputs[0]
if valid_outputs and valid_outputs[0] < best_valid_loss:
best_valid_loss = valid_outputs[0]
info = {
'epoch': num_epochs_past + epoch,
'train_loss': train_outputs[0],
'train_loss_best': best_train_loss == train_outputs[0],
'valid_loss': valid_outputs[0]
if valid_outputs else np.nan,
'valid_loss_best': best_valid_loss == valid_outputs[0]
if valid_outputs else np.nan,
'valid_accuracy': valid_outputs[1]
if valid_outputs else np.nan,
'dur': time() - t0,
}
if self.custom_scores:
for index, custom_score in enumerate(self.custom_scores):
info[custom_score[0]] = avg_custom_scores[index]
if self.scores_train:
for index, (name, func) in enumerate(self.scores_train):
info[name] = train_outputs[index + 1]
if self.scores_valid:
for index, (name, func) in enumerate(self.scores_valid):
info[name] = valid_outputs[index + 2]
self.train_history_.append(info)
try:
for func in on_epoch_finished:
func(self, self.train_history_)
except StopIteration:
break
for func in on_training_finished:
func(self, self.train_history_)
@staticmethod
def apply_batch_func(func, Xb, yb=None):
if isinstance(Xb, dict):
kwargs = dict(Xb)
if yb is not None:
kwargs['y'] = yb
return func(**kwargs)
else:
return func(Xb) if yb is None else func(Xb, yb)
def predict_proba(self, X):
probas = []
for Xb, yb in self.batch_iterator_test(X):
probas.append(self.apply_batch_func(self.predict_iter_, Xb))
output = tuple(np.vstack(o) for o in zip(*probas))
return output if len(output) > 1 else output[0]
def predict(self, X):
if self.regression:
return self.predict_proba(X)
else:
y_pred = np.argmax(self.predict_proba(X), axis=1)
if self.use_label_encoder:
y_pred = self.enc_.inverse_transform(y_pred)
return y_pred
def get_output(self, layer, X):
if isinstance(layer, basestring):
layer = self.layers_[layer]
fn_cache = getattr(self, '_get_output_fn_cache', None)
if fn_cache is None:
fn_cache = {}
self._get_output_fn_cache = fn_cache
if layer not in fn_cache:
xs = self.layers_[0].input_var.type()
get_activity = theano.function([xs], get_output(layer, xs))
fn_cache[layer] = get_activity
else:
get_activity = fn_cache[layer]
outputs = []
for Xb, yb in self.batch_iterator_test(X):
outputs.append(get_activity(Xb))
return np.vstack(outputs)
def score(self, X, y):
score = r2_score if self.regression else accuracy_score
return float(score(self.predict(X), y))
def get_all_layers(self):
return self.layers_.values()
def get_all_params(self, **kwargs):
layers = self.get_all_layers()
params = sum([l.get_params(**kwargs) for l in layers], [])
return unique(params)
def get_all_params_values(self):
return_value = OrderedDict()
for name, layer in self.layers_.items():
return_value[name] = [p.get_value() for p in layer.get_params()]
return return_value
def load_params_from(self, source):
self.initialize()
if isinstance(source, basestring):
with open(source, 'rb') as f:
source = pickle.load(f)
if isinstance(source, NeuralNet):
source = source.get_all_params_values()
success = "Loaded parameters to layer '{}' (shape {})."
failure = ("Could not load parameters to layer '{}' because "
"shapes did not match: {} vs {}.")
for key, values in source.items():
layer = self.layers_.get(key)
if layer is not None:
for p1, p2v in zip(layer.get_params(), values):
shape1 = p1.get_value().shape
shape2 = p2v.shape
shape1s = 'x'.join(map(str, shape1))
shape2s = 'x'.join(map(str, shape2))
if shape1 == shape2:
p1.set_value(p2v)
if self.verbose:
print(success.format(
key, shape1s, shape2s))
else:
if self.verbose:
print(failure.format(
key, shape1s, shape2s))
def save_params_to(self, fname):
params = self.get_all_params_values()
with open(fname, 'wb') as f:
pickle.dump(params, f, -1)
def load_weights_from(self, source):
warn("The 'load_weights_from' method will be removed in nolearn 0.6. "
"Please use 'load_params_from' instead.")
if isinstance(source, list):
raise ValueError(
"Loading weights from a list of parameter values is no "
"longer supported. Please send me something like the "
"return value of 'net.get_all_params_values()' instead.")
return self.load_params_from(source)
def save_weights_to(self, fname):
warn("The 'save_weights_to' method will be removed in nolearn 0.6. "
"Please use 'save_params_to' instead.")
return self.save_params_to(fname)
def __setstate__(self, state): # BBB for pickles that don't have the graph
self.__dict__.update(state)
self.initialize()
def get_params(self, deep=True):
params = super(NeuralNet, self).get_params(deep=deep)
# Incidentally, Lasagne layers have a 'get_params' too, which
# for sklearn's 'clone' means it would treat it in a special
# way when cloning. Wrapping the list of layers in a custom
# list type does the trick here, but of course it's crazy:
params['layers'] = _list(params['layers'])
return _dict(params)
def _get_param_names(self):
# This allows us to have **kwargs in __init__ (woot!):
param_names = super(NeuralNet, self)._get_param_names()
return param_names + self._kwarg_keys
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
pdf
|
python
|
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
|
Returns a Dirichlet PDF function
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L95-L102
| null |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
meanprecision
|
python
|
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
|
Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L104-L121
| null |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
loglikelihood
|
python
|
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
|
Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L123-L140
| null |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
mle
|
python
|
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
|
Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L142-L171
|
[
"def _meanprecision(D, tol=1e-7, maxiter=None):\n '''Mean and precision alternating method for MLE of Dirichlet\n distribution'''\n N, K = D.shape\n logp = log(D).mean(axis=0)\n a0 = _init_a(D)\n s0 = a0.sum()\n if s0 < 0:\n a0 = a0/s0\n s0 = 1\n elif s0 == 0:\n a0 = ones(a.shape) / len(a)\n s0 = 1\n m0 = a0/s0\n\n # Start updating\n if maxiter is None:\n maxiter = MAXINT\n for i in xrange(maxiter):\n a1 = _fit_s(D, a0, logp, tol=tol)\n s1 = sum(a1)\n a1 = _fit_m(D, a1, logp, tol=tol)\n m = a1/s1\n # if norm(a1-a0) < tol:\n if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster\n return a1\n a0 = a1\n raise Exception('Failed to converge after {} iterations, values are {}.'\n .format(maxiter, a1))\n",
"def _fixedpoint(D, tol=1e-7, maxiter=None):\n '''Simple fixed point iteration method for MLE of Dirichlet distribution'''\n N, K = D.shape\n logp = log(D).mean(axis=0)\n a0 = _init_a(D)\n\n # Start updating\n if maxiter is None:\n maxiter = MAXINT\n for i in xrange(maxiter):\n a1 = _ipsi(psi(a0.sum()) + logp)\n # if norm(a1-a0) < tol:\n if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster\n return a1\n a0 = a1\n raise Exception('Failed to converge after {} iterations, values are {}.'\n .format(maxiter, a1))\n"
] |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
_fixedpoint
|
python
|
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
|
Simple fixed point iteration method for MLE of Dirichlet distribution
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L173-L189
|
[
"def loglikelihood(D, a):\n '''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).\n\n Parameters\n ----------\n D : 2D array\n where ``N`` is the number of observations, ``K`` is the number of\n parameters for the Dirichlet distribution.\n a : array\n Parameters for the Dirichlet distribution.\n\n Returns\n -------\n logl : float\n The log likelihood of the Dirichlet distribution'''\n N, K = D.shape\n logp = log(D).mean(axis=0)\n return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())\n",
"def _init_a(D):\n '''Initial guess for Dirichlet alpha parameters given data D'''\n E = D.mean(axis=0)\n E2 = (D**2).mean(axis=0)\n return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E\n",
"def _ipsi(y, tol=1.48e-9, maxiter=10):\n '''Inverse of psi (digamma) using Newton's method. For the purposes\n of Dirichlet MLE, since the parameters a[i] must always\n satisfy a > 0, we define ipsi :: R -> (0,inf).'''\n y = asanyarray(y, dtype='float')\n x0 = _piecewise(y, [y >= -2.22, y < -2.22],\n [(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])\n for i in xrange(maxiter):\n x1 = x0 - (psi(x0) - y)/_trigamma(x0)\n if norm(x1 - x0) < tol:\n return x1\n x0 = x1\n raise Exception(\n 'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))\n"
] |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
_meanprecision
|
python
|
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
|
Mean and precision alternating method for MLE of Dirichlet
distribution
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L191-L219
|
[
"def loglikelihood(D, a):\n '''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).\n\n Parameters\n ----------\n D : 2D array\n where ``N`` is the number of observations, ``K`` is the number of\n parameters for the Dirichlet distribution.\n a : array\n Parameters for the Dirichlet distribution.\n\n Returns\n -------\n logl : float\n The log likelihood of the Dirichlet distribution'''\n N, K = D.shape\n logp = log(D).mean(axis=0)\n return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())\n",
"def _init_a(D):\n '''Initial guess for Dirichlet alpha parameters given data D'''\n E = D.mean(axis=0)\n E2 = (D**2).mean(axis=0)\n return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E\n",
"def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):\n '''Assuming a fixed mean for Dirichlet distribution, maximize likelihood\n for preicision a.k.a. s'''\n N, K = D.shape\n s1 = a0.sum()\n m = a0 / s1\n mlogp = (m*logp).sum()\n for i in xrange(maxiter):\n s0 = s1\n g = psi(s1) - (m*psi(s1*m)).sum() + mlogp\n h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()\n\n if g + s1 * h < 0:\n s1 = 1/(1/s0 + g/h/(s0**2))\n if s1 <= 0:\n s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s\n if s1 <= 0:\n s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s\n if s1 <= 0:\n s1 = s0 - g/h # Newton\n if s1 <= 0:\n raise Exception('Unable to update s from {}'.format(s0))\n\n a = s1 * m\n if abs(s1 - s0) < tol:\n return a\n\n raise Exception('Failed to converge after {} iterations, s is {}'\n .format(maxiter, s1))\n",
"def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):\n '''With fixed precision s, maximize mean m'''\n N,K = D.shape\n s = a0.sum()\n\n for i in xrange(maxiter):\n m = a0 / s\n a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())\n a1 = a1/a1.sum() * s\n\n if norm(a1 - a0) < tol:\n return a1\n a0 = a1\n\n raise Exception('Failed to converge after {} iterations, s is {}'\n .format(maxiter, s))\n"
] |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
_fit_s
|
python
|
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
|
Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L221-L249
| null |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
_fit_m
|
python
|
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
|
With fixed precision s, maximize mean m
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L251-L266
| null |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
_piecewise
|
python
|
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
|
Fixed version of numpy.piecewise for 0-d arrays
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L268-L316
| null |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
_init_a
|
python
|
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
|
Initial guess for Dirichlet alpha parameters given data D
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L318-L322
| null |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/dirichlet.py
|
_ipsi
|
python
|
def _ipsi(y, tol=1.48e-9, maxiter=10):
'''Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).'''
y = asanyarray(y, dtype='float')
x0 = _piecewise(y, [y >= -2.22, y < -2.22],
[(lambda x: exp(x) + 0.5), (lambda x: -1/(x+euler))])
for i in xrange(maxiter):
x1 = x0 - (psi(x0) - y)/_trigamma(x0)
if norm(x1 - x0) < tol:
return x1
x0 = x1
raise Exception(
'Unable to converge in {} iterations, value is {}'.format(maxiter, x1))
|
Inverse of psi (digamma) using Newton's method. For the purposes
of Dirichlet MLE, since the parameters a[i] must always
satisfy a > 0, we define ipsi :: R -> (0,inf).
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/dirichlet.py#L324-L337
|
[
"def _trigamma(x):\n return polygamma(1, x)\n",
"def _piecewise(x, condlist, funclist, *args, **kw):\n '''Fixed version of numpy.piecewise for 0-d arrays'''\n x = asanyarray(x)\n n2 = len(funclist)\n if isscalar(condlist) or \\\n (isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \\\n (x.ndim > 0 and condlist[0].ndim == 0):\n condlist = [condlist]\n condlist = [asarray(c, dtype=bool) for c in condlist]\n n = len(condlist)\n\n zerod = False\n # This is a hack to work around problems with NumPy's\n # handling of 0-d arrays and boolean indexing with\n # numpy.bool_ scalars\n if x.ndim == 0:\n x = x[None]\n zerod = True\n newcondlist = []\n for k in range(n):\n if condlist[k].ndim == 0:\n condition = condlist[k][None]\n else:\n condition = condlist[k]\n newcondlist.append(condition)\n condlist = newcondlist\n\n if n == n2-1: # compute the \"otherwise\" condition.\n totlist = condlist[0]\n for k in range(1, n):\n totlist |= condlist[k]\n condlist.append(~totlist)\n n += 1\n if (n != n2):\n raise ValueError(\n \"function list and condition list must be the same\")\n\n y = zeros(x.shape, x.dtype)\n for k in range(n):\n item = funclist[k]\n if not callable(item):\n y[condlist[k]] = item\n else:\n vals = x[condlist[k]]\n if vals.size > 0:\n y[condlist[k]] = item(vals, *args, **kw)\n if zerod:\n y = y.squeeze()\n return y\n"
] |
# Copyright (C) 2012 Eric J. Suh
#
# This file is subject to the terms and conditions defined in file
# 'LICENSE.txt', which is part of this source code package.
'''Dirichlet.py
Maximum likelihood estimation and likelihood ratio tests of Dirichlet
distribution models of data.
Most of this package is a port of Thomas P. Minka's wonderful Fastfit MATLAB
code. Much thanks to him for that and his clear paper "Estimating a Dirichlet
distribution". See the following URL for more information:
http://research.microsoft.com/en-us/um/people/minka/'''
import sys
import scipy as sp
import scipy.stats as stats
from scipy.special import (psi, polygamma, gammaln)
from numpy import (array, asanyarray, ones, arange, log, diag, vstack, exp,
asarray, ndarray, zeros, isscalar)
from numpy.linalg import norm
import numpy as np
from . import simplex
try:
# python 2
MAXINT = sys.maxint
except AttributeError:
# python 3
MAXINT = sys.maxsize
try:
# python 2
xrange
except NameError:
# python 3
xrange = range
__all__ = [
'pdf',
'test',
'mle',
'meanprecision',
'loglikelihood',
]
euler = -1*psi(1) # Euler-Mascheroni constant
def test(D1, D2, method='meanprecision', maxiter=None):
'''Test for statistical difference between observed proportions.
Parameters
----------
D1 : array
D2 : array
Both ``D1`` and ``D2`` must have the same number of columns, which are
the different levels or categorical possibilities. Each row of the
matrices must add up to 1.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
D : float
Test statistic, which is ``-2 * log`` of likelihood ratios.
p : float
p-value of test.
a0 : array
a1 : array
a2 : array
MLE parameters for the Dirichlet distributions fit to
``D1`` and ``D2`` together, ``D1``, and ``D2``, respectively.'''
N1, K1 = D1.shape
N2, K2 = D2.shape
if K1 != K2:
raise Exception("D1 and D2 must have the same number of columns")
D0 = vstack((D1, D2))
a0 = mle(D0, method=method, maxiter=maxiter)
a1 = mle(D1, method=method, maxiter=maxiter)
a2 = mle(D2, method=method, maxiter=maxiter)
D = 2 * (loglikelihood(D1, a1) + loglikelihood(D2, a2)
- loglikelihood(D0, a0))
return (D, stats.chi2.sf(D, K1), a0, a1, a2)
def pdf(alphas):
'''Returns a Dirichlet PDF function'''
alphap = alphas - 1
c = np.exp(gammaln(alphas.sum()) - gammaln(alphas).sum())
def dirichlet(xs):
'''N x K array'''
return c * (xs**alphap).prod(axis=1)
return dirichlet
def meanprecision(a):
'''Mean and precision of Dirichlet distribution.
Parameters
----------
a : array
Parameters of Dirichlet distribution.
Returns
-------
mean : array
Numbers [0,1] of the means of the Dirichlet distribution.
precision : float
Precision or concentration parameter of the Dirichlet distribution.'''
s = a.sum()
m = a / s
return (m,s)
def loglikelihood(D, a):
'''Compute log likelihood of Dirichlet distribution, i.e. log p(D|a).
Parameters
----------
D : 2D array
where ``N`` is the number of observations, ``K`` is the number of
parameters for the Dirichlet distribution.
a : array
Parameters for the Dirichlet distribution.
Returns
-------
logl : float
The log likelihood of the Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
return N*(gammaln(a.sum()) - gammaln(a).sum() + ((a - 1)*logp).sum())
def mle(D, tol=1e-7, method='meanprecision', maxiter=None):
'''Iteratively computes maximum likelihood Dirichlet distribution
for an observed data set, i.e. a for which log p(D|a) is maximum.
Parameters
----------
D : 2D array
``N x K`` array of numbers from [0,1] where ``N`` is the number of
observations, ``K`` is the number of parameters for the Dirichlet
distribution.
tol : float
If Euclidean distance between successive parameter arrays is less than
``tol``, calculation is taken to have converged.
method : string
One of ``'fixedpoint'`` and ``'meanprecision'``, designates method by
which to find MLE Dirichlet distribution. Default is
``'meanprecision'``, which is faster.
maxiter : int
Maximum number of iterations to take calculations. Default is
``sys.maxint``.
Returns
-------
a : array
Maximum likelihood parameters for Dirichlet distribution.'''
if method == 'meanprecision':
return _meanprecision(D, tol=tol, maxiter=maxiter)
else:
return _fixedpoint(D, tol=tol, maxiter=maxiter)
def _fixedpoint(D, tol=1e-7, maxiter=None):
'''Simple fixed point iteration method for MLE of Dirichlet distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _meanprecision(D, tol=1e-7, maxiter=None):
'''Mean and precision alternating method for MLE of Dirichlet
distribution'''
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
s0 = a0.sum()
if s0 < 0:
a0 = a0/s0
s0 = 1
elif s0 == 0:
a0 = ones(a.shape) / len(a)
s0 = 1
m0 = a0/s0
# Start updating
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _fit_s(D, a0, logp, tol=tol)
s1 = sum(a1)
a1 = _fit_m(D, a1, logp, tol=tol)
m = a1/s1
# if norm(a1-a0) < tol:
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol: # much faster
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, values are {}.'
.format(maxiter, a1))
def _fit_s(D, a0, logp, tol=1e-7, maxiter=1000):
'''Assuming a fixed mean for Dirichlet distribution, maximize likelihood
for preicision a.k.a. s'''
N, K = D.shape
s1 = a0.sum()
m = a0 / s1
mlogp = (m*logp).sum()
for i in xrange(maxiter):
s0 = s1
g = psi(s1) - (m*psi(s1*m)).sum() + mlogp
h = _trigamma(s1) - ((m**2)*_trigamma(s1*m)).sum()
if g + s1 * h < 0:
s1 = 1/(1/s0 + g/h/(s0**2))
if s1 <= 0:
s1 = s0 * exp(-g/(s0*h + g)) # Newton on log s
if s1 <= 0:
s1 = 1/(1/s0 + g/((s0**2)*h + 2*s0*g)) # Newton on 1/s
if s1 <= 0:
s1 = s0 - g/h # Newton
if s1 <= 0:
raise Exception('Unable to update s from {}'.format(s0))
a = s1 * m
if abs(s1 - s0) < tol:
return a
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s1))
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
'''With fixed precision s, maximize mean m'''
N,K = D.shape
s = a0.sum()
for i in xrange(maxiter):
m = a0 / s
a1 = _ipsi(logp + (m*(psi(a0) - logp)).sum())
a1 = a1/a1.sum() * s
if norm(a1 - a0) < tol:
return a1
a0 = a1
raise Exception('Failed to converge after {} iterations, s is {}'
.format(maxiter, s))
def _piecewise(x, condlist, funclist, *args, **kw):
'''Fixed version of numpy.piecewise for 0-d arrays'''
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def _init_a(D):
'''Initial guess for Dirichlet alpha parameters given data D'''
E = D.mean(axis=0)
E2 = (D**2).mean(axis=0)
return ((E[0] - E2[0])/(E2[0]-E[0]**2)) * E
def _trigamma(x):
return polygamma(1, x)
|
ericsuh/dirichlet
|
dirichlet/simplex.py
|
cartesian
|
python
|
def cartesian(points):
'''Converts array of barycentric coordinates on a 2-simplex to an array of
Cartesian coordinates on a 2D triangle in the first quadrant, i.e.::
>>> cartesian((1,0,0))
array([0, 0])
>>> cartesian((0,1,0))
array([0, 1])
>>> cartesian((0,0,1))
array([0.5, 0.8660254037844386]) # == [0.5, sqrt(3)/2]
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:returns: Cartesian coordinate points.
:rtype: N x 2 ndarray.'''
points = np.asanyarray(points)
ndim = points.ndim # will use this to have similar output shape to input
if ndim == 1:
points = points.reshape((1,points.size))
d = points.sum(axis=1) # in case values aren't normalized
x = 0.5*(2*points[:,1] + points[:,2])/d
y = (np.sqrt(3.0)/2) * points[:,2]/d
out = np.vstack([x,y]).T
if ndim == 1:
return out.reshape((2,))
return out
|
Converts array of barycentric coordinates on a 2-simplex to an array of
Cartesian coordinates on a 2D triangle in the first quadrant, i.e.::
>>> cartesian((1,0,0))
array([0, 0])
>>> cartesian((0,1,0))
array([0, 1])
>>> cartesian((0,0,1))
array([0.5, 0.8660254037844386]) # == [0.5, sqrt(3)/2]
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:returns: Cartesian coordinate points.
:rtype: N x 2 ndarray.
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/simplex.py#L13-L38
| null |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
__all__ = [
'cartesian',
'barycentric',
'scatter',
'contour',
'contourf',
]
def barycentric(points):
'''Inverse of :func:`cartesian`.'''
points = np.asanyarray(points)
ndim = points.ndim
if ndim == 1:
points = points.reshape((1,points.size))
c = (2/np.sqrt(3.0))*points[:,1]
b = (2*points[:,0] - c)/2.0
a = 1.0 - c - b
out = np.vstack([a,b,c]).T
if ndim == 1:
return out.reshape((3,))
return out
def scatter(points, vertexlabels=None, **kwargs):
'''Scatter plot of barycentric 2-simplex points on a 2D triangle.
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.scatter`.
:type **kwargs: keyword arguments.'''
if vertexlabels is None:
vertexlabels = ('1','2','3')
projected = cartesian(points)
plt.scatter(projected[:,0], projected[:,1], **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def contour(f, vertexlabels=None, **kwargs):
'''Contour line plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
:param f: Function to evaluate on N x 3 ndarray of coordinates
:type f: ``ufunc``
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.tricontour`.
:type **kwargs: keyword arguments.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontour, **kwargs)
def contourf(f, vertexlabels=None, **kwargs):
'''Filled contour plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
Function signature is identical to :func:`contour` with the caveat that
``**kwargs`` are passed on to :func:`plt.tricontourf`.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontourf, **kwargs)
def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
'''Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.'''
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1','2','3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def _draw_axes(vertexlabels):
l1 = matplotlib.lines.Line2D([0,0.5,1.0,0],
[0, np.sqrt(3)/2, 0, 0],
color='k')
axes = plt.gca()
axes.add_line(l1)
axes.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.text(-0.05, -0.05, vertexlabels[0])
axes.text(1.05, -0.05, vertexlabels[1])
axes.text(0.5, np.sqrt(3) / 2 + 0.05, vertexlabels[2])
axes.set_xlim(-0.2, 1.2)
axes.set_ylim(-0.2, 1.2)
axes.set_aspect('equal')
return axes
|
ericsuh/dirichlet
|
dirichlet/simplex.py
|
barycentric
|
python
|
def barycentric(points):
'''Inverse of :func:`cartesian`.'''
points = np.asanyarray(points)
ndim = points.ndim
if ndim == 1:
points = points.reshape((1,points.size))
c = (2/np.sqrt(3.0))*points[:,1]
b = (2*points[:,0] - c)/2.0
a = 1.0 - c - b
out = np.vstack([a,b,c]).T
if ndim == 1:
return out.reshape((3,))
return out
|
Inverse of :func:`cartesian`.
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/simplex.py#L40-L52
| null |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
__all__ = [
'cartesian',
'barycentric',
'scatter',
'contour',
'contourf',
]
def cartesian(points):
'''Converts array of barycentric coordinates on a 2-simplex to an array of
Cartesian coordinates on a 2D triangle in the first quadrant, i.e.::
>>> cartesian((1,0,0))
array([0, 0])
>>> cartesian((0,1,0))
array([0, 1])
>>> cartesian((0,0,1))
array([0.5, 0.8660254037844386]) # == [0.5, sqrt(3)/2]
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:returns: Cartesian coordinate points.
:rtype: N x 2 ndarray.'''
points = np.asanyarray(points)
ndim = points.ndim # will use this to have similar output shape to input
if ndim == 1:
points = points.reshape((1,points.size))
d = points.sum(axis=1) # in case values aren't normalized
x = 0.5*(2*points[:,1] + points[:,2])/d
y = (np.sqrt(3.0)/2) * points[:,2]/d
out = np.vstack([x,y]).T
if ndim == 1:
return out.reshape((2,))
return out
def scatter(points, vertexlabels=None, **kwargs):
'''Scatter plot of barycentric 2-simplex points on a 2D triangle.
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.scatter`.
:type **kwargs: keyword arguments.'''
if vertexlabels is None:
vertexlabels = ('1','2','3')
projected = cartesian(points)
plt.scatter(projected[:,0], projected[:,1], **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def contour(f, vertexlabels=None, **kwargs):
'''Contour line plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
:param f: Function to evaluate on N x 3 ndarray of coordinates
:type f: ``ufunc``
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.tricontour`.
:type **kwargs: keyword arguments.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontour, **kwargs)
def contourf(f, vertexlabels=None, **kwargs):
'''Filled contour plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
Function signature is identical to :func:`contour` with the caveat that
``**kwargs`` are passed on to :func:`plt.tricontourf`.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontourf, **kwargs)
def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
'''Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.'''
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1','2','3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def _draw_axes(vertexlabels):
l1 = matplotlib.lines.Line2D([0,0.5,1.0,0],
[0, np.sqrt(3)/2, 0, 0],
color='k')
axes = plt.gca()
axes.add_line(l1)
axes.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.text(-0.05, -0.05, vertexlabels[0])
axes.text(1.05, -0.05, vertexlabels[1])
axes.text(0.5, np.sqrt(3) / 2 + 0.05, vertexlabels[2])
axes.set_xlim(-0.2, 1.2)
axes.set_ylim(-0.2, 1.2)
axes.set_aspect('equal')
return axes
|
ericsuh/dirichlet
|
dirichlet/simplex.py
|
scatter
|
python
|
def scatter(points, vertexlabels=None, **kwargs):
'''Scatter plot of barycentric 2-simplex points on a 2D triangle.
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.scatter`.
:type **kwargs: keyword arguments.'''
if vertexlabels is None:
vertexlabels = ('1','2','3')
projected = cartesian(points)
plt.scatter(projected[:,0], projected[:,1], **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
|
Scatter plot of barycentric 2-simplex points on a 2D triangle.
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.scatter`.
:type **kwargs: keyword arguments.
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/simplex.py#L54-L72
|
[
"def cartesian(points):\n '''Converts array of barycentric coordinates on a 2-simplex to an array of\n Cartesian coordinates on a 2D triangle in the first quadrant, i.e.::\n\n >>> cartesian((1,0,0))\n array([0, 0])\n >>> cartesian((0,1,0))\n array([0, 1])\n >>> cartesian((0,0,1))\n array([0.5, 0.8660254037844386]) # == [0.5, sqrt(3)/2]\n\n :param points: Points on a 2-simplex.\n :type points: N x 3 list or ndarray.\n :returns: Cartesian coordinate points.\n :rtype: N x 2 ndarray.'''\n points = np.asanyarray(points)\n ndim = points.ndim # will use this to have similar output shape to input\n if ndim == 1:\n points = points.reshape((1,points.size))\n d = points.sum(axis=1) # in case values aren't normalized\n x = 0.5*(2*points[:,1] + points[:,2])/d\n y = (np.sqrt(3.0)/2) * points[:,2]/d\n out = np.vstack([x,y]).T\n if ndim == 1:\n return out.reshape((2,))\n return out\n",
"def _draw_axes(vertexlabels):\n l1 = matplotlib.lines.Line2D([0,0.5,1.0,0],\n [0, np.sqrt(3)/2, 0, 0],\n color='k')\n axes = plt.gca()\n axes.add_line(l1)\n axes.xaxis.set_major_locator(matplotlib.ticker.NullLocator())\n axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())\n axes.text(-0.05, -0.05, vertexlabels[0])\n axes.text(1.05, -0.05, vertexlabels[1])\n axes.text(0.5, np.sqrt(3) / 2 + 0.05, vertexlabels[2])\n axes.set_xlim(-0.2, 1.2)\n axes.set_ylim(-0.2, 1.2)\n axes.set_aspect('equal')\n return axes\n"
] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
__all__ = [
'cartesian',
'barycentric',
'scatter',
'contour',
'contourf',
]
def cartesian(points):
'''Converts array of barycentric coordinates on a 2-simplex to an array of
Cartesian coordinates on a 2D triangle in the first quadrant, i.e.::
>>> cartesian((1,0,0))
array([0, 0])
>>> cartesian((0,1,0))
array([0, 1])
>>> cartesian((0,0,1))
array([0.5, 0.8660254037844386]) # == [0.5, sqrt(3)/2]
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:returns: Cartesian coordinate points.
:rtype: N x 2 ndarray.'''
points = np.asanyarray(points)
ndim = points.ndim # will use this to have similar output shape to input
if ndim == 1:
points = points.reshape((1,points.size))
d = points.sum(axis=1) # in case values aren't normalized
x = 0.5*(2*points[:,1] + points[:,2])/d
y = (np.sqrt(3.0)/2) * points[:,2]/d
out = np.vstack([x,y]).T
if ndim == 1:
return out.reshape((2,))
return out
def barycentric(points):
'''Inverse of :func:`cartesian`.'''
points = np.asanyarray(points)
ndim = points.ndim
if ndim == 1:
points = points.reshape((1,points.size))
c = (2/np.sqrt(3.0))*points[:,1]
b = (2*points[:,0] - c)/2.0
a = 1.0 - c - b
out = np.vstack([a,b,c]).T
if ndim == 1:
return out.reshape((3,))
return out
def contour(f, vertexlabels=None, **kwargs):
'''Contour line plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
:param f: Function to evaluate on N x 3 ndarray of coordinates
:type f: ``ufunc``
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.tricontour`.
:type **kwargs: keyword arguments.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontour, **kwargs)
def contourf(f, vertexlabels=None, **kwargs):
'''Filled contour plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
Function signature is identical to :func:`contour` with the caveat that
``**kwargs`` are passed on to :func:`plt.tricontourf`.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontourf, **kwargs)
def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
'''Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.'''
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1','2','3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def _draw_axes(vertexlabels):
l1 = matplotlib.lines.Line2D([0,0.5,1.0,0],
[0, np.sqrt(3)/2, 0, 0],
color='k')
axes = plt.gca()
axes.add_line(l1)
axes.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.text(-0.05, -0.05, vertexlabels[0])
axes.text(1.05, -0.05, vertexlabels[1])
axes.text(0.5, np.sqrt(3) / 2 + 0.05, vertexlabels[2])
axes.set_xlim(-0.2, 1.2)
axes.set_ylim(-0.2, 1.2)
axes.set_aspect('equal')
return axes
|
ericsuh/dirichlet
|
dirichlet/simplex.py
|
contour
|
python
|
def contour(f, vertexlabels=None, **kwargs):
'''Contour line plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
:param f: Function to evaluate on N x 3 ndarray of coordinates
:type f: ``ufunc``
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.tricontour`.
:type **kwargs: keyword arguments.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontour, **kwargs)
|
Contour line plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
:param f: Function to evaluate on N x 3 ndarray of coordinates
:type f: ``ufunc``
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.tricontour`.
:type **kwargs: keyword arguments.
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/simplex.py#L74-L86
|
[
"def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):\n '''Workhorse function for the above, where ``contourfunc`` is the contour\n plotting function to use for actual plotting.'''\n\n if contourfunc is None:\n contourfunc = plt.tricontour\n if vertexlabels is None:\n vertexlabels = ('1','2','3')\n x = np.linspace(0, 1, 100)\n y = np.linspace(0, np.sqrt(3.0)/2.0, 100)\n points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])\n points3d = barycentric(points2d)\n valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))\n points2d = points2d[np.where(valid),:][0]\n points3d = points3d[np.where(valid),:][0]\n z = f(points3d)\n contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)\n _draw_axes(vertexlabels)\n return plt.gcf()\n"
] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
__all__ = [
'cartesian',
'barycentric',
'scatter',
'contour',
'contourf',
]
def cartesian(points):
'''Converts array of barycentric coordinates on a 2-simplex to an array of
Cartesian coordinates on a 2D triangle in the first quadrant, i.e.::
>>> cartesian((1,0,0))
array([0, 0])
>>> cartesian((0,1,0))
array([0, 1])
>>> cartesian((0,0,1))
array([0.5, 0.8660254037844386]) # == [0.5, sqrt(3)/2]
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:returns: Cartesian coordinate points.
:rtype: N x 2 ndarray.'''
points = np.asanyarray(points)
ndim = points.ndim # will use this to have similar output shape to input
if ndim == 1:
points = points.reshape((1,points.size))
d = points.sum(axis=1) # in case values aren't normalized
x = 0.5*(2*points[:,1] + points[:,2])/d
y = (np.sqrt(3.0)/2) * points[:,2]/d
out = np.vstack([x,y]).T
if ndim == 1:
return out.reshape((2,))
return out
def barycentric(points):
'''Inverse of :func:`cartesian`.'''
points = np.asanyarray(points)
ndim = points.ndim
if ndim == 1:
points = points.reshape((1,points.size))
c = (2/np.sqrt(3.0))*points[:,1]
b = (2*points[:,0] - c)/2.0
a = 1.0 - c - b
out = np.vstack([a,b,c]).T
if ndim == 1:
return out.reshape((3,))
return out
def scatter(points, vertexlabels=None, **kwargs):
'''Scatter plot of barycentric 2-simplex points on a 2D triangle.
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.scatter`.
:type **kwargs: keyword arguments.'''
if vertexlabels is None:
vertexlabels = ('1','2','3')
projected = cartesian(points)
plt.scatter(projected[:,0], projected[:,1], **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def contourf(f, vertexlabels=None, **kwargs):
'''Filled contour plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
Function signature is identical to :func:`contour` with the caveat that
``**kwargs`` are passed on to :func:`plt.tricontourf`.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontourf, **kwargs)
def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
'''Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.'''
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1','2','3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def _draw_axes(vertexlabels):
l1 = matplotlib.lines.Line2D([0,0.5,1.0,0],
[0, np.sqrt(3)/2, 0, 0],
color='k')
axes = plt.gca()
axes.add_line(l1)
axes.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.text(-0.05, -0.05, vertexlabels[0])
axes.text(1.05, -0.05, vertexlabels[1])
axes.text(0.5, np.sqrt(3) / 2 + 0.05, vertexlabels[2])
axes.set_xlim(-0.2, 1.2)
axes.set_ylim(-0.2, 1.2)
axes.set_aspect('equal')
return axes
|
ericsuh/dirichlet
|
dirichlet/simplex.py
|
contourf
|
python
|
def contourf(f, vertexlabels=None, **kwargs):
'''Filled contour plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
Function signature is identical to :func:`contour` with the caveat that
``**kwargs`` are passed on to :func:`plt.tricontourf`.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontourf, **kwargs)
|
Filled contour plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
Function signature is identical to :func:`contour` with the caveat that
``**kwargs`` are passed on to :func:`plt.tricontourf`.
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/simplex.py#L88-L94
|
[
"def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):\n '''Workhorse function for the above, where ``contourfunc`` is the contour\n plotting function to use for actual plotting.'''\n\n if contourfunc is None:\n contourfunc = plt.tricontour\n if vertexlabels is None:\n vertexlabels = ('1','2','3')\n x = np.linspace(0, 1, 100)\n y = np.linspace(0, np.sqrt(3.0)/2.0, 100)\n points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])\n points3d = barycentric(points2d)\n valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))\n points2d = points2d[np.where(valid),:][0]\n points3d = points3d[np.where(valid),:][0]\n z = f(points3d)\n contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)\n _draw_axes(vertexlabels)\n return plt.gcf()\n"
] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
__all__ = [
'cartesian',
'barycentric',
'scatter',
'contour',
'contourf',
]
def cartesian(points):
'''Converts array of barycentric coordinates on a 2-simplex to an array of
Cartesian coordinates on a 2D triangle in the first quadrant, i.e.::
>>> cartesian((1,0,0))
array([0, 0])
>>> cartesian((0,1,0))
array([0, 1])
>>> cartesian((0,0,1))
array([0.5, 0.8660254037844386]) # == [0.5, sqrt(3)/2]
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:returns: Cartesian coordinate points.
:rtype: N x 2 ndarray.'''
points = np.asanyarray(points)
ndim = points.ndim # will use this to have similar output shape to input
if ndim == 1:
points = points.reshape((1,points.size))
d = points.sum(axis=1) # in case values aren't normalized
x = 0.5*(2*points[:,1] + points[:,2])/d
y = (np.sqrt(3.0)/2) * points[:,2]/d
out = np.vstack([x,y]).T
if ndim == 1:
return out.reshape((2,))
return out
def barycentric(points):
'''Inverse of :func:`cartesian`.'''
points = np.asanyarray(points)
ndim = points.ndim
if ndim == 1:
points = points.reshape((1,points.size))
c = (2/np.sqrt(3.0))*points[:,1]
b = (2*points[:,0] - c)/2.0
a = 1.0 - c - b
out = np.vstack([a,b,c]).T
if ndim == 1:
return out.reshape((3,))
return out
def scatter(points, vertexlabels=None, **kwargs):
'''Scatter plot of barycentric 2-simplex points on a 2D triangle.
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.scatter`.
:type **kwargs: keyword arguments.'''
if vertexlabels is None:
vertexlabels = ('1','2','3')
projected = cartesian(points)
plt.scatter(projected[:,0], projected[:,1], **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def contour(f, vertexlabels=None, **kwargs):
'''Contour line plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
:param f: Function to evaluate on N x 3 ndarray of coordinates
:type f: ``ufunc``
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.tricontour`.
:type **kwargs: keyword arguments.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontour, **kwargs)
def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
'''Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.'''
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1','2','3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def _draw_axes(vertexlabels):
l1 = matplotlib.lines.Line2D([0,0.5,1.0,0],
[0, np.sqrt(3)/2, 0, 0],
color='k')
axes = plt.gca()
axes.add_line(l1)
axes.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.text(-0.05, -0.05, vertexlabels[0])
axes.text(1.05, -0.05, vertexlabels[1])
axes.text(0.5, np.sqrt(3) / 2 + 0.05, vertexlabels[2])
axes.set_xlim(-0.2, 1.2)
axes.set_ylim(-0.2, 1.2)
axes.set_aspect('equal')
return axes
|
ericsuh/dirichlet
|
dirichlet/simplex.py
|
_contour
|
python
|
def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
'''Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.'''
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1','2','3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
|
Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.
|
train
|
https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/simplex.py#L96-L114
|
[
"def barycentric(points):\n '''Inverse of :func:`cartesian`.'''\n points = np.asanyarray(points)\n ndim = points.ndim\n if ndim == 1:\n points = points.reshape((1,points.size))\n c = (2/np.sqrt(3.0))*points[:,1]\n b = (2*points[:,0] - c)/2.0\n a = 1.0 - c - b\n out = np.vstack([a,b,c]).T\n if ndim == 1:\n return out.reshape((3,))\n return out\n",
"def _draw_axes(vertexlabels):\n l1 = matplotlib.lines.Line2D([0,0.5,1.0,0],\n [0, np.sqrt(3)/2, 0, 0],\n color='k')\n axes = plt.gca()\n axes.add_line(l1)\n axes.xaxis.set_major_locator(matplotlib.ticker.NullLocator())\n axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())\n axes.text(-0.05, -0.05, vertexlabels[0])\n axes.text(1.05, -0.05, vertexlabels[1])\n axes.text(0.5, np.sqrt(3) / 2 + 0.05, vertexlabels[2])\n axes.set_xlim(-0.2, 1.2)\n axes.set_ylim(-0.2, 1.2)\n axes.set_aspect('equal')\n return axes\n"
] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
__all__ = [
'cartesian',
'barycentric',
'scatter',
'contour',
'contourf',
]
def cartesian(points):
'''Converts array of barycentric coordinates on a 2-simplex to an array of
Cartesian coordinates on a 2D triangle in the first quadrant, i.e.::
>>> cartesian((1,0,0))
array([0, 0])
>>> cartesian((0,1,0))
array([0, 1])
>>> cartesian((0,0,1))
array([0.5, 0.8660254037844386]) # == [0.5, sqrt(3)/2]
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:returns: Cartesian coordinate points.
:rtype: N x 2 ndarray.'''
points = np.asanyarray(points)
ndim = points.ndim # will use this to have similar output shape to input
if ndim == 1:
points = points.reshape((1,points.size))
d = points.sum(axis=1) # in case values aren't normalized
x = 0.5*(2*points[:,1] + points[:,2])/d
y = (np.sqrt(3.0)/2) * points[:,2]/d
out = np.vstack([x,y]).T
if ndim == 1:
return out.reshape((2,))
return out
def barycentric(points):
'''Inverse of :func:`cartesian`.'''
points = np.asanyarray(points)
ndim = points.ndim
if ndim == 1:
points = points.reshape((1,points.size))
c = (2/np.sqrt(3.0))*points[:,1]
b = (2*points[:,0] - c)/2.0
a = 1.0 - c - b
out = np.vstack([a,b,c]).T
if ndim == 1:
return out.reshape((3,))
return out
def scatter(points, vertexlabels=None, **kwargs):
'''Scatter plot of barycentric 2-simplex points on a 2D triangle.
:param points: Points on a 2-simplex.
:type points: N x 3 list or ndarray.
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.scatter`.
:type **kwargs: keyword arguments.'''
if vertexlabels is None:
vertexlabels = ('1','2','3')
projected = cartesian(points)
plt.scatter(projected[:,0], projected[:,1], **kwargs)
_draw_axes(vertexlabels)
return plt.gcf()
def contour(f, vertexlabels=None, **kwargs):
'''Contour line plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
:param f: Function to evaluate on N x 3 ndarray of coordinates
:type f: ``ufunc``
:param vertexlabels: Labels for corners of plot in the order
``(a, b, c)`` where ``a == (1,0,0)``, ``b == (0,1,0)``,
``c == (0,0,1)``.
:type vertexlabels: 3-tuple of strings.
:param **kwargs: Arguments to :func:`plt.tricontour`.
:type **kwargs: keyword arguments.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontour, **kwargs)
def contourf(f, vertexlabels=None, **kwargs):
'''Filled contour plot on a 2D triangle of a function evaluated at
barycentric 2-simplex points.
Function signature is identical to :func:`contour` with the caveat that
``**kwargs`` are passed on to :func:`plt.tricontourf`.'''
return _contour(f, vertexlabels, contourfunc=plt.tricontourf, **kwargs)
def _draw_axes(vertexlabels):
l1 = matplotlib.lines.Line2D([0,0.5,1.0,0],
[0, np.sqrt(3)/2, 0, 0],
color='k')
axes = plt.gca()
axes.add_line(l1)
axes.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
axes.text(-0.05, -0.05, vertexlabels[0])
axes.text(1.05, -0.05, vertexlabels[1])
axes.text(0.5, np.sqrt(3) / 2 + 0.05, vertexlabels[2])
axes.set_xlim(-0.2, 1.2)
axes.set_ylim(-0.2, 1.2)
axes.set_aspect('equal')
return axes
|
cognitect/transit-python
|
transit/reader.py
|
Reader.register
|
python
|
def register(self, key_or_tag, f_val):
self.reader.decoder.register(key_or_tag, f_val)
|
Register a custom transit tag and decoder/parser function for use
during reads.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/reader.py#L45-L49
| null |
class Reader(object):
"""The top-level object for reading in Transit data and converting it to
Python objects. During initialization, you must specify the protocol used
for unmarshalling the data- json or msgpack.
"""
def __init__(self, protocol="json"):
if protocol in ("json", "json_verbose"):
self.reader = JsonUnmarshaler()
elif protocol == "msgpack":
self.reader = MsgPackUnmarshaler()
self.unpacker = self.reader.unpacker
else:
raise ValueError("'" + protocol + "' is not a supported. " +
"Protocol must be:" +
"'json', 'json_verbose', or 'msgpack'.")
def read(self, stream):
"""Given a readable file descriptor object (something `load`able by
msgpack or json), read the data, and return the Python representation
of the contents. One-shot reader.
"""
return self.reader.load(stream)
def readeach(self, stream, **kwargs):
"""Temporary hook for API while streaming reads are in experimental
phase. Read each object from stream as available with generator.
JSON blocks indefinitely waiting on JSON entities to arrive. MsgPack
requires unpacker property to be fed stream using unpacker.feed()
method.
"""
for o in self.reader.loadeach(stream):
yield o
|
cognitect/transit-python
|
transit/reader.py
|
Reader.readeach
|
python
|
def readeach(self, stream, **kwargs):
for o in self.reader.loadeach(stream):
yield o
|
Temporary hook for API while streaming reads are in experimental
phase. Read each object from stream as available with generator.
JSON blocks indefinitely waiting on JSON entities to arrive. MsgPack
requires unpacker property to be fed stream using unpacker.feed()
method.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/reader.py#L51-L59
|
[
"def loadeach(self, stream):\n for o in sosjson.items(stream, object_pairs_hook=OrderedDict):\n yield self.decoder.decode(o)\n"
] |
class Reader(object):
"""The top-level object for reading in Transit data and converting it to
Python objects. During initialization, you must specify the protocol used
for unmarshalling the data- json or msgpack.
"""
def __init__(self, protocol="json"):
if protocol in ("json", "json_verbose"):
self.reader = JsonUnmarshaler()
elif protocol == "msgpack":
self.reader = MsgPackUnmarshaler()
self.unpacker = self.reader.unpacker
else:
raise ValueError("'" + protocol + "' is not a supported. " +
"Protocol must be:" +
"'json', 'json_verbose', or 'msgpack'.")
def read(self, stream):
"""Given a readable file descriptor object (something `load`able by
msgpack or json), read the data, and return the Python representation
of the contents. One-shot reader.
"""
return self.reader.load(stream)
def register(self, key_or_tag, f_val):
"""Register a custom transit tag and decoder/parser function for use
during reads.
"""
self.reader.decoder.register(key_or_tag, f_val)
|
cognitect/transit-python
|
transit/rolling_cache.py
|
RollingCache.decode
|
python
|
def decode(self, name, as_map_key=False):
if is_cache_key(name) and (name in self.key_to_value):
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name
|
Always returns the name
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/rolling_cache.py#L61-L65
|
[
"def is_cache_key(name):\n return len(name) and (name[0] == SUB and name != MAP_AS_ARR)\n"
] |
class RollingCache(object):
"""This is the internal cache used by python-transit for cacheing and
expanding map keys during writing and reading. The cache enables transit
to minimize the amount of duplicate data sent over the wire, effectively
compressing down the overall payload size. The cache is not intended to
be used directly.
"""
def __init__(self):
self.key_to_value = {}
self.value_to_key = {}
# if index rolls over... (bug)
def encode(self, name, as_map_key=False):
"""Returns the name the first time and the key after that"""
if name in self.key_to_value:
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name
def size(self):
return len(self.key_to_value)
def is_cache_full(self):
return len(self.key_to_value) > CACHE_SIZE
def encache(self, name):
if self.is_cache_full():
self.clear()
elif name in self.value_to_key:
return self.value_to_key[name]
key = encode_key(len(self.key_to_value))
self.key_to_value[key] = name
self.value_to_key[name] = key
return name
def clear(self):
self.value_to_key = {}
|
cognitect/transit-python
|
transit/rolling_cache.py
|
RollingCache.encode
|
python
|
def encode(self, name, as_map_key=False):
if name in self.key_to_value:
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name
|
Returns the name the first time and the key after that
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/rolling_cache.py#L67-L71
|
[
"def is_cacheable(string, as_map_key=False):\n return string and len(string) >= MIN_SIZE_CACHEABLE \\\n and (as_map_key \\\n or (string[:2] in [\"~#\", \"~$\", \"~:\"]))\n"
] |
class RollingCache(object):
"""This is the internal cache used by python-transit for cacheing and
expanding map keys during writing and reading. The cache enables transit
to minimize the amount of duplicate data sent over the wire, effectively
compressing down the overall payload size. The cache is not intended to
be used directly.
"""
def __init__(self):
self.key_to_value = {}
self.value_to_key = {}
# if index rolls over... (bug)
def decode(self, name, as_map_key=False):
"""Always returns the name"""
if is_cache_key(name) and (name in self.key_to_value):
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name
def size(self):
return len(self.key_to_value)
def is_cache_full(self):
return len(self.key_to_value) > CACHE_SIZE
def encache(self, name):
if self.is_cache_full():
self.clear()
elif name in self.value_to_key:
return self.value_to_key[name]
key = encode_key(len(self.key_to_value))
self.key_to_value[key] = name
self.value_to_key[name] = key
return name
def clear(self):
self.value_to_key = {}
|
cognitect/transit-python
|
transit/sosjson.py
|
read_chunk
|
python
|
def read_chunk(stream):
chunk = stream.read(1)
while chunk in SKIP:
chunk = stream.read(1)
if chunk == "\"":
chunk += stream.read(1)
while not chunk.endswith("\""):
if chunk[-1] == ESCAPE:
chunk += stream.read(2)
else:
chunk += stream.read(1)
return chunk
|
Ignore whitespace outside of strings. If we hit a string, read it in
its entirety.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/sosjson.py#L25-L39
| null |
## copyright 2014 cognitect. all rights reserved.
##
## licensed under the apache license, version 2.0 (the "license");
## you may not use this file except in compliance with the license.
## you may obtain a copy of the license at
##
## http://www.apache.org/licenses/license-2.0
##
## unless required by applicable law or agreed to in writing, software
## distributed under the license is distributed on an "as-is" basis,
## without warranties or conditions of any kind, either express or implied.
## see the license for the specific language governing permissions and
## limitations under the license.
# Simple object streaming in Python - just reads one complete JSON object
# at a time and returns json.loads of that string.
# Ugly implementation at moment
from copy import copy
import json
SKIP = [" ", "\n", "\t"]
ESCAPE = "\\"
def items(stream, **kwargs):
"""External facing items. Will return item from stream as available.
Currently waits in loop waiting for next item. Can pass keywords that
json.loads accepts (such as object_pairs_hook)
"""
for s in yield_json(stream):
yield json.loads(s, **kwargs)
def yield_json(stream):
"""Uses array and object delimiter counts for balancing.
"""
buff = u""
arr_count = 0
obj_count = 0
while True:
buff += read_chunk(stream)
# If we finish parsing all objs or arrays, yield a finished JSON
# entity.
if buff.endswith('{'):
obj_count += 1
if buff.endswith('['):
arr_count += 1
if buff.endswith(']'):
arr_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item
if buff.endswith('}'):
obj_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item
|
cognitect/transit-python
|
transit/sosjson.py
|
items
|
python
|
def items(stream, **kwargs):
for s in yield_json(stream):
yield json.loads(s, **kwargs)
|
External facing items. Will return item from stream as available.
Currently waits in loop waiting for next item. Can pass keywords that
json.loads accepts (such as object_pairs_hook)
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/sosjson.py#L42-L48
|
[
"def yield_json(stream):\n \"\"\"Uses array and object delimiter counts for balancing.\n \"\"\"\n buff = u\"\"\n arr_count = 0\n obj_count = 0\n while True:\n buff += read_chunk(stream)\n\n # If we finish parsing all objs or arrays, yield a finished JSON\n # entity.\n if buff.endswith('{'):\n obj_count += 1\n if buff.endswith('['):\n arr_count += 1\n if buff.endswith(']'):\n arr_count -= 1\n if obj_count == arr_count == 0:\n json_item = copy(buff)\n buff = u\"\"\n yield json_item\n if buff.endswith('}'):\n obj_count -= 1\n if obj_count == arr_count == 0:\n json_item = copy(buff)\n buff = u\"\"\n yield json_item\n"
] |
## copyright 2014 cognitect. all rights reserved.
##
## licensed under the apache license, version 2.0 (the "license");
## you may not use this file except in compliance with the license.
## you may obtain a copy of the license at
##
## http://www.apache.org/licenses/license-2.0
##
## unless required by applicable law or agreed to in writing, software
## distributed under the license is distributed on an "as-is" basis,
## without warranties or conditions of any kind, either express or implied.
## see the license for the specific language governing permissions and
## limitations under the license.
# Simple object streaming in Python - just reads one complete JSON object
# at a time and returns json.loads of that string.
# Ugly implementation at moment
from copy import copy
import json
SKIP = [" ", "\n", "\t"]
ESCAPE = "\\"
def read_chunk(stream):
"""Ignore whitespace outside of strings. If we hit a string, read it in
its entirety.
"""
chunk = stream.read(1)
while chunk in SKIP:
chunk = stream.read(1)
if chunk == "\"":
chunk += stream.read(1)
while not chunk.endswith("\""):
if chunk[-1] == ESCAPE:
chunk += stream.read(2)
else:
chunk += stream.read(1)
return chunk
def yield_json(stream):
"""Uses array and object delimiter counts for balancing.
"""
buff = u""
arr_count = 0
obj_count = 0
while True:
buff += read_chunk(stream)
# If we finish parsing all objs or arrays, yield a finished JSON
# entity.
if buff.endswith('{'):
obj_count += 1
if buff.endswith('['):
arr_count += 1
if buff.endswith(']'):
arr_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item
if buff.endswith('}'):
obj_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item
|
cognitect/transit-python
|
transit/sosjson.py
|
yield_json
|
python
|
def yield_json(stream):
buff = u""
arr_count = 0
obj_count = 0
while True:
buff += read_chunk(stream)
# If we finish parsing all objs or arrays, yield a finished JSON
# entity.
if buff.endswith('{'):
obj_count += 1
if buff.endswith('['):
arr_count += 1
if buff.endswith(']'):
arr_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item
if buff.endswith('}'):
obj_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item
|
Uses array and object delimiter counts for balancing.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/sosjson.py#L51-L77
|
[
"def read_chunk(stream):\n \"\"\"Ignore whitespace outside of strings. If we hit a string, read it in\n its entirety.\n \"\"\"\n chunk = stream.read(1)\n while chunk in SKIP:\n chunk = stream.read(1)\n if chunk == \"\\\"\":\n chunk += stream.read(1)\n while not chunk.endswith(\"\\\"\"):\n if chunk[-1] == ESCAPE:\n chunk += stream.read(2)\n else:\n chunk += stream.read(1)\n return chunk\n"
] |
## copyright 2014 cognitect. all rights reserved.
##
## licensed under the apache license, version 2.0 (the "license");
## you may not use this file except in compliance with the license.
## you may obtain a copy of the license at
##
## http://www.apache.org/licenses/license-2.0
##
## unless required by applicable law or agreed to in writing, software
## distributed under the license is distributed on an "as-is" basis,
## without warranties or conditions of any kind, either express or implied.
## see the license for the specific language governing permissions and
## limitations under the license.
# Simple object streaming in Python - just reads one complete JSON object
# at a time and returns json.loads of that string.
# Ugly implementation at moment
from copy import copy
import json
SKIP = [" ", "\n", "\t"]
ESCAPE = "\\"
def read_chunk(stream):
"""Ignore whitespace outside of strings. If we hit a string, read it in
its entirety.
"""
chunk = stream.read(1)
while chunk in SKIP:
chunk = stream.read(1)
if chunk == "\"":
chunk += stream.read(1)
while not chunk.endswith("\""):
if chunk[-1] == ESCAPE:
chunk += stream.read(2)
else:
chunk += stream.read(1)
return chunk
def items(stream, **kwargs):
"""External facing items. Will return item from stream as available.
Currently waits in loop waiting for next item. Can pass keywords that
json.loads accepts (such as object_pairs_hook)
"""
for s in yield_json(stream):
yield json.loads(s, **kwargs)
|
cognitect/transit-python
|
transit/writer.py
|
Marshaler.are_stringable_keys
|
python
|
def are_stringable_keys(self, m):
for x in m.keys():
if len(self.handlers[x].tag(x)) != 1:
return False
return True
|
Test whether the keys within a map are stringable - a simple map,
that can be optimized and whose keys can be cached
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/writer.py#L112-L119
| null |
class Marshaler(object):
"""The base Marshaler from which all Marshalers inherit.
The Marshaler specifies how to emit Transit data given encodeable Python
objects. The end of this process is specialized by other Marshalers to
covert the final result into an on-the-wire payload (JSON or MsgPack).
"""
def __init__(self, opts={}):
self.opts = opts
self._init_handlers()
def _init_handlers(self):
self.handlers = WriteHandler()
def emit_nil(self, _, as_map_key, cache):
return self.emit_string(ESC, "_", None, True, cache) if as_map_key else self.emit_object(None)
def emit_string(self, prefix, tag, string, as_map_key, cache):
encoded = cache.encode(str(prefix)+tag+string, as_map_key)
# TODO: Remove this optimization for the time being - it breaks cache
#if "cache_enabled" in self.opts and is_cacheable(encoded, as_map_key):
# return self.emit_object(cache.value_to_key[encoded], as_map_key)
return self.emit_object(encoded, as_map_key)
def emit_boolean(self, b, as_map_key, cache):
return self.emit_string(ESC, "?", b, True, cache) if as_map_key else self.emit_object(b)
def emit_int(self, tag, i, rep, as_map_key, cache):
if isinstance(rep, int):
if i <= self.opts["max_int"] and i >= self.opts["min_int"]:
return self.emit_object(i, as_map_key)
else:
return self.emit_string(ESC, tag, str(rep), as_map_key, cache)
else:
return self.emit_string(ESC, tag, rep, as_map_key, cache)
def emit_double(self, d, as_map_key, cache):
return self.emit_string(ESC, "d", d, True, cache) if as_map_key else self.emit_object(d)
def emit_array(self, a, _, cache):
self.emit_array_start(len(a))
for x in a:
self.marshal(x, False, cache)
self.emit_array_end()
def emit_map(self, m, _, cache):# use map as object from above, have to overwrite default parser.
self.emit_map_start(len(m))
for k, v in m.items():
self.marshal(k, True, cache)
self.marshal(v, False, cache)
self.emit_map_end()
def emit_cmap(self, m, _, cache):
self.emit_map_start(1)
self.emit_string(ESC, "#", "cmap", True, cache)
self.marshal(flatten_map(m), False, cache)
self.emit_map_end()
def emit_tagged(self, tag, rep, cache):
self.emit_array_start(2)
self.emit_string(ESC, "#", tag, False, cache)
self.marshal(rep, False, cache)
self.emit_array_end()
def emit_encoded(self, tag, handler, obj, as_map_key, cache):
rep = handler.rep(obj)
if len(tag) == 1:
if isinstance(rep, pyversion.string_types):
self.emit_string(ESC, tag, rep, as_map_key, cache)
elif as_map_key or self.opts["prefer_strings"]:
rep = handler.string_rep(obj)
if isinstance(rep, pyversion.string_types):
self.emit_string(ESC, tag, rep, as_map_key, cache)
else:
raise AssertionError("Cannot be encoded as string: " + str({"tag": tag,
"rep": rep,
"obj": obj}))
else:
self.emit_tagged(tag, rep, cache)
elif as_map_key:
raise AssertionError("Cannot be used as a map key: " + str({"tag": tag,
"rep": rep,
"obj": obj}))
else:
self.emit_tagged(tag, rep, cache)
def marshal(self, obj, as_map_key, cache):
"""Marshal an individual obj, potentially as part of another container
object (like a list/dictionary/etc). Specify if this object is a key
to a map/dict, and pass in the current cache being used.
This method should only be called by a top-level marshalling call
and should not be considered an entry-point for integration.
"""
handler = self.handlers[obj]
tag = handler.tag(obj)
f = marshal_dispatch.get(tag)
if f:
f(self, obj, handler.string_rep(obj) if as_map_key else handler.rep(obj), as_map_key, cache)
else:
self.emit_encoded(tag, handler, obj, as_map_key, cache)
def marshal_top(self, obj, cache=None):
"""Given a complete object that needs to be marshaled into Transit
data, and optionally a cache, dispatch accordingly, and flush the data
directly into the IO stream.
"""
if not cache:
cache = RollingCache()
handler = self.handlers[obj]
tag = handler.tag(obj)
if tag:
if len(tag) == 1:
self.marshal(TaggedValue(QUOTE, obj), False, cache)
else:
self.marshal(obj, False, cache)
self.flush()
else:
raise AssertionError("Handler must provide a non-nil tag: " + str(handler))
def dispatch_map(self, rep, as_map_key, cache):
"""Used to determine and dipatch the writing of a map - a simple
map with strings as keys, or a complex map, whose keys are also
compound types.
"""
if self.are_stringable_keys(rep):
return self.emit_map(rep, as_map_key, cache)
return self.emit_cmap(rep, as_map_key, cache)
def register(self, obj_type, handler_class):
"""Register custom converters for object types present in your
application. This allows you to extend Transit to encode new types.
You must specify the obj type to be encoded, and the handler class
that should be used by this marshaller.
"""
self.handlers[obj_type] = handler_class
|
cognitect/transit-python
|
transit/writer.py
|
Marshaler.marshal
|
python
|
def marshal(self, obj, as_map_key, cache):
handler = self.handlers[obj]
tag = handler.tag(obj)
f = marshal_dispatch.get(tag)
if f:
f(self, obj, handler.string_rep(obj) if as_map_key else handler.rep(obj), as_map_key, cache)
else:
self.emit_encoded(tag, handler, obj, as_map_key, cache)
|
Marshal an individual obj, potentially as part of another container
object (like a list/dictionary/etc). Specify if this object is a key
to a map/dict, and pass in the current cache being used.
This method should only be called by a top-level marshalling call
and should not be considered an entry-point for integration.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/writer.py#L193-L207
|
[
"def emit_encoded(self, tag, handler, obj, as_map_key, cache):\n rep = handler.rep(obj)\n if len(tag) == 1:\n if isinstance(rep, pyversion.string_types):\n self.emit_string(ESC, tag, rep, as_map_key, cache)\n elif as_map_key or self.opts[\"prefer_strings\"]:\n rep = handler.string_rep(obj)\n if isinstance(rep, pyversion.string_types):\n self.emit_string(ESC, tag, rep, as_map_key, cache)\n else:\n raise AssertionError(\"Cannot be encoded as string: \" + str({\"tag\": tag,\n \"rep\": rep,\n \"obj\": obj}))\n else:\n self.emit_tagged(tag, rep, cache)\n elif as_map_key:\n raise AssertionError(\"Cannot be used as a map key: \" + str({\"tag\": tag,\n \"rep\": rep,\n \"obj\": obj}))\n else:\n self.emit_tagged(tag, rep, cache)\n"
] |
class Marshaler(object):
"""The base Marshaler from which all Marshalers inherit.
The Marshaler specifies how to emit Transit data given encodeable Python
objects. The end of this process is specialized by other Marshalers to
covert the final result into an on-the-wire payload (JSON or MsgPack).
"""
def __init__(self, opts={}):
self.opts = opts
self._init_handlers()
def _init_handlers(self):
self.handlers = WriteHandler()
def are_stringable_keys(self, m):
"""Test whether the keys within a map are stringable - a simple map,
that can be optimized and whose keys can be cached
"""
for x in m.keys():
if len(self.handlers[x].tag(x)) != 1:
return False
return True
def emit_nil(self, _, as_map_key, cache):
return self.emit_string(ESC, "_", None, True, cache) if as_map_key else self.emit_object(None)
def emit_string(self, prefix, tag, string, as_map_key, cache):
encoded = cache.encode(str(prefix)+tag+string, as_map_key)
# TODO: Remove this optimization for the time being - it breaks cache
#if "cache_enabled" in self.opts and is_cacheable(encoded, as_map_key):
# return self.emit_object(cache.value_to_key[encoded], as_map_key)
return self.emit_object(encoded, as_map_key)
def emit_boolean(self, b, as_map_key, cache):
return self.emit_string(ESC, "?", b, True, cache) if as_map_key else self.emit_object(b)
def emit_int(self, tag, i, rep, as_map_key, cache):
if isinstance(rep, int):
if i <= self.opts["max_int"] and i >= self.opts["min_int"]:
return self.emit_object(i, as_map_key)
else:
return self.emit_string(ESC, tag, str(rep), as_map_key, cache)
else:
return self.emit_string(ESC, tag, rep, as_map_key, cache)
def emit_double(self, d, as_map_key, cache):
return self.emit_string(ESC, "d", d, True, cache) if as_map_key else self.emit_object(d)
def emit_array(self, a, _, cache):
self.emit_array_start(len(a))
for x in a:
self.marshal(x, False, cache)
self.emit_array_end()
def emit_map(self, m, _, cache):# use map as object from above, have to overwrite default parser.
self.emit_map_start(len(m))
for k, v in m.items():
self.marshal(k, True, cache)
self.marshal(v, False, cache)
self.emit_map_end()
def emit_cmap(self, m, _, cache):
self.emit_map_start(1)
self.emit_string(ESC, "#", "cmap", True, cache)
self.marshal(flatten_map(m), False, cache)
self.emit_map_end()
def emit_tagged(self, tag, rep, cache):
self.emit_array_start(2)
self.emit_string(ESC, "#", tag, False, cache)
self.marshal(rep, False, cache)
self.emit_array_end()
def emit_encoded(self, tag, handler, obj, as_map_key, cache):
rep = handler.rep(obj)
if len(tag) == 1:
if isinstance(rep, pyversion.string_types):
self.emit_string(ESC, tag, rep, as_map_key, cache)
elif as_map_key or self.opts["prefer_strings"]:
rep = handler.string_rep(obj)
if isinstance(rep, pyversion.string_types):
self.emit_string(ESC, tag, rep, as_map_key, cache)
else:
raise AssertionError("Cannot be encoded as string: " + str({"tag": tag,
"rep": rep,
"obj": obj}))
else:
self.emit_tagged(tag, rep, cache)
elif as_map_key:
raise AssertionError("Cannot be used as a map key: " + str({"tag": tag,
"rep": rep,
"obj": obj}))
else:
self.emit_tagged(tag, rep, cache)
def marshal_top(self, obj, cache=None):
"""Given a complete object that needs to be marshaled into Transit
data, and optionally a cache, dispatch accordingly, and flush the data
directly into the IO stream.
"""
if not cache:
cache = RollingCache()
handler = self.handlers[obj]
tag = handler.tag(obj)
if tag:
if len(tag) == 1:
self.marshal(TaggedValue(QUOTE, obj), False, cache)
else:
self.marshal(obj, False, cache)
self.flush()
else:
raise AssertionError("Handler must provide a non-nil tag: " + str(handler))
def dispatch_map(self, rep, as_map_key, cache):
"""Used to determine and dipatch the writing of a map - a simple
map with strings as keys, or a complex map, whose keys are also
compound types.
"""
if self.are_stringable_keys(rep):
return self.emit_map(rep, as_map_key, cache)
return self.emit_cmap(rep, as_map_key, cache)
def register(self, obj_type, handler_class):
"""Register custom converters for object types present in your
application. This allows you to extend Transit to encode new types.
You must specify the obj type to be encoded, and the handler class
that should be used by this marshaller.
"""
self.handlers[obj_type] = handler_class
|
cognitect/transit-python
|
transit/writer.py
|
Marshaler.marshal_top
|
python
|
def marshal_top(self, obj, cache=None):
if not cache:
cache = RollingCache()
handler = self.handlers[obj]
tag = handler.tag(obj)
if tag:
if len(tag) == 1:
self.marshal(TaggedValue(QUOTE, obj), False, cache)
else:
self.marshal(obj, False, cache)
self.flush()
else:
raise AssertionError("Handler must provide a non-nil tag: " + str(handler))
|
Given a complete object that needs to be marshaled into Transit
data, and optionally a cache, dispatch accordingly, and flush the data
directly into the IO stream.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/writer.py#L209-L227
|
[
"def marshal(self, obj, as_map_key, cache):\n \"\"\"Marshal an individual obj, potentially as part of another container\n object (like a list/dictionary/etc). Specify if this object is a key\n to a map/dict, and pass in the current cache being used.\n This method should only be called by a top-level marshalling call\n and should not be considered an entry-point for integration.\n \"\"\"\n handler = self.handlers[obj]\n tag = handler.tag(obj)\n f = marshal_dispatch.get(tag)\n\n if f:\n f(self, obj, handler.string_rep(obj) if as_map_key else handler.rep(obj), as_map_key, cache)\n else:\n self.emit_encoded(tag, handler, obj, as_map_key, cache)\n"
] |
class Marshaler(object):
"""The base Marshaler from which all Marshalers inherit.
The Marshaler specifies how to emit Transit data given encodeable Python
objects. The end of this process is specialized by other Marshalers to
covert the final result into an on-the-wire payload (JSON or MsgPack).
"""
def __init__(self, opts={}):
self.opts = opts
self._init_handlers()
def _init_handlers(self):
self.handlers = WriteHandler()
def are_stringable_keys(self, m):
"""Test whether the keys within a map are stringable - a simple map,
that can be optimized and whose keys can be cached
"""
for x in m.keys():
if len(self.handlers[x].tag(x)) != 1:
return False
return True
def emit_nil(self, _, as_map_key, cache):
return self.emit_string(ESC, "_", None, True, cache) if as_map_key else self.emit_object(None)
def emit_string(self, prefix, tag, string, as_map_key, cache):
encoded = cache.encode(str(prefix)+tag+string, as_map_key)
# TODO: Remove this optimization for the time being - it breaks cache
#if "cache_enabled" in self.opts and is_cacheable(encoded, as_map_key):
# return self.emit_object(cache.value_to_key[encoded], as_map_key)
return self.emit_object(encoded, as_map_key)
def emit_boolean(self, b, as_map_key, cache):
return self.emit_string(ESC, "?", b, True, cache) if as_map_key else self.emit_object(b)
def emit_int(self, tag, i, rep, as_map_key, cache):
if isinstance(rep, int):
if i <= self.opts["max_int"] and i >= self.opts["min_int"]:
return self.emit_object(i, as_map_key)
else:
return self.emit_string(ESC, tag, str(rep), as_map_key, cache)
else:
return self.emit_string(ESC, tag, rep, as_map_key, cache)
def emit_double(self, d, as_map_key, cache):
return self.emit_string(ESC, "d", d, True, cache) if as_map_key else self.emit_object(d)
def emit_array(self, a, _, cache):
self.emit_array_start(len(a))
for x in a:
self.marshal(x, False, cache)
self.emit_array_end()
def emit_map(self, m, _, cache):# use map as object from above, have to overwrite default parser.
self.emit_map_start(len(m))
for k, v in m.items():
self.marshal(k, True, cache)
self.marshal(v, False, cache)
self.emit_map_end()
def emit_cmap(self, m, _, cache):
self.emit_map_start(1)
self.emit_string(ESC, "#", "cmap", True, cache)
self.marshal(flatten_map(m), False, cache)
self.emit_map_end()
def emit_tagged(self, tag, rep, cache):
self.emit_array_start(2)
self.emit_string(ESC, "#", tag, False, cache)
self.marshal(rep, False, cache)
self.emit_array_end()
def emit_encoded(self, tag, handler, obj, as_map_key, cache):
rep = handler.rep(obj)
if len(tag) == 1:
if isinstance(rep, pyversion.string_types):
self.emit_string(ESC, tag, rep, as_map_key, cache)
elif as_map_key or self.opts["prefer_strings"]:
rep = handler.string_rep(obj)
if isinstance(rep, pyversion.string_types):
self.emit_string(ESC, tag, rep, as_map_key, cache)
else:
raise AssertionError("Cannot be encoded as string: " + str({"tag": tag,
"rep": rep,
"obj": obj}))
else:
self.emit_tagged(tag, rep, cache)
elif as_map_key:
raise AssertionError("Cannot be used as a map key: " + str({"tag": tag,
"rep": rep,
"obj": obj}))
else:
self.emit_tagged(tag, rep, cache)
def marshal(self, obj, as_map_key, cache):
"""Marshal an individual obj, potentially as part of another container
object (like a list/dictionary/etc). Specify if this object is a key
to a map/dict, and pass in the current cache being used.
This method should only be called by a top-level marshalling call
and should not be considered an entry-point for integration.
"""
handler = self.handlers[obj]
tag = handler.tag(obj)
f = marshal_dispatch.get(tag)
if f:
f(self, obj, handler.string_rep(obj) if as_map_key else handler.rep(obj), as_map_key, cache)
else:
self.emit_encoded(tag, handler, obj, as_map_key, cache)
def dispatch_map(self, rep, as_map_key, cache):
"""Used to determine and dipatch the writing of a map - a simple
map with strings as keys, or a complex map, whose keys are also
compound types.
"""
if self.are_stringable_keys(rep):
return self.emit_map(rep, as_map_key, cache)
return self.emit_cmap(rep, as_map_key, cache)
def register(self, obj_type, handler_class):
"""Register custom converters for object types present in your
application. This allows you to extend Transit to encode new types.
You must specify the obj type to be encoded, and the handler class
that should be used by this marshaller.
"""
self.handlers[obj_type] = handler_class
|
cognitect/transit-python
|
transit/writer.py
|
Marshaler.dispatch_map
|
python
|
def dispatch_map(self, rep, as_map_key, cache):
if self.are_stringable_keys(rep):
return self.emit_map(rep, as_map_key, cache)
return self.emit_cmap(rep, as_map_key, cache)
|
Used to determine and dipatch the writing of a map - a simple
map with strings as keys, or a complex map, whose keys are also
compound types.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/writer.py#L229-L236
|
[
"def are_stringable_keys(self, m):\n \"\"\"Test whether the keys within a map are stringable - a simple map,\n that can be optimized and whose keys can be cached\n \"\"\"\n for x in m.keys():\n if len(self.handlers[x].tag(x)) != 1:\n return False\n return True\n"
] |
class Marshaler(object):
"""The base Marshaler from which all Marshalers inherit.
The Marshaler specifies how to emit Transit data given encodeable Python
objects. The end of this process is specialized by other Marshalers to
covert the final result into an on-the-wire payload (JSON or MsgPack).
"""
def __init__(self, opts={}):
self.opts = opts
self._init_handlers()
def _init_handlers(self):
self.handlers = WriteHandler()
def are_stringable_keys(self, m):
"""Test whether the keys within a map are stringable - a simple map,
that can be optimized and whose keys can be cached
"""
for x in m.keys():
if len(self.handlers[x].tag(x)) != 1:
return False
return True
def emit_nil(self, _, as_map_key, cache):
return self.emit_string(ESC, "_", None, True, cache) if as_map_key else self.emit_object(None)
def emit_string(self, prefix, tag, string, as_map_key, cache):
encoded = cache.encode(str(prefix)+tag+string, as_map_key)
# TODO: Remove this optimization for the time being - it breaks cache
#if "cache_enabled" in self.opts and is_cacheable(encoded, as_map_key):
# return self.emit_object(cache.value_to_key[encoded], as_map_key)
return self.emit_object(encoded, as_map_key)
def emit_boolean(self, b, as_map_key, cache):
return self.emit_string(ESC, "?", b, True, cache) if as_map_key else self.emit_object(b)
def emit_int(self, tag, i, rep, as_map_key, cache):
if isinstance(rep, int):
if i <= self.opts["max_int"] and i >= self.opts["min_int"]:
return self.emit_object(i, as_map_key)
else:
return self.emit_string(ESC, tag, str(rep), as_map_key, cache)
else:
return self.emit_string(ESC, tag, rep, as_map_key, cache)
def emit_double(self, d, as_map_key, cache):
return self.emit_string(ESC, "d", d, True, cache) if as_map_key else self.emit_object(d)
def emit_array(self, a, _, cache):
self.emit_array_start(len(a))
for x in a:
self.marshal(x, False, cache)
self.emit_array_end()
def emit_map(self, m, _, cache):# use map as object from above, have to overwrite default parser.
self.emit_map_start(len(m))
for k, v in m.items():
self.marshal(k, True, cache)
self.marshal(v, False, cache)
self.emit_map_end()
def emit_cmap(self, m, _, cache):
self.emit_map_start(1)
self.emit_string(ESC, "#", "cmap", True, cache)
self.marshal(flatten_map(m), False, cache)
self.emit_map_end()
def emit_tagged(self, tag, rep, cache):
self.emit_array_start(2)
self.emit_string(ESC, "#", tag, False, cache)
self.marshal(rep, False, cache)
self.emit_array_end()
def emit_encoded(self, tag, handler, obj, as_map_key, cache):
rep = handler.rep(obj)
if len(tag) == 1:
if isinstance(rep, pyversion.string_types):
self.emit_string(ESC, tag, rep, as_map_key, cache)
elif as_map_key or self.opts["prefer_strings"]:
rep = handler.string_rep(obj)
if isinstance(rep, pyversion.string_types):
self.emit_string(ESC, tag, rep, as_map_key, cache)
else:
raise AssertionError("Cannot be encoded as string: " + str({"tag": tag,
"rep": rep,
"obj": obj}))
else:
self.emit_tagged(tag, rep, cache)
elif as_map_key:
raise AssertionError("Cannot be used as a map key: " + str({"tag": tag,
"rep": rep,
"obj": obj}))
else:
self.emit_tagged(tag, rep, cache)
def marshal(self, obj, as_map_key, cache):
"""Marshal an individual obj, potentially as part of another container
object (like a list/dictionary/etc). Specify if this object is a key
to a map/dict, and pass in the current cache being used.
This method should only be called by a top-level marshalling call
and should not be considered an entry-point for integration.
"""
handler = self.handlers[obj]
tag = handler.tag(obj)
f = marshal_dispatch.get(tag)
if f:
f(self, obj, handler.string_rep(obj) if as_map_key else handler.rep(obj), as_map_key, cache)
else:
self.emit_encoded(tag, handler, obj, as_map_key, cache)
def marshal_top(self, obj, cache=None):
"""Given a complete object that needs to be marshaled into Transit
data, and optionally a cache, dispatch accordingly, and flush the data
directly into the IO stream.
"""
if not cache:
cache = RollingCache()
handler = self.handlers[obj]
tag = handler.tag(obj)
if tag:
if len(tag) == 1:
self.marshal(TaggedValue(QUOTE, obj), False, cache)
else:
self.marshal(obj, False, cache)
self.flush()
else:
raise AssertionError("Handler must provide a non-nil tag: " + str(handler))
def register(self, obj_type, handler_class):
"""Register custom converters for object types present in your
application. This allows you to extend Transit to encode new types.
You must specify the obj type to be encoded, and the handler class
that should be used by this marshaller.
"""
self.handlers[obj_type] = handler_class
|
cognitect/transit-python
|
transit/writer.py
|
JsonMarshaler.emit_map
|
python
|
def emit_map(self, m, _, cache):
self.emit_array_start(None)
self.marshal(MAP_AS_ARR, False, cache)
for k, v in m.items():
self.marshal(k, True, cache)
self.marshal(v, False, cache)
self.emit_array_end()
|
Emits array as per default JSON spec.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/writer.py#L353-L360
|
[
"def marshal(self, obj, as_map_key, cache):\n \"\"\"Marshal an individual obj, potentially as part of another container\n object (like a list/dictionary/etc). Specify if this object is a key\n to a map/dict, and pass in the current cache being used.\n This method should only be called by a top-level marshalling call\n and should not be considered an entry-point for integration.\n \"\"\"\n handler = self.handlers[obj]\n tag = handler.tag(obj)\n f = marshal_dispatch.get(tag)\n\n if f:\n f(self, obj, handler.string_rep(obj) if as_map_key else handler.rep(obj), as_map_key, cache)\n else:\n self.emit_encoded(tag, handler, obj, as_map_key, cache)\n",
"def emit_array_start(self, size):\n self.write_sep()\n self.io.write(u\"[\")\n self.push_level()\n",
"def emit_array_end(self):\n self.pop_level()\n self.io.write(u\"]\")\n"
] |
class JsonMarshaler(Marshaler):
"""The Marshaler tailor to JSON. To use this Marshaler, specify the
'json' protocol when creating a Writer.
"""
JSON_MAX_INT = pow(2, 53) - 1
JSON_MIN_INT = -pow(2, 53) + 1
default_opts = {"prefer_strings": True,
"max_int": JSON_MAX_INT,
"min_int": JSON_MIN_INT}
def __init__(self, io, opts={}):
self.io = io
nopts = JsonMarshaler.default_opts.copy()
nopts.update(opts)
self.started = [True]
self.is_key = [None]
Marshaler.__init__(self, nopts)
self.flush = self.io.flush
def push_level(self):
self.started.append(True)
self.is_key.append(None)
def pop_level(self):
self.started.pop()
self.is_key.pop()
def push_map(self):
self.started.append(True)
self.is_key.append(True)
def write_sep(self):
if self.started[-1]:
self.started[-1] = False
else:
last = self.is_key[-1]
if last:
self.io.write(u":")
self.is_key[-1] = False
elif last is False:
self.io.write(u",")
self.is_key[-1] = True
else:
self.io.write(u",")
def emit_array_start(self, size):
self.write_sep()
self.io.write(u"[")
self.push_level()
def emit_array_end(self):
self.pop_level()
self.io.write(u"]")
def emit_map_start(self, size):
self.write_sep()
self.io.write(u"{")
self.push_map()
def emit_map_end(self):
self.pop_level()
self.io.write(u"}")
def emit_object(self, obj, as_map_key=False):
tp = type(obj)
self.write_sep()
if tp in pyversion.string_types:
self.io.write(u"\"")
self.io.write(u"".join([(ESCAPE_DCT[c]) if c in ESCAPE_DCT else c for c in obj]))
self.io.write(u"\"")
elif pyversion.isnumber_type(tp):
self.io.write(pyversion.unicode_type(obj))
elif tp is bool:
self.io.write(u"true" if obj else u"false")
elif obj is None:
self.io.write(u"null")
else:
raise AssertionError("Don't know how to encode: " + str(obj) + " of type: " + str(type(obj)))
|
cognitect/transit-python
|
transit/decoder.py
|
Decoder.decode
|
python
|
def decode(self, node, cache=None, as_map_key=False):
if not cache:
cache = RollingCache()
return self._decode(node, cache, as_map_key)
|
Given a node of data (any supported decodeable obj - string, dict,
list), return the decoded object. Optionally set the current decode
cache [None]. If None, a new RollingCache is instantiated and used.
You may also hit to the decoder that this node is to be treated as a
map key [False]. This is used internally.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/decoder.py#L73-L82
|
[
"def _decode(self, node, cache, as_map_key):\n tp = type(node)\n if tp is pyversion.unicode_type:\n return self.decode_string(node, cache, as_map_key)\n elif tp is bytes:\n return self.decode_string(node.decode(\"utf-8\"), cache, as_map_key)\n elif tp is dict or tp is OrderedDict:\n return self.decode_hash(node, cache, as_map_key)\n elif tp is list:\n return self.decode_list(node, cache, as_map_key)\n elif tp is str:\n return self.decode_string(unicode(node, \"utf-8\"), cache, as_map_key)\n elif tp is bool:\n return true if node else false\n return node\n"
] |
class Decoder(object):
"""The Decoder is the lowest level entry point for parsing, decoding, and
fully converting Transit data into Python objects.
During the creation of a Decoder object, you can specify custom options
in a dictionary. One such option is 'decoders'. Note that while you
can specify your own decoders and override many of the built in decoders,
some decoders are silently enforced and cannot be overriden. These are
known as Ground Decoders, and are needed to maintain bottom-tier
compatibility.
"""
def __init__(self, options={}):
self.options = default_options.copy()
self.options.update(options)
self.decoders = self.options["decoders"]
# Always ensure we control the ground decoders
self.decoders.update(ground_decoders)
def _decode(self, node, cache, as_map_key):
tp = type(node)
if tp is pyversion.unicode_type:
return self.decode_string(node, cache, as_map_key)
elif tp is bytes:
return self.decode_string(node.decode("utf-8"), cache, as_map_key)
elif tp is dict or tp is OrderedDict:
return self.decode_hash(node, cache, as_map_key)
elif tp is list:
return self.decode_list(node, cache, as_map_key)
elif tp is str:
return self.decode_string(unicode(node, "utf-8"), cache, as_map_key)
elif tp is bool:
return true if node else false
return node
def decode_list(self, node, cache, as_map_key):
"""Special case decodes map-as-array.
Otherwise lists are treated as Python lists.
Arguments follow the same convention as the top-level 'decode'
function.
"""
if node:
if node[0] == MAP_AS_ARR:
# key must be decoded before value for caching to work.
returned_dict = {}
for k, v in pairs(node[1:]):
key = self._decode(k, cache, True)
val = self._decode(v, cache, as_map_key)
returned_dict[key] = val
return transit_types.frozendict(returned_dict)
decoded = self._decode(node[0], cache, as_map_key)
if isinstance(decoded, Tag):
return self.decode_tag(decoded.tag,
self._decode(node[1], cache, as_map_key))
return tuple(self._decode(x, cache, as_map_key) for x in node)
def decode_string(self, string, cache, as_map_key):
"""Decode a string - arguments follow the same convention as the
top-level 'decode' function.
"""
if is_cache_key(string):
return self.parse_string(cache.decode(string, as_map_key),
cache, as_map_key)
if is_cacheable(string, as_map_key):
cache.encode(string, as_map_key)
return self.parse_string(string, cache, as_map_key)
def decode_tag(self, tag, rep):
decoder = self.decoders.get(tag, None)
if decoder:
return decoder.from_rep(rep)
else:
return self.options["default_decoder"].from_rep(tag, rep)
def decode_hash(self, hash, cache, as_map_key):
if len(hash) != 1:
h = {}
for k, v in hash.items():
# crude/verbose implementation, but this is only version that
# plays nice w/cache for both msgpack and json thus far.
# -- e.g., we have to specify encode/decode order for key/val
# -- explicitly, all implicit ordering has broken in corner
# -- cases, thus these extraneous seeming assignments
key = self._decode(k, cache, True)
val = self._decode(v, cache, False)
h[key] = val
return transit_types.frozendict(h)
else:
key = list(hash)[0]
value = hash[key]
key = self._decode(key, cache, True)
if isinstance(key, Tag):
return self.decode_tag(key.tag,
self._decode(value, cache, as_map_key))
return transit_types.frozendict({key: self._decode(value, cache, False)})
def parse_string(self, string, cache, as_map_key):
if string.startswith(ESC):
m = string[1]
if m in self.decoders:
return self.decoders[m].from_rep(string[2:])
elif m == ESC or m == SUB or m == RES:
return string[1:]
elif m == "#":
return Tag(string[2:])
else:
return self.options["default_decoder"].from_rep(string[1],
string[2:])
return string
def register(self, key_or_tag, obj):
"""Register a custom Transit tag and new parsing function with the
decoder. Also, you can optionally set the 'default_decoder' with
this function. Your new tag and parse/decode function will be added
to the interal dictionary of decoders for this Decoder object.
"""
if key_or_tag == "default_decoder":
self.options["default_decoder"] = obj
else:
self.decoders[key_or_tag] = obj
|
cognitect/transit-python
|
transit/decoder.py
|
Decoder.decode_list
|
python
|
def decode_list(self, node, cache, as_map_key):
if node:
if node[0] == MAP_AS_ARR:
# key must be decoded before value for caching to work.
returned_dict = {}
for k, v in pairs(node[1:]):
key = self._decode(k, cache, True)
val = self._decode(v, cache, as_map_key)
returned_dict[key] = val
return transit_types.frozendict(returned_dict)
decoded = self._decode(node[0], cache, as_map_key)
if isinstance(decoded, Tag):
return self.decode_tag(decoded.tag,
self._decode(node[1], cache, as_map_key))
return tuple(self._decode(x, cache, as_map_key) for x in node)
|
Special case decodes map-as-array.
Otherwise lists are treated as Python lists.
Arguments follow the same convention as the top-level 'decode'
function.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/decoder.py#L100-L121
|
[
"def pairs(i):\n return izip(*[iter(i)] * 2)\n",
"def _decode(self, node, cache, as_map_key):\n tp = type(node)\n if tp is pyversion.unicode_type:\n return self.decode_string(node, cache, as_map_key)\n elif tp is bytes:\n return self.decode_string(node.decode(\"utf-8\"), cache, as_map_key)\n elif tp is dict or tp is OrderedDict:\n return self.decode_hash(node, cache, as_map_key)\n elif tp is list:\n return self.decode_list(node, cache, as_map_key)\n elif tp is str:\n return self.decode_string(unicode(node, \"utf-8\"), cache, as_map_key)\n elif tp is bool:\n return true if node else false\n return node\n",
"def decode_tag(self, tag, rep):\n decoder = self.decoders.get(tag, None)\n if decoder:\n return decoder.from_rep(rep)\n else:\n return self.options[\"default_decoder\"].from_rep(tag, rep)\n"
] |
class Decoder(object):
"""The Decoder is the lowest level entry point for parsing, decoding, and
fully converting Transit data into Python objects.
During the creation of a Decoder object, you can specify custom options
in a dictionary. One such option is 'decoders'. Note that while you
can specify your own decoders and override many of the built in decoders,
some decoders are silently enforced and cannot be overriden. These are
known as Ground Decoders, and are needed to maintain bottom-tier
compatibility.
"""
def __init__(self, options={}):
self.options = default_options.copy()
self.options.update(options)
self.decoders = self.options["decoders"]
# Always ensure we control the ground decoders
self.decoders.update(ground_decoders)
def decode(self, node, cache=None, as_map_key=False):
"""Given a node of data (any supported decodeable obj - string, dict,
list), return the decoded object. Optionally set the current decode
cache [None]. If None, a new RollingCache is instantiated and used.
You may also hit to the decoder that this node is to be treated as a
map key [False]. This is used internally.
"""
if not cache:
cache = RollingCache()
return self._decode(node, cache, as_map_key)
def _decode(self, node, cache, as_map_key):
tp = type(node)
if tp is pyversion.unicode_type:
return self.decode_string(node, cache, as_map_key)
elif tp is bytes:
return self.decode_string(node.decode("utf-8"), cache, as_map_key)
elif tp is dict or tp is OrderedDict:
return self.decode_hash(node, cache, as_map_key)
elif tp is list:
return self.decode_list(node, cache, as_map_key)
elif tp is str:
return self.decode_string(unicode(node, "utf-8"), cache, as_map_key)
elif tp is bool:
return true if node else false
return node
def decode_string(self, string, cache, as_map_key):
"""Decode a string - arguments follow the same convention as the
top-level 'decode' function.
"""
if is_cache_key(string):
return self.parse_string(cache.decode(string, as_map_key),
cache, as_map_key)
if is_cacheable(string, as_map_key):
cache.encode(string, as_map_key)
return self.parse_string(string, cache, as_map_key)
def decode_tag(self, tag, rep):
decoder = self.decoders.get(tag, None)
if decoder:
return decoder.from_rep(rep)
else:
return self.options["default_decoder"].from_rep(tag, rep)
def decode_hash(self, hash, cache, as_map_key):
if len(hash) != 1:
h = {}
for k, v in hash.items():
# crude/verbose implementation, but this is only version that
# plays nice w/cache for both msgpack and json thus far.
# -- e.g., we have to specify encode/decode order for key/val
# -- explicitly, all implicit ordering has broken in corner
# -- cases, thus these extraneous seeming assignments
key = self._decode(k, cache, True)
val = self._decode(v, cache, False)
h[key] = val
return transit_types.frozendict(h)
else:
key = list(hash)[0]
value = hash[key]
key = self._decode(key, cache, True)
if isinstance(key, Tag):
return self.decode_tag(key.tag,
self._decode(value, cache, as_map_key))
return transit_types.frozendict({key: self._decode(value, cache, False)})
def parse_string(self, string, cache, as_map_key):
if string.startswith(ESC):
m = string[1]
if m in self.decoders:
return self.decoders[m].from_rep(string[2:])
elif m == ESC or m == SUB or m == RES:
return string[1:]
elif m == "#":
return Tag(string[2:])
else:
return self.options["default_decoder"].from_rep(string[1],
string[2:])
return string
def register(self, key_or_tag, obj):
"""Register a custom Transit tag and new parsing function with the
decoder. Also, you can optionally set the 'default_decoder' with
this function. Your new tag and parse/decode function will be added
to the interal dictionary of decoders for this Decoder object.
"""
if key_or_tag == "default_decoder":
self.options["default_decoder"] = obj
else:
self.decoders[key_or_tag] = obj
|
cognitect/transit-python
|
transit/decoder.py
|
Decoder.decode_string
|
python
|
def decode_string(self, string, cache, as_map_key):
if is_cache_key(string):
return self.parse_string(cache.decode(string, as_map_key),
cache, as_map_key)
if is_cacheable(string, as_map_key):
cache.encode(string, as_map_key)
return self.parse_string(string, cache, as_map_key)
|
Decode a string - arguments follow the same convention as the
top-level 'decode' function.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/decoder.py#L123-L132
|
[
"def is_cache_key(name):\n return len(name) and (name[0] == SUB and name != MAP_AS_ARR)\n"
] |
class Decoder(object):
"""The Decoder is the lowest level entry point for parsing, decoding, and
fully converting Transit data into Python objects.
During the creation of a Decoder object, you can specify custom options
in a dictionary. One such option is 'decoders'. Note that while you
can specify your own decoders and override many of the built in decoders,
some decoders are silently enforced and cannot be overriden. These are
known as Ground Decoders, and are needed to maintain bottom-tier
compatibility.
"""
def __init__(self, options={}):
self.options = default_options.copy()
self.options.update(options)
self.decoders = self.options["decoders"]
# Always ensure we control the ground decoders
self.decoders.update(ground_decoders)
def decode(self, node, cache=None, as_map_key=False):
"""Given a node of data (any supported decodeable obj - string, dict,
list), return the decoded object. Optionally set the current decode
cache [None]. If None, a new RollingCache is instantiated and used.
You may also hit to the decoder that this node is to be treated as a
map key [False]. This is used internally.
"""
if not cache:
cache = RollingCache()
return self._decode(node, cache, as_map_key)
def _decode(self, node, cache, as_map_key):
tp = type(node)
if tp is pyversion.unicode_type:
return self.decode_string(node, cache, as_map_key)
elif tp is bytes:
return self.decode_string(node.decode("utf-8"), cache, as_map_key)
elif tp is dict or tp is OrderedDict:
return self.decode_hash(node, cache, as_map_key)
elif tp is list:
return self.decode_list(node, cache, as_map_key)
elif tp is str:
return self.decode_string(unicode(node, "utf-8"), cache, as_map_key)
elif tp is bool:
return true if node else false
return node
def decode_list(self, node, cache, as_map_key):
"""Special case decodes map-as-array.
Otherwise lists are treated as Python lists.
Arguments follow the same convention as the top-level 'decode'
function.
"""
if node:
if node[0] == MAP_AS_ARR:
# key must be decoded before value for caching to work.
returned_dict = {}
for k, v in pairs(node[1:]):
key = self._decode(k, cache, True)
val = self._decode(v, cache, as_map_key)
returned_dict[key] = val
return transit_types.frozendict(returned_dict)
decoded = self._decode(node[0], cache, as_map_key)
if isinstance(decoded, Tag):
return self.decode_tag(decoded.tag,
self._decode(node[1], cache, as_map_key))
return tuple(self._decode(x, cache, as_map_key) for x in node)
def decode_tag(self, tag, rep):
decoder = self.decoders.get(tag, None)
if decoder:
return decoder.from_rep(rep)
else:
return self.options["default_decoder"].from_rep(tag, rep)
def decode_hash(self, hash, cache, as_map_key):
if len(hash) != 1:
h = {}
for k, v in hash.items():
# crude/verbose implementation, but this is only version that
# plays nice w/cache for both msgpack and json thus far.
# -- e.g., we have to specify encode/decode order for key/val
# -- explicitly, all implicit ordering has broken in corner
# -- cases, thus these extraneous seeming assignments
key = self._decode(k, cache, True)
val = self._decode(v, cache, False)
h[key] = val
return transit_types.frozendict(h)
else:
key = list(hash)[0]
value = hash[key]
key = self._decode(key, cache, True)
if isinstance(key, Tag):
return self.decode_tag(key.tag,
self._decode(value, cache, as_map_key))
return transit_types.frozendict({key: self._decode(value, cache, False)})
def parse_string(self, string, cache, as_map_key):
if string.startswith(ESC):
m = string[1]
if m in self.decoders:
return self.decoders[m].from_rep(string[2:])
elif m == ESC or m == SUB or m == RES:
return string[1:]
elif m == "#":
return Tag(string[2:])
else:
return self.options["default_decoder"].from_rep(string[1],
string[2:])
return string
def register(self, key_or_tag, obj):
"""Register a custom Transit tag and new parsing function with the
decoder. Also, you can optionally set the 'default_decoder' with
this function. Your new tag and parse/decode function will be added
to the interal dictionary of decoders for this Decoder object.
"""
if key_or_tag == "default_decoder":
self.options["default_decoder"] = obj
else:
self.decoders[key_or_tag] = obj
|
cognitect/transit-python
|
transit/decoder.py
|
Decoder.register
|
python
|
def register(self, key_or_tag, obj):
if key_or_tag == "default_decoder":
self.options["default_decoder"] = obj
else:
self.decoders[key_or_tag] = obj
|
Register a custom Transit tag and new parsing function with the
decoder. Also, you can optionally set the 'default_decoder' with
this function. Your new tag and parse/decode function will be added
to the interal dictionary of decoders for this Decoder object.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/decoder.py#L177-L186
| null |
class Decoder(object):
"""The Decoder is the lowest level entry point for parsing, decoding, and
fully converting Transit data into Python objects.
During the creation of a Decoder object, you can specify custom options
in a dictionary. One such option is 'decoders'. Note that while you
can specify your own decoders and override many of the built in decoders,
some decoders are silently enforced and cannot be overriden. These are
known as Ground Decoders, and are needed to maintain bottom-tier
compatibility.
"""
def __init__(self, options={}):
self.options = default_options.copy()
self.options.update(options)
self.decoders = self.options["decoders"]
# Always ensure we control the ground decoders
self.decoders.update(ground_decoders)
def decode(self, node, cache=None, as_map_key=False):
"""Given a node of data (any supported decodeable obj - string, dict,
list), return the decoded object. Optionally set the current decode
cache [None]. If None, a new RollingCache is instantiated and used.
You may also hit to the decoder that this node is to be treated as a
map key [False]. This is used internally.
"""
if not cache:
cache = RollingCache()
return self._decode(node, cache, as_map_key)
def _decode(self, node, cache, as_map_key):
tp = type(node)
if tp is pyversion.unicode_type:
return self.decode_string(node, cache, as_map_key)
elif tp is bytes:
return self.decode_string(node.decode("utf-8"), cache, as_map_key)
elif tp is dict or tp is OrderedDict:
return self.decode_hash(node, cache, as_map_key)
elif tp is list:
return self.decode_list(node, cache, as_map_key)
elif tp is str:
return self.decode_string(unicode(node, "utf-8"), cache, as_map_key)
elif tp is bool:
return true if node else false
return node
def decode_list(self, node, cache, as_map_key):
"""Special case decodes map-as-array.
Otherwise lists are treated as Python lists.
Arguments follow the same convention as the top-level 'decode'
function.
"""
if node:
if node[0] == MAP_AS_ARR:
# key must be decoded before value for caching to work.
returned_dict = {}
for k, v in pairs(node[1:]):
key = self._decode(k, cache, True)
val = self._decode(v, cache, as_map_key)
returned_dict[key] = val
return transit_types.frozendict(returned_dict)
decoded = self._decode(node[0], cache, as_map_key)
if isinstance(decoded, Tag):
return self.decode_tag(decoded.tag,
self._decode(node[1], cache, as_map_key))
return tuple(self._decode(x, cache, as_map_key) for x in node)
def decode_string(self, string, cache, as_map_key):
"""Decode a string - arguments follow the same convention as the
top-level 'decode' function.
"""
if is_cache_key(string):
return self.parse_string(cache.decode(string, as_map_key),
cache, as_map_key)
if is_cacheable(string, as_map_key):
cache.encode(string, as_map_key)
return self.parse_string(string, cache, as_map_key)
def decode_tag(self, tag, rep):
decoder = self.decoders.get(tag, None)
if decoder:
return decoder.from_rep(rep)
else:
return self.options["default_decoder"].from_rep(tag, rep)
def decode_hash(self, hash, cache, as_map_key):
if len(hash) != 1:
h = {}
for k, v in hash.items():
# crude/verbose implementation, but this is only version that
# plays nice w/cache for both msgpack and json thus far.
# -- e.g., we have to specify encode/decode order for key/val
# -- explicitly, all implicit ordering has broken in corner
# -- cases, thus these extraneous seeming assignments
key = self._decode(k, cache, True)
val = self._decode(v, cache, False)
h[key] = val
return transit_types.frozendict(h)
else:
key = list(hash)[0]
value = hash[key]
key = self._decode(key, cache, True)
if isinstance(key, Tag):
return self.decode_tag(key.tag,
self._decode(value, cache, as_map_key))
return transit_types.frozendict({key: self._decode(value, cache, False)})
def parse_string(self, string, cache, as_map_key):
if string.startswith(ESC):
m = string[1]
if m in self.decoders:
return self.decoders[m].from_rep(string[2:])
elif m == ESC or m == SUB or m == RES:
return string[1:]
elif m == "#":
return Tag(string[2:])
else:
return self.options["default_decoder"].from_rep(string[1],
string[2:])
return string
|
cognitect/transit-python
|
transit/read_handlers.py
|
UuidHandler.from_rep
|
python
|
def from_rep(u):
if isinstance(u, pyversion.string_types):
return uuid.UUID(u)
# hack to remove signs
a = ctypes.c_ulong(u[0])
b = ctypes.c_ulong(u[1])
combined = a.value << 64 | b.value
return uuid.UUID(int=combined)
|
Given a string, return a UUID object.
|
train
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/read_handlers.py#L78-L87
| null |
class UuidHandler(object):
@staticmethod
|
bwesterb/py-seccure
|
src/__init__.py
|
serialize_number
|
python
|
def serialize_number(x, fmt=SER_BINARY, outlen=None):
ret = b''
if fmt == SER_BINARY:
while x:
x, r = divmod(x, 256)
ret = six.int2byte(int(r)) + ret
if outlen is not None:
assert len(ret) <= outlen
ret = ret.rjust(outlen, b'\0')
return ret
assert fmt == SER_COMPACT
while x:
x, r = divmod(x, len(COMPACT_DIGITS))
ret = COMPACT_DIGITS[r:r + 1] + ret
if outlen is not None:
assert len(ret) <= outlen
ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])
return ret
|
Serializes `x' to a string of length `outlen' in format `fmt'
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L57-L75
| null |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
if six.PY3:
from io import BytesIO as BytesIO
def byte2int(b): return b
def stringlike(x): return isinstance(x, (str, bytes))
else:
from cStringIO import StringIO as BytesIO
def byte2int(b): return ord(b)
def stringlike(x): return isinstance(x, basestring)
l = logging.getLogger(__name__)
class IntegrityError(ValueError):
pass
# Serialization of numbers
# #########################################################
SER_COMPACT = 0
SER_BINARY = 1
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
R_COMPACT_DIGITS = {} # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
R_COMPACT_DIGITS[c] = i
def deserialize_number(s, fmt=SER_BINARY):
""" Deserializes a number from a string `s' in format `fmt' """
ret = gmpy.mpz(0)
if fmt == SER_BINARY:
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
for c in s:
ret *= 256
ret += byte2int(c)
return ret
assert fmt == SER_COMPACT
if isinstance(s, six.text_type):
s = s.encode('ascii')
for c in s:
ret *= len(COMPACT_DIGITS)
ret += R_COMPACT_DIGITS[c]
return ret
def get_serialized_number_len(x, fmt=SER_BINARY):
if fmt == SER_BINARY:
return (x.numdigits(2) + 7) // 8
assert fmt == SER_COMPACT
res = 0
while x != 0:
x = x // len(COMPACT_DIGITS)
res += 1
return res
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
""" Returns whether `a' is a square modulo p """
if not a:
return True
p1 = p // 2
p2 = pow(a, p1, p)
return p2 == 1
def mod_root(a, p):
""" Return a root of `a' modulo p """
if a == 0:
return 0
if not mod_issquare(a, p):
raise ValueError
n = 2
while mod_issquare(n, p):
n += 1
q = p - 1
r = 0
while not q.getbit(r):
r += 1
q = q >> r
y = pow(n, q, p)
h = q >> 1
b = pow(a, h, p)
x = (a * b) % p
b = (b * x) % p
while b != 1:
h = (b * b) % p
m = 1
while h != 1:
h = (h * h) % p
m += 1
h = gmpy.mpz(0)
h = h.setbit(r - m - 1)
t = pow(y, h, p)
y = (t * t) % p
r = m
x = (x * t) % p
b = (b * y) % p
return x
# Raw curve parameters
# #########################################################
raw_curve_parameters = collections.namedtuple(
'raw_curve_parameters',
('name',
'a',
'b',
'm',
'base_x',
'base_y',
'order',
'cofactor',
'pk_len_compact'))
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
def __init__(self, x, y, z, curve):
self.x = x
self.y = y
self.z = z
self.curve = curve
def to_affine(self):
if self.z == 0:
return AffinePoint(x=0, y=0, curve=self.curve)
m = self.curve.m
h = gmpy.invert(self.z, m)
y = (h * h) % m
x = (self.x * y) % m
y = (y * h) % m
y = (y * self.y) % m
return AffinePoint(x=x, y=y, curve=self.curve)
def double(self):
if not self.z:
return self
if not self.y:
return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
m = self.curve.m
a = self.curve.a
t1 = (self.x * self.x) % m
t2 = (t1 + t1) % m
t2 = (t2 + t1) % m
t1 = (self.z * self.z) % m
t1 = (t1 * t1) % m
t1 = (t1 * a) % m
t1 = (t1 + t2) % m
z = (self.z * self.y) % m
z = (z + z) % m
y = (self.y * self.y) % m
y = (y + y) % m
t2 = (self.x * y) % m
t2 = (t2 + t2) % m
x = (t1 * t1) % m
x = (x - t2) % m
x = (x - t2) % m
t2 = (t2 - x) % m
t1 = (t1 * t2) % m
t2 = (y * y) % m
t2 = (t2 + t2) % m
y = (t1 - t2) % m
return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
def __add__(self, other):
if not isinstance(other, AffinePoint):
raise NotImplementedError
if not other:
return self
if not self.z:
return other.to_jacobian()
m = self.curve.m
t1 = (self.z * self.z) % m
t2 = (t1 * other.x) % m
t1 = (t1 * self.z) % m
t1 = (t1 * other.y) % m
if self.x == t2:
if self.y == t1:
return self.double()
return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
x = (self.x - t2) % m
y = (self.y - t1) % m
z = (self.z * x) % m
t3 = (x * x) % m
t2 = (t2 * t3) % m
t3 = (t3 * x) % m
t1 = (t1 * t3) % m
x = (y * y) % m
x = (x - t3) % m
x = (x - t2) % m
x = (x - t2) % m
t2 = (t2 - x) % m
y = (y * t2) % m
y = (y - t1) % m
return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
def __repr__(self):
return "<JacobianPoint (%s, %s, %s) of %s>" % (
self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
def __init__(self, x, y, curve):
self.x = x
self.y = y
self.curve = curve
@property
def on_curve(self):
if not self:
return True
m = self.curve.m
a = self.curve.a
b = self.curve.b
h1 = (self.x * self.x) % m
h1 = (h1 + a) % m
h1 = (h1 * self.x) % m
h1 = (h1 + b) % m
h2 = (self.y * self.y) % m
return h1 == h2
def to_jacobian(self):
if not self:
return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)
def double(self):
if not self.y:
return AffinePoint(x=0, y=0, curve=self.curve)
m = self.curve.m
a = self.curve.a
t2 = (self.x * self.x) % m
t1 = (t2 + t2) % m
t1 = (t1 + t2) % m
t1 = (t1 + a) % m
t2 = (self.y + self.y) % m
t2 = gmpy.invert(t2, m)
t1 = (t1 * t2) % m
t2 = (t1 * t1) % m
t2 = (t2 - self.x) % m
t2 = (t2 - self.x) % m
x = (self.x - t2) % m
t1 = (t1 * x) % m
y = (t1 - self.y) % m
x = t2
return AffinePoint(x=x, y=y, curve=self.curve)
def __mul__(self, exp):
n = exp.numdigits(2)
r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
while n:
r = r.double()
n -= 1
if exp.getbit(n):
r = r + self
R = r.to_affine()
assert R.on_curve
return R
def __add__(self, other):
if not isinstance(other, AffinePoint):
raise NotImplementedError
if not other:
return self
if not self:
return other
if self.x == other.x:
if self.y == other.y:
return self.double()
return AffinePoint(x=0, y=0, curve=self.curve)
m = self.curve.m
t = (self.y - other.y) % m
y = (self.x - other.x) % m
y = gmpy.invert(y, m)
y = (t * y) % m
t = (y * y) % m
x = (self.x + other.x) % m
x = (t - x) % m
t = (other.x - x) % m
y = (y * t) % m
y = (y - other.y) % m
return AffinePoint(x=x, y=y, curve=self.curve)
def __nonzero__(self):
return bool(self.x or self.y)
__bool__ = __nonzero__
def __repr__(self):
return "<AffinePoint (%s, %s) of %s>" % (
self.x, self.y, self.curve.name)
def __eq__(self, other):
if not isinstance(other, AffinePoint):
return False
return self.x == other.x and self.y == other.y
def __ne__(self, other):
return not (self == other)
def __str__(self):
return self.to_string(SER_COMPACT)
def to_bytes(self, fmt=SER_BINARY):
outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
else self.curve.pk_len_bin)
if self._point_compress():
return serialize_number(self.x + self.curve.m, fmt, outlen)
return serialize_number(self.x, fmt, outlen)
def to_string(self, fmt=SER_BINARY):
return self.to_bytes(fmt).decode()
def _point_compress(self):
return self.y.getbit(0) == 1
def _ECIES_KDF(self, R):
h = hashlib.sha512()
h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
return h.digest()
def _ECIES_encryption(self):
while True:
k = gmpy.mpz(
Crypto.Random.random.randrange(
0, int(
self.curve.order - 1)))
R = self.curve.base * k
k = k * self.curve.cofactor
Z = self * k
if Z:
break
return (Z._ECIES_KDF(R), R)
def _ECIES_decryption(self, d):
if isinstance(d, PrivKey):
d = d.e
e = d * self.curve.cofactor
if not self.valid_embedded_key:
raise ValueError
Z = self * e
if not Z:
raise ValueError
return Z._ECIES_KDF(self)
def _ECDSA_verify(self, md, sig):
order = self.curve.order
s, r = divmod(sig, order)
if s <= 0 or order <= s or r <= 0 or order <= r:
return False
e = deserialize_number(md, SER_BINARY) % order
s = gmpy.invert(s, order)
e = (e * s) % order
X1 = self.curve.base * e
e = (r * s) % order
X2 = self * e
X1 = X1 + X2
if not X1:
return False
s = X1.x % order
return s == r
@property
def valid_embedded_key(self):
if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
self.y > self.curve.m):
return False
if not self:
return False
if not self.on_curve:
return False
return True
class PubKey(object):
""" A public affine point """
def __init__(self, p):
self.p = p
def verify(self, h, sig, sig_fmt=SER_BINARY):
""" Verifies that `sig' is a signature for a message with
SHA-512 hash `h'. """
s = deserialize_number(sig, sig_fmt)
return self.p._ECDSA_verify(h, s)
@contextlib.contextmanager
def encrypt_to(self, f, mac_bytes=10):
""" Returns a file like object `ef'. Anything written to `ef'
will be encrypted for this pubkey and written to `f'. """
ctx = EncryptionContext(f, self.p, mac_bytes)
yield ctx
ctx.finish()
def encrypt(self, s, mac_bytes=10):
""" Encrypt `s' for this pubkey. """
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
f.write(s)
return out.getvalue()
def to_bytes(self, fmt=SER_BINARY):
return self.p.to_bytes(fmt)
def to_string(self, fmt=SER_BINARY):
return self.p.to_string(fmt)
def __str__(self):
return self.to_string(SER_COMPACT)
def __repr__(self):
return "<PubKey %s>" % self
class PrivKey(object):
    """A private key: a secret exponent on a curve.

    Offers ECIES decryption and deterministic ECDSA signing.
    """
    def __init__(self, e, curve):
        self.e = e          # the secret exponent
        self.curve = curve  # the Curve the exponent belongs to
    @contextlib.contextmanager
    def decrypt_from(self, f, mac_bytes=10):
        """Context manager yielding a file-like object that decrypts
        a message read from `f`; the MAC is verified when the stream
        is exhausted (IntegrityError on mismatch)."""
        ctx = DecryptionContext(self.curve, f, self, mac_bytes)
        yield ctx
        ctx.read()
    def decrypt(self, s, mac_bytes=10):
        """Decrypt the bytestring `s`; returns the plaintext bytes."""
        if isinstance(s, six.text_type):
            raise ValueError("s should be bytes")
        instream = BytesIO(s)
        with self.decrypt_from(instream, mac_bytes) as f:
            return f.read()
    def sign(self, h, sig_format=SER_BINARY):
        """Sign the message whose SHA-512 hash is `h`; returns the
        signature serialized in `sig_format`."""
        outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
                  else self.curve.sig_len_bin)
        sig = self._ECDSA_sign(h)
        return serialize_number(sig, sig_format, outlen)
    def __repr__(self):
        return "<PrivKey %s>" % self.e
    def __str__(self):
        return str(self.e)
    def _ECDSA_sign(self, md):
        """ECDSA-sign digest `md`; returns the signature packed into a
        single integer s*order + r (unpacked again by _ECDSA_verify)."""
        # Get the pseudo-random exponent from the messagedigest
        # and the private key.  The nonce k is derived deterministically:
        # HMAC-SHA256(privkey, md) keys an AES-CTR stream (similar in
        # spirit to RFC 6979).
        order = self.curve.order
        hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
        h = hmac.new(hmk, digestmod=hashlib.sha256)
        h.update(md)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cprng = Crypto.Cipher.AES.new(h.digest(),
                                      Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        r = 0
        s = 0
        while s == 0:
            while r == 0:
                # Draw a nonce k and compute r = (k*G).x mod order.
                buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
                k = self.curve._buf_to_exponent(buf)
                p1 = self.curve.base * k
                r = p1.x % order
            # s = k^-1 * (e + d*r) mod order, with e the digest mod order.
            e = deserialize_number(md, SER_BINARY)
            e = (e % order)
            s = (self.e * r) % order
            s = (s + e) % order
            e = gmpy.invert(k, order)
            s = (s * e) % order
        # Pack (r, s) into one integer: sig = s*order + r.
        s = s * order
        s = s + r
        return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
    """Streaming ECIES encryption state.  Use PubKey.encrypt_to.

    Writes the sender's ephemeral point R to `f`, then AES-256-CTR
    encrypts everything written while accumulating an HMAC-SHA256 over
    the ciphertext; finish() appends the MAC truncated to `mac_bytes`.
    """
    def __init__(self, f, p, mac_bytes=10):
        self.f = f
        self.mac_bytes = mac_bytes
        # Ephemeral key agreement against the recipient point `p`
        # yields a 64-byte key: key[:32] keys AES-256-CTR and
        # key[32:] keys HMAC-SHA256.
        key, R = p._ECIES_encryption()
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        f.write(R.to_bytes(SER_BINARY))
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
    def write(self, s):
        """Encrypt `s` and write the ciphertext to the wrapped stream."""
        if not self.f:
            raise IOError("closed")
        ct = self.cipher.encrypt(s)
        self.f.write(ct)
        self.h.update(ct)
    def finish(self):
        """Append the truncated MAC and close this context."""
        if not self.f:
            raise IOError("closed")
        self.f.write(self.h.digest()[:self.mac_bytes])
        self.f = None
class DecryptionContext(object):
    """Streaming ECIES decryption state.  Use Curve.decrypt_from.

    Reads the sender's ephemeral point R from `f`, derives the shared
    key, then AES-256-CTR decrypts the stream while accumulating an
    HMAC-SHA256 over the ciphertext.  The last `mac_bytes` bytes of
    the stream are the (truncated) MAC trailer; it is verified when
    the end of the stream is reached and IntegrityError is raised on
    mismatch.
    """
    def __init__(self, curve, f, privkey, mac_bytes=10):
        self.f = f
        self.mac_bytes = mac_bytes
        # The ephemeral public point R comes first in the stream.
        R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
        key = R._ECIES_decryption(privkey)
        # key[:32] keys AES-256-CTR; key[32:] keys HMAC-SHA256.
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        # Keep `mac_bytes` bytes of lookahead so the MAC trailer can be
        # told apart from ciphertext when end-of-stream is hit.
        self.ahead = f.read(mac_bytes)
    def read(self, n=None):
        """Read and decrypt up to `n` bytes (all remaining if None).

        Raises IntegrityError if the stream ends and the MAC does not
        verify.  Returns b'' once the stream is exhausted.
        """
        if not self.f:
            return b''  # was '': keep the return type bytes on Python 3
        if n is None:
            tmp = self.ahead + self.f.read()
        else:
            tmp = self.ahead + self.f.read(n)
        ct = tmp[:-self.mac_bytes]
        self.ahead = tmp[-self.mac_bytes:]
        self.h.update(ct)
        pt = self.cipher.decrypt(ct)
        if n is None or len(ct) < n:
            # End of stream: `ahead` now holds the MAC trailer.
            # Compare in constant time to avoid a timing side-channel.
            if not hmac.compare_digest(
                    self.h.digest()[:self.mac_bytes], self.ahead):
                raise IntegrityError
            self.f = None
        return pt
# The main Curve objects
# #########################################################
class Curve(object):
    """Represents an elliptic curve y**2 = x**3 + a*x + b over GF(m).

    Instances are normally obtained via the by_name / by_pk_len /
    by_name_substring factories, which look curves up in RAW_CURVES.
    """
    @staticmethod
    def by_name_substring(substring):
        """Return the unique Curve whose name contains `substring`
        (case-insensitive); KeyError unless exactly one matches."""
        substring = substring.lower()
        candidates = []
        for raw_curve in RAW_CURVES:
            if substring in raw_curve[0]:
                candidates.append(raw_curve)
        if len(candidates) != 1:
            raise KeyError
        return Curve(candidates[0])
    @staticmethod
    def by_name(name):
        """Return the Curve with exactly this name; KeyError if unknown."""
        for raw_curve in RAW_CURVES:
            if raw_curve[0] == name:
                return Curve(raw_curve)
        raise KeyError
    @staticmethod
    def by_pk_len(pk_len):
        """Return the Curve whose compact public keys are `pk_len`
        characters long; KeyError if none matches."""
        for raw_curve in RAW_CURVES:
            # raw_curve[8] is the pk_len_compact field.
            if raw_curve[8] == pk_len:
                return Curve(raw_curve)
        raise KeyError
    def __init__(self, raw_curve_params):
        """Initialize a new curve from a RAW_CURVES record.
        Use the `by_name'/`by_pk_len' factories instead."""
        r = raw_curve_parameters(*raw_curve_params)
        # Store domain parameters (hex bytestrings -> gmpy.mpz).
        self.name = r.name
        self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
        self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
        self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
        self.order = deserialize_number(
            binascii.unhexlify(r.order), SER_BINARY)
        self.base = AffinePoint(
            curve=self, x=deserialize_number(
                binascii.unhexlify(
                    r.base_x), SER_BINARY), y=deserialize_number(
                binascii.unhexlify(
                    r.base_y), SER_BINARY))
        self.cofactor = r.cofactor
        # Calculate some other parameters: serialized lengths of
        # public keys (which range over [0, 2m)), signatures (packed
        # as s*order + r, so over [0, order**2)), field elements and
        # exponents.
        self.pk_len_bin = get_serialized_number_len(
            (2 * self.m) - 1, SER_BINARY)
        self.pk_len_compact = get_serialized_number_len(
            (2 * self.m) - 1, SER_COMPACT)
        assert self.pk_len_compact == r.pk_len_compact
        self.sig_len_bin = get_serialized_number_len(
            (self.order * self.order) - 1, SER_BINARY)
        self.sig_len_compact = get_serialized_number_len(
            (self.order * self.order) - 1, SER_COMPACT)
        self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
        self.dh_len_compact = get_serialized_number_len(
            2 ** self.dh_len_bin - 1, SER_COMPACT)
        self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
        self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)
    @property
    def key_bytes(self):
        """The approximate number of bytes of information in a key
        (the binary public-key length)."""
        return self.pk_len_bin
    def __repr__(self):
        return "<Curve %s>" % self.name
    def point_from_string(self, s, fmt=SER_BINARY):
        """Deserialize a compressed point: x, plus m when the y-parity
        flag is set.  NOTE(review): validated with `assert`, which is
        stripped under `python -O`."""
        x = deserialize_number(s, fmt)
        yflag = x >= self.m
        if yflag:
            x = x - self.m
        assert 0 < x and x <= self.m
        return self._point_decompress(x, yflag)
    def pubkey_from_string(self, s, fmt=SER_BINARY):
        """Deserialize a compressed point and wrap it as a PubKey."""
        return PubKey(self.point_from_string(s, fmt))
    def _point_decompress(self, x, yflag):
        """Recover the point with abscissa `x` whose y has the parity
        given by `yflag`."""
        m = self.m
        # Evaluate the curve's right-hand side: x**3 + a*x + b mod m.
        h = (x * x) % m
        h = (h + self.a) % m
        h = (h * x) % m
        h = (h + self.b) % m
        y = mod_root(h, m)
        if y or not yflag:
            # Pick the root with the requested parity.
            if bool(y.getbit(0)) == yflag:
                return AffinePoint(x=x, y=y, curve=self)
            return AffinePoint(x=x, y=m - y, curve=self)
    def hash_to_exponent(self, h):
        """Convert a 32-byte hash into an exponent in [1, order-1]
        by expanding it with AES-CTR."""
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cipher = Crypto.Cipher.AES.new(h,
                                       Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        buf = cipher.encrypt(b'\0' * self.order_len_bin)
        return self._buf_to_exponent(buf)
    def _buf_to_exponent(self, buf):
        # Map the raw bytes onto [1, order-1].
        a = deserialize_number(buf, SER_BINARY)
        a = (a % (self.order - 1)) + 1
        return a
    def passphrase_to_pubkey(self, passphrase):
        """Derive the public key belonging to `passphrase`."""
        return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)
    def passphrase_to_privkey(self, passphrase):
        """Derive the private key belonging to the bytestring
        `passphrase` (SHA-256, then hash_to_exponent)."""
        if isinstance(passphrase, six.text_type):
            raise ValueError(
                "Encode `passphrase` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        h = _passphrase_to_hash(passphrase)
        return PrivKey(self.hash_to_exponent(h), self)
    @contextlib.contextmanager
    def decrypt_from(self, f, privkey, mac_bytes=10):
        """Context manager yielding a decrypting reader over `f`."""
        ctx = DecryptionContext(self, f, privkey, mac_bytes)
        yield ctx
        ctx.read()
    def decrypt(self, s, privkey, mac_bytes=10):
        """Decrypt the bytestring `s` with `privkey`."""
        instream = BytesIO(s)
        with self.decrypt_from(instream, privkey, mac_bytes) as f:
            return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
    """Return the 32-byte SHA-256 digest of the bytestring `passphrase`."""
    digest = hashlib.sha256()
    digest.update(passphrase)
    return digest.digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
    """Encrypt the bytestring `s` for the public key `pk`.

    When `curve` is None it is inferred from the length of `pk`,
    otherwise looked up by name.
    """
    if curve is None:
        chosen = Curve.by_pk_len(len(pk))
    else:
        chosen = Curve.by_name(curve)
    pubkey = chosen.pubkey_from_string(pk, pk_format)
    return pubkey.encrypt(s, mac_bytes)
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
    """Decrypt the bytestring `s` with the private key derived from
    `passphrase` on the named curve."""
    chosen = Curve.by_name(curve)
    return chosen.passphrase_to_privkey(passphrase).decrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
                 mac_bytes=10, chunk_size=4096, curve=None):
    """Encrypt `in_path_or_file` to `out_path_or_file` for pubkey `pk`.

    Either argument may be a path (opened and closed here) or an
    already-open binary file object (left open for the caller).
    """
    opened_in = None
    opened_out = None
    try:
        if stringlike(in_path_or_file):
            opened_in = open(in_path_or_file, 'rb')
        if stringlike(out_path_or_file):
            opened_out = open(out_path_or_file, 'wb')
        _encrypt_file(opened_in if opened_in is not None else in_path_or_file,
                      opened_out if opened_out is not None else out_path_or_file,
                      pk, pk_format, mac_bytes, chunk_size, curve)
    finally:
        # Only close streams we opened ourselves.
        if opened_out is not None:
            opened_out.close()
        if opened_in is not None:
            opened_in.close()
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
                 curve='secp160r1', mac_bytes=10, chunk_size=4096):
    """Decrypt `in_path_or_file` to `out_path_or_file` with `passphrase`.

    Either argument may be a path (opened and closed here) or an
    already-open binary file object (left open for the caller).
    """
    opened_in = None
    opened_out = None
    try:
        if stringlike(in_path_or_file):
            opened_in = open(in_path_or_file, 'rb')
        if stringlike(out_path_or_file):
            opened_out = open(out_path_or_file, 'wb')
        _decrypt_file(opened_in if opened_in is not None else in_path_or_file,
                      opened_out if opened_out is not None else out_path_or_file,
                      passphrase, curve, mac_bytes, chunk_size)
    finally:
        # Only close streams we opened ourselves.
        if opened_out is not None:
            opened_out.close()
        if opened_in is not None:
            opened_in.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
                  mac_bytes=10, chunk_size=4096, curve=None):
    """Stream-encrypt `in_file` to `out_file` for pubkey `pk` in
    `chunk_size`-byte chunks."""
    if curve is None:
        chosen = Curve.by_pk_len(len(pk))
    else:
        chosen = Curve.by_name(curve)
    pubkey = chosen.pubkey_from_string(pk, pk_format)
    with pubkey.encrypt_to(out_file, mac_bytes) as sink:
        chunk = in_file.read(chunk_size)
        while chunk:
            sink.write(chunk)
            chunk = in_file.read(chunk_size)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
                  mac_bytes=10, chunk_size=4096):
    """Stream-decrypt `in_file` to `out_file` with `passphrase` in
    `chunk_size`-byte chunks."""
    chosen = Curve.by_name(curve)
    secret = chosen.passphrase_to_privkey(passphrase)
    with secret.decrypt_from(in_file, mac_bytes) as source:
        chunk = source.read(chunk_size)
        while chunk:
            out_file.write(chunk)
            chunk = source.read(chunk_size)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
           curve=None):
    """Return True iff `sig` is a signature by pubkey `pk` of the
    bytestring `s` (hashed with SHA-512)."""
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    if curve is None:
        chosen = Curve.by_pk_len(len(pk))
    else:
        chosen = Curve.by_name(curve)
    pubkey = chosen.pubkey_from_string(pk, pk_format)
    digest = hashlib.sha512(s).digest()
    return pubkey.verify(digest, sig, sig_format)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
    """Sign the bytestring `s` with the private key derived from
    `passphrase`; returns the serialized signature."""
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    chosen = Curve.by_name(curve)
    secret = chosen.passphrase_to_privkey(passphrase)
    digest = hashlib.sha512(s).digest()
    return secret.sign(digest, sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
    """Derive the public key belonging to `passphrase` on the named curve."""
    return Curve.by_name(curve).passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
    """Generate a fresh random (passphrase, pubkey) pair on `curve`.

    `randfunc(n)` must return `n` random bytes; defaults to a fresh
    PyCrypto RNG.  The passphrase is returned in compact serialization.
    """
    if randfunc is None:
        randfunc = Crypto.Random.new().read
    chosen = Curve.by_name(curve)
    raw = randfunc(chosen.order_len_bin)
    passphrase = serialize_number(deserialize_number(raw), SER_COMPACT)
    pubkey = str(passphrase_to_pubkey(passphrase))
    return (passphrase, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
deserialize_number
|
python
|
def deserialize_number(s, fmt=SER_BINARY):
    """Deserialize a number from the string `s` in format `fmt`.

    SER_BINARY reads big-endian base-256 raw bytes; SER_COMPACT reads
    digits of the compact alphabet.  Returns a gmpy.mpz.
    """
    ret = gmpy.mpz(0)
    if fmt == SER_BINARY:
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        for c in s:
            ret *= 256
            ret += byte2int(c)
        return ret
    assert fmt == SER_COMPACT
    if isinstance(s, six.text_type):
        s = s.encode('ascii')
    for c in s:
        ret *= len(COMPACT_DIGITS)
        ret += R_COMPACT_DIGITS[c]
    return ret
|
Deserializes a number from a string `s' in format `fmt'
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L78-L96
|
[
"def byte2int(b): return b\n"
] |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
if six.PY3:
from io import BytesIO as BytesIO
def byte2int(b): return b
def stringlike(x): return isinstance(x, (str, bytes))
else:
from cStringIO import StringIO as BytesIO
def byte2int(b): return ord(b)
def stringlike(x): return isinstance(x, basestring)
l = logging.getLogger(__name__)
class IntegrityError(ValueError):
    """Raised when the MAC of a decrypted message does not verify."""
    pass
# Serialization of numbers
# #########################################################
SER_COMPACT = 0
SER_BINARY = 1
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
R_COMPACT_DIGITS = {} # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """Serialize the number `x` as a bytestring in format `fmt`,
    left-padded to `outlen` bytes when given (asserts it fits)."""
    chunks = []
    if fmt == SER_BINARY:
        # Big-endian base 256; collect least-significant digits first.
        while x:
            x, digit = divmod(x, 256)
            chunks.append(six.int2byte(int(digit)))
        out = b''.join(reversed(chunks))
        if outlen is not None:
            assert len(out) <= outlen
            out = out.rjust(outlen, b'\0')
        return out
    assert fmt == SER_COMPACT
    base = len(COMPACT_DIGITS)
    while x:
        x, digit = divmod(x, base)
        chunks.append(COMPACT_DIGITS[digit:digit + 1])
    out = b''.join(reversed(chunks))
    if outlen is not None:
        assert len(out) <= outlen
        out = out.rjust(outlen, COMPACT_DIGITS[0:1])
    return out
def get_serialized_number_len(x, fmt=SER_BINARY):
    """Return the length of `x` serialized in format `fmt`.

    For SER_BINARY `x` must be a gmpy.mpz (its numdigits method is
    used); for SER_COMPACT it is the number of base-N digits.
    """
    if fmt == SER_BINARY:
        # One byte per 8 bits, rounded up.
        bits = x.numdigits(2)
        return (bits + 7) // 8
    assert fmt == SER_COMPACT
    base = len(COMPACT_DIGITS)
    count = 0
    while x != 0:
        x = x // base
        count += 1
    return count
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
    """Return True iff `a` is a quadratic residue modulo the odd
    prime `p`, by Euler's criterion: a**((p-1)/2) == 1 (mod p).
    Zero counts as a square."""
    if not a:
        return True
    return pow(a, p // 2, p) == 1
def mod_root(a, p):
    """Return a square root of `a` modulo the odd prime `p`.

    Implements the Tonelli-Shanks algorithm; raises ValueError when
    `a` is not a quadratic residue.  `p` must be a gmpy.mpz (getbit
    is used).  Returns one root; the other is p minus it.
    """
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        raise ValueError
    # Find a quadratic non-residue n.
    n = 2
    while mod_issquare(n, p):
        n += 1
    # Write p - 1 = q * 2**r with q odd.
    q = p - 1
    r = 0
    while not q.getbit(r):
        r += 1
    q = q >> r
    y = pow(n, q, p)
    h = q >> 1
    b = pow(a, h, p)
    x = (a * b) % p
    b = (b * x) % p
    while b != 1:
        # Find the least m with b**(2**m) == 1, then use y to shrink
        # the remaining defect.
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        h = gmpy.mpz(0)
        h = h.setbit(r - m - 1)  # h = 2**(r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
# Raw curve parameters
# #########################################################
# Field layout of a RAW_CURVES record: curve name; coefficients a, b
# and modulus m of y**2 = x**3 + a*x + b over GF(m); the affine base
# point; the group order; the cofactor; and the length of a
# compact-serialized public key.  All numbers are hex bytestrings.
raw_curve_parameters = collections.namedtuple(
    'raw_curve_parameters',
    ('name',
     'a',
     'b',
     'm',
     'base_x',
     'base_y',
     'order',
     'cofactor',
     'pk_len_compact'))
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
    """A curve point in Jacobian projective coordinates.

    (x, y, z) represents the affine point (x/z**2, y/z**3); z == 0
    encodes the point at infinity.  Used during scalar multiplication
    to avoid one modular inversion per addition.
    """
    def __init__(self, x, y, z, curve):
        self.x = x
        self.y = y
        self.z = z
        self.curve = curve
    def to_affine(self):
        """Divide out z and return the equivalent AffinePoint."""
        if self.z == 0:
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        h = gmpy.invert(self.z, m)
        # x/z**2 and y/z**3 mod m.
        y = (h * h) % m
        x = (self.x * y) % m
        y = (y * h) % m
        y = (y * self.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)
    def double(self):
        """Return this point doubled (Jacobian doubling formulas)."""
        if not self.z:
            return self
        if not self.y:
            # y == 0: the result is the point at infinity.
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        t1 = (self.x * self.x) % m
        t2 = (t1 + t1) % m
        t2 = (t2 + t1) % m
        t1 = (self.z * self.z) % m
        t1 = (t1 * t1) % m
        t1 = (t1 * a) % m
        t1 = (t1 + t2) % m
        z = (self.z * self.y) % m
        z = (z + z) % m
        y = (self.y * self.y) % m
        y = (y + y) % m
        t2 = (self.x * y) % m
        t2 = (t2 + t2) % m
        x = (t1 * t1) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        t1 = (t1 * t2) % m
        t2 = (y * y) % m
        t2 = (t2 + t2) % m
        y = (t1 - t2) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __add__(self, other):
        """Mixed addition: Jacobian self plus AffinePoint `other`."""
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self.z:
            return other.to_jacobian()
        m = self.curve.m
        t1 = (self.z * self.z) % m
        t2 = (t1 * other.x) % m
        t1 = (t1 * self.z) % m
        t1 = (t1 * other.y) % m
        if self.x == t2:
            if self.y == t1:
                # Same point: fall back to doubling.
                return self.double()
            # Opposite points: the result is the point at infinity.
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        x = (self.x - t2) % m
        y = (self.y - t1) % m
        z = (self.z * x) % m
        t3 = (x * x) % m
        t2 = (t2 * t3) % m
        t3 = (t3 * x) % m
        t1 = (t1 * t3) % m
        x = (y * y) % m
        x = (x - t3) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        y = (y * t2) % m
        y = (y - t1) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __repr__(self):
        return "<JacobianPoint (%s, %s, %s) of %s>" % (
            self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
    """A curve point in affine coordinates (x, y) on `curve`.

    The pair (0, 0) encodes the point at infinity (the group's zero),
    which is also what __nonzero__/__bool__ tests for.  Also carries
    the ECIES and ECDSA primitives used by PubKey/PrivKey.
    """
    def __init__(self, x, y, curve):
        self.x = x
        self.y = y
        self.curve = curve
    @property
    def on_curve(self):
        """True iff (x, y) satisfies y**2 == x**3 + a*x + b (mod m)."""
        if not self:
            return True
        m = self.curve.m
        a = self.curve.a
        b = self.curve.b
        h1 = (self.x * self.x) % m
        h1 = (h1 + a) % m
        h1 = (h1 * self.x) % m
        h1 = (h1 + b) % m
        h2 = (self.y * self.y) % m
        return h1 == h2
    def to_jacobian(self):
        """Lift to Jacobian coordinates (z = 1; infinity -> z = 0)."""
        if not self:
            return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)
    def double(self):
        """Return this point doubled, via the tangent-line formula."""
        if not self.y:
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # Slope t1 = (3*x**2 + a) / (2*y) mod m.
        t2 = (self.x * self.x) % m
        t1 = (t2 + t2) % m
        t1 = (t1 + t2) % m
        t1 = (t1 + a) % m
        t2 = (self.y + self.y) % m
        t2 = gmpy.invert(t2, m)
        t1 = (t1 * t2) % m
        t2 = (t1 * t1) % m
        t2 = (t2 - self.x) % m
        t2 = (t2 - self.x) % m
        x = (self.x - t2) % m
        t1 = (t1 * x) % m
        y = (t1 - self.y) % m
        x = t2
        return AffinePoint(x=x, y=y, curve=self.curve)
    def __mul__(self, exp):
        """Scalar multiplication: left-to-right double-and-add over
        the bits of `exp` (a gmpy.mpz), in Jacobian coordinates."""
        n = exp.numdigits(2)
        r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        while n:
            r = r.double()
            n -= 1
            if exp.getbit(n):
                r = r + self
        R = r.to_affine()
        assert R.on_curve
        return R
    def __add__(self, other):
        """Affine point addition via the chord-line formula."""
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self:
            return other
        if self.x == other.x:
            if self.y == other.y:
                return self.double()
            # Opposite points sum to the point at infinity.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        # Slope y = (self.y - other.y) / (self.x - other.x) mod m.
        t = (self.y - other.y) % m
        y = (self.x - other.x) % m
        y = gmpy.invert(y, m)
        y = (t * y) % m
        t = (y * y) % m
        x = (self.x + other.x) % m
        x = (t - x) % m
        t = (other.x - x) % m
        y = (y * t) % m
        y = (y - other.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)
    def __nonzero__(self):
        # False only for the point at infinity, encoded as (0, 0).
        return bool(self.x or self.y)
    __bool__ = __nonzero__
    def __repr__(self):
        return "<AffinePoint (%s, %s) of %s>" % (
            self.x, self.y, self.curve.name)
    def __eq__(self, other):
        if not isinstance(other, AffinePoint):
            return False
        return self.x == other.x and self.y == other.y
    def __ne__(self, other):
        return not (self == other)
    def __str__(self):
        return self.to_string(SER_COMPACT)
    def to_bytes(self, fmt=SER_BINARY):
        """Serialize in compressed form: x, plus m when y is odd."""
        outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
                  else self.curve.pk_len_bin)
        if self._point_compress():
            return serialize_number(self.x + self.curve.m, fmt, outlen)
        return serialize_number(self.x, fmt, outlen)
    def to_string(self, fmt=SER_BINARY):
        return self.to_bytes(fmt).decode()
    def _point_compress(self):
        # The y-parity flag stored in the compressed encoding.
        return self.y.getbit(0) == 1
    def _ECIES_KDF(self, R):
        """Derive the 64-byte session key from the shared point (self)
        and the ephemeral point R: SHA-512 over their coordinates."""
        h = hashlib.sha512()
        h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
        return h.digest()
    def _ECIES_encryption(self):
        """Ephemeral key agreement against this (public) point.
        Returns (session_key, R) with R the ephemeral public point."""
        while True:
            k = gmpy.mpz(
                Crypto.Random.random.randrange(
                    0, int(
                        self.curve.order - 1)))
            R = self.curve.base * k
            k = k * self.curve.cofactor
            Z = self * k
            if Z:
                break
        return (Z._ECIES_KDF(R), R)
    def _ECIES_decryption(self, d):
        """Recover the session key from this (ephemeral) point with
        secret exponent `d` (or a PrivKey); raises ValueError on an
        invalid embedded point."""
        if isinstance(d, PrivKey):
            d = d.e
        e = d * self.curve.cofactor
        if not self.valid_embedded_key:
            raise ValueError
        Z = self * e
        if not Z:
            raise ValueError
        return Z._ECIES_KDF(self)
    def _ECDSA_verify(self, md, sig):
        """Verify the packed signature `sig` (= s*order + r) of digest
        `md` against this public point."""
        order = self.curve.order
        s, r = divmod(sig, order)
        if s <= 0 or order <= s or r <= 0 or order <= r:
            return False
        e = deserialize_number(md, SER_BINARY) % order
        s = gmpy.invert(s, order)
        # Check that (e/s)*G + (r/s)*Q has abscissa r mod order.
        e = (e * s) % order
        X1 = self.curve.base * e
        e = (r * s) % order
        X2 = self * e
        X1 = X1 + X2
        if not X1:
            return False
        s = X1.x % order
        return s == r
    @property
    def valid_embedded_key(self):
        """Sanity-check a point received from the wire: coordinates in
        range, not the point at infinity, and on the curve."""
        if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
                self.y > self.curve.m):
            return False
        if not self:
            return False
        if not self.on_curve:
            return False
        return True
class PubKey(object):
    """A public key, wrapping a public affine curve point.

    Offers ECDSA signature verification and ECIES encryption.
    """
    def __init__(self, p):
        # p: the public AffinePoint (secret exponent times the base point).
        self.p = p
    def verify(self, h, sig, sig_fmt=SER_BINARY):
        """Return True iff `sig` (serialized in `sig_fmt`) is a valid
        ECDSA signature, by this key, of the message whose SHA-512
        hash is `h`."""
        s = deserialize_number(sig, sig_fmt)
        return self.p._ECDSA_verify(h, s)
    @contextlib.contextmanager
    def encrypt_to(self, f, mac_bytes=10):
        """Context manager yielding a file-like object; anything
        written to it is ECIES-encrypted for this pubkey and written
        to `f`.  A `mac_bytes`-byte MAC trailer is appended on exit."""
        ctx = EncryptionContext(f, self.p, mac_bytes)
        yield ctx
        ctx.finish()
    def encrypt(self, s, mac_bytes=10):
        """Encrypt the bytestring `s` for this pubkey; returns bytes."""
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        out = BytesIO()
        with self.encrypt_to(out, mac_bytes) as f:
            f.write(s)
        return out.getvalue()
    def to_bytes(self, fmt=SER_BINARY):
        # Serialized (compressed) form of the underlying point, as bytes.
        return self.p.to_bytes(fmt)
    def to_string(self, fmt=SER_BINARY):
        # Serialized form decoded to text.
        return self.p.to_string(fmt)
    def __str__(self):
        return self.to_string(SER_COMPACT)
    def __repr__(self):
        return "<PubKey %s>" % self
class PrivKey(object):
    """A private key: a secret exponent on a curve.

    Offers ECIES decryption and deterministic ECDSA signing.
    """
    def __init__(self, e, curve):
        self.e = e          # the secret exponent
        self.curve = curve  # the Curve the exponent belongs to
    @contextlib.contextmanager
    def decrypt_from(self, f, mac_bytes=10):
        """Context manager yielding a file-like object that decrypts
        a message read from `f`; the MAC is verified when the stream
        is exhausted (IntegrityError on mismatch)."""
        ctx = DecryptionContext(self.curve, f, self, mac_bytes)
        yield ctx
        ctx.read()
    def decrypt(self, s, mac_bytes=10):
        """Decrypt the bytestring `s`; returns the plaintext bytes."""
        if isinstance(s, six.text_type):
            raise ValueError("s should be bytes")
        instream = BytesIO(s)
        with self.decrypt_from(instream, mac_bytes) as f:
            return f.read()
    def sign(self, h, sig_format=SER_BINARY):
        """Sign the message whose SHA-512 hash is `h`; returns the
        signature serialized in `sig_format`."""
        outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
                  else self.curve.sig_len_bin)
        sig = self._ECDSA_sign(h)
        return serialize_number(sig, sig_format, outlen)
    def __repr__(self):
        return "<PrivKey %s>" % self.e
    def __str__(self):
        return str(self.e)
    def _ECDSA_sign(self, md):
        """ECDSA-sign digest `md`; returns the signature packed into a
        single integer s*order + r (unpacked again by _ECDSA_verify)."""
        # Get the pseudo-random exponent from the messagedigest
        # and the private key.  The nonce k is derived deterministically:
        # HMAC-SHA256(privkey, md) keys an AES-CTR stream (similar in
        # spirit to RFC 6979).
        order = self.curve.order
        hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
        h = hmac.new(hmk, digestmod=hashlib.sha256)
        h.update(md)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cprng = Crypto.Cipher.AES.new(h.digest(),
                                      Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        r = 0
        s = 0
        while s == 0:
            while r == 0:
                # Draw a nonce k and compute r = (k*G).x mod order.
                buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
                k = self.curve._buf_to_exponent(buf)
                p1 = self.curve.base * k
                r = p1.x % order
            # s = k^-1 * (e + d*r) mod order, with e the digest mod order.
            e = deserialize_number(md, SER_BINARY)
            e = (e % order)
            s = (self.e * r) % order
            s = (s + e) % order
            e = gmpy.invert(k, order)
            s = (s * e) % order
        # Pack (r, s) into one integer: sig = s*order + r.
        s = s * order
        s = s + r
        return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
    """Streaming ECIES encryption state.  Use PubKey.encrypt_to.

    Writes the sender's ephemeral point R to `f`, then AES-256-CTR
    encrypts everything written while accumulating an HMAC-SHA256 over
    the ciphertext; finish() appends the MAC truncated to `mac_bytes`.
    """
    def __init__(self, f, p, mac_bytes=10):
        self.f = f
        self.mac_bytes = mac_bytes
        # Ephemeral key agreement against the recipient point `p`
        # yields a 64-byte key: key[:32] keys AES-256-CTR and
        # key[32:] keys HMAC-SHA256.
        key, R = p._ECIES_encryption()
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        f.write(R.to_bytes(SER_BINARY))
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
    def write(self, s):
        """Encrypt `s` and write the ciphertext to the wrapped stream."""
        if not self.f:
            raise IOError("closed")
        ct = self.cipher.encrypt(s)
        self.f.write(ct)
        self.h.update(ct)
    def finish(self):
        """Append the truncated MAC and close this context."""
        if not self.f:
            raise IOError("closed")
        self.f.write(self.h.digest()[:self.mac_bytes])
        self.f = None
class DecryptionContext(object):
    """Streaming ECIES decryption state.  Use Curve.decrypt_from.

    Reads the sender's ephemeral point R from `f`, derives the shared
    key, then AES-256-CTR decrypts the stream while accumulating an
    HMAC-SHA256 over the ciphertext.  The last `mac_bytes` bytes of
    the stream are the (truncated) MAC trailer; it is verified when
    the end of the stream is reached and IntegrityError is raised on
    mismatch.
    """
    def __init__(self, curve, f, privkey, mac_bytes=10):
        self.f = f
        self.mac_bytes = mac_bytes
        # The ephemeral public point R comes first in the stream.
        R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
        key = R._ECIES_decryption(privkey)
        # key[:32] keys AES-256-CTR; key[32:] keys HMAC-SHA256.
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        # Keep `mac_bytes` bytes of lookahead so the MAC trailer can be
        # told apart from ciphertext when end-of-stream is hit.
        self.ahead = f.read(mac_bytes)
    def read(self, n=None):
        """Read and decrypt up to `n` bytes (all remaining if None).

        Raises IntegrityError if the stream ends and the MAC does not
        verify.  Returns b'' once the stream is exhausted.
        """
        if not self.f:
            return b''  # was '': keep the return type bytes on Python 3
        if n is None:
            tmp = self.ahead + self.f.read()
        else:
            tmp = self.ahead + self.f.read(n)
        ct = tmp[:-self.mac_bytes]
        self.ahead = tmp[-self.mac_bytes:]
        self.h.update(ct)
        pt = self.cipher.decrypt(ct)
        if n is None or len(ct) < n:
            # End of stream: `ahead` now holds the MAC trailer.
            # Compare in constant time to avoid a timing side-channel.
            if not hmac.compare_digest(
                    self.h.digest()[:self.mac_bytes], self.ahead):
                raise IntegrityError
            self.f = None
        return pt
# The main Curve objects
# #########################################################
class Curve(object):
""" Represents a Elliptic Curve """
    @staticmethod
    def by_name_substring(substring):
        """Return the unique Curve whose name contains `substring`
        (case-insensitive); KeyError unless exactly one matches."""
        substring = substring.lower()
        candidates = []
        for raw_curve in RAW_CURVES:
            if substring in raw_curve[0]:
                candidates.append(raw_curve)
        if len(candidates) != 1:
            raise KeyError
        return Curve(candidates[0])
    @staticmethod
    def by_name(name):
        """Return the Curve with exactly this name; KeyError if unknown."""
        for raw_curve in RAW_CURVES:
            if raw_curve[0] == name:
                return Curve(raw_curve)
        raise KeyError
    @staticmethod
    def by_pk_len(pk_len):
        """Return the Curve whose compact public keys are `pk_len`
        characters long; KeyError if none matches."""
        for raw_curve in RAW_CURVES:
            # raw_curve[8] is the pk_len_compact field.
            if raw_curve[8] == pk_len:
                return Curve(raw_curve)
        raise KeyError
def __init__(self, raw_curve_params):
""" Initialize a new curve from raw curve parameters.
Use `Curve.by_pk_len' instead """
r = raw_curve_parameters(*raw_curve_params)
# Store domain parameters
self.name = r.name
self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
self.order = deserialize_number(
binascii.unhexlify(r.order), SER_BINARY)
self.base = AffinePoint(
curve=self, x=deserialize_number(
binascii.unhexlify(
r.base_x), SER_BINARY), y=deserialize_number(
binascii.unhexlify(
r.base_y), SER_BINARY))
self.cofactor = r.cofactor
# Calculate some other parameters
self.pk_len_bin = get_serialized_number_len(
(2 * self.m) - 1, SER_BINARY)
self.pk_len_compact = get_serialized_number_len(
(2 * self.m) - 1, SER_COMPACT)
assert self.pk_len_compact == r.pk_len_compact
self.sig_len_bin = get_serialized_number_len(
(self.order * self.order) - 1, SER_BINARY)
self.sig_len_compact = get_serialized_number_len(
(self.order * self.order) - 1, SER_COMPACT)
self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
self.dh_len_compact = get_serialized_number_len(
2 ** self.dh_len_bin - 1, SER_COMPACT)
self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)
@property
def key_bytes(self):
""" The approximate number of bytes of information in a key. """
return self.pk_len_bin
def __repr__(self):
return "<Curve %s>" % self.name
def point_from_string(self, s, fmt=SER_BINARY):
x = deserialize_number(s, fmt)
yflag = x >= self.m
if yflag:
x = x - self.m
assert 0 < x and x <= self.m
return self._point_decompress(x, yflag)
def pubkey_from_string(self, s, fmt=SER_BINARY):
return PubKey(self.point_from_string(s, fmt))
def _point_decompress(self, x, yflag):
m = self.m
h = (x * x) % m
h = (h + self.a) % m
h = (h * x) % m
h = (h + self.b) % m
y = mod_root(h, m)
if y or not yflag:
if bool(y.getbit(0)) == yflag:
return AffinePoint(x=x, y=y, curve=self)
return AffinePoint(x=x, y=m - y, curve=self)
def hash_to_exponent(self, h):
""" Converts a 32 byte hash to an exponent """
ctr = Crypto.Util.Counter.new(128, initial_value=0)
cipher = Crypto.Cipher.AES.new(h,
Crypto.Cipher.AES.MODE_CTR, counter=ctr)
buf = cipher.encrypt(b'\0' * self.order_len_bin)
return self._buf_to_exponent(buf)
def _buf_to_exponent(self, buf):
a = deserialize_number(buf, SER_BINARY)
a = (a % (self.order - 1)) + 1
return a
def passphrase_to_pubkey(self, passphrase):
return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)
def passphrase_to_privkey(self, passphrase):
if isinstance(passphrase, six.text_type):
raise ValueError(
"Encode `passphrase` to a bytestring yourself to" +
" prevent problems with different default encodings")
h = _passphrase_to_hash(passphrase)
return PrivKey(self.hash_to_exponent(h), self)
@contextlib.contextmanager
def decrypt_from(self, f, privkey, mac_bytes=10):
ctx = DecryptionContext(self, f, privkey, mac_bytes)
yield ctx
ctx.read()
def decrypt(self, s, privkey, mac_bytes=10):
instream = BytesIO(s)
with self.decrypt_from(instream, privkey, mac_bytes) as f:
return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
""" Converts a passphrase to a hash. """
return hashlib.sha256(passphrase).digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
    """ Encrypts `s' for public key `pk' """
    # When no curve name is given, infer the curve from the length of
    # the serialized public key.
    curve = (Curve.by_pk_len(len(pk)) if curve is None
             else Curve.by_name(curve))
    p = curve.pubkey_from_string(pk, pk_format)
    return p.encrypt(s, mac_bytes)
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
    """ Decrypts `s' with passphrase `passphrase' """
    # Raises IntegrityError when the embedded MAC does not verify.
    curve = Curve.by_name(curve)
    privkey = curve.passphrase_to_privkey(passphrase)
    return privkey.decrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
                 mac_bytes=10, chunk_size=4096, curve=None):
    """ Encrypts `in_file' to `out_file' for pubkey `pk' """
    # Both arguments may be paths or already-open file objects; only
    # files opened here (from paths) are closed in the finally block.
    close_in, close_out = False, False
    in_file, out_file = in_path_or_file, out_path_or_file
    try:
        if stringlike(in_path_or_file):
            in_file = open(in_path_or_file, 'rb')
            close_in = True
        if stringlike(out_path_or_file):
            out_file = open(out_path_or_file, 'wb')
            close_out = True
        _encrypt_file(in_file, out_file, pk, pk_format, mac_bytes, chunk_size,
                      curve)
    finally:
        if close_out:
            out_file.close()
        if close_in:
            in_file.close()
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
                 curve='secp160r1', mac_bytes=10, chunk_size=4096):
    """ Decrypts `in_file' to `out_file' with passphrase `passphrase' """
    # Both arguments may be paths or already-open file objects; only
    # files opened here (from paths) are closed in the finally block.
    close_in, close_out = False, False
    in_file, out_file = in_path_or_file, out_path_or_file
    try:
        if stringlike(in_path_or_file):
            in_file = open(in_path_or_file, 'rb')
            close_in = True
        if stringlike(out_path_or_file):
            out_file = open(out_path_or_file, 'wb')
            close_out = True
        _decrypt_file(in_file, out_file, passphrase, curve, mac_bytes,
                      chunk_size)
    finally:
        if close_out:
            out_file.close()
        if close_in:
            in_file.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
                  mac_bytes=10, chunk_size=4096, curve=None):
    # Stream `in_file` through an EncryptionContext in chunk_size
    # pieces; the context manager appends the MAC trailer on exit.
    curve = (Curve.by_pk_len(len(pk)) if curve is None
             else Curve.by_name(curve))
    p = curve.pubkey_from_string(pk, pk_format)
    with p.encrypt_to(out_file, mac_bytes) as encrypted_out:
        while True:
            buff = in_file.read(chunk_size)
            if not buff:
                break
            encrypted_out.write(buff)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
                  mac_bytes=10, chunk_size=4096):
    # Stream `in_file` through a DecryptionContext in chunk_size
    # pieces; the context manager verifies the MAC on exit.
    curve = Curve.by_name(curve)
    privkey = curve.passphrase_to_privkey(passphrase)
    with privkey.decrypt_from(in_file, mac_bytes) as decrypted_in:
        while True:
            buff = decrypted_in.read(chunk_size)
            if not buff:
                break
            out_file.write(buff)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
           curve=None):
    """ Verifies that `sig' is a valid signature by pubkey `pk' on the
    message `s'. """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    # Curve may be inferred from the serialized public-key length.
    curve = (Curve.by_pk_len(len(pk)) if curve is None
             else Curve.by_name(curve))
    p = curve.pubkey_from_string(pk, pk_format)
    # ECDSA over the SHA-512 digest of the message.
    return p.verify(hashlib.sha512(s).digest(), sig, sig_format)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
    """ Signs `s' with passphrase `passphrase' """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    curve = Curve.by_name(curve)
    privkey = curve.passphrase_to_privkey(passphrase)
    # ECDSA over the SHA-512 digest of the message.
    return privkey.sign(hashlib.sha512(s).digest(), sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
    """Return the PubKey corresponding to byte-string `passphrase`."""
    curve = Curve.by_name(curve)
    return curve.passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
    """ Convenience function to generate a random
    new keypair (passphrase, pubkey). """
    # randfunc(n) must return n random bytes; defaults to PyCrypto's RNG.
    if randfunc is None:
        randfunc = Crypto.Random.new().read
    curve = Curve.by_name(curve)
    raw_privkey = randfunc(curve.order_len_bin)
    # Re-encode the random bytes compactly; this string doubles as the
    # "passphrase" accepted by passphrase_to_privkey and friends.
    privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
    pubkey = str(passphrase_to_pubkey(privkey))
    return (privkey, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
mod_issquare
|
python
|
def mod_issquare(a, p):
    """Return True iff `a` is a quadratic residue modulo the odd prime `p`.

    Uses Euler's criterion: a is a square mod p exactly when
    a**((p-1)//2) == 1 (mod p).  Zero counts as a square.
    """
    if not a:
        return True
    return pow(a, p // 2, p) == 1
|
Returns whether `a' is a square modulo p
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L113-L119
| null |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
if six.PY3:
from io import BytesIO as BytesIO
def byte2int(b): return b
def stringlike(x): return isinstance(x, (str, bytes))
else:
from cStringIO import StringIO as BytesIO
def byte2int(b): return ord(b)
def stringlike(x): return isinstance(x, basestring)
l = logging.getLogger(__name__)
class IntegrityError(ValueError):
    """Raised when the MAC check of a decrypted message fails."""
    pass
# Serialization of numbers
# #########################################################
# Identifiers for the two wire formats used throughout this module:
# SER_BINARY is raw big-endian bytes, SER_COMPACT is a printable
# encoding over the COMPACT_DIGITS alphabet.
SER_COMPACT = 0
SER_BINARY = 1
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
# Reverse lookup table (digit -> value).  Iterating bytes yields ints
# on Python 3 and 1-char strings on Python 2, so key types differ per
# major version; deserialize_number indexes it the same way.
R_COMPACT_DIGITS = {}  # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
    R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """ Serializes `x' to a string of length `outlen' in format `fmt' """
    # Builds the representation most-significant digit first by repeated
    # divmod; with `outlen` given the result is left-padded with the
    # zero digit of the chosen alphabet (and asserted to fit).
    ret = b''
    if fmt == SER_BINARY:
        while x:
            x, r = divmod(x, 256)
            ret = six.int2byte(int(r)) + ret
        if outlen is not None:
            assert len(ret) <= outlen
            ret = ret.rjust(outlen, b'\0')
        return ret
    assert fmt == SER_COMPACT
    while x:
        x, r = divmod(x, len(COMPACT_DIGITS))
        # Slice (not index) so the result is bytes on Python 3 too.
        ret = COMPACT_DIGITS[r:r + 1] + ret
    if outlen is not None:
        assert len(ret) <= outlen
        ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])
    return ret
def deserialize_number(s, fmt=SER_BINARY):
    """ Deserializes a number from a string `s' in format `fmt' """
    # Returns a gmpy.mpz — the rest of the module relies on mpz-only
    # methods such as .numdigits() and .getbit().
    ret = gmpy.mpz(0)
    if fmt == SER_BINARY:
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        for c in s:
            ret *= 256
            ret += byte2int(c)
        return ret
    assert fmt == SER_COMPACT
    if isinstance(s, six.text_type):
        # Compact digits are all ASCII, so text input is accepted here.
        s = s.encode('ascii')
    for c in s:
        ret *= len(COMPACT_DIGITS)
        ret += R_COMPACT_DIGITS[c]
    return ret
def get_serialized_number_len(x, fmt=SER_BINARY):
    """Return the length serialize_number(x, fmt) would produce.

    NOTE(review): the binary branch calls x.numdigits(2), so `x` is
    assumed to be a gmpy.mpz rather than a plain int — confirm at call
    sites.
    """
    if fmt == SER_BINARY:
        return (x.numdigits(2) + 7) // 8
    assert fmt == SER_COMPACT
    # Count base-len(COMPACT_DIGITS) digits of x.
    res = 0
    while x != 0:
        x = x // len(COMPACT_DIGITS)
        res += 1
    return res
# Some modular arithmetic
# #########################################################
def mod_root(a, p):
    """ Return a root of `a' modulo p """
    # Tonelli–Shanks square-root algorithm for an odd prime modulus.
    # Raises ValueError when `a` is not a quadratic residue mod p.
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        raise ValueError
    # Find a quadratic non-residue `n` to seed the algorithm.
    n = 2
    while mod_issquare(n, p):
        n += 1
    # Write p - 1 = q * 2**r with q odd (getbit requires gmpy mpz).
    q = p - 1
    r = 0
    while not q.getbit(r):
        r += 1
    q = q >> r
    y = pow(n, q, p)
    h = q >> 1
    b = pow(a, h, p)
    x = (a * b) % p
    b = (b * x) % p
    # Refine x until b == 1, at which point x*x == a (mod p).
    while b != 1:
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        # h = 2**(r - m - 1)
        h = gmpy.mpz(0)
        h = h.setbit(r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
# Raw curve parameters
# #########################################################
# Field layout of one RAW_CURVES entry: hex-encoded coefficients a, b,
# modulus m, base-point coordinates and group order, plus the integer
# cofactor and the expected compact public-key length (sanity-checked
# in Curve.__init__).
raw_curve_parameters = collections.namedtuple(
    'raw_curve_parameters',
    ('name',
     'a',
     'b',
     'm',
     'base_x',
     'base_y',
     'order',
     'cofactor',
     'pk_len_compact'))
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
    """A curve point in Jacobian projective coordinates.

    Per to_affine(), the affine point is (x / z**2, y / z**3); z == 0
    encodes the point at infinity.  Projective coordinates let double()
    and __add__ avoid a modular inversion per group operation.
    """

    def __init__(self, x, y, z, curve):
        self.x = x
        self.y = y
        self.z = z
        self.curve = curve

    def to_affine(self):
        """Convert to an AffinePoint (costs one modular inversion)."""
        if self.z == 0:
            # Infinity is represented as affine (0, 0).
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        h = gmpy.invert(self.z, m)
        y = (h * h) % m
        x = (self.x * y) % m
        y = (y * h) % m
        y = (y * self.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)

    def double(self):
        """Return 2 * self (Jacobian doubling formulas)."""
        if not self.z:
            return self
        if not self.y:
            # y == 0: doubling a 2-torsion point yields infinity (z=0).
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        t1 = (self.x * self.x) % m
        t2 = (t1 + t1) % m
        t2 = (t2 + t1) % m
        t1 = (self.z * self.z) % m
        t1 = (t1 * t1) % m
        t1 = (t1 * a) % m
        t1 = (t1 + t2) % m
        z = (self.z * self.y) % m
        z = (z + z) % m
        y = (self.y * self.y) % m
        y = (y + y) % m
        t2 = (self.x * y) % m
        t2 = (t2 + t2) % m
        x = (t1 * t1) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        t1 = (t1 * t2) % m
        t2 = (y * y) % m
        t2 = (t2 + t2) % m
        y = (t1 - t2) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)

    def __add__(self, other):
        """Mixed addition: Jacobian self + affine other.

        NOTE(review): raising NotImplementedError (rather than returning
        NotImplemented) is unconventional but preserved as-is.
        """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self.z:
            return other.to_jacobian()
        m = self.curve.m
        t1 = (self.z * self.z) % m
        t2 = (t1 * other.x) % m
        t1 = (t1 * self.z) % m
        t1 = (t1 * other.y) % m
        if self.x == t2:
            # Same x coordinate: either a doubling or P + (-P) = infinity.
            if self.y == t1:
                return self.double()
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        x = (self.x - t2) % m
        y = (self.y - t1) % m
        z = (self.z * x) % m
        t3 = (x * x) % m
        t2 = (t2 * t3) % m
        t3 = (t3 * x) % m
        t1 = (t1 * t3) % m
        x = (y * y) % m
        x = (x - t3) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        y = (y * t2) % m
        y = (y - t1) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)

    def __repr__(self):
        return "<JacobianPoint (%s, %s, %s) of %s>" % (
            self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
    """A curve point in affine coordinates; (0, 0) encodes infinity.

    Carries the ECIES/ECDSA primitives (_ECIES_*, _ECDSA_verify) used
    by PubKey/PrivKey.
    """

    def __init__(self, x, y, curve):
        self.x = x
        self.y = y
        self.curve = curve

    @property
    def on_curve(self):
        """True iff y^2 == x^3 + a*x + b (mod m); infinity counts."""
        if not self:
            return True
        m = self.curve.m
        a = self.curve.a
        b = self.curve.b
        h1 = (self.x * self.x) % m
        h1 = (h1 + a) % m
        h1 = (h1 * self.x) % m
        h1 = (h1 + b) % m
        h2 = (self.y * self.y) % m
        return h1 == h2

    def to_jacobian(self):
        """Lift to Jacobian coordinates (z=1; infinity keeps z=0)."""
        if not self:
            return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)

    def double(self):
        """Return 2 * self using the affine doubling formula."""
        if not self.y:
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # Tangent slope t1 = (3x^2 + a) / (2y) mod m.
        t2 = (self.x * self.x) % m
        t1 = (t2 + t2) % m
        t1 = (t1 + t2) % m
        t1 = (t1 + a) % m
        t2 = (self.y + self.y) % m
        t2 = gmpy.invert(t2, m)
        t1 = (t1 * t2) % m
        t2 = (t1 * t1) % m
        t2 = (t2 - self.x) % m
        t2 = (t2 - self.x) % m
        x = (self.x - t2) % m
        t1 = (t1 * x) % m
        y = (t1 - self.y) % m
        x = t2
        return AffinePoint(x=x, y=y, curve=self.curve)

    def __mul__(self, exp):
        """Scalar multiplication self * exp (exp must be a gmpy mpz).

        MSB-first double-and-add, accumulating in Jacobian coordinates.
        """
        n = exp.numdigits(2)
        r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        while n:
            r = r.double()
            n -= 1
            if exp.getbit(n):
                r = r + self
        R = r.to_affine()
        assert R.on_curve
        return R

    def __add__(self, other):
        """Affine point addition (chord formula)."""
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self:
            return other
        if self.x == other.x:
            # Same x: doubling when y matches, else P + (-P) = infinity.
            if self.y == other.y:
                return self.double()
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        t = (self.y - other.y) % m
        y = (self.x - other.x) % m
        y = gmpy.invert(y, m)
        y = (t * y) % m
        t = (y * y) % m
        x = (self.x + other.x) % m
        x = (t - x) % m
        t = (other.x - x) % m
        y = (y * t) % m
        y = (y - other.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)

    def __nonzero__(self):
        # False only for the (0, 0) infinity encoding.
        return bool(self.x or self.y)
    __bool__ = __nonzero__

    def __repr__(self):
        return "<AffinePoint (%s, %s) of %s>" % (
            self.x, self.y, self.curve.name)

    def __eq__(self, other):
        return False
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        return not (self == other)

    def __str__(self):
        return self.to_string(SER_COMPACT)

    def to_bytes(self, fmt=SER_BINARY):
        """Serialize in compressed form: x, plus m when y is odd."""
        outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
                  else self.curve.pk_len_bin)
        if self._point_compress():
            return serialize_number(self.x + self.curve.m, fmt, outlen)
        return serialize_number(self.x, fmt, outlen)

    def to_string(self, fmt=SER_BINARY):
        return self.to_bytes(fmt).decode()

    def _point_compress(self):
        # The parity bit of y selects which square root to restore.
        return self.y.getbit(0) == 1

    def _ECIES_KDF(self, R):
        """SHA-512 KDF over the shared point's x and R's coordinates."""
        h = hashlib.sha512()
        h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
        return h.digest()

    def _ECIES_encryption(self):
        """Pick a random k; return (KDF(self*k*cofactor, R), R=base*k)."""
        while True:
            k = gmpy.mpz(
                Crypto.Random.random.randrange(
                    0, int(
                        self.curve.order - 1)))
            R = self.curve.base * k
            k = k * self.curve.cofactor
            Z = self * k
            # Retry if the shared point degenerated to infinity.
            if Z:
                break
        return (Z._ECIES_KDF(R), R)

    def _ECIES_decryption(self, d):
        """Recover the ECIES key material from ephemeral point self."""
        if isinstance(d, PrivKey):
            d = d.e
        e = d * self.curve.cofactor
        if not self.valid_embedded_key:
            raise ValueError
        Z = self * e
        if not Z:
            raise ValueError
        return Z._ECIES_KDF(self)

    def _ECDSA_verify(self, md, sig):
        """Verify an ECDSA signature packed as sig = s*order + r."""
        order = self.curve.order
        s, r = divmod(sig, order)
        if s <= 0 or order <= s or r <= 0 or order <= r:
            return False
        e = deserialize_number(md, SER_BINARY) % order
        s = gmpy.invert(s, order)
        e = (e * s) % order
        X1 = self.curve.base * e
        e = (r * s) % order
        X2 = self * e
        X1 = X1 + X2
        if not X1:
            return False
        s = X1.x % order
        return s == r

    @property
    def valid_embedded_key(self):
        """True when self is a non-infinity point with in-range
        coordinates that lies on the curve.

        NOTE(review): x is rejected with `>= m` but y only with `> m` —
        possible off-by-one; confirm against upstream before changing.
        """
        if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
                self.y > self.curve.m):
            return False
        if not self:
            return False
        if not self.on_curve:
            return False
        return True
class PubKey(object):
    """ A public affine point """

    def __init__(self, p):
        # p: the public AffinePoint.
        self.p = p

    def verify(self, h, sig, sig_fmt=SER_BINARY):
        """ Verifies that `sig' is a signature for a message with
        SHA-512 hash `h'. """
        s = deserialize_number(sig, sig_fmt)
        return self.p._ECDSA_verify(h, s)

    @contextlib.contextmanager
    def encrypt_to(self, f, mac_bytes=10):
        """ Returns a file like object `ef'. Anything written to `ef'
        will be encrypted for this pubkey and written to `f'. """
        ctx = EncryptionContext(f, self.p, mac_bytes)
        yield ctx
        # Append the truncated MAC trailer on exit.
        ctx.finish()

    def encrypt(self, s, mac_bytes=10):
        """ Encrypt `s' for this pubkey. """
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        out = BytesIO()
        with self.encrypt_to(out, mac_bytes) as f:
            f.write(s)
        return out.getvalue()

    def to_bytes(self, fmt=SER_BINARY):
        return self.p.to_bytes(fmt)

    def to_string(self, fmt=SER_BINARY):
        return self.p.to_string(fmt)

    def __str__(self):
        return self.to_string(SER_COMPACT)

    def __repr__(self):
        return "<PubKey %s>" % self
class PrivKey(object):
    """ A secret exponent """

    def __init__(self, e, curve):
        # e: the secret scalar; curve: the Curve it belongs to.
        self.e = e
        self.curve = curve

    @contextlib.contextmanager
    def decrypt_from(self, f, mac_bytes=10):
        """ Decrypts a message from f. """
        ctx = DecryptionContext(self.curve, f, self, mac_bytes)
        yield ctx
        # Drain the stream so the MAC check always runs.
        ctx.read()

    def decrypt(self, s, mac_bytes=10):
        """Decrypt the byte string `s`; raises IntegrityError on a bad
        MAC."""
        if isinstance(s, six.text_type):
            raise ValueError("s should be bytes")
        instream = BytesIO(s)
        with self.decrypt_from(instream, mac_bytes) as f:
            return f.read()

    def sign(self, h, sig_format=SER_BINARY):
        """ Signs the message with SHA-512 hash `h' with this private key. """
        outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
                  else self.curve.sig_len_bin)
        sig = self._ECDSA_sign(h)
        return serialize_number(sig, sig_format, outlen)

    def __repr__(self):
        return "<PrivKey %s>" % self.e

    def __str__(self):
        return str(self.e)

    def _ECDSA_sign(self, md):
        """ECDSA-sign digest `md`; returns the integer s*order + r.

        The nonce k is derived deterministically from HMAC(privkey, md)
        seeding an AES-CTR stream — similar in spirit to RFC 6979, so
        no fresh randomness is consumed per signature.
        """
        # Get the pseudo-random exponent from the messagedigest
        # and the private key.
        order = self.curve.order
        hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
        h = hmac.new(hmk, digestmod=hashlib.sha256)
        h.update(md)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cprng = Crypto.Cipher.AES.new(h.digest(),
                                      Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        r = 0
        s = 0
        # Redraw k until both signature halves are non-zero.
        while s == 0:
            while r == 0:
                buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
                k = self.curve._buf_to_exponent(buf)
                p1 = self.curve.base * k
                r = p1.x % order
            e = deserialize_number(md, SER_BINARY)
            e = (e % order)
            s = (self.e * r) % order
            s = (s + e) % order
            e = gmpy.invert(k, order)
            s = (s * e) % order
        # Pack (r, s) into a single integer: s * order + r.
        s = s * order
        s = s + r
        return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
    """ Holds state of encryption. Use AffinePoint.encrypt_to """

    def __init__(self, f, p, mac_bytes=10):
        # f: sink file object; p: the recipient's public AffinePoint.
        self.f = f
        self.mac_bytes = mac_bytes
        # ECIES: derive 64 bytes of key material plus ephemeral point R.
        key, R = p._ECIES_encryption()
        # key[32:] keys the ciphertext MAC, key[:32] the AES-256 cipher.
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        # The ephemeral point is written first, in binary serialization.
        f.write(R.to_bytes(SER_BINARY))
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)

    def write(self, s):
        """Encrypt `s`, write the ciphertext and feed it into the MAC."""
        if not self.f:
            raise IOError("closed")
        ct = self.cipher.encrypt(s)
        self.f.write(ct)
        self.h.update(ct)

    def finish(self):
        """Append the truncated MAC tag and mark the context closed."""
        if not self.f:
            raise IOError("closed")
        self.f.write(self.h.digest()[:self.mac_bytes])
        self.f = None
class DecryptionContext(object):
    """ Holds state of decryption. Use Curve.decrypt_from """

    def __init__(self, curve, f, privkey, mac_bytes=10):
        """Start decrypting an ECIES stream read from `f`.

        Reads the sender's ephemeral point R (the first pk_len_bin
        bytes of the stream), derives the AES-CTR and HMAC keys from it
        with `privkey`, and primes a `mac_bytes`-byte read-ahead buffer
        so the trailing MAC is never handed to the caller as plaintext.
        """
        self.f = f
        self.mac_bytes = mac_bytes
        R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
        key = R._ECIES_decryption(privkey)
        # key[32:] keys the ciphertext MAC, key[:32] the AES-256 cipher.
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        self.ahead = f.read(mac_bytes)

    def read(self, n=None):
        """Decrypt and return up to `n` bytes (all remaining bytes when
        `n` is None).  When the end of the stream is reached the MAC is
        verified; raises IntegrityError on mismatch.
        """
        if not self.f:
            # Fixed: return bytes (b''), not text (''), so an exhausted
            # context yields the same type as a normal read on Python 3.
            return b''
        if n is None:
            tmp = self.ahead + self.f.read()
        else:
            tmp = self.ahead + self.f.read(n)
        # Everything but the last mac_bytes is ciphertext; the tail may
        # still turn out to be the MAC, so keep it buffered.
        ct = tmp[:-self.mac_bytes]
        self.ahead = tmp[-self.mac_bytes:]
        self.h.update(ct)
        pt = self.cipher.decrypt(ct)
        if n is None or len(ct) < n:
            # A short read means end-of-stream: check the MAC and close.
            if self.h.digest()[:self.mac_bytes] != self.ahead:
                raise IntegrityError
            self.f = None
        return pt
# The main Curve objects
# #########################################################
class Curve(object):
""" Represents a Elliptic Curve """
@staticmethod
def by_name_substring(substring):
substring = substring.lower()
candidates = []
for raw_curve in RAW_CURVES:
if substring in raw_curve[0]:
candidates.append(raw_curve)
if len(candidates) != 1:
raise KeyError
return Curve(candidates[0])
@staticmethod
def by_name(name):
for raw_curve in RAW_CURVES:
if raw_curve[0] == name:
return Curve(raw_curve)
raise KeyError
@staticmethod
def by_pk_len(pk_len):
for raw_curve in RAW_CURVES:
if raw_curve[8] == pk_len:
return Curve(raw_curve)
raise KeyError
def __init__(self, raw_curve_params):
""" Initialize a new curve from raw curve parameters.
Use `Curve.by_pk_len' instead """
r = raw_curve_parameters(*raw_curve_params)
# Store domain parameters
self.name = r.name
self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
self.order = deserialize_number(
binascii.unhexlify(r.order), SER_BINARY)
self.base = AffinePoint(
curve=self, x=deserialize_number(
binascii.unhexlify(
r.base_x), SER_BINARY), y=deserialize_number(
binascii.unhexlify(
r.base_y), SER_BINARY))
self.cofactor = r.cofactor
# Calculate some other parameters
self.pk_len_bin = get_serialized_number_len(
(2 * self.m) - 1, SER_BINARY)
self.pk_len_compact = get_serialized_number_len(
(2 * self.m) - 1, SER_COMPACT)
assert self.pk_len_compact == r.pk_len_compact
self.sig_len_bin = get_serialized_number_len(
(self.order * self.order) - 1, SER_BINARY)
self.sig_len_compact = get_serialized_number_len(
(self.order * self.order) - 1, SER_COMPACT)
self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
self.dh_len_compact = get_serialized_number_len(
2 ** self.dh_len_bin - 1, SER_COMPACT)
self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)
@property
def key_bytes(self):
""" The approximate number of bytes of information in a key. """
return self.pk_len_bin
def __repr__(self):
return "<Curve %s>" % self.name
def point_from_string(self, s, fmt=SER_BINARY):
x = deserialize_number(s, fmt)
yflag = x >= self.m
if yflag:
x = x - self.m
assert 0 < x and x <= self.m
return self._point_decompress(x, yflag)
def pubkey_from_string(self, s, fmt=SER_BINARY):
return PubKey(self.point_from_string(s, fmt))
def _point_decompress(self, x, yflag):
m = self.m
h = (x * x) % m
h = (h + self.a) % m
h = (h * x) % m
h = (h + self.b) % m
y = mod_root(h, m)
if y or not yflag:
if bool(y.getbit(0)) == yflag:
return AffinePoint(x=x, y=y, curve=self)
return AffinePoint(x=x, y=m - y, curve=self)
def hash_to_exponent(self, h):
""" Converts a 32 byte hash to an exponent """
ctr = Crypto.Util.Counter.new(128, initial_value=0)
cipher = Crypto.Cipher.AES.new(h,
Crypto.Cipher.AES.MODE_CTR, counter=ctr)
buf = cipher.encrypt(b'\0' * self.order_len_bin)
return self._buf_to_exponent(buf)
def _buf_to_exponent(self, buf):
a = deserialize_number(buf, SER_BINARY)
a = (a % (self.order - 1)) + 1
return a
def passphrase_to_pubkey(self, passphrase):
return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)
def passphrase_to_privkey(self, passphrase):
if isinstance(passphrase, six.text_type):
raise ValueError(
"Encode `passphrase` to a bytestring yourself to" +
" prevent problems with different default encodings")
h = _passphrase_to_hash(passphrase)
return PrivKey(self.hash_to_exponent(h), self)
@contextlib.contextmanager
def decrypt_from(self, f, privkey, mac_bytes=10):
ctx = DecryptionContext(self, f, privkey, mac_bytes)
yield ctx
ctx.read()
def decrypt(self, s, privkey, mac_bytes=10):
instream = BytesIO(s)
with self.decrypt_from(instream, privkey, mac_bytes) as f:
return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
""" Converts a passphrase to a hash. """
return hashlib.sha256(passphrase).digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
""" Encrypts `s' for public key `pk' """
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
return p.encrypt(s, mac_bytes)
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
""" Decrypts `s' with passphrase `passphrase' """
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.decrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
mac_bytes=10, chunk_size=4096, curve=None):
""" Encrypts `in_file' to `out_file' for pubkey `pk' """
close_in, close_out = False, False
in_file, out_file = in_path_or_file, out_path_or_file
try:
if stringlike(in_path_or_file):
in_file = open(in_path_or_file, 'rb')
close_in = True
if stringlike(out_path_or_file):
out_file = open(out_path_or_file, 'wb')
close_out = True
_encrypt_file(in_file, out_file, pk, pk_format, mac_bytes, chunk_size,
curve)
finally:
if close_out:
out_file.close()
if close_in:
in_file.close()
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
curve='secp160r1', mac_bytes=10, chunk_size=4096):
""" Decrypts `in_file' to `out_file' with passphrase `passphrase' """
close_in, close_out = False, False
in_file, out_file = in_path_or_file, out_path_or_file
try:
if stringlike(in_path_or_file):
in_file = open(in_path_or_file, 'rb')
close_in = True
if stringlike(out_path_or_file):
out_file = open(out_path_or_file, 'wb')
close_out = True
_decrypt_file(in_file, out_file, passphrase, curve, mac_bytes,
chunk_size)
finally:
if close_out:
out_file.close()
if close_in:
in_file.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
mac_bytes=10, chunk_size=4096, curve=None):
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
with p.encrypt_to(out_file, mac_bytes) as encrypted_out:
while True:
buff = in_file.read(chunk_size)
if not buff:
break
encrypted_out.write(buff)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
mac_bytes=10, chunk_size=4096):
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
with privkey.decrypt_from(in_file, mac_bytes) as decrypted_in:
while True:
buff = decrypted_in.read(chunk_size)
if not buff:
break
out_file.write(buff)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
curve=None):
""" Verifies that `sig' is a signature of pubkey `pk' for the
message `s'. """
if isinstance(s, six.text_type):
raise ValueError("Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
return p.verify(hashlib.sha512(s).digest(), sig, sig_format)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
""" Signs `s' with passphrase `passphrase' """
if isinstance(s, six.text_type):
raise ValueError("Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.sign(hashlib.sha512(s).digest(), sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
curve = Curve.by_name(curve)
return curve.passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
""" Convenience function to generate a random
new keypair (passphrase, pubkey). """
if randfunc is None:
randfunc = Crypto.Random.new().read
curve = Curve.by_name(curve)
raw_privkey = randfunc(curve.order_len_bin)
privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
pubkey = str(passphrase_to_pubkey(privkey))
return (privkey, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
mod_root
|
python
|
def mod_root(a, p):
    """ Return a root of `a' modulo p

    Uses the Tonelli-Shanks algorithm.  Raises ValueError when `a' is
    not a quadratic residue modulo p.  `a' and `p' are gmpy.mpz. """
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        raise ValueError
    # Find a quadratic non-residue n (needed as a "twiddle" generator).
    n = 2
    while mod_issquare(n, p):
        n += 1
    # Write p - 1 = q * 2**r with q odd.
    q = p - 1
    r = 0
    while not q.getbit(r):
        r += 1
    q = q >> r
    y = pow(n, q, p)
    h = q >> 1
    b = pow(a, h, p)
    x = (a * b) % p
    b = (b * x) % p
    # Invariant: x**2 == a * b (mod p).  When b reaches 1, x is a root.
    while b != 1:
        # Find the least m with b**(2**m) == 1.
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        # h = 2**(r - m - 1)
        h = gmpy.mpz(0)
        h = h.setbit(r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
|
Return a root of `a' modulo p
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L122-L154
|
[
"def mod_issquare(a, p):\n \"\"\" Returns whether `a' is a square modulo p \"\"\"\n if not a:\n return True\n p1 = p // 2\n p2 = pow(a, p1, p)\n return p2 == 1\n"
] |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
if six.PY3:
from io import BytesIO as BytesIO
def byte2int(b): return b
def stringlike(x): return isinstance(x, (str, bytes))
else:
from cStringIO import StringIO as BytesIO
def byte2int(b): return ord(b)
def stringlike(x): return isinstance(x, basestring)
l = logging.getLogger(__name__)
class IntegrityError(ValueError):
    """ Raised when the MAC trailing a decrypted message does not match. """
    pass
# Serialization of numbers
# #########################################################

# Formats accepted by serialize_number/deserialize_number.
SER_COMPACT = 0  # printable-ASCII digits drawn from COMPACT_DIGITS
SER_BINARY = 1   # raw big-endian bytes

# Digit alphabet of the compact format, in ascending value
# (so b'!' is the zero digit used for padding).
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
# Reverse lookup: byte value of a digit -> its numeric value.
R_COMPACT_DIGITS = {}  # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
    R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """ Serializes `x' to a string of length `outlen' in format `fmt'

    Note that x == 0 serializes to the empty string before padding. """
    ret = b''
    if fmt == SER_BINARY:
        # Big-endian base 256: peel off the least significant byte
        # and prepend it.
        while x:
            x, r = divmod(x, 256)
            ret = six.int2byte(int(r)) + ret
        if outlen is not None:
            assert len(ret) <= outlen
            # Left-pad with NUL bytes up to the requested length.
            ret = ret.rjust(outlen, b'\0')
        return ret
    assert fmt == SER_COMPACT
    # Same scheme in base len(COMPACT_DIGITS) over the printable alphabet.
    while x:
        x, r = divmod(x, len(COMPACT_DIGITS))
        ret = COMPACT_DIGITS[r:r + 1] + ret
    if outlen is not None:
        assert len(ret) <= outlen
        # Left-pad with the alphabet's zero digit.
        ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])
    return ret
def deserialize_number(s, fmt=SER_BINARY):
    """ Deserializes a number from a string `s' in format `fmt'

    Inverse of `serialize_number'; returns a gmpy.mpz. """
    ret = gmpy.mpz(0)
    if fmt == SER_BINARY:
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        # Big-endian base 256, accumulated via Horner's rule.
        for c in s:
            ret *= 256
            ret += byte2int(c)
        return ret
    assert fmt == SER_COMPACT
    # The compact alphabet is pure ASCII, so text input is acceptable here.
    if isinstance(s, six.text_type):
        s = s.encode('ascii')
    for c in s:
        ret *= len(COMPACT_DIGITS)
        ret += R_COMPACT_DIGITS[c]
    return ret
def get_serialized_number_len(x, fmt=SER_BINARY):
    """ Returns the length of `x' (a gmpy.mpz) serialized in `fmt'. """
    if fmt == SER_BINARY:
        # One byte per 8 bits, rounded up.
        return (x.numdigits(2) + 7) // 8
    assert fmt == SER_COMPACT
    # Count the base-len(COMPACT_DIGITS) digits of x.
    base = len(COMPACT_DIGITS)
    n_digits = 0
    while x:
        x //= base
        n_digits += 1
    return n_digits
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
    """ Returns whether `a' is a quadratic residue modulo p.

    Zero counts as a square.  For nonzero `a' this is Euler's
    criterion: a is a square mod an odd prime p iff
    a**((p-1)/2) == 1 (mod p). """
    if not a:
        return True
    return pow(a, p // 2, p) == 1
# Raw curve parameters
# #########################################################
raw_curve_parameters = collections.namedtuple(
'raw_curve_parameters',
('name',
'a',
'b',
'm',
'base_x',
'base_y',
'order',
'cofactor',
'pk_len_compact'))
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
    """ A point on an elliptic curve in Jacobian projective coordinates.

    Represents the affine point (x / z**2, y / z**3); z == 0 encodes the
    point at infinity.  The projective form defers the (expensive)
    modular inversion to a single `to_affine' call. """

    def __init__(self, x, y, z, curve):
        self.x = x
        self.y = y
        self.z = z
        self.curve = curve

    def to_affine(self):
        """ Converts this point to affine coordinates (one inversion). """
        if self.z == 0:
            # Infinity maps to the affine sentinel (0, 0).
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        h = gmpy.invert(self.z, m)  # h = z**-1 (mod m)
        y = (h * h) % m
        x = (self.x * y) % m        # x / z**2
        y = (y * h) % m
        y = (y * self.y) % m        # y / z**3
        return AffinePoint(x=x, y=y, curve=self.curve)

    def double(self):
        """ Returns 2 * self. """
        if not self.z:
            # Infinity doubled is infinity.
            return self
        if not self.y:
            # y == 0: vertical tangent, the double is infinity (z = 0).
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # Standard Jacobian doubling formulas; t1 and t2 are scratch.
        t1 = (self.x * self.x) % m
        t2 = (t1 + t1) % m
        t2 = (t2 + t1) % m          # t2 = 3*x**2
        t1 = (self.z * self.z) % m
        t1 = (t1 * t1) % m
        t1 = (t1 * a) % m           # t1 = a*z**4
        t1 = (t1 + t2) % m          # t1 = 3*x**2 + a*z**4
        z = (self.z * self.y) % m
        z = (z + z) % m             # z' = 2*y*z
        y = (self.y * self.y) % m
        y = (y + y) % m             # y = 2*y**2
        t2 = (self.x * y) % m
        t2 = (t2 + t2) % m          # t2 = 4*x*y**2
        x = (t1 * t1) % m
        x = (x - t2) % m
        x = (x - t2) % m            # x' = t1**2 - 2*t2
        t2 = (t2 - x) % m
        t1 = (t1 * t2) % m
        t2 = (y * y) % m
        t2 = (t2 + t2) % m          # t2 = 8*y**4
        y = (t1 - t2) % m           # y'
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)

    def __add__(self, other):
        """ Mixed addition: Jacobian self + affine other. """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            # Adding the identity.
            return self
        if not self.z:
            # self is infinity.
            return other.to_jacobian()
        m = self.curve.m
        # Scale `other' up to self's projective denominator.
        t1 = (self.z * self.z) % m
        t2 = (t1 * other.x) % m     # t2 = other.x * z**2
        t1 = (t1 * self.z) % m
        t1 = (t1 * other.y) % m     # t1 = other.y * z**3
        if self.x == t2:
            if self.y == t1:
                # Same point: fall back to doubling.
                return self.double()
            # Mirror points: the sum is infinity.
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        x = (self.x - t2) % m
        y = (self.y - t1) % m
        z = (self.z * x) % m
        t3 = (x * x) % m
        t2 = (t2 * t3) % m
        t3 = (t3 * x) % m
        t1 = (t1 * t3) % m
        x = (y * y) % m
        x = (x - t3) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        y = (y * t2) % m
        y = (y - t1) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)

    def __repr__(self):
        return "<JacobianPoint (%s, %s, %s) of %s>" % (
            self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
    """ A point (x, y) on an elliptic curve in affine coordinates.

    The pair (0, 0) doubles as the point at infinity (see __nonzero__).
    Also carries the ECIES and ECDSA primitives that operate on points.
    Coordinates are gmpy.mpz. """

    def __init__(self, x, y, curve):
        self.x = x
        self.y = y
        self.curve = curve

    @property
    def on_curve(self):
        """ Whether (x, y) satisfies y**2 == x**3 + a*x + b (mod m). """
        if not self:
            # The point at infinity counts as on the curve.
            return True
        m = self.curve.m
        a = self.curve.a
        b = self.curve.b
        # h1 = x**3 + a*x + b, evaluated by Horner's rule.
        h1 = (self.x * self.x) % m
        h1 = (h1 + a) % m
        h1 = (h1 * self.x) % m
        h1 = (h1 + b) % m
        h2 = (self.y * self.y) % m
        return h1 == h2

    def to_jacobian(self):
        """ Converts to Jacobian coordinates (z == 0 for infinity). """
        if not self:
            return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)

    def double(self):
        """ Returns 2 * self via the affine tangent rule. """
        if not self.y:
            # Vertical tangent (or infinity): the double is infinity.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # Slope t1 = (3*x**2 + a) / (2*y).
        t2 = (self.x * self.x) % m
        t1 = (t2 + t2) % m
        t1 = (t1 + t2) % m
        t1 = (t1 + a) % m
        t2 = (self.y + self.y) % m
        t2 = gmpy.invert(t2, m)
        t1 = (t1 * t2) % m
        # x' = t1**2 - 2*x;  y' = t1*(x - x') - y.
        t2 = (t1 * t1) % m
        t2 = (t2 - self.x) % m
        t2 = (t2 - self.x) % m
        x = (self.x - t2) % m
        t1 = (t1 * x) % m
        y = (t1 - self.y) % m
        x = t2
        return AffinePoint(x=x, y=y, curve=self.curve)

    def __mul__(self, exp):
        """ Scalar multiplication by `exp' (gmpy.mpz): MSB-first
            double-and-add carried out in Jacobian coordinates. """
        n = exp.numdigits(2)
        r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        while n:
            r = r.double()
            n -= 1
            if exp.getbit(n):
                r = r + self
        R = r.to_affine()
        assert R.on_curve
        return R

    def __add__(self, other):
        """ Affine point addition (chord rule). """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self:
            return other
        if self.x == other.x:
            if self.y == other.y:
                # Same point: use the tangent rule instead.
                return self.double()
            # Mirror points: the sum is infinity.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        # Chord slope y = (y1 - y2) / (x1 - x2).
        t = (self.y - other.y) % m
        y = (self.x - other.x) % m
        y = gmpy.invert(y, m)
        y = (t * y) % m
        # x' = slope**2 - x1 - x2;  y' = slope*(x2 - x') - y2.
        t = (y * y) % m
        x = (self.x + other.x) % m
        x = (t - x) % m
        t = (other.x - x) % m
        y = (y * t) % m
        y = (y - other.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)

    def __nonzero__(self):
        # (0, 0) plays the role of the point at infinity and is falsy.
        return bool(self.x or self.y)
    __bool__ = __nonzero__  # Python 3 spelling

    def __repr__(self):
        return "<AffinePoint (%s, %s) of %s>" % (
            self.x, self.y, self.curve.name)

    def __eq__(self, other):
        if not isinstance(other, AffinePoint):
            return False
        # NOTE: the curve itself is not compared, only the coordinates.
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        return not (self == other)

    def __str__(self):
        return self.to_string(SER_COMPACT)

    def to_bytes(self, fmt=SER_BINARY):
        """ Serializes the point in compressed form: x, plus the field
            modulus m folded in when y is odd. """
        outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
                  else self.curve.pk_len_bin)
        if self._point_compress():
            return serialize_number(self.x + self.curve.m, fmt, outlen)
        return serialize_number(self.x, fmt, outlen)

    def to_string(self, fmt=SER_BINARY):
        return self.to_bytes(fmt).decode()

    def _point_compress(self):
        # The parity of y suffices to recover it from x.
        return self.y.getbit(0) == 1

    def _ECIES_KDF(self, R):
        """ Derives the 64-byte ECIES key from the shared point (self)
            and the ephemeral public point R. """
        h = hashlib.sha512()
        h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
        return h.digest()

    def _ECIES_encryption(self):
        """ Picks an ephemeral exponent; returns (derived key, public R). """
        while True:
            k = gmpy.mpz(
                Crypto.Random.random.randrange(
                    0, int(
                        self.curve.order - 1)))
            R = self.curve.base * k
            k = k * self.curve.cofactor
            Z = self * k
            # Retry when the shared point degenerates to infinity.
            if Z:
                break
        return (Z._ECIES_KDF(R), R)

    def _ECIES_decryption(self, d):
        """ Recomputes the ECIES key from ephemeral point `self' and
            private exponent `d' (an mpz or a PrivKey). """
        if isinstance(d, PrivKey):
            d = d.e
        e = d * self.curve.cofactor
        if not self.valid_embedded_key:
            raise ValueError
        Z = self * e
        if not Z:
            raise ValueError
        return Z._ECIES_KDF(self)

    def _ECDSA_verify(self, md, sig):
        """ Verifies the packed signature `sig' (== s*order + r, as
            produced by PrivKey._ECDSA_sign) against digest `md'.
            `self' is the public point. """
        order = self.curve.order
        s, r = divmod(sig, order)
        if s <= 0 or order <= s or r <= 0 or order <= r:
            return False
        e = deserialize_number(md, SER_BINARY) % order
        s = gmpy.invert(s, order)
        e = (e * s) % order
        X1 = self.curve.base * e
        e = (r * s) % order
        X2 = self * e
        X1 = X1 + X2
        if not X1:
            return False
        s = X1.x % order
        return s == r

    @property
    def valid_embedded_key(self):
        """ Whether this point is acceptable as a peer's public value:
            in range, not infinity, and on the curve. """
        if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
                self.y > self.curve.m):
            return False
        if not self:
            return False
        if not self.on_curve:
            return False
        return True
class PubKey(object):
    """ A public affine point """

    def __init__(self, p):
        # `p' is the AffinePoint  base * secret_exponent.
        self.p = p

    def verify(self, h, sig, sig_fmt=SER_BINARY):
        """ Verifies that `sig' is a signature for a message with
            SHA-512 hash `h'. """
        s = deserialize_number(sig, sig_fmt)
        return self.p._ECDSA_verify(h, s)

    @contextlib.contextmanager
    def encrypt_to(self, f, mac_bytes=10):
        """ Returns a file like object `ef'.  Anything written to `ef'
            will be encrypted for this pubkey and written to `f'. """
        ctx = EncryptionContext(f, self.p, mac_bytes)
        yield ctx
        # Append the truncated MAC once the caller is done writing.
        ctx.finish()

    def encrypt(self, s, mac_bytes=10):
        """ Encrypt `s' for this pubkey. """
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        out = BytesIO()
        with self.encrypt_to(out, mac_bytes) as f:
            f.write(s)
        return out.getvalue()

    def to_bytes(self, fmt=SER_BINARY):
        return self.p.to_bytes(fmt)

    def to_string(self, fmt=SER_BINARY):
        return self.p.to_string(fmt)

    def __str__(self):
        # The conventional textual form is the compact serialization.
        return self.to_string(SER_COMPACT)

    def __repr__(self):
        return "<PubKey %s>" % self
class PrivKey(object):
    """ A secret exponent """

    def __init__(self, e, curve):
        self.e = e          # the secret exponent (gmpy.mpz)
        self.curve = curve

    @contextlib.contextmanager
    def decrypt_from(self, f, mac_bytes=10):
        """ Decrypts a message from f. """
        ctx = DecryptionContext(self.curve, f, self, mac_bytes)
        yield ctx
        # Drain the remainder so the MAC is always checked on exit.
        ctx.read()

    def decrypt(self, s, mac_bytes=10):
        """ Decrypts the bytestring `s'; raises IntegrityError on a
            MAC mismatch. """
        if isinstance(s, six.text_type):
            raise ValueError("s should be bytes")
        instream = BytesIO(s)
        with self.decrypt_from(instream, mac_bytes) as f:
            return f.read()

    def sign(self, h, sig_format=SER_BINARY):
        """ Signs the message with SHA-512 hash `h' with this private key. """
        outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
                  else self.curve.sig_len_bin)
        sig = self._ECDSA_sign(h)
        return serialize_number(sig, sig_format, outlen)

    def __repr__(self):
        return "<PrivKey %s>" % self.e

    def __str__(self):
        return str(self.e)

    def _ECDSA_sign(self, md):
        """ ECDSA over digest `md'; returns (s, r) packed as s*order + r.

        The nonce k is derived deterministically from HMAC(privkey, md)
        seeding an AES-CTR stream -- similar in spirit to RFC 6979. """
        # Get the pseudo-random exponent from the messagedigest
        # and the private key.
        order = self.curve.order
        hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
        h = hmac.new(hmk, digestmod=hashlib.sha256)
        h.update(md)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cprng = Crypto.Cipher.AES.new(h.digest(),
                                      Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        r = 0
        s = 0
        # NOTE(review): if s comes out 0 the outer loop retries with the
        # same k and r (inner loop is skipped), so it would spin; the
        # probability is negligible, but confirm this is intended.
        while s == 0:
            while r == 0:
                buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
                k = self.curve._buf_to_exponent(buf)
                p1 = self.curve.base * k
                r = p1.x % order
            e = deserialize_number(md, SER_BINARY)
            e = (e % order)
            s = (self.e * r) % order
            s = (s + e) % order      # s = e + r*priv
            e = gmpy.invert(k, order)
            s = (s * e) % order      # s = (e + r*priv) / k
        # Pack the pair into a single integer: sig = s * order + r.
        s = s * order
        s = s + r
        return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
    """ Holds state of encryption. Use AffinePoint.encrypt_to """

    def __init__(self, f, p, mac_bytes=10):
        self.f = f
        self.mac_bytes = mac_bytes
        # ECIES: derive a fresh 64-byte key; R is the ephemeral point
        # that is written first so the receiver can re-derive the key.
        key, R = p._ECIES_encryption()
        # Second half of the key feeds the MAC, first half the cipher.
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        f.write(R.to_bytes(SER_BINARY))
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)

    def write(self, s):
        """ Encrypts `s' and writes the ciphertext to the stream. """
        if not self.f:
            raise IOError("closed")
        ct = self.cipher.encrypt(s)
        self.f.write(ct)
        # The MAC covers the ciphertext (encrypt-then-MAC).
        self.h.update(ct)

    def finish(self):
        """ Appends the truncated MAC and closes this context. """
        if not self.f:
            raise IOError("closed")
        self.f.write(self.h.digest()[:self.mac_bytes])
        self.f = None
class DecryptionContext(object):
    """ Holds state of decryption. Use Curve.decrypt_from """

    def __init__(self, curve, f, privkey, mac_bytes=10):
        self.f = f
        self.mac_bytes = mac_bytes
        # Read the ephemeral public point R and re-derive the ECIES key.
        R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
        key = R._ECIES_decryption(privkey)
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        # Keep mac_bytes of lookahead so the trailing MAC is never
        # mistaken for ciphertext.
        self.ahead = f.read(mac_bytes)

    def read(self, n=None):
        """ Reads and decrypts up to `n' bytes (all remaining if None).

        Raises IntegrityError at end-of-stream if the MAC mismatches. """
        if not self.f:
            # NOTE(review): returns '' (str) even on Python 3 where the
            # normal return type is bytes -- confirm callers never
            # concatenate this with earlier reads.
            return ''
        if n is None:
            tmp = self.ahead + self.f.read()
        else:
            tmp = self.ahead + self.f.read(n)
        # All but the last mac_bytes is ciphertext; the tail becomes
        # the new lookahead (and, at EOF, the claimed MAC).
        ct = tmp[:-self.mac_bytes]
        self.ahead = tmp[-self.mac_bytes:]
        self.h.update(ct)
        pt = self.cipher.decrypt(ct)
        if n is None or len(ct) < n:
            # End of stream: verify the MAC against the lookahead.
            if self.h.digest()[:self.mac_bytes] != self.ahead:
                raise IntegrityError
            self.f = None
        return pt
# The main Curve objects
# #########################################################
class Curve(object):
    """ Represents a Elliptic Curve """

    @staticmethod
    def by_name_substring(substring):
        """ Returns the unique curve whose name contains `substring'.

        Raises KeyError when zero or several curves match. """
        substring = substring.lower()
        candidates = []
        for raw_curve in RAW_CURVES:
            if substring in raw_curve[0]:
                candidates.append(raw_curve)
        if len(candidates) != 1:
            raise KeyError
        return Curve(candidates[0])

    @staticmethod
    def by_name(name):
        """ Returns the curve with exactly this name. """
        for raw_curve in RAW_CURVES:
            if raw_curve[0] == name:
                return Curve(raw_curve)
        raise KeyError

    @staticmethod
    def by_pk_len(pk_len):
        """ Returns the curve whose compact pubkeys have length pk_len. """
        for raw_curve in RAW_CURVES:
            if raw_curve[8] == pk_len:  # index 8 is pk_len_compact
                return Curve(raw_curve)
        raise KeyError

    def __init__(self, raw_curve_params):
        """ Initialize a new curve from raw curve parameters.
            Use `Curve.by_pk_len' instead """
        r = raw_curve_parameters(*raw_curve_params)
        # Store domain parameters
        self.name = r.name
        self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
        self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
        self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
        self.order = deserialize_number(
            binascii.unhexlify(r.order), SER_BINARY)
        self.base = AffinePoint(
            curve=self, x=deserialize_number(
                binascii.unhexlify(
                    r.base_x), SER_BINARY), y=deserialize_number(
                binascii.unhexlify(
                    r.base_y), SER_BINARY))
        self.cofactor = r.cofactor
        # Calculate some other parameters
        # Compressed pubkeys store x plus m when y is odd, so values
        # range over [0, 2m).
        self.pk_len_bin = get_serialized_number_len(
            (2 * self.m) - 1, SER_BINARY)
        self.pk_len_compact = get_serialized_number_len(
            (2 * self.m) - 1, SER_COMPACT)
        assert self.pk_len_compact == r.pk_len_compact
        # Signatures pack (s, r) as s*order + r, hence order**2 - 1.
        self.sig_len_bin = get_serialized_number_len(
            (self.order * self.order) - 1, SER_BINARY)
        self.sig_len_compact = get_serialized_number_len(
            (self.order * self.order) - 1, SER_COMPACT)
        self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
        # NOTE(review): `2 ** self.dh_len_bin' looks like it may intend
        # 2 ** (8 * dh_len_bin) -- confirm against upstream SECCURE.
        self.dh_len_compact = get_serialized_number_len(
            2 ** self.dh_len_bin - 1, SER_COMPACT)
        self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
        self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)

    @property
    def key_bytes(self):
        """ The approximate number of bytes of information in a key. """
        return self.pk_len_bin

    def __repr__(self):
        return "<Curve %s>" % self.name

    def point_from_string(self, s, fmt=SER_BINARY):
        """ Decodes a compressed point: x in [0, m), plus m when y odd. """
        x = deserialize_number(s, fmt)
        yflag = x >= self.m
        if yflag:
            x = x - self.m
        # NOTE(review): `assert' is stripped under -O, and the bounds
        # look off by one (x == m passes, x == 0 is rejected) -- confirm.
        assert 0 < x and x <= self.m
        return self._point_decompress(x, yflag)

    def pubkey_from_string(self, s, fmt=SER_BINARY):
        """ Deserializes a PubKey from `s'. """
        return PubKey(self.point_from_string(s, fmt))

    def _point_decompress(self, x, yflag):
        """ Recovers y from x and the parity flag `yflag'. """
        m = self.m
        # Right-hand side of the curve equation: x**3 + a*x + b.
        h = (x * x) % m
        h = (h + self.a) % m
        h = (h * x) % m
        h = (h + self.b) % m
        y = mod_root(h, m)  # raises ValueError if x is not on the curve
        if y or not yflag:
            if bool(y.getbit(0)) == yflag:
                return AffinePoint(x=x, y=y, curve=self)
        # Wrong parity: take the other root, m - y.
        return AffinePoint(x=x, y=m - y, curve=self)

    def hash_to_exponent(self, h):
        """ Converts a 32 byte hash to an exponent """
        # Use the hash as an AES-CTR key to stretch it to order_len_bin
        # pseudo-random bytes.
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cipher = Crypto.Cipher.AES.new(h,
                                       Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        buf = cipher.encrypt(b'\0' * self.order_len_bin)
        return self._buf_to_exponent(buf)

    def _buf_to_exponent(self, buf):
        # Map the raw bytes into the range [1, order - 1].
        a = deserialize_number(buf, SER_BINARY)
        a = (a % (self.order - 1)) + 1
        return a

    def passphrase_to_pubkey(self, passphrase):
        """ Returns the PubKey belonging to `passphrase'. """
        return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)

    def passphrase_to_privkey(self, passphrase):
        """ Derives a PrivKey from the bytestring `passphrase'. """
        if isinstance(passphrase, six.text_type):
            raise ValueError(
                "Encode `passphrase` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        h = _passphrase_to_hash(passphrase)
        return PrivKey(self.hash_to_exponent(h), self)

    @contextlib.contextmanager
    def decrypt_from(self, f, privkey, mac_bytes=10):
        """ Decrypts a message from file-like `f' with `privkey'. """
        ctx = DecryptionContext(self, f, privkey, mac_bytes)
        yield ctx
        # Drain the stream so the MAC is always verified.
        ctx.read()

    def decrypt(self, s, privkey, mac_bytes=10):
        """ Decrypts the bytestring `s' with `privkey'. """
        instream = BytesIO(s)
        with self.decrypt_from(instream, privkey, mac_bytes) as f:
            return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
""" Converts a passphrase to a hash. """
return hashlib.sha256(passphrase).digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
    """ Encrypts `s' for public key `pk'

    When `curve' is None it is guessed from the length of `pk'. """
    curve = (Curve.by_pk_len(len(pk)) if curve is None
             else Curve.by_name(curve))
    p = curve.pubkey_from_string(pk, pk_format)
    return p.encrypt(s, mac_bytes)
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
    """ Decrypts `s' with passphrase `passphrase'

    Raises IntegrityError when the trailing MAC does not match. """
    curve = Curve.by_name(curve)
    privkey = curve.passphrase_to_privkey(passphrase)
    return privkey.decrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
                 mac_bytes=10, chunk_size=4096, curve=None):
    """ Encrypts `in_file' to `out_file' for pubkey `pk'

    Either argument may be a path or an open file object.  Files opened
    here (when a path was given) are closed again; caller-supplied file
    objects are left open. """
    close_in, close_out = False, False
    in_file, out_file = in_path_or_file, out_path_or_file
    try:
        if stringlike(in_path_or_file):
            in_file = open(in_path_or_file, 'rb')
            close_in = True
        if stringlike(out_path_or_file):
            out_file = open(out_path_or_file, 'wb')
            close_out = True
        _encrypt_file(in_file, out_file, pk, pk_format, mac_bytes, chunk_size,
                      curve)
    finally:
        # Only close what we opened ourselves.
        if close_out:
            out_file.close()
        if close_in:
            in_file.close()
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
                 curve='secp160r1', mac_bytes=10, chunk_size=4096):
    """ Decrypts `in_file' to `out_file' with passphrase `passphrase'

    Either argument may be a path or an open file object.  Files opened
    here (when a path was given) are closed again; caller-supplied file
    objects are left open. """
    close_in, close_out = False, False
    in_file, out_file = in_path_or_file, out_path_or_file
    try:
        if stringlike(in_path_or_file):
            in_file = open(in_path_or_file, 'rb')
            close_in = True
        if stringlike(out_path_or_file):
            out_file = open(out_path_or_file, 'wb')
            close_out = True
        _decrypt_file(in_file, out_file, passphrase, curve, mac_bytes,
                      chunk_size)
    finally:
        # Only close what we opened ourselves.
        if close_out:
            out_file.close()
        if close_in:
            in_file.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
                  mac_bytes=10, chunk_size=4096, curve=None):
    """ Encrypts the open binary file `in_file' to `out_file' for
        pubkey `pk', streaming chunk_size bytes at a time. """
    the_curve = (Curve.by_name(curve) if curve is not None
                 else Curve.by_pk_len(len(pk)))
    pubkey = the_curve.pubkey_from_string(pk, pk_format)
    with pubkey.encrypt_to(out_file, mac_bytes) as sink:
        # iter() with a sentinel reads until EOF (an empty chunk).
        for chunk in iter(lambda: in_file.read(chunk_size), b''):
            sink.write(chunk)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
                  mac_bytes=10, chunk_size=4096):
    """ Decrypts the open binary file `in_file' to `out_file' with
        `passphrase', streaming chunk_size bytes at a time. """
    the_curve = Curve.by_name(curve)
    key = the_curve.passphrase_to_privkey(passphrase)
    with key.decrypt_from(in_file, mac_bytes) as source:
        while True:
            chunk = source.read(chunk_size)
            if not chunk:
                break
            out_file.write(chunk)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
           curve=None):
    """ Verifies that `sig' is a signature of pubkey `pk' for the
        message `s'.

    `s' must be a bytestring; it is hashed with SHA-512 before
    verification.  When `curve' is None it is guessed from len(pk). """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    curve = (Curve.by_pk_len(len(pk)) if curve is None
             else Curve.by_name(curve))
    p = curve.pubkey_from_string(pk, pk_format)
    return p.verify(hashlib.sha512(s).digest(), sig, sig_format)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
    """ Signs `s' with passphrase `passphrase'

    `s' must be a bytestring; it is hashed with SHA-512 before
    signing. """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    curve = Curve.by_name(curve)
    privkey = curve.passphrase_to_privkey(passphrase)
    return privkey.sign(hashlib.sha512(s).digest(), sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
    """ Returns the PubKey belonging to `passphrase' on `curve'. """
    return Curve.by_name(curve).passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
""" Convenience function to generate a random
new keypair (passphrase, pubkey). """
if randfunc is None:
randfunc = Crypto.Random.new().read
curve = Curve.by_name(curve)
raw_privkey = randfunc(curve.order_len_bin)
privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
pubkey = str(passphrase_to_pubkey(privkey))
return (privkey, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
encrypt
|
python
|
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
return p.encrypt(s, mac_bytes)
|
Encrypts `s' for public key `pk'
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L903-L908
|
[
"def encrypt(self, s, mac_bytes=10):\n \"\"\" Encrypt `s' for this pubkey. \"\"\"\n if isinstance(s, six.text_type):\n raise ValueError(\n \"Encode `s` to a bytestring yourself to\" +\n \" prevent problems with different default encodings\")\n out = BytesIO()\n with self.encrypt_to(out, mac_bytes) as f:\n f.write(s)\n return out.getvalue()\n",
"def by_name(name):\n for raw_curve in RAW_CURVES:\n if raw_curve[0] == name:\n return Curve(raw_curve)\n raise KeyError\n",
"def by_pk_len(pk_len):\n for raw_curve in RAW_CURVES:\n if raw_curve[8] == pk_len:\n return Curve(raw_curve)\n raise KeyError\n",
"def pubkey_from_string(self, s, fmt=SER_BINARY):\n return PubKey(self.point_from_string(s, fmt))\n"
] |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
if six.PY3:
from io import BytesIO as BytesIO
def byte2int(b): return b
def stringlike(x): return isinstance(x, (str, bytes))
else:
from cStringIO import StringIO as BytesIO
def byte2int(b): return ord(b)
def stringlike(x): return isinstance(x, basestring)
l = logging.getLogger(__name__)
class IntegrityError(ValueError):
pass
# Serialization of numbers
# #########################################################
SER_COMPACT = 0
SER_BINARY = 1
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
R_COMPACT_DIGITS = {} # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
""" Serializes `x' to a string of length `outlen' in format `fmt' """
ret = b''
if fmt == SER_BINARY:
while x:
x, r = divmod(x, 256)
ret = six.int2byte(int(r)) + ret
if outlen is not None:
assert len(ret) <= outlen
ret = ret.rjust(outlen, b'\0')
return ret
assert fmt == SER_COMPACT
while x:
x, r = divmod(x, len(COMPACT_DIGITS))
ret = COMPACT_DIGITS[r:r + 1] + ret
if outlen is not None:
assert len(ret) <= outlen
ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])
return ret
def deserialize_number(s, fmt=SER_BINARY):
""" Deserializes a number from a string `s' in format `fmt' """
ret = gmpy.mpz(0)
if fmt == SER_BINARY:
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
for c in s:
ret *= 256
ret += byte2int(c)
return ret
assert fmt == SER_COMPACT
if isinstance(s, six.text_type):
s = s.encode('ascii')
for c in s:
ret *= len(COMPACT_DIGITS)
ret += R_COMPACT_DIGITS[c]
return ret
def get_serialized_number_len(x, fmt=SER_BINARY):
if fmt == SER_BINARY:
return (x.numdigits(2) + 7) // 8
assert fmt == SER_COMPACT
res = 0
while x != 0:
x = x // len(COMPACT_DIGITS)
res += 1
return res
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
""" Returns whether `a' is a square modulo p """
if not a:
return True
p1 = p // 2
p2 = pow(a, p1, p)
return p2 == 1
def mod_root(a, p):
""" Return a root of `a' modulo p """
if a == 0:
return 0
if not mod_issquare(a, p):
raise ValueError
n = 2
while mod_issquare(n, p):
n += 1
q = p - 1
r = 0
while not q.getbit(r):
r += 1
q = q >> r
y = pow(n, q, p)
h = q >> 1
b = pow(a, h, p)
x = (a * b) % p
b = (b * x) % p
while b != 1:
h = (b * b) % p
m = 1
while h != 1:
h = (h * h) % p
m += 1
h = gmpy.mpz(0)
h = h.setbit(r - m - 1)
t = pow(y, h, p)
y = (t * t) % p
r = m
x = (x * t) % p
b = (b * y) % p
return x
# Raw curve parameters
# #########################################################
raw_curve_parameters = collections.namedtuple(
'raw_curve_parameters',
('name',
'a',
'b',
'm',
'base_x',
'base_y',
'order',
'cofactor',
'pk_len_compact'))
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
def __init__(self, x, y, z, curve):
self.x = x
self.y = y
self.z = z
self.curve = curve
def to_affine(self):
if self.z == 0:
return AffinePoint(x=0, y=0, curve=self.curve)
m = self.curve.m
h = gmpy.invert(self.z, m)
y = (h * h) % m
x = (self.x * y) % m
y = (y * h) % m
y = (y * self.y) % m
return AffinePoint(x=x, y=y, curve=self.curve)
def double(self):
if not self.z:
return self
if not self.y:
return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
m = self.curve.m
a = self.curve.a
t1 = (self.x * self.x) % m
t2 = (t1 + t1) % m
t2 = (t2 + t1) % m
t1 = (self.z * self.z) % m
t1 = (t1 * t1) % m
t1 = (t1 * a) % m
t1 = (t1 + t2) % m
z = (self.z * self.y) % m
z = (z + z) % m
y = (self.y * self.y) % m
y = (y + y) % m
t2 = (self.x * y) % m
t2 = (t2 + t2) % m
x = (t1 * t1) % m
x = (x - t2) % m
x = (x - t2) % m
t2 = (t2 - x) % m
t1 = (t1 * t2) % m
t2 = (y * y) % m
t2 = (t2 + t2) % m
y = (t1 - t2) % m
return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
def __add__(self, other):
if not isinstance(other, AffinePoint):
raise NotImplementedError
if not other:
return self
if not self.z:
return other.to_jacobian()
m = self.curve.m
t1 = (self.z * self.z) % m
t2 = (t1 * other.x) % m
t1 = (t1 * self.z) % m
t1 = (t1 * other.y) % m
if self.x == t2:
if self.y == t1:
return self.double()
return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
x = (self.x - t2) % m
y = (self.y - t1) % m
z = (self.z * x) % m
t3 = (x * x) % m
t2 = (t2 * t3) % m
t3 = (t3 * x) % m
t1 = (t1 * t3) % m
x = (y * y) % m
x = (x - t3) % m
x = (x - t2) % m
x = (x - t2) % m
t2 = (t2 - x) % m
y = (y * t2) % m
y = (y - t1) % m
return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
def __repr__(self):
return "<JacobianPoint (%s, %s, %s) of %s>" % (
self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
def __init__(self, x, y, curve):
self.x = x
self.y = y
self.curve = curve
@property
def on_curve(self):
if not self:
return True
m = self.curve.m
a = self.curve.a
b = self.curve.b
h1 = (self.x * self.x) % m
h1 = (h1 + a) % m
h1 = (h1 * self.x) % m
h1 = (h1 + b) % m
h2 = (self.y * self.y) % m
return h1 == h2
def to_jacobian(self):
if not self:
return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)
def double(self):
if not self.y:
return AffinePoint(x=0, y=0, curve=self.curve)
m = self.curve.m
a = self.curve.a
t2 = (self.x * self.x) % m
t1 = (t2 + t2) % m
t1 = (t1 + t2) % m
t1 = (t1 + a) % m
t2 = (self.y + self.y) % m
t2 = gmpy.invert(t2, m)
t1 = (t1 * t2) % m
t2 = (t1 * t1) % m
t2 = (t2 - self.x) % m
t2 = (t2 - self.x) % m
x = (self.x - t2) % m
t1 = (t1 * x) % m
y = (t1 - self.y) % m
x = t2
return AffinePoint(x=x, y=y, curve=self.curve)
def __mul__(self, exp):
n = exp.numdigits(2)
r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
while n:
r = r.double()
n -= 1
if exp.getbit(n):
r = r + self
R = r.to_affine()
assert R.on_curve
return R
def __add__(self, other):
if not isinstance(other, AffinePoint):
raise NotImplementedError
if not other:
return self
if not self:
return other
if self.x == other.x:
if self.y == other.y:
return self.double()
return AffinePoint(x=0, y=0, curve=self.curve)
m = self.curve.m
t = (self.y - other.y) % m
y = (self.x - other.x) % m
y = gmpy.invert(y, m)
y = (t * y) % m
t = (y * y) % m
x = (self.x + other.x) % m
x = (t - x) % m
t = (other.x - x) % m
y = (y * t) % m
y = (y - other.y) % m
return AffinePoint(x=x, y=y, curve=self.curve)
def __nonzero__(self):
return bool(self.x or self.y)
__bool__ = __nonzero__
def __repr__(self):
return "<AffinePoint (%s, %s) of %s>" % (
self.x, self.y, self.curve.name)
def __eq__(self, other):
if not isinstance(other, AffinePoint):
return False
return self.x == other.x and self.y == other.y
def __ne__(self, other):
return not (self == other)
def __str__(self):
return self.to_string(SER_COMPACT)
def to_bytes(self, fmt=SER_BINARY):
outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
else self.curve.pk_len_bin)
if self._point_compress():
return serialize_number(self.x + self.curve.m, fmt, outlen)
return serialize_number(self.x, fmt, outlen)
def to_string(self, fmt=SER_BINARY):
return self.to_bytes(fmt).decode()
def _point_compress(self):
return self.y.getbit(0) == 1
def _ECIES_KDF(self, R):
h = hashlib.sha512()
h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
return h.digest()
def _ECIES_encryption(self):
while True:
k = gmpy.mpz(
Crypto.Random.random.randrange(
0, int(
self.curve.order - 1)))
R = self.curve.base * k
k = k * self.curve.cofactor
Z = self * k
if Z:
break
return (Z._ECIES_KDF(R), R)
def _ECIES_decryption(self, d):
if isinstance(d, PrivKey):
d = d.e
e = d * self.curve.cofactor
if not self.valid_embedded_key:
raise ValueError
Z = self * e
if not Z:
raise ValueError
return Z._ECIES_KDF(self)
def _ECDSA_verify(self, md, sig):
order = self.curve.order
s, r = divmod(sig, order)
if s <= 0 or order <= s or r <= 0 or order <= r:
return False
e = deserialize_number(md, SER_BINARY) % order
s = gmpy.invert(s, order)
e = (e * s) % order
X1 = self.curve.base * e
e = (r * s) % order
X2 = self * e
X1 = X1 + X2
if not X1:
return False
s = X1.x % order
return s == r
@property
def valid_embedded_key(self):
if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
self.y > self.curve.m):
return False
if not self:
return False
if not self.on_curve:
return False
return True
class PubKey(object):
""" A public affine point """
def __init__(self, p):
self.p = p
def verify(self, h, sig, sig_fmt=SER_BINARY):
""" Verifies that `sig' is a signature for a message with
SHA-512 hash `h'. """
s = deserialize_number(sig, sig_fmt)
return self.p._ECDSA_verify(h, s)
@contextlib.contextmanager
def encrypt_to(self, f, mac_bytes=10):
""" Returns a file like object `ef'. Anything written to `ef'
will be encrypted for this pubkey and written to `f'. """
ctx = EncryptionContext(f, self.p, mac_bytes)
yield ctx
ctx.finish()
def encrypt(self, s, mac_bytes=10):
""" Encrypt `s' for this pubkey. """
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
f.write(s)
return out.getvalue()
def to_bytes(self, fmt=SER_BINARY):
return self.p.to_bytes(fmt)
def to_string(self, fmt=SER_BINARY):
return self.p.to_string(fmt)
def __str__(self):
return self.to_string(SER_COMPACT)
def __repr__(self):
return "<PubKey %s>" % self
class PrivKey(object):
""" A secret exponent """
def __init__(self, e, curve):
self.e = e
self.curve = curve
@contextlib.contextmanager
def decrypt_from(self, f, mac_bytes=10):
""" Decrypts a message from f. """
ctx = DecryptionContext(self.curve, f, self, mac_bytes)
yield ctx
ctx.read()
def decrypt(self, s, mac_bytes=10):
if isinstance(s, six.text_type):
raise ValueError("s should be bytes")
instream = BytesIO(s)
with self.decrypt_from(instream, mac_bytes) as f:
return f.read()
def sign(self, h, sig_format=SER_BINARY):
""" Signs the message with SHA-512 hash `h' with this private key. """
outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
else self.curve.sig_len_bin)
sig = self._ECDSA_sign(h)
return serialize_number(sig, sig_format, outlen)
def __repr__(self):
return "<PrivKey %s>" % self.e
def __str__(self):
return str(self.e)
def _ECDSA_sign(self, md):
# Get the pseudo-random exponent from the messagedigest
# and the private key.
order = self.curve.order
hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
h = hmac.new(hmk, digestmod=hashlib.sha256)
h.update(md)
ctr = Crypto.Util.Counter.new(128, initial_value=0)
cprng = Crypto.Cipher.AES.new(h.digest(),
Crypto.Cipher.AES.MODE_CTR, counter=ctr)
r = 0
s = 0
while s == 0:
while r == 0:
buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
k = self.curve._buf_to_exponent(buf)
p1 = self.curve.base * k
r = p1.x % order
e = deserialize_number(md, SER_BINARY)
e = (e % order)
s = (self.e * r) % order
s = (s + e) % order
e = gmpy.invert(k, order)
s = (s * e) % order
s = s * order
s = s + r
return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
""" Holds state of encryption. Use AffinePoint.encrypt_to """
def __init__(self, f, p, mac_bytes=10):
self.f = f
self.mac_bytes = mac_bytes
key, R = p._ECIES_encryption()
self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
f.write(R.to_bytes(SER_BINARY))
ctr = Crypto.Util.Counter.new(128, initial_value=0)
self.cipher = Crypto.Cipher.AES.new(
key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
def write(self, s):
if not self.f:
raise IOError("closed")
ct = self.cipher.encrypt(s)
self.f.write(ct)
self.h.update(ct)
def finish(self):
if not self.f:
raise IOError("closed")
self.f.write(self.h.digest()[:self.mac_bytes])
self.f = None
class DecryptionContext(object):
""" Holds state of decryption. Use Curve.decrypt_from """
def __init__(self, curve, f, privkey, mac_bytes=10):
self.f = f
self.mac_bytes = mac_bytes
R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
key = R._ECIES_decryption(privkey)
self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
ctr = Crypto.Util.Counter.new(128, initial_value=0)
self.cipher = Crypto.Cipher.AES.new(
key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
self.ahead = f.read(mac_bytes)
def read(self, n=None):
if not self.f:
return ''
if n is None:
tmp = self.ahead + self.f.read()
else:
tmp = self.ahead + self.f.read(n)
ct = tmp[:-self.mac_bytes]
self.ahead = tmp[-self.mac_bytes:]
self.h.update(ct)
pt = self.cipher.decrypt(ct)
if n is None or len(ct) < n:
if self.h.digest()[:self.mac_bytes] != self.ahead:
raise IntegrityError
self.f = None
return pt
# The main Curve objects
# #########################################################
class Curve(object):
""" Represents a Elliptic Curve """
@staticmethod
def by_name_substring(substring):
substring = substring.lower()
candidates = []
for raw_curve in RAW_CURVES:
if substring in raw_curve[0]:
candidates.append(raw_curve)
if len(candidates) != 1:
raise KeyError
return Curve(candidates[0])
@staticmethod
def by_name(name):
for raw_curve in RAW_CURVES:
if raw_curve[0] == name:
return Curve(raw_curve)
raise KeyError
@staticmethod
def by_pk_len(pk_len):
for raw_curve in RAW_CURVES:
if raw_curve[8] == pk_len:
return Curve(raw_curve)
raise KeyError
def __init__(self, raw_curve_params):
""" Initialize a new curve from raw curve parameters.
Use `Curve.by_pk_len' instead """
r = raw_curve_parameters(*raw_curve_params)
# Store domain parameters
self.name = r.name
self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
self.order = deserialize_number(
binascii.unhexlify(r.order), SER_BINARY)
self.base = AffinePoint(
curve=self, x=deserialize_number(
binascii.unhexlify(
r.base_x), SER_BINARY), y=deserialize_number(
binascii.unhexlify(
r.base_y), SER_BINARY))
self.cofactor = r.cofactor
# Calculate some other parameters
self.pk_len_bin = get_serialized_number_len(
(2 * self.m) - 1, SER_BINARY)
self.pk_len_compact = get_serialized_number_len(
(2 * self.m) - 1, SER_COMPACT)
assert self.pk_len_compact == r.pk_len_compact
self.sig_len_bin = get_serialized_number_len(
(self.order * self.order) - 1, SER_BINARY)
self.sig_len_compact = get_serialized_number_len(
(self.order * self.order) - 1, SER_COMPACT)
self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
self.dh_len_compact = get_serialized_number_len(
2 ** self.dh_len_bin - 1, SER_COMPACT)
self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)
@property
def key_bytes(self):
""" The approximate number of bytes of information in a key. """
return self.pk_len_bin
def __repr__(self):
return "<Curve %s>" % self.name
def point_from_string(self, s, fmt=SER_BINARY):
x = deserialize_number(s, fmt)
yflag = x >= self.m
if yflag:
x = x - self.m
assert 0 < x and x <= self.m
return self._point_decompress(x, yflag)
def pubkey_from_string(self, s, fmt=SER_BINARY):
return PubKey(self.point_from_string(s, fmt))
def _point_decompress(self, x, yflag):
m = self.m
h = (x * x) % m
h = (h + self.a) % m
h = (h * x) % m
h = (h + self.b) % m
y = mod_root(h, m)
if y or not yflag:
if bool(y.getbit(0)) == yflag:
return AffinePoint(x=x, y=y, curve=self)
return AffinePoint(x=x, y=m - y, curve=self)
def hash_to_exponent(self, h):
""" Converts a 32 byte hash to an exponent """
ctr = Crypto.Util.Counter.new(128, initial_value=0)
cipher = Crypto.Cipher.AES.new(h,
Crypto.Cipher.AES.MODE_CTR, counter=ctr)
buf = cipher.encrypt(b'\0' * self.order_len_bin)
return self._buf_to_exponent(buf)
def _buf_to_exponent(self, buf):
a = deserialize_number(buf, SER_BINARY)
a = (a % (self.order - 1)) + 1
return a
def passphrase_to_pubkey(self, passphrase):
return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)
def passphrase_to_privkey(self, passphrase):
if isinstance(passphrase, six.text_type):
raise ValueError(
"Encode `passphrase` to a bytestring yourself to" +
" prevent problems with different default encodings")
h = _passphrase_to_hash(passphrase)
return PrivKey(self.hash_to_exponent(h), self)
@contextlib.contextmanager
def decrypt_from(self, f, privkey, mac_bytes=10):
ctx = DecryptionContext(self, f, privkey, mac_bytes)
yield ctx
ctx.read()
def decrypt(self, s, privkey, mac_bytes=10):
instream = BytesIO(s)
with self.decrypt_from(instream, privkey, mac_bytes) as f:
return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
""" Converts a passphrase to a hash. """
return hashlib.sha256(passphrase).digest()
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
""" Decrypts `s' with passphrase `passphrase' """
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.decrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
mac_bytes=10, chunk_size=4096, curve=None):
""" Encrypts `in_file' to `out_file' for pubkey `pk' """
close_in, close_out = False, False
in_file, out_file = in_path_or_file, out_path_or_file
try:
if stringlike(in_path_or_file):
in_file = open(in_path_or_file, 'rb')
close_in = True
if stringlike(out_path_or_file):
out_file = open(out_path_or_file, 'wb')
close_out = True
_encrypt_file(in_file, out_file, pk, pk_format, mac_bytes, chunk_size,
curve)
finally:
if close_out:
out_file.close()
if close_in:
in_file.close()
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
curve='secp160r1', mac_bytes=10, chunk_size=4096):
""" Decrypts `in_file' to `out_file' with passphrase `passphrase' """
close_in, close_out = False, False
in_file, out_file = in_path_or_file, out_path_or_file
try:
if stringlike(in_path_or_file):
in_file = open(in_path_or_file, 'rb')
close_in = True
if stringlike(out_path_or_file):
out_file = open(out_path_or_file, 'wb')
close_out = True
_decrypt_file(in_file, out_file, passphrase, curve, mac_bytes,
chunk_size)
finally:
if close_out:
out_file.close()
if close_in:
in_file.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
mac_bytes=10, chunk_size=4096, curve=None):
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
with p.encrypt_to(out_file, mac_bytes) as encrypted_out:
while True:
buff = in_file.read(chunk_size)
if not buff:
break
encrypted_out.write(buff)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
mac_bytes=10, chunk_size=4096):
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
with privkey.decrypt_from(in_file, mac_bytes) as decrypted_in:
while True:
buff = decrypted_in.read(chunk_size)
if not buff:
break
out_file.write(buff)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
curve=None):
""" Verifies that `sig' is a signature of pubkey `pk' for the
message `s'. """
if isinstance(s, six.text_type):
raise ValueError("Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
return p.verify(hashlib.sha512(s).digest(), sig, sig_format)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
""" Signs `s' with passphrase `passphrase' """
if isinstance(s, six.text_type):
raise ValueError("Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.sign(hashlib.sha512(s).digest(), sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
curve = Curve.by_name(curve)
return curve.passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
""" Convenience function to generate a random
new keypair (passphrase, pubkey). """
if randfunc is None:
randfunc = Crypto.Random.new().read
curve = Curve.by_name(curve)
raw_privkey = randfunc(curve.order_len_bin)
privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
pubkey = str(passphrase_to_pubkey(privkey))
return (privkey, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
decrypt
|
python
|
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.decrypt(s, mac_bytes)
|
Decrypts `s' with passphrase `passphrase'
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L911-L915
|
[
"def decrypt(self, s, mac_bytes=10):\n if isinstance(s, six.text_type):\n raise ValueError(\"s should be bytes\")\n instream = BytesIO(s)\n with self.decrypt_from(instream, mac_bytes) as f:\n return f.read()\n",
"def by_name(name):\n for raw_curve in RAW_CURVES:\n if raw_curve[0] == name:\n return Curve(raw_curve)\n raise KeyError\n",
"def passphrase_to_privkey(self, passphrase):\n if isinstance(passphrase, six.text_type):\n raise ValueError(\n \"Encode `passphrase` to a bytestring yourself to\" +\n \" prevent problems with different default encodings\")\n h = _passphrase_to_hash(passphrase)\n return PrivKey(self.hash_to_exponent(h), self)\n"
] |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
if six.PY3:
from io import BytesIO as BytesIO
def byte2int(b): return b
def stringlike(x): return isinstance(x, (str, bytes))
else:
from cStringIO import StringIO as BytesIO
def byte2int(b): return ord(b)
def stringlike(x): return isinstance(x, basestring)
l = logging.getLogger(__name__)
class IntegrityError(ValueError):
pass
# Serialization of numbers
# #########################################################
SER_COMPACT = 0
SER_BINARY = 1
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
R_COMPACT_DIGITS = {} # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """ Serializes the number `x' as a byte string.

    SER_BINARY writes the number big-endian in base 256; SER_COMPACT
    writes it using the COMPACT_DIGITS alphabet.  When `outlen' is
    given, the result is left-padded up to that length and must not
    exceed it.  Note that zero serializes to b'' unless padded. """
    if fmt == SER_BINARY:
        pieces = []
        while x:
            x, digit = divmod(x, 256)
            pieces.append(six.int2byte(int(digit)))
        buf = b''.join(reversed(pieces))
        if outlen is not None:
            assert len(buf) <= outlen
            buf = buf.rjust(outlen, b'\0')
        return buf
    assert fmt == SER_COMPACT
    pieces = []
    while x:
        x, digit = divmod(x, len(COMPACT_DIGITS))
        pieces.append(COMPACT_DIGITS[digit:digit + 1])
    buf = b''.join(reversed(pieces))
    if outlen is not None:
        assert len(buf) <= outlen
        buf = buf.rjust(outlen, COMPACT_DIGITS[0:1])
    return buf
def deserialize_number(s, fmt=SER_BINARY):
    """ Parses a number from the string `s' in format `fmt'.

    Returns a gmpy.mpz.  SER_BINARY input must already be a byte
    string; SER_COMPACT text input is first encoded as ASCII. """
    acc = gmpy.mpz(0)
    if fmt == SER_BINARY:
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        for ch in s:
            acc = acc * 256 + byte2int(ch)
        return acc
    assert fmt == SER_COMPACT
    if isinstance(s, six.text_type):
        s = s.encode('ascii')
    base = len(COMPACT_DIGITS)
    for ch in s:
        acc = acc * base + R_COMPACT_DIGITS[ch]
    return acc
def get_serialized_number_len(x, fmt=SER_BINARY):
    """ Returns the serialized length of the number `x' in format `fmt'.

    SER_BINARY: bytes needed for the big-endian encoding.
    SER_COMPACT: number of compact-alphabet digits needed. """
    if fmt == SER_BINARY:
        # One byte covers 8 bits of the number (x is a gmpy.mpz).
        return (x.numdigits(2) + 7) // 8
    assert fmt == SER_COMPACT
    base = len(COMPACT_DIGITS)
    ndigits = 0
    while x != 0:
        x = x // base
        ndigits += 1
    return ndigits
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
    """ Returns whether `a' is a quadratic residue modulo the odd
    prime `p'.

    Uses Euler's criterion: `a' is a square iff a^((p-1)/2) == 1
    (mod p).  Zero counts as a square. """
    if not a:
        return True
    return pow(a, p // 2, p) == 1
def mod_root(a, p):
    """ Return a root of `a' modulo p """
    # Tonelli-Shanks modular square root for an odd prime p.  Expects
    # gmpy.mpz arguments (relies on .getbit/.setbit).  Raises
    # ValueError when `a' is not a quadratic residue.
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        raise ValueError
    # Find the smallest quadratic non-residue n.
    n = 2
    while mod_issquare(n, p):
        n += 1
    # Write p - 1 = q * 2^r with q odd.
    q = p - 1
    r = 0
    while not q.getbit(r):
        r += 1
    q = q >> r
    y = pow(n, q, p)
    h = q >> 1
    b = pow(a, h, p)
    x = (a * b) % p
    b = (b * x) % p
    # Iteratively correct x until b == 1, at which point x^2 == a (mod p).
    while b != 1:
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        h = gmpy.mpz(0)
        h = h.setbit(r - m - 1)  # h = 2^(r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
# Raw curve parameters
# #########################################################
raw_curve_parameters = collections.namedtuple(
'raw_curve_parameters',
('name',
'a',
'b',
'm',
'base_x',
'base_y',
'order',
'cofactor',
'pk_len_compact'))
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
    """ A point on an elliptic curve in Jacobian (projective)
    coordinates.

    Represents the affine point (x/z^2, y/z^3); z == 0 encodes the
    point at infinity.  This representation avoids a modular
    inversion on every addition/doubling. """
    def __init__(self, x, y, z, curve):
        self.x = x
        self.y = y
        self.z = z
        self.curve = curve
    def to_affine(self):
        """ Converts to an AffinePoint (costs one modular inversion). """
        if self.z == 0:
            # Point at infinity -- represented affinely as (0, 0).
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        h = gmpy.invert(self.z, m)
        y = (h * h) % m
        x = (self.x * y) % m
        y = (y * h) % m
        y = (y * self.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)
    def double(self):
        """ Returns 2 * self (point doubling in Jacobian coordinates). """
        if not self.z:
            # Doubling the point at infinity yields itself.
            return self
        if not self.y:
            # y == 0: the tangent is vertical, result is infinity.
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        t1 = (self.x * self.x) % m
        t2 = (t1 + t1) % m
        t2 = (t2 + t1) % m
        t1 = (self.z * self.z) % m
        t1 = (t1 * t1) % m
        t1 = (t1 * a) % m
        t1 = (t1 + t2) % m
        z = (self.z * self.y) % m
        z = (z + z) % m
        y = (self.y * self.y) % m
        y = (y + y) % m
        t2 = (self.x * y) % m
        t2 = (t2 + t2) % m
        x = (t1 * t1) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        t1 = (t1 * t2) % m
        t2 = (y * y) % m
        t2 = (t2 + t2) % m
        y = (t1 - t2) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __add__(self, other):
        """ Mixed addition: Jacobian self + affine other. """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self.z:
            return other.to_jacobian()
        m = self.curve.m
        t1 = (self.z * self.z) % m
        t2 = (t1 * other.x) % m
        t1 = (t1 * self.z) % m
        t1 = (t1 * other.y) % m
        if self.x == t2:
            if self.y == t1:
                # Same point: fall back to doubling.
                return self.double()
            # Opposite points: result is the point at infinity.
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        x = (self.x - t2) % m
        y = (self.y - t1) % m
        z = (self.z * x) % m
        t3 = (x * x) % m
        t2 = (t2 * t3) % m
        t3 = (t3 * x) % m
        t1 = (t1 * t3) % m
        x = (y * y) % m
        x = (x - t3) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        y = (y * t2) % m
        y = (y - t1) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __repr__(self):
        return "<JacobianPoint (%s, %s, %s) of %s>" % (
            self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
    """ A point (x, y) on an elliptic curve in affine coordinates.

    The pair (0, 0) represents the point at infinity (the group's
    identity).  Also carries the ECIES/ECDSA primitives used by
    PubKey and PrivKey. """
    def __init__(self, x, y, curve):
        self.x = x
        self.y = y
        self.curve = curve
    @property
    def on_curve(self):
        """ Whether this point satisfies y^2 == x^3 + a*x + b (mod m). """
        if not self:
            # The point at infinity is considered on the curve.
            return True
        m = self.curve.m
        a = self.curve.a
        b = self.curve.b
        h1 = (self.x * self.x) % m
        h1 = (h1 + a) % m
        h1 = (h1 * self.x) % m
        h1 = (h1 + b) % m
        h2 = (self.y * self.y) % m
        return h1 == h2
    def to_jacobian(self):
        """ Converts to Jacobian (projective) coordinates. """
        if not self:
            return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)
    def double(self):
        """ Returns 2 * self (affine doubling; one modular inversion). """
        if not self.y:
            # Vertical tangent: doubling gives the point at infinity.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        t2 = (self.x * self.x) % m
        t1 = (t2 + t2) % m
        t1 = (t1 + t2) % m
        t1 = (t1 + a) % m
        t2 = (self.y + self.y) % m
        t2 = gmpy.invert(t2, m)
        t1 = (t1 * t2) % m
        t2 = (t1 * t1) % m
        t2 = (t2 - self.x) % m
        t2 = (t2 - self.x) % m
        x = (self.x - t2) % m
        t1 = (t1 * x) % m
        y = (t1 - self.y) % m
        x = t2
        return AffinePoint(x=x, y=y, curve=self.curve)
    def __mul__(self, exp):
        """ Scalar multiplication exp * self via double-and-add,
        scanning the bits of `exp' (a gmpy.mpz) from most to least
        significant, accumulating in Jacobian coordinates. """
        n = exp.numdigits(2)
        r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        while n:
            r = r.double()
            n -= 1
            if exp.getbit(n):
                r = r + self
        R = r.to_affine()
        assert R.on_curve
        return R
    def __add__(self, other):
        """ Affine point addition (one modular inversion). """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self:
            return other
        if self.x == other.x:
            if self.y == other.y:
                return self.double()
            # Opposite points: result is the point at infinity.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        t = (self.y - other.y) % m
        y = (self.x - other.x) % m
        y = gmpy.invert(y, m)
        y = (t * y) % m
        t = (y * y) % m
        x = (self.x + other.x) % m
        x = (t - x) % m
        t = (other.x - x) % m
        y = (y * t) % m
        y = (y - other.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)
    def __nonzero__(self):
        # (0, 0) is the point at infinity and thus falsy.
        return bool(self.x or self.y)
    __bool__ = __nonzero__
    def __repr__(self):
        return "<AffinePoint (%s, %s) of %s>" % (
            self.x, self.y, self.curve.name)
    def __eq__(self, other):
        if not isinstance(other, AffinePoint):
            return False
        return self.x == other.x and self.y == other.y
    def __ne__(self, other):
        return not (self == other)
    def __str__(self):
        return self.to_string(SER_COMPACT)
    def to_bytes(self, fmt=SER_BINARY):
        """ Serializes the point in compressed form: the x coordinate,
        with the curve modulus added when the parity bit of y is set. """
        outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
                  else self.curve.pk_len_bin)
        if self._point_compress():
            return serialize_number(self.x + self.curve.m, fmt, outlen)
        return serialize_number(self.x, fmt, outlen)
    def to_string(self, fmt=SER_BINARY):
        return self.to_bytes(fmt).decode()
    def _point_compress(self):
        # The compression flag is the parity of y.
        return self.y.getbit(0) == 1
    def _ECIES_KDF(self, R):
        """ Derives the 64 byte shared key from the shared secret
        (self) and the ephemeral public point R via SHA-512. """
        h = hashlib.sha512()
        h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
        return h.digest()
    def _ECIES_encryption(self):
        """ Sender side of ECIES: picks a random exponent k, returns
        (derived key, ephemeral public point R = base * k). """
        while True:
            k = gmpy.mpz(
                Crypto.Random.random.randrange(
                    0, int(
                        self.curve.order - 1)))
            R = self.curve.base * k
            k = k * self.curve.cofactor
            Z = self * k
            if Z:
                break
        return (Z._ECIES_KDF(R), R)
    def _ECIES_decryption(self, d):
        """ Recipient side of ECIES: self is the ephemeral point R;
        `d' is the private exponent (or a PrivKey).  Returns the
        derived key; raises ValueError on an invalid point. """
        if isinstance(d, PrivKey):
            d = d.e
        e = d * self.curve.cofactor
        if not self.valid_embedded_key:
            raise ValueError
        Z = self * e
        if not Z:
            raise ValueError
        return Z._ECIES_KDF(self)
    def _ECDSA_verify(self, md, sig):
        """ Verifies an ECDSA signature `sig' (the pair (s, r) packed
        as s * order + r) over message digest `md' against this
        public point. """
        order = self.curve.order
        s, r = divmod(sig, order)
        if s <= 0 or order <= s or r <= 0 or order <= r:
            return False
        e = deserialize_number(md, SER_BINARY) % order
        s = gmpy.invert(s, order)
        e = (e * s) % order
        X1 = self.curve.base * e
        e = (r * s) % order
        X2 = self * e
        X1 = X1 + X2
        if not X1:
            return False
        s = X1.x % order
        return s == r
    @property
    def valid_embedded_key(self):
        """ Whether this point is a valid public point: coordinates in
        range, not the identity, and on the curve. """
        if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
                self.y > self.curve.m):
            return False
        if not self:
            return False
        if not self.on_curve:
            return False
        return True
class PubKey(object):
    """ A public affine point """
    def __init__(self, p):
        # p is the AffinePoint representing the public key.
        self.p = p
    def verify(self, h, sig, sig_fmt=SER_BINARY):
        """ Verifies that `sig' is a signature for a message with
        SHA-512 hash `h'. """
        s = deserialize_number(sig, sig_fmt)
        return self.p._ECDSA_verify(h, s)
    @contextlib.contextmanager
    def encrypt_to(self, f, mac_bytes=10):
        """ Returns a file like object `ef'. Anything written to `ef'
        will be encrypted for this pubkey and written to `f'. """
        ctx = EncryptionContext(f, self.p, mac_bytes)
        yield ctx
        ctx.finish()
    def encrypt(self, s, mac_bytes=10):
        """ Encrypt `s' for this pubkey. """
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        out = BytesIO()
        with self.encrypt_to(out, mac_bytes) as f:
            f.write(s)
        return out.getvalue()
    def to_bytes(self, fmt=SER_BINARY):
        """ Serializes the public point as bytes in format `fmt'. """
        return self.p.to_bytes(fmt)
    def to_string(self, fmt=SER_BINARY):
        """ Serializes the public point as text in format `fmt'. """
        return self.p.to_string(fmt)
    def __str__(self):
        return self.to_string(SER_COMPACT)
    def __repr__(self):
        return "<PubKey %s>" % self
class PrivKey(object):
    """ A secret exponent """
    def __init__(self, e, curve):
        # e is the secret exponent (gmpy.mpz); curve its Curve.
        self.e = e
        self.curve = curve
    @contextlib.contextmanager
    def decrypt_from(self, f, mac_bytes=10):
        """ Decrypts a message from f. """
        ctx = DecryptionContext(self.curve, f, self, mac_bytes)
        yield ctx
        # Drain the stream so the MAC at the end is always checked.
        ctx.read()
    def decrypt(self, s, mac_bytes=10):
        """ Decrypts the bytestring `s' with this private key. """
        if isinstance(s, six.text_type):
            raise ValueError("s should be bytes")
        instream = BytesIO(s)
        with self.decrypt_from(instream, mac_bytes) as f:
            return f.read()
    def sign(self, h, sig_format=SER_BINARY):
        """ Signs the message with SHA-512 hash `h' with this private key. """
        outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
                  else self.curve.sig_len_bin)
        sig = self._ECDSA_sign(h)
        return serialize_number(sig, sig_format, outlen)
    def __repr__(self):
        return "<PrivKey %s>" % self.e
    def __str__(self):
        return str(self.e)
    def _ECDSA_sign(self, md):
        """ ECDSA signature over digest `md'.  The nonce k is derived
        deterministically from the private key and the digest (an
        HMAC-SHA256-keyed AES-CTR stream), so no fresh randomness is
        consumed per signature.  Returns (s, r) packed as the single
        number s * order + r. """
        # Get the pseudo-random exponent from the messagedigest
        # and the private key.
        order = self.curve.order
        hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
        h = hmac.new(hmk, digestmod=hashlib.sha256)
        h.update(md)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cprng = Crypto.Cipher.AES.new(h.digest(),
                                      Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        r = 0
        s = 0
        while s == 0:
            while r == 0:
                buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
                k = self.curve._buf_to_exponent(buf)
                p1 = self.curve.base * k
                r = p1.x % order
            e = deserialize_number(md, SER_BINARY)
            e = (e % order)
            s = (self.e * r) % order
            s = (s + e) % order
            e = gmpy.invert(k, order)
            s = (s * e) % order
        # Pack the pair (s, r) into one number; _ECDSA_verify unpacks
        # it with divmod(sig, order).
        s = s * order
        s = s + r
        return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
    """ Holds state of encryption. Use AffinePoint.encrypt_to """
    def __init__(self, f, p, mac_bytes=10):
        # f: output stream; p: recipient's public AffinePoint;
        # mac_bytes: number of trailing HMAC bytes appended on finish().
        self.f = f
        self.mac_bytes = mac_bytes
        # Derive a fresh shared key via ECIES; R is the ephemeral
        # public point the recipient needs to reconstruct it.
        key, R = p._ECIES_encryption()
        # Last 32 bytes of the 64 byte key feed HMAC-SHA256; the
        # first 32 feed AES-256-CTR.
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        f.write(R.to_bytes(SER_BINARY))
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
    def write(self, s):
        """ Encrypts `s' and writes the ciphertext to the output.
        Encrypt-then-MAC: the HMAC covers the ciphertext. """
        if not self.f:
            raise IOError("closed")
        ct = self.cipher.encrypt(s)
        self.f.write(ct)
        self.h.update(ct)
    def finish(self):
        """ Appends the truncated MAC and closes the context. """
        if not self.f:
            raise IOError("closed")
        self.f.write(self.h.digest()[:self.mac_bytes])
        self.f = None
class DecryptionContext(object):
    """ Holds state of decryption. Use Curve.decrypt_from """
    def __init__(self, curve, f, privkey, mac_bytes=10):
        # f: input stream; privkey: recipient's PrivKey; mac_bytes:
        # number of trailing HMAC bytes at the end of the stream.
        self.f = f
        self.mac_bytes = mac_bytes
        # The message starts with the sender's ephemeral public point R.
        R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
        key = R._ECIES_decryption(privkey)
        # Last 32 bytes of the 64 byte derived key feed HMAC-SHA256;
        # the first 32 feed AES-256-CTR.
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        # Keep `mac_bytes' bytes read ahead: the final `mac_bytes' of
        # the stream are the MAC, not ciphertext.
        self.ahead = f.read(mac_bytes)
    def read(self, n=None):
        """ Decrypts and returns up to `n' bytes (everything remaining
        when `n' is None).  Once the stream is exhausted the MAC is
        checked; raises IntegrityError on mismatch. """
        if not self.f:
            # Bug fix: return empty *bytes*, not str -- all other
            # returns are bytes and callers concatenate the chunks.
            return b''
        if n is None:
            tmp = self.ahead + self.f.read()
        else:
            tmp = self.ahead + self.f.read(n)
        ct = tmp[:-self.mac_bytes]
        self.ahead = tmp[-self.mac_bytes:]
        self.h.update(ct)
        pt = self.cipher.decrypt(ct)
        if n is None or len(ct) < n:
            # End of stream: the look-ahead buffer now holds the MAC.
            if self.h.digest()[:self.mac_bytes] != self.ahead:
                raise IntegrityError
            self.f = None
        return pt
# The main Curve objects
# #########################################################
class Curve(object):
    """ Represents a Elliptic Curve """
    @staticmethod
    def by_name_substring(substring):
        """ Returns the single curve whose name contains `substring'
        (case-insensitive); raises KeyError when not unique. """
        substring = substring.lower()
        candidates = []
        for raw_curve in RAW_CURVES:
            if substring in raw_curve[0]:
                candidates.append(raw_curve)
        if len(candidates) != 1:
            raise KeyError
        return Curve(candidates[0])
    @staticmethod
    def by_name(name):
        """ Returns the curve with exactly this name; KeyError if absent. """
        for raw_curve in RAW_CURVES:
            if raw_curve[0] == name:
                return Curve(raw_curve)
        raise KeyError
    @staticmethod
    def by_pk_len(pk_len):
        """ Returns the curve whose compact public keys have length
        `pk_len'; KeyError if none matches. """
        for raw_curve in RAW_CURVES:
            if raw_curve[8] == pk_len:
                return Curve(raw_curve)
        raise KeyError
    def __init__(self, raw_curve_params):
        """ Initialize a new curve from raw curve parameters.
        Use `Curve.by_pk_len' instead """
        r = raw_curve_parameters(*raw_curve_params)
        # Store domain parameters
        self.name = r.name
        self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
        self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
        self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
        self.order = deserialize_number(
            binascii.unhexlify(r.order), SER_BINARY)
        self.base = AffinePoint(
            curve=self, x=deserialize_number(
                binascii.unhexlify(
                    r.base_x), SER_BINARY), y=deserialize_number(
                binascii.unhexlify(
                    r.base_y), SER_BINARY))
        self.cofactor = r.cofactor
        # Calculate some other parameters
        self.pk_len_bin = get_serialized_number_len(
            (2 * self.m) - 1, SER_BINARY)
        self.pk_len_compact = get_serialized_number_len(
            (2 * self.m) - 1, SER_COMPACT)
        assert self.pk_len_compact == r.pk_len_compact
        self.sig_len_bin = get_serialized_number_len(
            (self.order * self.order) - 1, SER_BINARY)
        self.sig_len_compact = get_serialized_number_len(
            (self.order * self.order) - 1, SER_COMPACT)
        self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
        self.dh_len_compact = get_serialized_number_len(
            2 ** self.dh_len_bin - 1, SER_COMPACT)
        self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
        self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)
    @property
    def key_bytes(self):
        """ The approximate number of bytes of information in a key. """
        return self.pk_len_bin
    def __repr__(self):
        return "<Curve %s>" % self.name
    def point_from_string(self, s, fmt=SER_BINARY):
        """ Parses a compressed point: x plus the modulus when the
        parity flag of y was set (see AffinePoint.to_bytes). """
        x = deserialize_number(s, fmt)
        yflag = x >= self.m
        if yflag:
            x = x - self.m
        assert 0 < x and x <= self.m
        return self._point_decompress(x, yflag)
    def pubkey_from_string(self, s, fmt=SER_BINARY):
        """ Parses a serialized public key into a PubKey. """
        return PubKey(self.point_from_string(s, fmt))
    def _point_decompress(self, x, yflag):
        """ Recovers y from x and the parity flag by solving
        y^2 = x^3 + a*x + b (mod m). """
        m = self.m
        h = (x * x) % m
        h = (h + self.a) % m
        h = (h * x) % m
        h = (h + self.b) % m
        y = mod_root(h, m)
        # NOTE(review): when y == 0 and yflag is set, no branch is
        # taken and this implicitly returns None -- presumably such
        # encodings never occur in practice; confirm.
        if y or not yflag:
            if bool(y.getbit(0)) == yflag:
                return AffinePoint(x=x, y=y, curve=self)
            return AffinePoint(x=x, y=m - y, curve=self)
    def hash_to_exponent(self, h):
        """ Converts a 32 byte hash to an exponent """
        # Expand the hash into order_len_bin pseudo-random bytes with
        # AES-CTR keyed by the hash, then reduce into [1, order - 1].
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cipher = Crypto.Cipher.AES.new(h,
                                       Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        buf = cipher.encrypt(b'\0' * self.order_len_bin)
        return self._buf_to_exponent(buf)
    def _buf_to_exponent(self, buf):
        # Map the byte buffer into the range [1, order - 1].
        a = deserialize_number(buf, SER_BINARY)
        a = (a % (self.order - 1)) + 1
        return a
    def passphrase_to_pubkey(self, passphrase):
        """ Derives the public key belonging to `passphrase'. """
        return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)
    def passphrase_to_privkey(self, passphrase):
        """ Derives the private key belonging to the bytestring
        `passphrase' (SHA-256 of the passphrase, expanded to an
        exponent). """
        if isinstance(passphrase, six.text_type):
            raise ValueError(
                "Encode `passphrase` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        h = _passphrase_to_hash(passphrase)
        return PrivKey(self.hash_to_exponent(h), self)
    @contextlib.contextmanager
    def decrypt_from(self, f, privkey, mac_bytes=10):
        """ Context manager yielding a readable decrypting wrapper
        around stream `f'; the MAC is verified on exit. """
        ctx = DecryptionContext(self, f, privkey, mac_bytes)
        yield ctx
        ctx.read()
    def decrypt(self, s, privkey, mac_bytes=10):
        """ Decrypts the bytestring `s' with `privkey'. """
        instream = BytesIO(s)
        with self.decrypt_from(instream, privkey, mac_bytes) as f:
            return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
""" Converts a passphrase to a hash. """
return hashlib.sha256(passphrase).digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
    """ Encrypts the bytestring `s' for public key `pk'.

    When `curve' is None the curve is deduced from the length of
    `pk'; otherwise it is looked up by name. """
    if curve is None:
        c = Curve.by_pk_len(len(pk))
    else:
        c = Curve.by_name(curve)
    pubkey = c.pubkey_from_string(pk, pk_format)
    return pubkey.encrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
                 mac_bytes=10, chunk_size=4096, curve=None):
    """ Encrypts `in_file' to `out_file' for pubkey `pk'.

    Either argument may be a path or an already-open file object;
    files opened here are closed again afterwards. """
    in_file = in_path_or_file
    out_file = out_path_or_file
    opened_in = False
    opened_out = False
    try:
        if stringlike(in_path_or_file):
            in_file = open(in_path_or_file, 'rb')
            opened_in = True
        if stringlike(out_path_or_file):
            out_file = open(out_path_or_file, 'wb')
            opened_out = True
        _encrypt_file(in_file, out_file, pk, pk_format, mac_bytes, chunk_size,
                      curve)
    finally:
        if opened_out:
            out_file.close()
        if opened_in:
            in_file.close()
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
                 curve='secp160r1', mac_bytes=10, chunk_size=4096):
    """ Decrypts `in_file' to `out_file' with passphrase `passphrase'.

    Either argument may be a path or an already-open file object;
    files opened here are closed again afterwards. """
    in_file = in_path_or_file
    out_file = out_path_or_file
    opened_in = False
    opened_out = False
    try:
        if stringlike(in_path_or_file):
            in_file = open(in_path_or_file, 'rb')
            opened_in = True
        if stringlike(out_path_or_file):
            out_file = open(out_path_or_file, 'wb')
            opened_out = True
        _decrypt_file(in_file, out_file, passphrase, curve, mac_bytes,
                      chunk_size)
    finally:
        if opened_out:
            out_file.close()
        if opened_in:
            in_file.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
                  mac_bytes=10, chunk_size=4096, curve=None):
    """ Streams `in_file' encrypted for pubkey `pk' into `out_file',
    reading `chunk_size' bytes at a time. """
    if curve is None:
        c = Curve.by_pk_len(len(pk))
    else:
        c = Curve.by_name(curve)
    pubkey = c.pubkey_from_string(pk, pk_format)
    with pubkey.encrypt_to(out_file, mac_bytes) as encrypted_out:
        while True:
            chunk = in_file.read(chunk_size)
            if not chunk:
                break
            encrypted_out.write(chunk)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
                  mac_bytes=10, chunk_size=4096):
    """ Streams `in_file' decrypted with `passphrase' into `out_file',
    reading `chunk_size' bytes at a time. """
    c = Curve.by_name(curve)
    privkey = c.passphrase_to_privkey(passphrase)
    with privkey.decrypt_from(in_file, mac_bytes) as decrypted_in:
        while True:
            chunk = decrypted_in.read(chunk_size)
            if not chunk:
                break
            out_file.write(chunk)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
           curve=None):
    """ Checks the signature `sig' of pubkey `pk' on the bytestring
    `s'.  What is actually verified is the SHA-512 digest of `s'.
    Returns True when the signature is valid. """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    if curve is None:
        c = Curve.by_pk_len(len(pk))
    else:
        c = Curve.by_name(curve)
    pubkey = c.pubkey_from_string(pk, pk_format)
    digest = hashlib.sha512(s).digest()
    return pubkey.verify(digest, sig, sig_format)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
    """ Signs the bytestring `s' with `passphrase'.  What is actually
    signed is the SHA-512 digest of `s'. """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    c = Curve.by_name(curve)
    privkey = c.passphrase_to_privkey(passphrase)
    digest = hashlib.sha512(s).digest()
    return privkey.sign(digest, sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
    """ Returns the PubKey belonging to `passphrase' on the named
    curve. """
    return Curve.by_name(curve).passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
    """ Convenience function to generate a random
    new keypair (passphrase, pubkey).

    `curve' is the name of the curve to use; `randfunc', when given,
    must return n random bytes when called as randfunc(n). """
    if randfunc is None:
        randfunc = Crypto.Random.new().read
    curve_name = curve
    c = Curve.by_name(curve_name)
    # Draw as many random bytes as the group order occupies and use
    # their compact encoding as the passphrase.
    raw_privkey = randfunc(c.order_len_bin)
    privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
    # Bug fix: derive the public key on the *requested* curve -- the
    # original always used passphrase_to_pubkey's default secp160r1,
    # producing a mismatched keypair for any other curve.
    pubkey = str(passphrase_to_pubkey(privkey, curve_name))
    return (privkey, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
encrypt_file
|
python
|
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
mac_bytes=10, chunk_size=4096, curve=None):
close_in, close_out = False, False
in_file, out_file = in_path_or_file, out_path_or_file
try:
if stringlike(in_path_or_file):
in_file = open(in_path_or_file, 'rb')
close_in = True
if stringlike(out_path_or_file):
out_file = open(out_path_or_file, 'wb')
close_out = True
_encrypt_file(in_file, out_file, pk, pk_format, mac_bytes, chunk_size,
curve)
finally:
if close_out:
out_file.close()
if close_in:
in_file.close()
|
Encrypts `in_file' to `out_file' for pubkey `pk'
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L918-L936
|
[
"def stringlike(x): return isinstance(x, (str, bytes))\n"
] |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
if six.PY3:
from io import BytesIO as BytesIO
def byte2int(b): return b
def stringlike(x): return isinstance(x, (str, bytes))
else:
from cStringIO import StringIO as BytesIO
def byte2int(b): return ord(b)
def stringlike(x): return isinstance(x, basestring)
l = logging.getLogger(__name__)
class IntegrityError(ValueError):
pass
# Serialization of numbers
# #########################################################
SER_COMPACT = 0
SER_BINARY = 1
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
R_COMPACT_DIGITS = {} # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """ Serializes `x' to a string of length `outlen' in format `fmt' """
    # SER_BINARY writes big-endian base 256; SER_COMPACT uses the
    # COMPACT_DIGITS alphabet.  With `outlen' the result is left-padded
    # (and asserted not to exceed that length).  Zero serializes to
    # b'' unless padded.
    ret = b''
    if fmt == SER_BINARY:
        while x:
            x, r = divmod(x, 256)
            ret = six.int2byte(int(r)) + ret
        if outlen is not None:
            assert len(ret) <= outlen
            ret = ret.rjust(outlen, b'\0')
        return ret
    assert fmt == SER_COMPACT
    while x:
        x, r = divmod(x, len(COMPACT_DIGITS))
        ret = COMPACT_DIGITS[r:r + 1] + ret
    if outlen is not None:
        assert len(ret) <= outlen
        ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])
    return ret
def deserialize_number(s, fmt=SER_BINARY):
    """ Deserializes a number from a string `s' in format `fmt' """
    # Returns a gmpy.mpz.  SER_BINARY requires `s' to be bytes
    # already; SER_COMPACT text input is first encoded as ASCII.
    ret = gmpy.mpz(0)
    if fmt == SER_BINARY:
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        for c in s:
            ret *= 256
            ret += byte2int(c)
        return ret
    assert fmt == SER_COMPACT
    if isinstance(s, six.text_type):
        s = s.encode('ascii')
    for c in s:
        ret *= len(COMPACT_DIGITS)
        ret += R_COMPACT_DIGITS[c]
    return ret
def get_serialized_number_len(x, fmt=SER_BINARY):
    """ Returns the serialized length of the number `x' in
    format `fmt'. """
    if fmt == SER_BINARY:
        # One byte per 8 bits of the number (x is a gmpy.mpz).
        return (x.numdigits(2) + 7) // 8
    assert fmt == SER_COMPACT
    res = 0
    while x != 0:
        x = x // len(COMPACT_DIGITS)
        res += 1
    return res
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
    """ Returns whether `a' is a square modulo p """
    # Euler's criterion: a^((p-1)/2) == 1 (mod p) exactly for the
    # quadratic residues of an odd prime p; zero counts as a square.
    if not a:
        return True
    p1 = p // 2
    p2 = pow(a, p1, p)
    return p2 == 1
def mod_root(a, p):
    """Return a square root of `a` modulo the odd prime `p`.

    Implements the Tonelli-Shanks algorithm.  `p` is expected to be a
    gmpy mpz (bit-level helpers ``getbit``/``setbit`` are used).

    Raises ValueError if `a` is not a quadratic residue mod p.
    """
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        raise ValueError
    # Find the smallest quadratic non-residue n.
    n = 2
    while mod_issquare(n, p):
        n += 1
    # Write p - 1 = q * 2^r with q odd.
    q = p - 1
    r = 0
    while not q.getbit(r):
        r += 1
    q = q >> r
    y = pow(n, q, p)
    h = q >> 1              # (q - 1) / 2, since q is odd
    b = pow(a, h, p)        # b = a^((q-1)/2)
    x = (a * b) % p         # x = a^((q+1)/2), candidate root
    b = (b * x) % p         # b = a^q; loop invariant: x^2 == a*b (mod p)
    while b != 1:
        # Find the smallest m >= 1 with b^(2^m) == 1, then fix up x, b.
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        h = gmpy.mpz(0)
        h = h.setbit(r - m - 1)   # h = 2^(r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
# Raw curve parameters
# #########################################################
# Domain parameters of a curve as stored in RAW_CURVES.  The big numbers
# (a, b, m, base_x, base_y, order) are hex-encoded byte strings.
raw_curve_parameters = collections.namedtuple(
    'raw_curve_parameters',
    ('name',             # human-readable curve name (may list aliases)
     'a',                # curve coefficient a
     'b',                # curve coefficient b
     'm',                # prime modulus of the field
     'base_x',           # base point, x coordinate
     'base_y',           # base point, y coordinate
     'order',            # order of the base point
     'cofactor',         # cofactor (plain int)
     'pk_len_compact'))  # expected compact public-key length (checked)
# Built-in curve parameter sets (SECG/NIST and Brainpool curves).  Field
# order matches the `raw_curve_parameters` namedtuple:
# (name, a, b, m, base_x, base_y, order, cofactor, pk_len_compact).
RAW_CURVES = (
    ("secp112r1",
     b"db7c2abf62e35e668076bead2088",
     b"659ef8ba043916eede8911702b22",
     b"db7c2abf62e35e668076bead208b",
     b"09487239995a5ee76b55f9c2f098",
     b"a89ce5af8724c0a23e0e0ff77500",
     b"db7c2abf62e35e7628dfac6561c5", 1, 18),
    ("secp128r1",
     b"fffffffdfffffffffffffffffffffffc",
     b"e87579c11079f43dd824993c2cee5ed3",
     b"fffffffdffffffffffffffffffffffff",
     b"161ff7528b899b2d0c28607ca52c5b86",
     b"cf5ac8395bafeb13c02da292dded7a83",
     b"fffffffe0000000075a30d1b9038a115", 1, 20),
    ("secp160r1",
     b"ffffffffffffffffffffffffffffffff7ffffffc",
     b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
     b"ffffffffffffffffffffffffffffffff7fffffff",
     b"4a96b5688ef573284664698968c38bb913cbfc82",
     b"23a628553168947d59dcc912042351377ac5fb32",
     b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
    ("secp192r1/nistp192",
     b"fffffffffffffffffffffffffffffffefffffffffffffffc",
     b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
     b"fffffffffffffffffffffffffffffffeffffffffffffffff",
     b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
     b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
     b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
    ("secp224r1/nistp224",
     b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
     b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
     b"ffffffffffffffffffffffffffffffff000000000000000000000001",
     b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
     b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
     b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
    ("secp256r1/nistp256",
     b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
     b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
     b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
     b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
     b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
     b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
     1, 40),
    ("secp384r1/nistp384",
     b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
     b"ffffffff0000000000000000fffffffc",
     b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
     b"c656398d8a2ed19d2a85c8edd3ec2aef",
     b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
     b"ffffffff0000000000000000ffffffff",
     b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
     b"5502f25dbf55296c3a545e3872760ab7",
     b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
     b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
     b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
     b"581a0db248b0a77aecec196accc52973", 1, 60),
    ("secp521r1/nistp521",
     b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
     b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
     b"fffffffc",
     b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
     b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
     b"6b503f00",
     b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
     b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
     b"ffffffff",
     b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
     b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
     b"c2e5bd66",
     b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
     b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
     b"9fd16650",
     b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
     b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
     b"91386409", 1, 81),
    ("brainpoolp160r1",
     b"340e7be2a280eb74e2be61bada745d97e8f7c300",
     b"1e589a8595423412134faa2dbdec95c8d8675e58",
     b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
     b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
     b"1667cb477a1a8ec338f94741669c976316da6321",
     b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
    ("brainpoolp192r1",
     b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
     b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
     b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
     b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
     b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
     b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
    ("brainpoolp224r1",
     b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
     b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
     b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
     b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
     b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
     b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
     1, 35),
    ("brainpoolp256r1",
     b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
     b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
     b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
     b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
     b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
     b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
     1, 40),
    ("brainpoolp320r1",
     b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
     b"2f375a97d860eb4",
     b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
     b"f5eb4ac8fb1f1a6",
     b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
     b"cd412b1f1b32e27",
     b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
     b"0af8d0d39e20611",
     b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
     b"35245d1692e8ee1",
     b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
     b"691555b44c59311", 1, 50),
    ("brainpoolp384r1",
     b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
     b"aa5814a503ad4eb04a8c7dd22ce2826",
     b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
     b"cb4390295dbc9943ab78696fa504c11",
     b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
     b"cd3a729901d1a71874700133107ec53",
     b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
     b"826e03436d646aaef87b2e247d4af1e",
     b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
     b"e4646217791811142820341263c5315",
     b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
     b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
    ("brainpoolp512r1",
     b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
     b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
     b"ca",
     b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
     b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
     b"23",
     b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
     b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
     b"f3",
     b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
     b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
     b"22",
     b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
     b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
     b"92",
     b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
     b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
     b"69",
     1, 79),
)
# Names of all supported curves.
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
    """A curve point in Jacobian projective coordinates.

    (x, y, z) represents the affine point (x / z^2, y / z^3); z == 0
    encodes the point at infinity.  Jacobian coordinates avoid a modular
    inversion per group operation.  Temporaries (t1, t2, ...) are reused
    aggressively below, so statement order matters.
    """

    def __init__(self, x, y, z, curve):
        self.x = x
        self.y = y
        self.z = z
        self.curve = curve

    def to_affine(self):
        """Convert back to affine coordinates (one modular inversion)."""
        if self.z == 0:
            # Infinity maps to AffinePoint's (0, 0) sentinel.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        h = gmpy.invert(self.z, m)   # h = z^-1
        y = (h * h) % m              # z^-2
        x = (self.x * y) % m         # x / z^2
        y = (y * h) % m              # z^-3
        y = (y * self.y) % m         # y / z^3
        return AffinePoint(x=x, y=y, curve=self.curve)

    def double(self):
        """Return 2 * self (Jacobian doubling)."""
        if not self.z:
            return self              # infinity doubles to itself
        if not self.y:
            # y == 0: vertical tangent, the result is infinity (z = 0).
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        t1 = (self.x * self.x) % m
        t2 = (t1 + t1) % m
        t2 = (t2 + t1) % m           # t2 = 3*x^2
        t1 = (self.z * self.z) % m
        t1 = (t1 * t1) % m
        t1 = (t1 * a) % m            # t1 = a*z^4
        t1 = (t1 + t2) % m           # M = 3*x^2 + a*z^4
        z = (self.z * self.y) % m
        z = (z + z) % m              # z' = 2*y*z
        y = (self.y * self.y) % m
        y = (y + y) % m              # y = 2*y^2
        t2 = (self.x * y) % m
        t2 = (t2 + t2) % m           # S = 4*x*y^2
        x = (t1 * t1) % m
        x = (x - t2) % m
        x = (x - t2) % m             # x' = M^2 - 2*S
        t2 = (t2 - x) % m
        t1 = (t1 * t2) % m           # M * (S - x')
        t2 = (y * y) % m
        t2 = (t2 + t2) % m           # 8*y^4
        y = (t1 - t2) % m            # y' = M*(S - x') - 8*y^4
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)

    def __add__(self, other):
        """Mixed addition: Jacobian self + affine other.

        Uses H = x1 - U2 and R = y1 - S2 (opposite sign of the textbook
        convention); the resulting (x, y, z) is an equivalent Jacobian
        representative of the sum.
        """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self.z:
            return other.to_jacobian()
        m = self.curve.m
        t1 = (self.z * self.z) % m   # z^2
        t2 = (t1 * other.x) % m      # U2 = x2 * z^2
        t1 = (t1 * self.z) % m       # z^3
        t1 = (t1 * other.y) % m      # S2 = y2 * z^3
        if self.x == t2:
            if self.y == t1:
                return self.double()  # same point: double instead
            # Inverse points: the sum is infinity.
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        x = (self.x - t2) % m
        y = (self.y - t1) % m
        z = (self.z * x) % m
        t3 = (x * x) % m
        t2 = (t2 * t3) % m
        t3 = (t3 * x) % m
        t1 = (t1 * t3) % m
        x = (y * y) % m
        x = (x - t3) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        y = (y * t2) % m
        y = (y - t1) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)

    def __repr__(self):
        return "<JacobianPoint (%s, %s, %s) of %s>" % (
            self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
    """A point (x, y) on an elliptic curve in affine coordinates.

    The pair (0, 0) is the sentinel for the point at infinity; it is
    falsy (see __nonzero__/__bool__).  Coordinates are gmpy mpz values.
    """

    def __init__(self, x, y, curve):
        self.x = x
        self.y = y
        self.curve = curve

    @property
    def on_curve(self):
        """True iff the point satisfies y^2 = x^3 + a*x + b (mod m)."""
        if not self:
            return True
        m = self.curve.m
        a = self.curve.a
        b = self.curve.b
        h1 = (self.x * self.x) % m
        h1 = (h1 + a) % m
        h1 = (h1 * self.x) % m
        h1 = (h1 + b) % m            # x^3 + a*x + b
        h2 = (self.y * self.y) % m
        return h1 == h2

    def to_jacobian(self):
        """Lift to Jacobian coordinates (z == 0 encodes infinity)."""
        if not self:
            return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)

    def double(self):
        """Return 2 * self using the affine doubling formula."""
        if not self.y:
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # Tangent slope t1 = (3*x^2 + a) / (2*y)
        t2 = (self.x * self.x) % m
        t1 = (t2 + t2) % m
        t1 = (t1 + t2) % m
        t1 = (t1 + a) % m
        t2 = (self.y + self.y) % m
        t2 = gmpy.invert(t2, m)
        t1 = (t1 * t2) % m
        # x' = t1^2 - 2*x;  y' = t1*(x - x') - y
        t2 = (t1 * t1) % m
        t2 = (t2 - self.x) % m
        t2 = (t2 - self.x) % m
        x = (self.x - t2) % m
        t1 = (t1 * x) % m
        y = (t1 - self.y) % m
        x = t2
        return AffinePoint(x=x, y=y, curve=self.curve)

    def __mul__(self, exp):
        """Scalar multiplication self * exp (exp a gmpy mpz).

        MSB-first double-and-add on Jacobian intermediates; converts
        back to affine once at the end and sanity-checks the result.
        """
        n = exp.numdigits(2)
        r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        while n:
            r = r.double()
            n -= 1
            if exp.getbit(n):
                r = r + self
        R = r.to_affine()
        assert R.on_curve
        return R

    def __add__(self, other):
        """Affine addition (handles identity and inverse cases)."""
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self:
            return other
        if self.x == other.x:
            if self.y == other.y:
                return self.double()
            # Equal x, different y: the points are inverses.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        # Chord slope (y1 - y2) / (x1 - x2); note `y` is reused for it.
        t = (self.y - other.y) % m
        y = (self.x - other.x) % m
        y = gmpy.invert(y, m)
        y = (t * y) % m
        # x' = slope^2 - x1 - x2;  y' = slope*(x2 - x') - y2
        t = (y * y) % m
        x = (self.x + other.x) % m
        x = (t - x) % m
        t = (other.x - x) % m
        y = (y * t) % m
        y = (y - other.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)

    def __nonzero__(self):
        # (0, 0) is the point-at-infinity sentinel.
        return bool(self.x or self.y)
    __bool__ = __nonzero__

    def __repr__(self):
        return "<AffinePoint (%s, %s) of %s>" % (
            self.x, self.y, self.curve.name)

    def __eq__(self, other):
        # NOTE: the curve itself is not compared, only the coordinates.
        if not isinstance(other, AffinePoint):
            return False
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        return not (self == other)

    def __str__(self):
        return self.to_string(SER_COMPACT)

    def to_bytes(self, fmt=SER_BINARY):
        """Serialize compressed: x, plus m when y is odd (parity flag)."""
        outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
                  else self.curve.pk_len_bin)
        if self._point_compress():
            return serialize_number(self.x + self.curve.m, fmt, outlen)
        return serialize_number(self.x, fmt, outlen)

    def to_string(self, fmt=SER_BINARY):
        """Like to_bytes, but decoded to text."""
        return self.to_bytes(fmt).decode()

    def _point_compress(self):
        # Parity bit of y selects which square root to restore on decode.
        return self.y.getbit(0) == 1

    def _ECIES_KDF(self, R):
        # KDF: SHA-512 over (self.x, R.x, R.y) -> 64 bytes of key material.
        h = hashlib.sha512()
        h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
        return h.digest()

    def _ECIES_encryption(self):
        """Pick an ephemeral scalar; return (key material, public point R).

        Retries until the cofactor-adjusted shared point Z is not the
        point at infinity.
        """
        while True:
            k = gmpy.mpz(
                Crypto.Random.random.randrange(
                    0, int(
                        self.curve.order - 1)))
            R = self.curve.base * k
            k = k * self.curve.cofactor
            Z = self * k
            if Z:
                break
        return (Z._ECIES_KDF(R), R)

    def _ECIES_decryption(self, d):
        """Re-derive ECIES key material; self is the sender's point R."""
        if isinstance(d, PrivKey):
            d = d.e
        e = d * self.curve.cofactor
        if not self.valid_embedded_key:
            raise ValueError
        Z = self * e
        if not Z:
            raise ValueError
        return Z._ECIES_KDF(self)

    def _ECDSA_verify(self, md, sig):
        """Verify ECDSA signature `sig` (integer s*order + r) over `md`."""
        order = self.curve.order
        s, r = divmod(sig, order)
        if s <= 0 or order <= s or r <= 0 or order <= r:
            return False
        e = deserialize_number(md, SER_BINARY) % order
        s = gmpy.invert(s, order)
        e = (e * s) % order
        X1 = self.curve.base * e
        e = (r * s) % order
        X2 = self * e
        X1 = X1 + X2
        if not X1:
            return False
        s = X1.x % order
        return s == r

    @property
    def valid_embedded_key(self):
        """Basic sanity checks before using self as a peer-supplied point."""
        if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
                self.y > self.curve.m):
            return False
        if not self:
            return False
        if not self.on_curve:
            return False
        return True
class PubKey(object):
    """A public key: wraps the public affine point of a keypair."""

    def __init__(self, p):
        self.p = p

    def verify(self, h, sig, sig_fmt=SER_BINARY):
        """Check that `sig` signs the message whose SHA-512 hash is `h`."""
        number = deserialize_number(sig, sig_fmt)
        return self.p._ECDSA_verify(h, number)

    @contextlib.contextmanager
    def encrypt_to(self, f, mac_bytes=10):
        """Yield a writable object; bytes written to it are encrypted
        for this public key and streamed into `f`."""
        ctx = EncryptionContext(f, self.p, mac_bytes)
        yield ctx
        ctx.finish()

    def encrypt(self, s, mac_bytes=10):
        """Encrypt the byte string `s` for this public key."""
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        buf = BytesIO()
        with self.encrypt_to(buf, mac_bytes) as stream:
            stream.write(s)
        return buf.getvalue()

    def to_bytes(self, fmt=SER_BINARY):
        """Serialize the underlying point to bytes in format `fmt`."""
        return self.p.to_bytes(fmt)

    def to_string(self, fmt=SER_BINARY):
        """Serialize the underlying point to text in format `fmt`."""
        return self.p.to_string(fmt)

    def __str__(self):
        return self.to_string(SER_COMPACT)

    def __repr__(self):
        return "<PubKey %s>" % self
class PrivKey(object):
    """ A secret exponent """

    def __init__(self, e, curve):
        # e: the secret scalar; curve: the Curve it belongs to.
        self.e = e
        self.curve = curve

    @contextlib.contextmanager
    def decrypt_from(self, f, mac_bytes=10):
        """ Decrypts a message from f. """
        ctx = DecryptionContext(self.curve, f, self, mac_bytes)
        yield ctx
        # Drain the stream so the MAC gets verified even when the caller
        # did not read to the end.
        ctx.read()

    def decrypt(self, s, mac_bytes=10):
        """Decrypt the byte string `s` (IntegrityError on a bad MAC)."""
        if isinstance(s, six.text_type):
            raise ValueError("s should be bytes")
        instream = BytesIO(s)
        with self.decrypt_from(instream, mac_bytes) as f:
            return f.read()

    def sign(self, h, sig_format=SER_BINARY):
        """ Signs the message with SHA-512 hash `h' with this private key. """
        outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
                  else self.curve.sig_len_bin)
        sig = self._ECDSA_sign(h)
        return serialize_number(sig, sig_format, outlen)

    def __repr__(self):
        return "<PrivKey %s>" % self.e

    def __str__(self):
        return str(self.e)

    def _ECDSA_sign(self, md):
        """ECDSA-sign digest `md`; returns the integer s*order + r.

        The nonce k is derived deterministically: HMAC-SHA256 keyed with
        the private key over `md` seeds an AES-CTR stream, so no fresh
        randomness is needed per signature.
        """
        # Get the pseudo-random exponent from the messagedigest
        # and the private key.
        order = self.curve.order
        hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
        h = hmac.new(hmk, digestmod=hashlib.sha256)
        h.update(md)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cprng = Crypto.Cipher.AES.new(h.digest(),
                                      Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        r = 0
        s = 0
        while s == 0:
            while r == 0:
                buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
                k = self.curve._buf_to_exponent(buf)
                p1 = self.curve.base * k
                r = p1.x % order
            e = deserialize_number(md, SER_BINARY)
            e = (e % order)
            s = (self.e * r) % order
            s = (s + e) % order          # s = e + r * priv
            e = gmpy.invert(k, order)
            s = (s * e) % order          # s = (e + r * priv) / k
        # Pack both halves into a single integer.
        s = s * order
        s = s + r
        return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
    """ Holds state of encryption. Use AffinePoint.encrypt_to """

    def __init__(self, f, p, mac_bytes=10):
        # ECIES: derive fresh key material against public point `p`; the
        # ephemeral public point R is written first so the receiver can
        # re-derive the same secret.
        self.f = f
        self.mac_bytes = mac_bytes
        key, R = p._ECIES_encryption()
        # First 32 bytes of the 64-byte KDF output key AES-CTR; the rest
        # keys the HMAC-SHA256 used for the trailing MAC.
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        f.write(R.to_bytes(SER_BINARY))
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)

    def write(self, s):
        """Encrypt `s` and write the ciphertext to the output stream."""
        if not self.f:
            raise IOError("closed")
        ct = self.cipher.encrypt(s)
        self.f.write(ct)
        self.h.update(ct)   # MAC is over the ciphertext (encrypt-then-MAC)

    def finish(self):
        """Append the truncated MAC and close this context (not `f`)."""
        if not self.f:
            raise IOError("closed")
        self.f.write(self.h.digest()[:self.mac_bytes])
        self.f = None
class DecryptionContext(object):
    """ Holds state of decryption. Use Curve.decrypt_from """

    def __init__(self, curve, f, privkey, mac_bytes=10):
        # Read the sender's ephemeral point R and re-derive key material;
        # first 32 bytes key AES-CTR, the rest key HMAC-SHA256.
        self.f = f
        self.mac_bytes = mac_bytes
        R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
        key = R._ECIES_decryption(privkey)
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        # `ahead` always buffers the last mac_bytes bytes read: until EOF
        # we cannot know whether they are ciphertext or the trailing MAC.
        self.ahead = f.read(mac_bytes)

    def read(self, n=None):
        """Decrypt and return up to `n` bytes (all remaining if None).

        At end of stream the trailing MAC is checked against the
        ciphertext read so far; raises IntegrityError on mismatch.
        """
        if not self.f:
            # Fixed: return bytes (b'') rather than str ('') so the
            # return type is consistent under Python 3.
            return b''
        if n is None:
            tmp = self.ahead + self.f.read()
        else:
            tmp = self.ahead + self.f.read(n)
        ct = tmp[:-self.mac_bytes]
        self.ahead = tmp[-self.mac_bytes:]
        self.h.update(ct)
        pt = self.cipher.decrypt(ct)
        if n is None or len(ct) < n:
            # Short read means EOF: `ahead` now holds the MAC; verify it.
            if self.h.digest()[:self.mac_bytes] != self.ahead:
                raise IntegrityError
            self.f = None
        return pt
# The main Curve objects
# #########################################################
class Curve(object):
    """ Represents a Elliptic Curve """

    @staticmethod
    def by_name_substring(substring):
        """Return the unique curve whose name contains `substring`.

        Raises KeyError when zero or more than one curve matches.
        """
        substring = substring.lower()
        candidates = []
        for raw_curve in RAW_CURVES:
            if substring in raw_curve[0]:
                candidates.append(raw_curve)
        if len(candidates) != 1:
            raise KeyError
        return Curve(candidates[0])

    @staticmethod
    def by_name(name):
        """Return the curve with exactly this name; KeyError if unknown."""
        for raw_curve in RAW_CURVES:
            if raw_curve[0] == name:
                return Curve(raw_curve)
        raise KeyError

    @staticmethod
    def by_pk_len(pk_len):
        """Return the curve whose compact public keys have length `pk_len`."""
        for raw_curve in RAW_CURVES:
            if raw_curve[8] == pk_len:
                return Curve(raw_curve)
        raise KeyError

    def __init__(self, raw_curve_params):
        """ Initialize a new curve from raw curve parameters.
        Use `Curve.by_pk_len' instead """
        r = raw_curve_parameters(*raw_curve_params)
        # Store domain parameters
        self.name = r.name
        self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
        self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
        self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
        self.order = deserialize_number(
            binascii.unhexlify(r.order), SER_BINARY)
        self.base = AffinePoint(
            curve=self, x=deserialize_number(
                binascii.unhexlify(
                    r.base_x), SER_BINARY), y=deserialize_number(
                binascii.unhexlify(
                    r.base_y), SER_BINARY))
        self.cofactor = r.cofactor
        # Calculate some other parameters
        # Compressed points encode x + yparity*m, so lengths are sized
        # for values up to 2*m - 1.
        self.pk_len_bin = get_serialized_number_len(
            (2 * self.m) - 1, SER_BINARY)
        self.pk_len_compact = get_serialized_number_len(
            (2 * self.m) - 1, SER_COMPACT)
        assert self.pk_len_compact == r.pk_len_compact
        # Signatures encode s*order + r, hence bounded by order^2 - 1.
        self.sig_len_bin = get_serialized_number_len(
            (self.order * self.order) - 1, SER_BINARY)
        self.sig_len_compact = get_serialized_number_len(
            (self.order * self.order) - 1, SER_COMPACT)
        self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
        # NOTE(review): dh_len_bin is a byte count, yet the maximum value
        # below is 2**dh_len_bin - 1 (bits, not bytes) — looks suspicious;
        # verify against the original seccure implementation.
        self.dh_len_compact = get_serialized_number_len(
            2 ** self.dh_len_bin - 1, SER_COMPACT)
        self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
        self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)

    @property
    def key_bytes(self):
        """ The approximate number of bytes of information in a key. """
        return self.pk_len_bin

    def __repr__(self):
        return "<Curve %s>" % self.name

    def point_from_string(self, s, fmt=SER_BINARY):
        """Decode a compressed point: serialized as x + m when y is odd."""
        x = deserialize_number(s, fmt)
        yflag = x >= self.m
        if yflag:
            x = x - self.m
        assert 0 < x and x <= self.m
        return self._point_decompress(x, yflag)

    def pubkey_from_string(self, s, fmt=SER_BINARY):
        """Decode a serialized public key into a PubKey."""
        return PubKey(self.point_from_string(s, fmt))

    def _point_decompress(self, x, yflag):
        # Recover y from x via y^2 = x^3 + a*x + b, choosing the root
        # whose parity matches yflag.
        m = self.m
        h = (x * x) % m
        h = (h + self.a) % m
        h = (h * x) % m
        h = (h + self.b) % m
        y = mod_root(h, m)
        if y or not yflag:
            if bool(y.getbit(0)) == yflag:
                return AffinePoint(x=x, y=y, curve=self)
            return AffinePoint(x=x, y=m - y, curve=self)

    def hash_to_exponent(self, h):
        """ Converts a 32 byte hash to an exponent """
        # Use the hash as an AES-CTR key to generate a pseudo-random
        # buffer, then reduce it into [1, order - 1].
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cipher = Crypto.Cipher.AES.new(h,
                                       Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        buf = cipher.encrypt(b'\0' * self.order_len_bin)
        return self._buf_to_exponent(buf)

    def _buf_to_exponent(self, buf):
        # Map an arbitrary byte buffer into the range [1, order - 1].
        a = deserialize_number(buf, SER_BINARY)
        a = (a % (self.order - 1)) + 1
        return a

    def passphrase_to_pubkey(self, passphrase):
        """Derive the public key for `passphrase` on this curve."""
        return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)

    def passphrase_to_privkey(self, passphrase):
        """Derive the private exponent for `passphrase` (bytes only)."""
        if isinstance(passphrase, six.text_type):
            raise ValueError(
                "Encode `passphrase` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        h = _passphrase_to_hash(passphrase)
        return PrivKey(self.hash_to_exponent(h), self)

    @contextlib.contextmanager
    def decrypt_from(self, f, privkey, mac_bytes=10):
        # Context-manager variant of decrypt(); the final read() forces
        # the MAC check even if the caller read nothing.
        ctx = DecryptionContext(self, f, privkey, mac_bytes)
        yield ctx
        ctx.read()

    def decrypt(self, s, privkey, mac_bytes=10):
        """Decrypt the byte string `s` with `privkey`."""
        instream = BytesIO(s)
        with self.decrypt_from(instream, privkey, mac_bytes) as f:
            return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
""" Converts a passphrase to a hash. """
return hashlib.sha256(passphrase).digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
    """ Encrypts `s' for public key `pk' """
    if curve is None:
        # Guess the curve from the public key's length.
        the_curve = Curve.by_pk_len(len(pk))
    else:
        the_curve = Curve.by_name(curve)
    pubkey = the_curve.pubkey_from_string(pk, pk_format)
    return pubkey.encrypt(s, mac_bytes)
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
    """ Decrypts `s' with passphrase `passphrase' """
    the_curve = Curve.by_name(curve)
    privkey = the_curve.passphrase_to_privkey(passphrase)
    return privkey.decrypt(s, mac_bytes)
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
                 curve='secp160r1', mac_bytes=10, chunk_size=4096):
    """ Decrypts `in_file' to `out_file' with passphrase `passphrase'

    Either argument may be a path (opened and closed here) or an
    already-open file object (left open for the caller).
    """
    # ExitStack closes only the files we opened ourselves, in reverse
    # order (output first), even if opening the output or decrypting
    # fails — replacing the manual close_in/close_out bookkeeping.
    with contextlib.ExitStack() as stack:
        if stringlike(in_path_or_file):
            in_file = stack.enter_context(open(in_path_or_file, 'rb'))
        else:
            in_file = in_path_or_file
        if stringlike(out_path_or_file):
            out_file = stack.enter_context(open(out_path_or_file, 'wb'))
        else:
            out_file = out_path_or_file
        _decrypt_file(in_file, out_file, passphrase, curve, mac_bytes,
                      chunk_size)
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
                  mac_bytes=10, chunk_size=4096, curve=None):
    """Stream-encrypt `in_file` to `out_file` for public key `pk`."""
    if curve is None:
        the_curve = Curve.by_pk_len(len(pk))
    else:
        the_curve = Curve.by_name(curve)
    pubkey = the_curve.pubkey_from_string(pk, pk_format)
    with pubkey.encrypt_to(out_file, mac_bytes) as encrypted_out:
        while True:
            chunk = in_file.read(chunk_size)
            if not chunk:
                return  # leaving the `with` appends the MAC
            encrypted_out.write(chunk)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
                  mac_bytes=10, chunk_size=4096):
    """Stream-decrypt `in_file` to `out_file` using `passphrase`."""
    privkey = Curve.by_name(curve).passphrase_to_privkey(passphrase)
    with privkey.decrypt_from(in_file, mac_bytes) as decrypted_in:
        while True:
            chunk = decrypted_in.read(chunk_size)
            if not chunk:
                return  # leaving the `with` finishes the MAC check
            out_file.write(chunk)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
           curve=None):
    """ Verifies that `sig' is a signature of pubkey `pk' for the
    message `s'. """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    if curve is None:
        the_curve = Curve.by_pk_len(len(pk))
    else:
        the_curve = Curve.by_name(curve)
    pubkey = the_curve.pubkey_from_string(pk, pk_format)
    digest = hashlib.sha512(s).digest()
    return pubkey.verify(digest, sig, sig_format)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
    """ Signs `s' with passphrase `passphrase' """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    privkey = Curve.by_name(curve).passphrase_to_privkey(passphrase)
    digest = hashlib.sha512(s).digest()
    return privkey.sign(digest, sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
    """Derive the public key belonging to `passphrase` on `curve`."""
    the_curve = Curve.by_name(curve)
    return the_curve.passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
    """ Convenience function to generate a random
    new keypair (passphrase, pubkey).

    Returns (privkey, pubkey): the private key as a compact-encoded
    byte string (usable as a passphrase) and the matching public key.
    """
    if randfunc is None:
        randfunc = Crypto.Random.new().read
    curve_name = curve
    curve = Curve.by_name(curve_name)
    raw_privkey = randfunc(curve.order_len_bin)
    privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
    # Bug fix: derive the public key on the *requested* curve.
    # Previously passphrase_to_pubkey() was called without a curve and
    # fell back to its default ('secp160r1'), producing a mismatched
    # keypair for any other `curve` argument.
    pubkey = str(passphrase_to_pubkey(privkey, curve_name))
    return (privkey, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
decrypt_file
|
python
|
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
curve='secp160r1', mac_bytes=10, chunk_size=4096):
close_in, close_out = False, False
in_file, out_file = in_path_or_file, out_path_or_file
try:
if stringlike(in_path_or_file):
in_file = open(in_path_or_file, 'rb')
close_in = True
if stringlike(out_path_or_file):
out_file = open(out_path_or_file, 'wb')
close_out = True
_decrypt_file(in_file, out_file, passphrase, curve, mac_bytes,
chunk_size)
finally:
if close_out:
out_file.close()
if close_in:
in_file.close()
|
Decrypts `in_file' to `out_file' with passphrase `passphrase'
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L939-L957
|
[
"def stringlike(x): return isinstance(x, (str, bytes))\n"
] |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
if six.PY3:
    from io import BytesIO as BytesIO
    # On Py3 iterating bytes already yields ints, so byte2int is identity.
    def byte2int(b): return b
    def stringlike(x): return isinstance(x, (str, bytes))
else:
    from cStringIO import StringIO as BytesIO
    # On Py2 iterating a byte string yields 1-char strings.
    def byte2int(b): return ord(b)
    def stringlike(x): return isinstance(x, basestring)
# Module-level logger.
l = logging.getLogger(__name__)


class IntegrityError(ValueError):
    """Raised when the MAC of a decrypted message does not verify."""
    pass
# Serialization of numbers
# #########################################################
SER_COMPACT = 0  # compact printable-ASCII serialization format
SER_BINARY = 1   # raw big-endian binary serialization format
# Digit alphabet (90 printable ASCII characters) for the compact encoding.
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
# Reverse lookup: digit (int on Py3, 1-char str on Py2) -> numeric value.
R_COMPACT_DIGITS = {}  # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
    R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """Serialize the non-negative number `x` to a byte string.

    With SER_BINARY the result is big-endian base-256; with SER_COMPACT
    it uses the printable COMPACT_DIGITS alphabet.  If `outlen` is given
    the result is left-padded to exactly that length.
    """
    if fmt == SER_BINARY:
        digits = []
        while x:
            x, rem = divmod(x, 256)
            digits.append(six.int2byte(int(rem)))
        out = b''.join(reversed(digits))
        if outlen is not None:
            assert len(out) <= outlen
            out = out.rjust(outlen, b'\0')
        return out
    assert fmt == SER_COMPACT
    digits = []
    while x:
        x, rem = divmod(x, len(COMPACT_DIGITS))
        digits.append(COMPACT_DIGITS[rem:rem + 1])
    out = b''.join(reversed(digits))
    if outlen is not None:
        assert len(out) <= outlen
        out = out.rjust(outlen, COMPACT_DIGITS[0:1])
    return out
def deserialize_number(s, fmt=SER_BINARY):
    """Deserialize a number from the (byte) string `s` in format `fmt`.

    Returns a gmpy mpz.  Binary input must already be bytes; compact
    input may be text and is ASCII-encoded first.
    """
    value = gmpy.mpz(0)
    if fmt == SER_BINARY:
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        for ch in s:
            value = value * 256 + byte2int(ch)
        return value
    assert fmt == SER_COMPACT
    if isinstance(s, six.text_type):
        s = s.encode('ascii')
    for ch in s:
        value = value * len(COMPACT_DIGITS) + R_COMPACT_DIGITS[ch]
    return value
def get_serialized_number_len(x, fmt=SER_BINARY):
    """Return the length serialize_number(x, fmt) would produce.

    `x` must be a gmpy mpz (numdigits is used for the binary case).
    """
    if fmt == SER_BINARY:
        # One byte per 8 bits, rounding the bit length up.
        return (x.numdigits(2) + 7) // 8
    assert fmt == SER_COMPACT
    count = 0
    while x != 0:
        x = x // len(COMPACT_DIGITS)
        count += 1
    return count
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
    """ Returns whether `a' is a square modulo p """
    # Euler's criterion: for an odd prime p, `a` is a quadratic residue
    # iff a**((p-1)//2) == 1 (mod p).  p // 2 equals (p - 1) // 2 for
    # odd p.  Zero is treated as a square.
    if not a:
        return True
    return pow(a, p // 2, p) == 1
def mod_root(a, p):
    """Return a square root of `a` modulo the odd prime `p`.

    Implements the Tonelli-Shanks algorithm.  `p` is expected to be a
    gmpy mpz (bit-level helpers ``getbit``/``setbit`` are used).

    Raises ValueError if `a` is not a quadratic residue mod p.
    """
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        raise ValueError
    # Find the smallest quadratic non-residue n.
    n = 2
    while mod_issquare(n, p):
        n += 1
    # Write p - 1 = q * 2^r with q odd.
    q = p - 1
    r = 0
    while not q.getbit(r):
        r += 1
    q = q >> r
    y = pow(n, q, p)
    h = q >> 1              # (q - 1) / 2, since q is odd
    b = pow(a, h, p)        # b = a^((q-1)/2)
    x = (a * b) % p         # x = a^((q+1)/2), candidate root
    b = (b * x) % p         # b = a^q; loop invariant: x^2 == a*b (mod p)
    while b != 1:
        # Find the smallest m >= 1 with b^(2^m) == 1, then fix up x, b.
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        h = gmpy.mpz(0)
        h = h.setbit(r - m - 1)   # h = 2^(r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
# Raw curve parameters
# #########################################################
# Domain parameters of a curve as stored in RAW_CURVES.  The big numbers
# (a, b, m, base_x, base_y, order) are hex-encoded byte strings.
raw_curve_parameters = collections.namedtuple(
    'raw_curve_parameters',
    ('name',             # human-readable curve name (may list aliases)
     'a',                # curve coefficient a
     'b',                # curve coefficient b
     'm',                # prime modulus of the field
     'base_x',           # base point, x coordinate
     'base_y',           # base point, y coordinate
     'order',            # order of the base point
     'cofactor',         # cofactor (plain int)
     'pk_len_compact'))  # expected compact public-key length (checked)
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
# Names of all supported curves, in the same order as RAW_CURVES.
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
    """ A curve point in Jacobian (projective) coordinates.

    (x, y, z) represents the affine point (x / z^2, y / z^3) -- see
    to_affine(); z == 0 encodes the point at infinity.  Jacobian
    coordinates avoid a modular inversion per group operation. """
    def __init__(self, x, y, z, curve):
        self.x = x
        self.y = y
        self.z = z
        self.curve = curve
    def to_affine(self):
        """ Convert to an AffinePoint; infinity maps to (0, 0). """
        if self.z == 0:
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        # h = z^-1, then x = X * h^2 and y = Y * h^3 (mod m)
        h = gmpy.invert(self.z, m)
        y = (h * h) % m
        x = (self.x * y) % m
        y = (y * h) % m
        y = (y * self.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)
    def double(self):
        """ Return 2 * self. """
        if not self.z:
            # point at infinity: doubling it is a no-op
            return self
        if not self.y:
            # y == 0: vertical tangent, so 2P is the point at infinity
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # t1 = 3*x^2 + a*z^4  (Jacobian-form tangent numerator)
        t1 = (self.x * self.x) % m
        t2 = (t1 + t1) % m
        t2 = (t2 + t1) % m
        t1 = (self.z * self.z) % m
        t1 = (t1 * t1) % m
        t1 = (t1 * a) % m
        t1 = (t1 + t2) % m
        z = (self.z * self.y) % m
        z = (z + z) % m
        y = (self.y * self.y) % m
        y = (y + y) % m
        t2 = (self.x * y) % m
        t2 = (t2 + t2) % m
        x = (t1 * t1) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        t1 = (t1 * t2) % m
        t2 = (y * y) % m
        t2 = (t2 + t2) % m
        y = (t1 - t2) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __add__(self, other):
        """ Mixed addition: self (Jacobian) plus an AffinePoint only. """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            # other is the point at infinity
            return self
        if not self.z:
            # self is the point at infinity
            return other.to_jacobian()
        m = self.curve.m
        t1 = (self.z * self.z) % m
        t2 = (t1 * other.x) % m
        t1 = (t1 * self.z) % m
        t1 = (t1 * other.y) % m
        if self.x == t2:
            if self.y == t1:
                # same point: fall back to the doubling formula
                return self.double()
            # opposite points: the sum is the point at infinity
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        x = (self.x - t2) % m
        y = (self.y - t1) % m
        z = (self.z * x) % m
        t3 = (x * x) % m
        t2 = (t2 * t3) % m
        t3 = (t3 * x) % m
        t1 = (t1 * t3) % m
        x = (y * y) % m
        x = (x - t3) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        y = (y * t2) % m
        y = (y - t1) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __repr__(self):
        return "<JacobianPoint (%s, %s, %s) of %s>" % (
            self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
    """ A point (x, y) on an elliptic curve over GF(m).

    (0, 0) doubles as the point at infinity -- see __nonzero__. """
    def __init__(self, x, y, curve):
        self.x = x
        self.y = y
        self.curve = curve
    @property
    def on_curve(self):
        """ True iff y^2 == x^3 + a*x + b  (mod m). """
        if not self:
            return True
        m = self.curve.m
        a = self.curve.a
        b = self.curve.b
        h1 = (self.x * self.x) % m
        h1 = (h1 + a) % m
        h1 = (h1 * self.x) % m
        h1 = (h1 + b) % m
        h2 = (self.y * self.y) % m
        return h1 == h2
    def to_jacobian(self):
        """ Convert to Jacobian coordinates (z == 1; all zeros for the
        point at infinity). """
        if not self:
            return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)
    def double(self):
        """ Return 2 * self using the affine doubling formula. """
        if not self.y:
            # vertical tangent (or infinity): 2P is the point at infinity
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # t1 = tangent slope = (3*x^2 + a) / (2*y)  (mod m)
        t2 = (self.x * self.x) % m
        t1 = (t2 + t2) % m
        t1 = (t1 + t2) % m
        t1 = (t1 + a) % m
        t2 = (self.y + self.y) % m
        t2 = gmpy.invert(t2, m)
        t1 = (t1 * t2) % m
        # x' = slope^2 - 2*x ;  y' = slope * (x - x') - y
        t2 = (t1 * t1) % m
        t2 = (t2 - self.x) % m
        t2 = (t2 - self.x) % m
        x = (self.x - t2) % m
        t1 = (t1 * x) % m
        y = (t1 - self.y) % m
        x = t2
        return AffinePoint(x=x, y=y, curve=self.curve)
    def __mul__(self, exp):
        """ Scalar multiplication by double-and-add over the bits of
        `exp' (expected to be a gmpy.mpz: numdigits/getbit are used),
        MSB first, accumulating in Jacobian coordinates. """
        n = exp.numdigits(2)
        r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        while n:
            r = r.double()
            n -= 1
            if exp.getbit(n):
                r = r + self
        R = r.to_affine()
        assert R.on_curve
        return R
    def __add__(self, other):
        """ Affine point addition. """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self:
            return other
        if self.x == other.x:
            if self.y == other.y:
                # same point: use the doubling formula
                return self.double()
            # opposite points: the sum is the point at infinity
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        # chord slope t = (y1 - y2) / (x1 - x2)  (mod m)
        t = (self.y - other.y) % m
        y = (self.x - other.x) % m
        y = gmpy.invert(y, m)
        y = (t * y) % m
        # x3 = t^2 - x1 - x2 ;  y3 = t * (x2 - x3) - y2
        t = (y * y) % m
        x = (self.x + other.x) % m
        x = (t - x) % m
        t = (other.x - x) % m
        y = (y * t) % m
        y = (y - other.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)
    def __nonzero__(self):
        # (0, 0) encodes the point at infinity
        return bool(self.x or self.y)
    __bool__ = __nonzero__
    def __repr__(self):
        return "<AffinePoint (%s, %s) of %s>" % (
            self.x, self.y, self.curve.name)
    def __eq__(self, other):
        if not isinstance(other, AffinePoint):
            return False
        return self.x == other.x and self.y == other.y
    def __ne__(self, other):
        return not (self == other)
    def __str__(self):
        return self.to_string(SER_COMPACT)
    def to_bytes(self, fmt=SER_BINARY):
        """ Compressed serialization: x itself when y is even, x + m
        when y is odd (see _point_compress). """
        outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
                  else self.curve.pk_len_bin)
        if self._point_compress():
            return serialize_number(self.x + self.curve.m, fmt, outlen)
        return serialize_number(self.x, fmt, outlen)
    def to_string(self, fmt=SER_BINARY):
        return self.to_bytes(fmt).decode()
    def _point_compress(self):
        # The compression flag is simply the parity of y.
        return self.y.getbit(0) == 1
    def _ECIES_KDF(self, R):
        """ Derive 64 bytes of key material via SHA-512 over the shared
        point's x (self.x) and the ephemeral point R's (x, y). """
        h = hashlib.sha512()
        h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
        return h.digest()
    def _ECIES_encryption(self):
        """ ECIES key agreement against this public point.  Returns
        (key material, ephemeral point R to transmit). """
        while True:
            k = gmpy.mpz(
                Crypto.Random.random.randrange(
                    0, int(
                        self.curve.order - 1)))
            R = self.curve.base * k
            # Multiply by the cofactor; retry whenever the shared point
            # Z degenerates to the point at infinity.
            k = k * self.curve.cofactor
            Z = self * k
            if Z:
                break
        return (Z._ECIES_KDF(R), R)
    def _ECIES_decryption(self, d):
        """ Recompute the ECIES key material from the received
        ephemeral point (self) and private exponent `d' (a number or a
        PrivKey).  Raises ValueError on an invalid point. """
        if isinstance(d, PrivKey):
            d = d.e
        e = d * self.curve.cofactor
        if not self.valid_embedded_key:
            raise ValueError
        Z = self * e
        if not Z:
            raise ValueError
        return Z._ECIES_KDF(self)
    def _ECDSA_verify(self, md, sig):
        """ Verify ECDSA signature `sig' -- packed as s * order + r,
        the inverse of PrivKey._ECDSA_sign -- on digest `md'. """
        order = self.curve.order
        s, r = divmod(sig, order)
        if s <= 0 or order <= s or r <= 0 or order <= r:
            return False
        e = deserialize_number(md, SER_BINARY) % order
        s = gmpy.invert(s, order)
        e = (e * s) % order
        X1 = self.curve.base * e
        e = (r * s) % order
        X2 = self * e
        X1 = X1 + X2
        if not X1:
            return False
        s = X1.x % order
        return s == r
    @property
    def valid_embedded_key(self):
        """ Sanity checks for a point received from the wire: in range,
        not the point at infinity, and actually on the curve. """
        # NOTE(review): x is compared with `>=' but y with `>'.  y == m
        # cannot occur for a decompressed point, but the asymmetry looks
        # unintentional -- confirm against upstream seccure.
        if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
                self.y > self.curve.m):
            return False
        if not self:
            return False
        if not self.on_curve:
            return False
        return True
class PubKey(object):
    """ A public key: wraps a public affine point and offers signature
    verification and ECIES encryption on top of it. """
    def __init__(self, p):
        self.p = p
    def verify(self, h, sig, sig_fmt=SER_BINARY):
        """ Verifies that `sig' is a signature for a message with
        SHA-512 hash `h'. """
        number = deserialize_number(sig, sig_fmt)
        return self.p._ECDSA_verify(h, number)
    @contextlib.contextmanager
    def encrypt_to(self, f, mac_bytes=10):
        """ Returns a file like object `ef'.  Anything written to `ef'
        will be encrypted for this pubkey and written to `f'. """
        ctx = EncryptionContext(f, self.p, mac_bytes)
        yield ctx
        ctx.finish()
    def encrypt(self, s, mac_bytes=10):
        """ Encrypt `s' for this pubkey. """
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to"
                " prevent problems with different default encodings")
        buf = BytesIO()
        with self.encrypt_to(buf, mac_bytes) as ef:
            ef.write(s)
        return buf.getvalue()
    def to_bytes(self, fmt=SER_BINARY):
        """ Serialize the underlying point to bytes. """
        return self.p.to_bytes(fmt)
    def to_string(self, fmt=SER_BINARY):
        """ Serialize the underlying point to text. """
        return self.p.to_string(fmt)
    def __str__(self):
        return self.to_string(SER_COMPACT)
    def __repr__(self):
        return "<PubKey %s>" % (self,)
class PrivKey(object):
    """ A secret exponent """
    def __init__(self, e, curve):
        # e: the secret exponent; curve: the Curve it belongs to.
        self.e = e
        self.curve = curve
    @contextlib.contextmanager
    def decrypt_from(self, f, mac_bytes=10):
        """ Decrypts a message from f. """
        ctx = DecryptionContext(self.curve, f, self, mac_bytes)
        yield ctx
        # Drain the stream on exit so the trailing MAC is always
        # verified, even when the caller did not read to the end.
        ctx.read()
    def decrypt(self, s, mac_bytes=10):
        """ Decrypt the byte string `s'. """
        if isinstance(s, six.text_type):
            raise ValueError("s should be bytes")
        instream = BytesIO(s)
        with self.decrypt_from(instream, mac_bytes) as f:
            return f.read()
    def sign(self, h, sig_format=SER_BINARY):
        """ Signs the message with SHA-512 hash `h' with this private key. """
        # A signature is the number s * order + r, serialized to a
        # fixed, format-dependent length.
        outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
                  else self.curve.sig_len_bin)
        sig = self._ECDSA_sign(h)
        return serialize_number(sig, sig_format, outlen)
    def __repr__(self):
        return "<PrivKey %s>" % self.e
    def __str__(self):
        return str(self.e)
    def _ECDSA_sign(self, md):
        """ ECDSA-sign digest `md'; returns the number s * order + r. """
        # Get the pseudo-random exponent from the messagedigest
        # and the private key.
        order = self.curve.order
        # Seed AES-CTR with HMAC-SHA256(serialized privkey, md) so the
        # nonce k is derived deterministically from key and message.
        hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
        h = hmac.new(hmk, digestmod=hashlib.sha256)
        h.update(md)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cprng = Crypto.Cipher.AES.new(h.digest(),
                                      Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        r = 0
        s = 0
        while s == 0:
            while r == 0:
                # Draw a nonce k and compute r = (base * k).x mod order
                buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
                k = self.curve._buf_to_exponent(buf)
                p1 = self.curve.base * k
                r = p1.x % order
            e = deserialize_number(md, SER_BINARY)
            e = (e % order)
            # s = k^-1 * (e + d*r)  mod order
            s = (self.e * r) % order
            s = (s + e) % order
            e = gmpy.invert(k, order)
            s = (s * e) % order
        # Pack both halves into one number; _ECDSA_verify splits them
        # again with divmod(sig, order).
        s = s * order
        s = s + r
        return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
    """ Holds state of encryption. Use AffinePoint.encrypt_to """
    def __init__(self, f, p, mac_bytes=10):
        # f: file-like sink for the ciphertext; p: recipient's public
        # point; mac_bytes: number of HMAC bytes appended by finish().
        self.f = f
        self.mac_bytes = mac_bytes
        # ECIES: derive shared key material and an ephemeral point R,
        # which is written first so the receiver can derive the same key.
        key, R = p._ECIES_encryption()
        # Second half of the key material authenticates the ciphertext...
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        f.write(R.to_bytes(SER_BINARY))
        # ...first half encrypts it with AES-256 in CTR mode.
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
    def write(self, s):
        """ Encrypt `s' and write it to the underlying file. """
        if not self.f:
            raise IOError("closed")
        ct = self.cipher.encrypt(s)
        self.f.write(ct)
        self.h.update(ct)
    def finish(self):
        """ Append the truncated HMAC and close this context. """
        if not self.f:
            raise IOError("closed")
        self.f.write(self.h.digest()[:self.mac_bytes])
        self.f = None
class DecryptionContext(object):
    """ Holds state of decryption. Use Curve.decrypt_from """
    def __init__(self, curve, f, privkey, mac_bytes=10):
        # f: file-like source of (R || ciphertext || MAC); privkey:
        # receiver's private key; mac_bytes: trailing HMAC length.
        self.f = f
        self.mac_bytes = mac_bytes
        # ECIES: read the sender's ephemeral point R and derive the
        # shared key material from it with our private key.
        R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
        key = R._ECIES_decryption(privkey)
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        # Keep mac_bytes of lookahead so the trailing MAC is never
        # mistaken for ciphertext.
        self.ahead = f.read(mac_bytes)
    def read(self, n=None):
        """ Read and decrypt up to `n' bytes (everything if n is None).
        Checks the trailing MAC when the end of the stream is reached
        and raises IntegrityError on a mismatch. """
        if not self.f:
            # Bug fix: return bytes (b''), not text (''), to match the
            # type returned by the normal path below.
            return b''
        if n is None:
            tmp = self.ahead + self.f.read()
        else:
            tmp = self.ahead + self.f.read(n)
        # The final mac_bytes of the stream are the MAC, not ciphertext.
        ct = tmp[:-self.mac_bytes]
        self.ahead = tmp[-self.mac_bytes:]
        self.h.update(ct)
        pt = self.cipher.decrypt(ct)
        if n is None or len(ct) < n:
            # End of stream: verify the MAC and close the context.
            if self.h.digest()[:self.mac_bytes] != self.ahead:
                raise IntegrityError
            self.f = None
        return pt
# The main Curve objects
# #########################################################
class Curve(object):
    """ Represents a Elliptic Curve """
    @staticmethod
    def by_name_substring(substring):
        """ Return the unique curve whose name contains `substring'
        (case-insensitively); raises KeyError unless exactly one
        curve matches. """
        substring = substring.lower()
        candidates = []
        for raw_curve in RAW_CURVES:
            if substring in raw_curve[0]:
                candidates.append(raw_curve)
        if len(candidates) != 1:
            raise KeyError
        return Curve(candidates[0])
    @staticmethod
    def by_name(name):
        """ Return the curve named exactly `name'; raises KeyError. """
        for raw_curve in RAW_CURVES:
            if raw_curve[0] == name:
                return Curve(raw_curve)
        raise KeyError
    @staticmethod
    def by_pk_len(pk_len):
        """ Return the curve whose compact public keys have length
        `pk_len'; raises KeyError. """
        for raw_curve in RAW_CURVES:
            # index 8 is the pk_len_compact field of the raw tuple
            if raw_curve[8] == pk_len:
                return Curve(raw_curve)
        raise KeyError
    def __init__(self, raw_curve_params):
        """ Initialize a new curve from raw curve parameters.
        Use `Curve.by_pk_len' instead """
        r = raw_curve_parameters(*raw_curve_params)
        # Store domain parameters
        self.name = r.name
        self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
        self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
        self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
        self.order = deserialize_number(
            binascii.unhexlify(r.order), SER_BINARY)
        self.base = AffinePoint(
            curve=self, x=deserialize_number(
                binascii.unhexlify(
                    r.base_x), SER_BINARY), y=deserialize_number(
                binascii.unhexlify(
                    r.base_y), SER_BINARY))
        self.cofactor = r.cofactor
        # Calculate some other parameters
        # Serialized points live in [0, 2m): x plus m when the
        # y-parity flag is set (see AffinePoint.to_bytes).
        self.pk_len_bin = get_serialized_number_len(
            (2 * self.m) - 1, SER_BINARY)
        self.pk_len_compact = get_serialized_number_len(
            (2 * self.m) - 1, SER_COMPACT)
        assert self.pk_len_compact == r.pk_len_compact
        # A signature packs two numbers below `order' as s*order + r.
        self.sig_len_bin = get_serialized_number_len(
            (self.order * self.order) - 1, SER_BINARY)
        self.sig_len_compact = get_serialized_number_len(
            (self.order * self.order) - 1, SER_COMPACT)
        self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
        self.dh_len_compact = get_serialized_number_len(
            2 ** self.dh_len_bin - 1, SER_COMPACT)
        self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
        self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)
    @property
    def key_bytes(self):
        """ The approximate number of bytes of information in a key. """
        return self.pk_len_bin
    def __repr__(self):
        return "<Curve %s>" % self.name
    def point_from_string(self, s, fmt=SER_BINARY):
        """ Parse a compressed point as written by AffinePoint.to_bytes. """
        x = deserialize_number(s, fmt)
        # Values >= m carry the odd-y flag (see to_bytes).
        yflag = x >= self.m
        if yflag:
            x = x - self.m
        assert 0 < x and x <= self.m
        return self._point_decompress(x, yflag)
    def pubkey_from_string(self, s, fmt=SER_BINARY):
        """ Parse a serialized public key into a PubKey. """
        return PubKey(self.point_from_string(s, fmt))
    def _point_decompress(self, x, yflag):
        """ Recover the point with this x whose y has parity `yflag'. """
        m = self.m
        # h = x^3 + a*x + b (mod m); y is a square root of h.
        h = (x * x) % m
        h = (h + self.a) % m
        h = (h * x) % m
        h = (h + self.b) % m
        y = mod_root(h, m)
        # NOTE(review): when y == 0 and yflag is set this falls through
        # and returns None -- confirm callers treat that as invalid.
        if y or not yflag:
            if bool(y.getbit(0)) == yflag:
                return AffinePoint(x=x, y=y, curve=self)
            return AffinePoint(x=x, y=m - y, curve=self)
    def hash_to_exponent(self, h):
        """ Converts a 32 byte hash to an exponent """
        # Use the hash as an AES key to produce a keystream which is
        # then reduced into the range [1, order - 1].
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cipher = Crypto.Cipher.AES.new(h,
                                       Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        buf = cipher.encrypt(b'\0' * self.order_len_bin)
        return self._buf_to_exponent(buf)
    def _buf_to_exponent(self, buf):
        """ Reduce a byte buffer to an exponent in [1, order - 1]. """
        a = deserialize_number(buf, SER_BINARY)
        a = (a % (self.order - 1)) + 1
        return a
    def passphrase_to_pubkey(self, passphrase):
        """ Derive the public key belonging to `passphrase'. """
        return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)
    def passphrase_to_privkey(self, passphrase):
        """ Derive the private key belonging to `passphrase' (bytes). """
        if isinstance(passphrase, six.text_type):
            raise ValueError(
                "Encode `passphrase` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        h = _passphrase_to_hash(passphrase)
        return PrivKey(self.hash_to_exponent(h), self)
    @contextlib.contextmanager
    def decrypt_from(self, f, privkey, mac_bytes=10):
        """ Context manager yielding a DecryptionContext reading from
        `f'; the remainder is drained on exit so the MAC is checked. """
        ctx = DecryptionContext(self, f, privkey, mac_bytes)
        yield ctx
        ctx.read()
    def decrypt(self, s, privkey, mac_bytes=10):
        """ Decrypt the byte string `s' with `privkey'. """
        instream = BytesIO(s)
        with self.decrypt_from(instream, privkey, mac_bytes) as f:
            return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
    """ Converts a passphrase to a hash.

    Returns the 32-byte SHA-256 digest of `passphrase'. """
    digest = hashlib.sha256(passphrase)
    return digest.digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
    """ Encrypts `s' for public key `pk' """
    # Without an explicit curve name, deduce it from the key's length.
    if curve is None:
        the_curve = Curve.by_pk_len(len(pk))
    else:
        the_curve = Curve.by_name(curve)
    pubkey = the_curve.pubkey_from_string(pk, pk_format)
    return pubkey.encrypt(s, mac_bytes)
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
    """ Decrypts `s' with passphrase `passphrase' """
    the_curve = Curve.by_name(curve)
    privkey = the_curve.passphrase_to_privkey(passphrase)
    return privkey.decrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
                 mac_bytes=10, chunk_size=4096, curve=None):
    """ Encrypts `in_file' to `out_file' for pubkey `pk'.

    Each argument may be a path (opened and closed here) or an already
    open file object (left open for the caller). """
    in_file = in_path_or_file
    out_file = out_path_or_file
    opened_in = False
    opened_out = False
    try:
        if stringlike(in_path_or_file):
            in_file = open(in_path_or_file, 'rb')
            opened_in = True
        if stringlike(out_path_or_file):
            out_file = open(out_path_or_file, 'wb')
            opened_out = True
        _encrypt_file(in_file, out_file, pk, pk_format, mac_bytes, chunk_size,
                      curve)
    finally:
        # Only close what we opened ourselves.
        if opened_out:
            out_file.close()
        if opened_in:
            in_file.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
                  mac_bytes=10, chunk_size=4096, curve=None):
    """ Encrypt the open file `in_file' to `out_file' for pubkey `pk',
    streaming `chunk_size' bytes at a time. """
    if curve is None:
        the_curve = Curve.by_pk_len(len(pk))
    else:
        the_curve = Curve.by_name(curve)
    pubkey = the_curve.pubkey_from_string(pk, pk_format)
    with pubkey.encrypt_to(out_file, mac_bytes) as encrypted_out:
        chunk = in_file.read(chunk_size)
        while chunk:
            encrypted_out.write(chunk)
            chunk = in_file.read(chunk_size)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
                  mac_bytes=10, chunk_size=4096):
    """ Decrypt the open file `in_file' to `out_file' with
    `passphrase', streaming `chunk_size' bytes at a time. """
    privkey = Curve.by_name(curve).passphrase_to_privkey(passphrase)
    with privkey.decrypt_from(in_file, mac_bytes) as decrypted_in:
        chunk = decrypted_in.read(chunk_size)
        while chunk:
            out_file.write(chunk)
            chunk = decrypted_in.read(chunk_size)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
           curve=None):
    """ Verifies that `sig' is a signature of pubkey `pk' for the
    message `s'. """
    if isinstance(s, six.text_type):
        raise ValueError(
            "Encode `s` to a bytestring yourself to"
            " prevent problems with different default encodings")
    # Without an explicit curve name, deduce it from the key's length.
    if curve is None:
        the_curve = Curve.by_pk_len(len(pk))
    else:
        the_curve = Curve.by_name(curve)
    pubkey = the_curve.pubkey_from_string(pk, pk_format)
    digest = hashlib.sha512(s).digest()
    return pubkey.verify(digest, sig, sig_format)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
    """ Signs `s' with passphrase `passphrase' """
    if isinstance(s, six.text_type):
        raise ValueError(
            "Encode `s` to a bytestring yourself to"
            " prevent problems with different default encodings")
    privkey = Curve.by_name(curve).passphrase_to_privkey(passphrase)
    digest = hashlib.sha512(s).digest()
    return privkey.sign(digest, sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
    """ Return the PubKey derived from `passphrase' on `curve'. """
    return Curve.by_name(curve).passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
    """ Convenience function to generate a random
    new keypair (passphrase, pubkey).

    `curve' names the curve to use; `randfunc', if given, is a
    function returning the requested number of random bytes. """
    if randfunc is None:
        randfunc = Crypto.Random.new().read
    curve_name = curve
    curve = Curve.by_name(curve_name)
    raw_privkey = randfunc(curve.order_len_bin)
    privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
    # Bug fix: derive the public key on the *requested* curve.
    # Previously passphrase_to_pubkey was called without the curve
    # argument, so any non-default `curve' produced a mismatched
    # secp160r1 public key.
    pubkey = str(passphrase_to_pubkey(privkey, curve_name))
    return (privkey, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
verify
|
python
|
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
curve=None):
if isinstance(s, six.text_type):
raise ValueError("Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
return p.verify(hashlib.sha512(s).digest(), sig, sig_format)
|
Verifies that `sig' is a signature of pubkey `pk' for the
message `s'.
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L985-L995
|
[
"def verify(self, h, sig, sig_fmt=SER_BINARY):\n \"\"\" Verifies that `sig' is a signature for a message with\n SHA-512 hash `h'. \"\"\"\n s = deserialize_number(sig, sig_fmt)\n return self.p._ECDSA_verify(h, s)\n",
"def by_name(name):\n for raw_curve in RAW_CURVES:\n if raw_curve[0] == name:\n return Curve(raw_curve)\n raise KeyError\n",
"def by_pk_len(pk_len):\n for raw_curve in RAW_CURVES:\n if raw_curve[8] == pk_len:\n return Curve(raw_curve)\n raise KeyError\n",
"def pubkey_from_string(self, s, fmt=SER_BINARY):\n return PubKey(self.point_from_string(s, fmt))\n"
] |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
# Python 2/3 specific implementations of a byte buffer, a byte-to-int
# conversion and an "is this a string?" predicate.  On Python 3,
# iterating bytes already yields ints, so byte2int is the identity.
if six.PY3:
    from io import BytesIO as BytesIO
    def byte2int(b): return b
    def stringlike(x): return isinstance(x, (str, bytes))
else:
    from cStringIO import StringIO as BytesIO
    def byte2int(b): return ord(b)
    def stringlike(x): return isinstance(x, basestring)
# Module-level logger.  NOTE(review): the name `l' is easy to confuse
# with `1'/`I'; consider renaming if callers allow.
l = logging.getLogger(__name__)
class IntegrityError(ValueError):
    """ Raised when the MAC at the end of an encrypted stream does not
    match the ciphertext that was read. """
# Serialization of numbers
# #########################################################
# Serialization formats: SER_COMPACT is a printable-ASCII positional
# encoding using COMPACT_DIGITS; SER_BINARY is big-endian base 256.
SER_COMPACT = 0
SER_BINARY = 1
# The digit alphabet of the compact format, plus a reverse lookup
# table mapping digit -> value.
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
R_COMPACT_DIGITS = {}  # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
    R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """ Serializes `x' to a string of length `outlen' in format `fmt'.

    SER_BINARY writes big-endian base 256; SER_COMPACT uses the
    COMPACT_DIGITS alphabet.  When `outlen' is given the result is
    left-padded with the zero digit of the chosen format. """
    if fmt == SER_BINARY:
        pieces = []
        while x:
            x, digit = divmod(x, 256)
            pieces.append(six.int2byte(int(digit)))
        pieces.reverse()
        ret = b''.join(pieces)
        if outlen is not None:
            assert len(ret) <= outlen
            ret = ret.rjust(outlen, b'\0')
        return ret
    assert fmt == SER_COMPACT
    base = len(COMPACT_DIGITS)
    pieces = []
    while x:
        x, digit = divmod(x, base)
        pieces.append(COMPACT_DIGITS[digit:digit + 1])
    pieces.reverse()
    ret = b''.join(pieces)
    if outlen is not None:
        assert len(ret) <= outlen
        ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])
    return ret
def deserialize_number(s, fmt=SER_BINARY):
    """ Deserializes a number from a string `s' in format `fmt'.

    The inverse of serialize_number; returns a gmpy.mpz. """
    ret = gmpy.mpz(0)
    if fmt == SER_BINARY:
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to"
                " prevent problems with different default encodings")
        for octet in s:
            ret = ret * 256 + byte2int(octet)
        return ret
    assert fmt == SER_COMPACT
    if isinstance(s, six.text_type):
        s = s.encode('ascii')
    base = len(COMPACT_DIGITS)
    for ch in s:
        ret = ret * base + R_COMPACT_DIGITS[ch]
    return ret
def get_serialized_number_len(x, fmt=SER_BINARY):
    """ Return the length of `x' once serialized in format `fmt'
    without padding. """
    if fmt == SER_BINARY:
        # one byte per started group of 8 bits
        return (x.numdigits(2) + 7) // 8
    assert fmt == SER_COMPACT
    base = len(COMPACT_DIGITS)
    ndigits = 0
    while x != 0:
        x = x // base
        ndigits += 1
    return ndigits
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
    """ Returns whether `a' is a square modulo p.

    Euler's criterion: a is a quadratic residue modulo an odd prime p
    iff a^((p-1)/2) == 1 (mod p); zero counts as a square. """
    if not a:
        return True
    return pow(a, p // 2, p) == 1
def mod_root(a, p):
    """ Return a root of `a' modulo p """
    # Tonelli-Shanks square-root algorithm.  `a' and `p' are expected
    # to be gmpy.mpz values (getbit/setbit are used below); raises
    # ValueError when `a' is not a quadratic residue.
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        raise ValueError
    # Find a quadratic non-residue n.
    n = 2
    while mod_issquare(n, p):
        n += 1
    # Write p - 1 = q * 2^r with q odd.
    q = p - 1
    r = 0
    while not q.getbit(r):
        r += 1
    q = q >> r
    y = pow(n, q, p)
    h = q >> 1
    b = pow(a, h, p)
    x = (a * b) % p
    b = (b * x) % p
    # Invariant: x^2 == a * b (mod p); done when b == 1.
    while b != 1:
        # Find the order 2^m of b in the 2-Sylow subgroup.
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        h = gmpy.mpz(0)
        h = h.setbit(r - m - 1)  # h = 2^(r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
# Raw curve parameters
# #########################################################
# Field layout of each RAW_CURVES entry: curve name; hex-encoded domain
# parameters a, b and modulus m; hex-encoded base point coordinates;
# hex-encoded group order; the cofactor; and the length of a
# compact-serialized public key for this curve.
raw_curve_parameters = collections.namedtuple(
    'raw_curve_parameters',
    ('name',
     'a',
     'b',
     'm',
     'base_x',
     'base_y',
     'order',
     'cofactor',
     'pk_len_compact'))
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
    """ A point on an elliptic curve in Jacobian projective coordinates:
        (x, y, z) represents the affine point (x/z^2, y/z^3).  z == 0
        encodes the point at infinity.  Jacobian coordinates avoid a
        modular inversion per group operation. """
    def __init__(self, x, y, z, curve):
        self.x = x
        self.y = y
        self.z = z
        self.curve = curve
    def to_affine(self):
        """ Convert to an AffinePoint (costs one modular inversion). """
        if self.z == 0:
            # Point at infinity; AffinePoint uses (0, 0) as its sentinel.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        h = gmpy.invert(self.z, m)
        # x_affine = x / z^2,  y_affine = y / z^3  (all mod m)
        y = (h * h) % m
        x = (self.x * y) % m
        y = (y * h) % m
        y = (y * self.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)
    def double(self):
        """ Return 2*P in Jacobian coordinates. """
        if not self.z:
            # 2 * infinity = infinity
            return self
        if not self.y:
            # y == 0: vertical tangent, result is the point at infinity.
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # t1 becomes 3*x^2 + a*z^4 (the numerator of the tangent slope)
        t1 = (self.x * self.x) % m
        t2 = (t1 + t1) % m
        t2 = (t2 + t1) % m
        t1 = (self.z * self.z) % m
        t1 = (t1 * t1) % m
        t1 = (t1 * a) % m
        t1 = (t1 + t2) % m
        # z' = 2*y*z
        z = (self.z * self.y) % m
        z = (z + z) % m
        y = (self.y * self.y) % m
        y = (y + y) % m
        t2 = (self.x * y) % m
        t2 = (t2 + t2) % m
        x = (t1 * t1) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        t1 = (t1 * t2) % m
        t2 = (y * y) % m
        t2 = (t2 + t2) % m
        y = (t1 - t2) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __add__(self, other):
        """ Mixed addition: Jacobian point + AffinePoint -> Jacobian. """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self.z:
            return other.to_jacobian()
        m = self.curve.m
        t1 = (self.z * self.z) % m
        t2 = (t1 * other.x) % m
        t1 = (t1 * self.z) % m
        t1 = (t1 * other.y) % m
        if self.x == t2:
            if self.y == t1:
                # Same point: fall back to doubling.
                return self.double()
            # Inverse points: result is the point at infinity (z == 0).
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        x = (self.x - t2) % m
        y = (self.y - t1) % m
        z = (self.z * x) % m
        t3 = (x * x) % m
        t2 = (t2 * t3) % m
        t3 = (t3 * x) % m
        t1 = (t1 * t3) % m
        x = (y * y) % m
        x = (x - t3) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        y = (y * t2) % m
        y = (y - t1) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __repr__(self):
        return "<JacobianPoint (%s, %s, %s) of %s>" % (
            self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
    """ A point on an elliptic curve in affine coordinates (x, y).
        (0, 0) doubles as the sentinel for the point at infinity
        (see __nonzero__). """
    def __init__(self, x, y, curve):
        self.x = x
        self.y = y
        self.curve = curve
    @property
    def on_curve(self):
        """ Whether (x, y) satisfies y^2 = x^3 + a*x + b (mod m). """
        if not self:
            # The point at infinity is on every curve.
            return True
        m = self.curve.m
        a = self.curve.a
        b = self.curve.b
        # h1 = x^3 + a*x + b, evaluated by Horner's scheme
        h1 = (self.x * self.x) % m
        h1 = (h1 + a) % m
        h1 = (h1 * self.x) % m
        h1 = (h1 + b) % m
        h2 = (self.y * self.y) % m
        return h1 == h2
    def to_jacobian(self):
        """ Lift to Jacobian coordinates (z == 1; z == 0 for infinity). """
        if not self:
            return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)
    def double(self):
        """ Returns 2*P in affine coordinates (one modular inversion). """
        if not self.y:
            # Vertical tangent: result is the point at infinity.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # t1 = slope = (3*x^2 + a) / (2*y)  (mod m)
        t2 = (self.x * self.x) % m
        t1 = (t2 + t2) % m
        t1 = (t1 + t2) % m
        t1 = (t1 + a) % m
        t2 = (self.y + self.y) % m
        t2 = gmpy.invert(t2, m)
        t1 = (t1 * t2) % m
        # x' = slope^2 - 2*x ;  y' = slope*(x - x') - y
        t2 = (t1 * t1) % m
        t2 = (t2 - self.x) % m
        t2 = (t2 - self.x) % m
        x = (self.x - t2) % m
        t1 = (t1 * x) % m
        y = (t1 - self.y) % m
        x = t2
        return AffinePoint(x=x, y=y, curve=self.curve)
    def __mul__(self, exp):
        """ Scalar multiplication exp*P, double-and-add from the most
            significant bit, carried out in Jacobian coordinates.
            `exp' must support .numdigits/.getbit (a gmpy integer). """
        n = exp.numdigits(2)
        r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        while n:
            r = r.double()
            n -= 1
            if exp.getbit(n):
                r = r + self
        R = r.to_affine()
        assert R.on_curve
        return R
    def __add__(self, other):
        """ Affine point addition (one modular inversion). """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self:
            return other
        if self.x == other.x:
            if self.y == other.y:
                return self.double()
            # P + (-P): the point at infinity.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        # y (reused as scratch) becomes the slope (y1 - y2)/(x1 - x2)
        t = (self.y - other.y) % m
        y = (self.x - other.x) % m
        y = gmpy.invert(y, m)
        y = (t * y) % m
        # x' = slope^2 - x1 - x2 ;  y' = slope*(x2 - x') - y2
        t = (y * y) % m
        x = (self.x + other.x) % m
        x = (t - x) % m
        t = (other.x - x) % m
        y = (y * t) % m
        y = (y - other.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)
    def __nonzero__(self):
        # False only for the (0, 0) infinity sentinel.
        return bool(self.x or self.y)
    __bool__ = __nonzero__
    def __repr__(self):
        return "<AffinePoint (%s, %s) of %s>" % (
            self.x, self.y, self.curve.name)
    def __eq__(self, other):
        if not isinstance(other, AffinePoint):
            return False
        return self.x == other.x and self.y == other.y
    def __ne__(self, other):
        return not (self == other)
    def __str__(self):
        return self.to_string(SER_COMPACT)
    def to_bytes(self, fmt=SER_BINARY):
        """ Serializes the point with SECCURE's point compression:
            x itself, or x + m when the parity flag of y is set. """
        outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
                  else self.curve.pk_len_bin)
        if self._point_compress():
            return serialize_number(self.x + self.curve.m, fmt, outlen)
        return serialize_number(self.x, fmt, outlen)
    def to_string(self, fmt=SER_BINARY):
        return self.to_bytes(fmt).decode()
    def _point_compress(self):
        # The compression flag is the parity (least significant bit) of y.
        return self.y.getbit(0) == 1
    def _ECIES_KDF(self, R):
        """ ECIES key derivation: SHA-512 over (Z.x, R.x, R.y), where
            `self' is the shared point Z and R the ephemeral public
            point.  Returns 64 bytes of key material. """
        h = hashlib.sha512()
        h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
        return h.digest()
    def _ECIES_encryption(self):
        """ Sender side of ECIES on the public point `self': picks a
            random exponent k and returns (key material, R) with
            R = k * base. """
        while True:
            k = gmpy.mpz(
                Crypto.Random.random.randrange(
                    0, int(
                        self.curve.order - 1)))
            R = self.curve.base * k
            # Multiply by the cofactor so Z lies in the prime-order
            # subgroup; retry while Z is the point at infinity.
            k = k * self.curve.cofactor
            Z = self * k
            if Z:
                break
        return (Z._ECIES_KDF(R), R)
    def _ECIES_decryption(self, d):
        """ Recipient side of ECIES: `self' is the ephemeral point R and
            `d' the private exponent (or PrivKey).  Returns the same key
            material the sender derived.  Raises ValueError on an
            invalid point. """
        if isinstance(d, PrivKey):
            d = d.e
        e = d * self.curve.cofactor
        if not self.valid_embedded_key:
            raise ValueError
        Z = self * e
        if not Z:
            raise ValueError
        return Z._ECIES_KDF(self)
    def _ECDSA_verify(self, md, sig):
        """ Verifies the packed ECDSA signature `sig' (= s*order + r) on
            the message digest `md' against this public point. """
        order = self.curve.order
        s, r = divmod(sig, order)
        if s <= 0 or order <= s or r <= 0 or order <= r:
            return False
        e = deserialize_number(md, SER_BINARY) % order
        s = gmpy.invert(s, order)
        e = (e * s) % order
        X1 = self.curve.base * e
        e = (r * s) % order
        X2 = self * e
        # X1 = (e/s)*G + (r/s)*Q; valid iff X1.x == r (mod order)
        X1 = X1 + X2
        if not X1:
            return False
        s = X1.x % order
        return s == r
    @property
    def valid_embedded_key(self):
        """ Whether this point is acceptable as a peer's public point:
            coordinates in range, not infinity, and on the curve. """
        # NOTE(review): y is compared with > m while x uses >= m, so
        # y == m slips through -- confirm against upstream SECCURE.
        if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
                self.y > self.curve.m):
            return False
        if not self:
            return False
        if not self.on_curve:
            return False
        return True
class PubKey(object):
    """ A public key: wraps the public affine point of a keypair. """
    def __init__(self, p):
        self.p = p
    def verify(self, h, sig, sig_fmt=SER_BINARY):
        """ Verifies that `sig' is a signature for a message with
            SHA-512 hash `h'. """
        packed = deserialize_number(sig, sig_fmt)
        return self.p._ECDSA_verify(h, packed)
    @contextlib.contextmanager
    def encrypt_to(self, f, mac_bytes=10):
        """ Returns a file like object `ef'. Anything written to `ef'
            will be encrypted for this pubkey and written to `f'. """
        stream = EncryptionContext(f, self.p, mac_bytes)
        yield stream
        stream.finish()
    def encrypt(self, s, mac_bytes=10):
        """ Encrypt `s' for this pubkey. """
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        buf = BytesIO()
        with self.encrypt_to(buf, mac_bytes) as stream:
            stream.write(s)
        return buf.getvalue()
    def to_bytes(self, fmt=SER_BINARY):
        return self.p.to_bytes(fmt)
    def to_string(self, fmt=SER_BINARY):
        return self.p.to_string(fmt)
    def __str__(self):
        return self.to_string(SER_COMPACT)
    def __repr__(self):
        return "<PubKey %s>" % self
class PrivKey(object):
    """ A secret exponent """
    def __init__(self, e, curve):
        # e -- the secret scalar; curve -- the Curve it belongs to.
        self.e = e
        self.curve = curve
    @contextlib.contextmanager
    def decrypt_from(self, f, mac_bytes=10):
        """ Decrypts a message from f. """
        ctx = DecryptionContext(self.curve, f, self, mac_bytes)
        yield ctx
        # Drain the stream so the trailing MAC is always verified, even
        # when the caller did not read everything.
        ctx.read()
    def decrypt(self, s, mac_bytes=10):
        """ Decrypts the bytestring `s'.  Raises IntegrityError on a
            MAC mismatch. """
        if isinstance(s, six.text_type):
            raise ValueError("s should be bytes")
        instream = BytesIO(s)
        with self.decrypt_from(instream, mac_bytes) as f:
            return f.read()
    def sign(self, h, sig_format=SER_BINARY):
        """ Signs the message with SHA-512 hash `h' with this private key. """
        outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
                  else self.curve.sig_len_bin)
        sig = self._ECDSA_sign(h)
        return serialize_number(sig, sig_format, outlen)
    def __repr__(self):
        return "<PrivKey %s>" % self.e
    def __str__(self):
        return str(self.e)
    def _ECDSA_sign(self, md):
        """ Produces an ECDSA signature on the message digest `md'.

            The nonce k is derived deterministically from the private
            exponent and `md' (HMAC-SHA256 keyed by the exponent,
            expanded with AES-CTR), so no fresh randomness is needed per
            signature.  Returns (s, r) packed as s * order + r. """
        # Get the pseudo-random exponent from the messagedigest
        # and the private key.
        order = self.curve.order
        hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
        h = hmac.new(hmk, digestmod=hashlib.sha256)
        h.update(md)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cprng = Crypto.Cipher.AES.new(h.digest(),
                                      Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        r = 0
        s = 0
        # Retry until both halves of the signature are non-zero.
        while s == 0:
            while r == 0:
                buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
                k = self.curve._buf_to_exponent(buf)
                p1 = self.curve.base * k
                r = p1.x % order
            e = deserialize_number(md, SER_BINARY)
            e = (e % order)
            # s = (e + r*d) / k  (mod order)
            s = (self.e * r) % order
            s = (s + e) % order
            e = gmpy.invert(k, order)
            s = (s * e) % order
        # Pack both halves into a single integer: sig = s*order + r.
        s = s * order
        s = s + r
        return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
    """ Holds state of encryption. Use AffinePoint.encrypt_to """
    def __init__(self, f, p, mac_bytes=10):
        """ f -- file-like object the ciphertext is written to
            p -- the recipient's public AffinePoint
            mac_bytes -- number of HMAC bytes appended by finish() """
        self.f = f
        self.mac_bytes = mac_bytes
        # ECIES: derive 64 bytes of key material; R is the ephemeral
        # public point the recipient needs to re-derive the same keys.
        key, R = p._ECIES_encryption()
        # The last 32 bytes of the key material key the HMAC-SHA256 ...
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        # The ciphertext starts with the serialized ephemeral point.
        f.write(R.to_bytes(SER_BINARY))
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        # ... and the first 32 bytes key the AES-256-CTR stream cipher.
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
    def write(self, s):
        """ Encrypts `s' and writes the ciphertext to the stream.
            Raises IOError after finish() has been called. """
        if not self.f:
            raise IOError("closed")
        ct = self.cipher.encrypt(s)
        self.f.write(ct)
        # The MAC covers the ciphertext (encrypt-then-MAC).
        self.h.update(ct)
    def finish(self):
        """ Appends the truncated MAC and closes this context (but not
            the underlying file). """
        if not self.f:
            raise IOError("closed")
        self.f.write(self.h.digest()[:self.mac_bytes])
        self.f = None
class DecryptionContext(object):
    """ Holds state of decryption. Use Curve.decrypt_from """
    def __init__(self, curve, f, privkey, mac_bytes=10):
        """ curve -- the Curve the ciphertext was created on
            f -- file-like object the ciphertext is read from
            privkey -- the recipient's PrivKey (or raw exponent)
            mac_bytes -- number of trailing HMAC bytes in the stream """
        self.f = f
        self.mac_bytes = mac_bytes
        # The ciphertext starts with the sender's ephemeral public
        # point, from which the same 64 bytes of key material are
        # re-derived via ECIES.
        R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
        key = R._ECIES_decryption(privkey)
        # Last 32 bytes key the HMAC, first 32 the AES-256-CTR cipher.
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        # Keep `mac_bytes' of read-ahead so the trailing MAC is never
        # decrypted as if it were ciphertext.
        self.ahead = f.read(mac_bytes)
    def read(self, n=None):
        """ Reads and decrypts up to `n' bytes (everything if n is
            None).  Raises IntegrityError when, at end of stream, the
            trailing MAC does not match. """
        if not self.f:
            # BUG FIX: was `return ''` -- a str, while every other path
            # returns bytes; on Python 3 that breaks concatenation by
            # callers.  b'' == '' on Python 2, so this is compatible.
            return b''
        if n is None:
            tmp = self.ahead + self.f.read()
        else:
            tmp = self.ahead + self.f.read(n)
        ct = tmp[:-self.mac_bytes]
        self.ahead = tmp[-self.mac_bytes:]
        self.h.update(ct)
        pt = self.cipher.decrypt(ct)
        if n is None or len(ct) < n:
            # End of stream: verify the MAC before handing out the tail.
            if self.h.digest()[:self.mac_bytes] != self.ahead:
                raise IntegrityError
            self.f = None
        return pt
# The main Curve objects
# #########################################################
class Curve(object):
    """ Represents a Elliptic Curve """
    @staticmethod
    def by_name_substring(substring):
        """ Returns the unique curve whose name contains `substring'
            (case-insensitively); raises KeyError unless exactly one
            curve matches. """
        substring = substring.lower()
        candidates = []
        for raw_curve in RAW_CURVES:
            if substring in raw_curve[0]:
                candidates.append(raw_curve)
        if len(candidates) != 1:
            raise KeyError
        return Curve(candidates[0])
    @staticmethod
    def by_name(name):
        """ Returns the curve named exactly `name'; raises KeyError. """
        for raw_curve in RAW_CURVES:
            if raw_curve[0] == name:
                return Curve(raw_curve)
        raise KeyError
    @staticmethod
    def by_pk_len(pk_len):
        """ Returns the curve whose compact-serialized public keys have
            length `pk_len'; raises KeyError. """
        for raw_curve in RAW_CURVES:
            if raw_curve[8] == pk_len:
                return Curve(raw_curve)
        raise KeyError
    def __init__(self, raw_curve_params):
        """ Initialize a new curve from raw curve parameters.
            Use `Curve.by_pk_len' instead """
        r = raw_curve_parameters(*raw_curve_params)
        # Store domain parameters
        self.name = r.name
        self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
        self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
        self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
        self.order = deserialize_number(
            binascii.unhexlify(r.order), SER_BINARY)
        self.base = AffinePoint(
            curve=self, x=deserialize_number(
                binascii.unhexlify(
                    r.base_x), SER_BINARY), y=deserialize_number(
                binascii.unhexlify(
                    r.base_y), SER_BINARY))
        self.cofactor = r.cofactor
        # Calculate some other parameters
        # Serialized public keys are x or x + m (point compression), so
        # the largest possible value is 2*m - 1.
        self.pk_len_bin = get_serialized_number_len(
            (2 * self.m) - 1, SER_BINARY)
        self.pk_len_compact = get_serialized_number_len(
            (2 * self.m) - 1, SER_COMPACT)
        assert self.pk_len_compact == r.pk_len_compact
        # Signatures pack (s, r) as s*order + r, which is < order^2.
        self.sig_len_bin = get_serialized_number_len(
            (self.order * self.order) - 1, SER_BINARY)
        self.sig_len_compact = get_serialized_number_len(
            (self.order * self.order) - 1, SER_COMPACT)
        self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
        self.dh_len_compact = get_serialized_number_len(
            2 ** self.dh_len_bin - 1, SER_COMPACT)
        self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
        self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)
    @property
    def key_bytes(self):
        """ The approximate number of bytes of information in a key. """
        return self.pk_len_bin
    def __repr__(self):
        return "<Curve %s>" % self.name
    def point_from_string(self, s, fmt=SER_BINARY):
        """ Deserializes a compressed point: values >= m carry the
            y-parity flag (see AffinePoint.to_bytes). """
        x = deserialize_number(s, fmt)
        yflag = x >= self.m
        if yflag:
            x = x - self.m
        assert 0 < x and x <= self.m
        return self._point_decompress(x, yflag)
    def pubkey_from_string(self, s, fmt=SER_BINARY):
        """ Deserializes a PubKey on this curve. """
        return PubKey(self.point_from_string(s, fmt))
    def _point_decompress(self, x, yflag):
        """ Recovers y from x and the parity flag by solving
            y^2 = x^3 + a*x + b (mod m). """
        m = self.m
        # h = x^3 + a*x + b via Horner's scheme
        h = (x * x) % m
        h = (h + self.a) % m
        h = (h * x) % m
        h = (h + self.b) % m
        y = mod_root(h, m)
        # Pick the square root whose parity matches yflag.
        if y or not yflag:
            if bool(y.getbit(0)) == yflag:
                return AffinePoint(x=x, y=y, curve=self)
        return AffinePoint(x=x, y=m - y, curve=self)
    def hash_to_exponent(self, h):
        """ Converts a 32 byte hash to an exponent """
        # Expand the hash with AES-CTR into order_len_bin pseudo-random
        # bytes, then reduce them into [1, order - 1].
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cipher = Crypto.Cipher.AES.new(h,
                                       Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        buf = cipher.encrypt(b'\0' * self.order_len_bin)
        return self._buf_to_exponent(buf)
    def _buf_to_exponent(self, buf):
        """ Reduces the bytes `buf' into an exponent in [1, order-1]. """
        a = deserialize_number(buf, SER_BINARY)
        a = (a % (self.order - 1)) + 1
        return a
    def passphrase_to_pubkey(self, passphrase):
        """ Returns the PubKey belonging to `passphrase' on this curve. """
        return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)
    def passphrase_to_privkey(self, passphrase):
        """ Derives the PrivKey for the bytestring `passphrase'. """
        if isinstance(passphrase, six.text_type):
            raise ValueError(
                "Encode `passphrase` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        h = _passphrase_to_hash(passphrase)
        return PrivKey(self.hash_to_exponent(h), self)
    @contextlib.contextmanager
    def decrypt_from(self, f, privkey, mac_bytes=10):
        """ Context manager yielding a readable stream that decrypts
            from `f' with `privkey'. """
        ctx = DecryptionContext(self, f, privkey, mac_bytes)
        yield ctx
        # Drain the stream so the MAC is always checked.
        ctx.read()
    def decrypt(self, s, privkey, mac_bytes=10):
        """ Decrypts the bytestring `s' with `privkey'. """
        instream = BytesIO(s)
        with self.decrypt_from(instream, privkey, mac_bytes) as f:
            return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
    """ Derives the 32 byte SHA-256 digest of the bytestring
        `passphrase'. """
    digest = hashlib.sha256(passphrase)
    return digest.digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
    """ Encrypts `s' for public key `pk' """
    # When no curve name is given, recognize the curve by the length of
    # the serialized public key.
    if curve is None:
        the_curve = Curve.by_pk_len(len(pk))
    else:
        the_curve = Curve.by_name(curve)
    pubkey = the_curve.pubkey_from_string(pk, pk_format)
    return pubkey.encrypt(s, mac_bytes)
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
    """ Decrypts `s' with passphrase `passphrase' """
    the_curve = Curve.by_name(curve)
    key = the_curve.passphrase_to_privkey(passphrase)
    return key.decrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
                 mac_bytes=10, chunk_size=4096, curve=None):
    """ Encrypts `in_file' to `out_file' for pubkey `pk'

        Both arguments may be paths or already-open file objects; files
        we open ourselves are closed again, in reverse order. """
    to_close = []
    try:
        if stringlike(in_path_or_file):
            source = open(in_path_or_file, 'rb')
            to_close.append(source)
        else:
            source = in_path_or_file
        if stringlike(out_path_or_file):
            sink = open(out_path_or_file, 'wb')
            to_close.append(sink)
        else:
            sink = out_path_or_file
        _encrypt_file(source, sink, pk, pk_format, mac_bytes, chunk_size,
                      curve)
    finally:
        # Reverse order matches the original: output before input.
        for handle in reversed(to_close):
            handle.close()
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
                 curve='secp160r1', mac_bytes=10, chunk_size=4096):
    """ Decrypts `in_file' to `out_file' with passphrase `passphrase'

        Both arguments may be paths or already-open file objects; files
        we open ourselves are closed again, in reverse order. """
    to_close = []
    try:
        if stringlike(in_path_or_file):
            source = open(in_path_or_file, 'rb')
            to_close.append(source)
        else:
            source = in_path_or_file
        if stringlike(out_path_or_file):
            sink = open(out_path_or_file, 'wb')
            to_close.append(sink)
        else:
            sink = out_path_or_file
        _decrypt_file(source, sink, passphrase, curve, mac_bytes,
                      chunk_size)
    finally:
        # Reverse order matches the original: output before input.
        for handle in reversed(to_close):
            handle.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
                  mac_bytes=10, chunk_size=4096, curve=None):
    """ Encrypts the open stream `in_file' to `out_file' for pubkey
        `pk', copying `chunk_size' bytes at a time. """
    if curve is None:
        the_curve = Curve.by_pk_len(len(pk))
    else:
        the_curve = Curve.by_name(curve)
    pubkey = the_curve.pubkey_from_string(pk, pk_format)
    with pubkey.encrypt_to(out_file, mac_bytes) as sink:
        while True:
            chunk = in_file.read(chunk_size)
            if not chunk:
                break
            sink.write(chunk)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
                  mac_bytes=10, chunk_size=4096):
    """ Decrypts the open stream `in_file' to `out_file' with
        `passphrase', copying `chunk_size' bytes at a time. """
    key = Curve.by_name(curve).passphrase_to_privkey(passphrase)
    with key.decrypt_from(in_file, mac_bytes) as source:
        while True:
            chunk = source.read(chunk_size)
            if not chunk:
                break
            out_file.write(chunk)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
    """ Signs `s' with passphrase `passphrase' """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    key = Curve.by_name(curve).passphrase_to_privkey(passphrase)
    digest = hashlib.sha512(s).digest()
    return key.sign(digest, sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
    """ Returns the PubKey belonging to `passphrase' on `curve'. """
    the_curve = Curve.by_name(curve)
    return the_curve.passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
    """ Convenience function to generate a random
        new keypair (passphrase, pubkey) on `curve'.

        randfunc -- optional function returning `n' random bytes;
                    defaults to PyCrypto's RNG. """
    if randfunc is None:
        randfunc = Crypto.Random.new().read
    curve = Curve.by_name(curve)
    raw_privkey = randfunc(curve.order_len_bin)
    privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
    # BUG FIX: previously this called passphrase_to_pubkey(privkey)
    # without the curve argument, so for any curve other than the
    # default secp160r1 the returned pubkey was computed on the wrong
    # curve.  Derive it on the requested curve instead.
    pubkey = str(curve.passphrase_to_pubkey(privkey))
    return (privkey, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
sign
|
python
|
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
if isinstance(s, six.text_type):
raise ValueError("Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.sign(hashlib.sha512(s).digest(), sig_format)
|
Signs `s' with passphrase `passphrase'
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L998-L1005
|
[
"def sign(self, h, sig_format=SER_BINARY):\n \"\"\" Signs the message with SHA-512 hash `h' with this private key. \"\"\"\n outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT\n else self.curve.sig_len_bin)\n sig = self._ECDSA_sign(h)\n return serialize_number(sig, sig_format, outlen)\n",
"def by_name(name):\n for raw_curve in RAW_CURVES:\n if raw_curve[0] == name:\n return Curve(raw_curve)\n raise KeyError\n",
"def passphrase_to_privkey(self, passphrase):\n if isinstance(passphrase, six.text_type):\n raise ValueError(\n \"Encode `passphrase` to a bytestring yourself to\" +\n \" prevent problems with different default encodings\")\n h = _passphrase_to_hash(passphrase)\n return PrivKey(self.hash_to_exponent(h), self)\n"
] |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
# Python 2/3 compatibility shims.
if six.PY3:
    # On Python 3, BytesIO lives in io and iterating bytes yields ints.
    from io import BytesIO as BytesIO
    def byte2int(b): return b
    def stringlike(x): return isinstance(x, (str, bytes))
else:
    # On Python 2, iterating a str yields 1-char strs; ord() converts.
    from cStringIO import StringIO as BytesIO
    def byte2int(b): return ord(b)
    def stringlike(x): return isinstance(x, basestring)
# Module level logger.  NOTE(review): the name `l' shadows easily and is
# unconventional; kept as-is for backwards compatibility.
l = logging.getLogger(__name__)
class IntegrityError(ValueError):
    """ Raised when the MAC at the end of a ciphertext does not match. """
    pass
# Serialization of numbers
# #########################################################
# Serialization formats: SER_BINARY is raw big-endian base-256 bytes;
# SER_COMPACT is a printable encoding over the 90 digits below.
SER_COMPACT = 0
SER_BINARY = 1
# The 90 printable ASCII characters used as digits of the compact format.
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
# Reverse lookup: digit value by character (int keys on py3, str on py2).
R_COMPACT_DIGITS = {}  # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
    R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """ Serializes `x' to a string of length `outlen' in format `fmt' """
    if fmt == SER_BINARY:
        # Big-endian base-256: emit the least significant byte first and
        # prepend, then left-pad with NUL bytes up to `outlen'.
        buf = b''
        while x:
            x, digit = divmod(x, 256)
            buf = six.int2byte(int(digit)) + buf
        if outlen is not None:
            assert len(buf) <= outlen
            buf = buf.rjust(outlen, b'\0')
        return buf
    assert fmt == SER_COMPACT
    # Same scheme over the printable COMPACT_DIGITS alphabet.
    buf = b''
    base = len(COMPACT_DIGITS)
    while x:
        x, digit = divmod(x, base)
        buf = COMPACT_DIGITS[digit:digit + 1] + buf
    if outlen is not None:
        assert len(buf) <= outlen
        buf = buf.rjust(outlen, COMPACT_DIGITS[0:1])
    return buf
def deserialize_number(s, fmt=SER_BINARY):
    """ Deserializes a number from a string `s' in format `fmt' """
    acc = gmpy.mpz(0)
    if fmt == SER_BINARY:
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        for ch in s:
            acc = acc * 256 + byte2int(ch)
        return acc
    assert fmt == SER_COMPACT
    if isinstance(s, six.text_type):
        s = s.encode('ascii')
    base = len(COMPACT_DIGITS)
    for ch in s:
        acc = acc * base + R_COMPACT_DIGITS[ch]
    return acc
def get_serialized_number_len(x, fmt=SER_BINARY):
    """ Returns the number of bytes `x' occupies when serialized in
        format `fmt'. """
    if fmt == SER_BINARY:
        # One byte per 8 bits, rounded up.
        return (x.numdigits(2) + 7) // 8
    assert fmt == SER_COMPACT
    # Count base-len(COMPACT_DIGITS) digits.
    ndigits = 0
    while x != 0:
        x //= len(COMPACT_DIGITS)
        ndigits += 1
    return ndigits
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
    """ Returns whether `a' is a square modulo p """
    if not a:
        # 0 == 0^2 is always a square.
        return True
    # Euler's criterion: for odd prime p, a is a quadratic residue
    # iff a^((p-1)/2) == 1 (mod p).
    exponent = p // 2
    return pow(a, exponent, p) == 1
def mod_root(a, p):
    """ Return a root of `a' modulo p """
    # Tonelli-Shanks square-root algorithm.  Raises ValueError when `a'
    # is not a quadratic residue.  `q' and `h' rely on gmpy bit methods
    # (getbit/setbit), so the inputs are gmpy integers.
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        raise ValueError
    # Find a quadratic non-residue n.
    n = 2
    while mod_issquare(n, p):
        n += 1
    # Write p - 1 = q * 2^r with q odd.
    q = p - 1
    r = 0
    while not q.getbit(r):
        r += 1
    q = q >> r
    y = pow(n, q, p)
    # b = a^((q-1)/2), x = a^((q+1)/2), then b = a^q; this establishes
    # the loop invariant x^2 == a * b (mod p).
    h = q >> 1
    b = pow(a, h, p)
    x = (a * b) % p
    b = (b * x) % p
    # Repeatedly reduce the order of b until b == 1, whereupon x^2 == a.
    while b != 1:
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        # t = y^(2^(r-m-1)); multiplying x by t and b by t^2 preserves
        # the invariant while shrinking b's order.
        h = gmpy.mpz(0)
        h = h.setbit(r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
# Raw curve parameters
# #########################################################
# Field layout of one RAW_CURVES entry: the curve name; hex-encoded
# coefficients a, b and field modulus m; the base point (base_x, base_y);
# the group order; the cofactor; and the length of a compact-serialized
# public key (used by Curve.by_pk_len to recognize the curve).
raw_curve_parameters = collections.namedtuple(
    'raw_curve_parameters',
    ('name',
     'a',
     'b',
     'm',
     'base_x',
     'base_y',
     'order',
     'cofactor',
     'pk_len_compact'))
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
# Names of all supported curves, in the order they appear in RAW_CURVES.
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
    """ A point on an elliptic curve in Jacobian projective coordinates:
        (x, y, z) represents the affine point (x/z^2, y/z^3).  z == 0
        encodes the point at infinity.  Jacobian coordinates avoid a
        modular inversion per group operation. """
    def __init__(self, x, y, z, curve):
        self.x = x
        self.y = y
        self.z = z
        self.curve = curve
    def to_affine(self):
        """ Convert to an AffinePoint (costs one modular inversion). """
        if self.z == 0:
            # Point at infinity; AffinePoint uses (0, 0) as its sentinel.
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        h = gmpy.invert(self.z, m)
        # x_affine = x / z^2,  y_affine = y / z^3  (all mod m)
        y = (h * h) % m
        x = (self.x * y) % m
        y = (y * h) % m
        y = (y * self.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)
    def double(self):
        """ Return 2*P in Jacobian coordinates. """
        if not self.z:
            # 2 * infinity = infinity
            return self
        if not self.y:
            # y == 0: vertical tangent, result is the point at infinity.
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # t1 becomes 3*x^2 + a*z^4 (the numerator of the tangent slope)
        t1 = (self.x * self.x) % m
        t2 = (t1 + t1) % m
        t2 = (t2 + t1) % m
        t1 = (self.z * self.z) % m
        t1 = (t1 * t1) % m
        t1 = (t1 * a) % m
        t1 = (t1 + t2) % m
        # z' = 2*y*z
        z = (self.z * self.y) % m
        z = (z + z) % m
        y = (self.y * self.y) % m
        y = (y + y) % m
        t2 = (self.x * y) % m
        t2 = (t2 + t2) % m
        x = (t1 * t1) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        t1 = (t1 * t2) % m
        t2 = (y * y) % m
        t2 = (t2 + t2) % m
        y = (t1 - t2) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __add__(self, other):
        """ Mixed addition: Jacobian point + AffinePoint -> Jacobian. """
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self.z:
            return other.to_jacobian()
        m = self.curve.m
        t1 = (self.z * self.z) % m
        t2 = (t1 * other.x) % m
        t1 = (t1 * self.z) % m
        t1 = (t1 * other.y) % m
        if self.x == t2:
            if self.y == t1:
                # Same point: fall back to doubling.
                return self.double()
            # Inverse points: result is the point at infinity (z == 0).
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        x = (self.x - t2) % m
        y = (self.y - t1) % m
        z = (self.z * x) % m
        t3 = (x * x) % m
        t2 = (t2 * t3) % m
        t3 = (t3 * x) % m
        t1 = (t1 * t3) % m
        x = (y * y) % m
        x = (x - t3) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        y = (y * t2) % m
        y = (y - t1) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)
    def __repr__(self):
        return "<JacobianPoint (%s, %s, %s) of %s>" % (
            self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
    """A point (x, y) on an elliptic curve, in affine coordinates.

    The pair (0, 0) represents the point at infinity (see __nonzero__).
    Coordinates are expected to be gmpy.mpz values.
    """

    def __init__(self, x, y, curve):
        self.x = x
        self.y = y
        self.curve = curve

    @property
    def on_curve(self):
        """Whether the point satisfies y^2 = x^3 + a*x + b (mod m)."""
        if not self:
            # the point at infinity is on every curve
            return True
        m = self.curve.m
        a = self.curve.a
        b = self.curve.b
        # h1 = x^3 + a*x + b via Horner: ((x*x + a) * x + b)
        h1 = (self.x * self.x) % m
        h1 = (h1 + a) % m
        h1 = (h1 * self.x) % m
        h1 = (h1 + b) % m
        h2 = (self.y * self.y) % m
        return h1 == h2

    def to_jacobian(self):
        """Lift to Jacobian coordinates (z == 0 for infinity, else z == 1)."""
        if not self:
            return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)

    def double(self):
        """Return 2*P in affine coordinates."""
        if not self.y:
            # vertical tangent: 2*P is the point at infinity
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # t1 = tangent slope: (3*x^2 + a) / (2*y)  (mod m)
        t2 = (self.x * self.x) % m
        t1 = (t2 + t2) % m
        t1 = (t1 + t2) % m
        t1 = (t1 + a) % m
        t2 = (self.y + self.y) % m
        t2 = gmpy.invert(t2, m)
        t1 = (t1 * t2) % m
        # x' = slope^2 - 2*x ; y' = slope*(x - x') - y
        t2 = (t1 * t1) % m
        t2 = (t2 - self.x) % m
        t2 = (t2 - self.x) % m
        x = (self.x - t2) % m
        t1 = (t1 * x) % m
        y = (t1 - self.y) % m
        x = t2
        return AffinePoint(x=x, y=y, curve=self.curve)

    def __mul__(self, exp):
        """Scalar multiplication exp*P via left-to-right double-and-add.

        `exp` must support gmpy's numdigits/getbit interface (an mpz).
        Accumulates in Jacobian coordinates to avoid per-step inversions.
        """
        n = exp.numdigits(2)
        r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
        while n:
            r = r.double()
            n -= 1
            if exp.getbit(n):
                r = r + self
        R = r.to_affine()
        assert R.on_curve
        return R

    def __add__(self, other):
        """Affine point addition; handles infinity, doubling and P + (-P)."""
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            return self
        if not self:
            return other
        if self.x == other.x:
            if self.y == other.y:
                return self.double()
            # P + (-P): the point at infinity
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        # chord slope: (y1 - y2) / (x1 - x2)  (mod m)
        t = (self.y - other.y) % m
        y = (self.x - other.x) % m
        y = gmpy.invert(y, m)
        y = (t * y) % m
        # x' = slope^2 - x1 - x2 ; y' = slope*(x2 - x') - y2
        t = (y * y) % m
        x = (self.x + other.x) % m
        x = (t - x) % m
        t = (other.x - x) % m
        y = (y * t) % m
        y = (y - other.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)

    def __nonzero__(self):
        # (0, 0) encodes the point at infinity and is falsy
        return bool(self.x or self.y)
    __bool__ = __nonzero__

    def __repr__(self):
        return "<AffinePoint (%s, %s) of %s>" % (
            self.x, self.y, self.curve.name)

    def __eq__(self, other):
        if not isinstance(other, AffinePoint):
            return False
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        return not (self == other)

    def __str__(self):
        return self.to_string(SER_COMPACT)

    def to_bytes(self, fmt=SER_BINARY):
        """Serialize the point in compressed form.

        Only x is stored; the parity of y is folded in by adding the
        modulus to x (see _point_compress / Curve.point_from_string).
        """
        outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
                  else self.curve.pk_len_bin)
        if self._point_compress():
            return serialize_number(self.x + self.curve.m, fmt, outlen)
        return serialize_number(self.x, fmt, outlen)

    def to_string(self, fmt=SER_BINARY):
        return self.to_bytes(fmt).decode()

    def _point_compress(self):
        # the parity bit of y decides which square root to restore later
        return self.y.getbit(0) == 1

    def _ECIES_KDF(self, R):
        """Derive the 64-byte ECIES key from the shared point (self) and R."""
        h = hashlib.sha512()
        h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
        h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
        return h.digest()

    def _ECIES_encryption(self):
        """Pick a random ephemeral exponent; return (derived key, R = k*G)."""
        while True:
            k = gmpy.mpz(
                Crypto.Random.random.randrange(
                    0, int(
                        self.curve.order - 1)))
            R = self.curve.base * k
            k = k * self.curve.cofactor
            Z = self * k
            if Z:
                break
        return (Z._ECIES_KDF(R), R)

    def _ECIES_decryption(self, d):
        """Recompute the ECIES key from private exponent `d` (int or PrivKey)."""
        if isinstance(d, PrivKey):
            d = d.e
        e = d * self.curve.cofactor
        if not self.valid_embedded_key:
            raise ValueError
        Z = self * e
        if not Z:
            raise ValueError
        return Z._ECIES_KDF(self)

    def _ECDSA_verify(self, md, sig):
        """Verify ECDSA signature `sig` (encoded as s*order + r) of digest `md`."""
        order = self.curve.order
        s, r = divmod(sig, order)
        if s <= 0 or order <= s or r <= 0 or order <= r:
            return False
        e = deserialize_number(md, SER_BINARY) % order
        s = gmpy.invert(s, order)
        e = (e * s) % order
        X1 = self.curve.base * e
        e = (r * s) % order
        X2 = self * e
        X1 = X1 + X2
        if not X1:
            return False
        s = X1.x % order
        return s == r

    @property
    def valid_embedded_key(self):
        """Whether this point is acceptable as a public key: coordinates
        in range, not the point at infinity, and actually on the curve."""
        if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
                self.y > self.curve.m):
            return False
        if not self:
            return False
        if not self.on_curve:
            return False
        return True
class PubKey(object):
    """Wrapper around a public affine point."""

    def __init__(self, p):
        self.p = p

    def verify(self, h, sig, sig_fmt=SER_BINARY):
        """Check that `sig` signs the message whose SHA-512 hash is `h`."""
        return self.p._ECDSA_verify(h, deserialize_number(sig, sig_fmt))

    @contextlib.contextmanager
    def encrypt_to(self, f, mac_bytes=10):
        """Context manager yielding a file-like object; everything written
        to it is encrypted for this pubkey and forwarded to `f`."""
        ctx = EncryptionContext(f, self.p, mac_bytes)
        yield ctx
        ctx.finish()

    def encrypt(self, s, mac_bytes=10):
        """Encrypt the bytestring `s` for this pubkey."""
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        buf = BytesIO()
        with self.encrypt_to(buf, mac_bytes) as ef:
            ef.write(s)
        return buf.getvalue()

    def to_bytes(self, fmt=SER_BINARY):
        return self.p.to_bytes(fmt)

    def to_string(self, fmt=SER_BINARY):
        return self.p.to_string(fmt)

    def __str__(self):
        return self.to_string(SER_COMPACT)

    def __repr__(self):
        return "<PubKey %s>" % self
class PrivKey(object):
    """ A secret exponent """

    def __init__(self, e, curve):
        # e: the secret exponent; curve: the Curve it belongs to
        self.e = e
        self.curve = curve

    @contextlib.contextmanager
    def decrypt_from(self, f, mac_bytes=10):
        """ Decrypts a message from f. """
        ctx = DecryptionContext(self.curve, f, self, mac_bytes)
        yield ctx
        # drain the stream so the trailing MAC is always checked
        ctx.read()

    def decrypt(self, s, mac_bytes=10):
        """Decrypt the bytestring `s` and return the plaintext."""
        if isinstance(s, six.text_type):
            raise ValueError("s should be bytes")
        instream = BytesIO(s)
        with self.decrypt_from(instream, mac_bytes) as f:
            return f.read()

    def sign(self, h, sig_format=SER_BINARY):
        """ Signs the message with SHA-512 hash `h' with this private key. """
        outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
                  else self.curve.sig_len_bin)
        sig = self._ECDSA_sign(h)
        return serialize_number(sig, sig_format, outlen)

    def __repr__(self):
        return "<PrivKey %s>" % self.e

    def __str__(self):
        return str(self.e)

    def _ECDSA_sign(self, md):
        """Produce a deterministic ECDSA signature of digest `md`,
        returned as the single integer s*order + r (cf. _ECDSA_verify)."""
        # Get the pseudo-random exponent from the messagedigest
        # and the private key.
        order = self.curve.order
        hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
        h = hmac.new(hmk, digestmod=hashlib.sha256)
        h.update(md)
        # AES-CTR keyed with HMAC(privkey, md) acts as a deterministic PRNG
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cprng = Crypto.Cipher.AES.new(h.digest(),
                                      Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        r = 0
        s = 0
        while s == 0:
            while r == 0:
                # draw nonce k and compute r = (k*G).x mod order
                buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
                k = self.curve._buf_to_exponent(buf)
                p1 = self.curve.base * k
                r = p1.x % order
            # s = (e*r + H(m)) / k  mod order
            e = deserialize_number(md, SER_BINARY)
            e = (e % order)
            s = (self.e * r) % order
            s = (s + e) % order
            e = gmpy.invert(k, order)
            s = (s * e) % order
        # pack (s, r) into one integer
        s = s * order
        s = s + r
        return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
    """ Holds state of encryption.  Use AffinePoint.encrypt_to """

    def __init__(self, f, p, mac_bytes=10):
        self.f = f
        self.mac_bytes = mac_bytes
        # derive a 64-byte key and an ephemeral point R via ECIES
        key, R = p._ECIES_encryption()
        # second half of the key feeds the HMAC; first half keys AES-CTR
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        # the ciphertext starts with R so the receiver can re-derive the key
        f.write(R.to_bytes(SER_BINARY))
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)

    def write(self, s):
        """Encrypt `s`, write the ciphertext to the output and MAC it."""
        if not self.f:
            raise IOError("closed")
        ct = self.cipher.encrypt(s)
        self.f.write(ct)
        self.h.update(ct)

    def finish(self):
        """Append the truncated MAC and close this context."""
        if not self.f:
            raise IOError("closed")
        self.f.write(self.h.digest()[:self.mac_bytes])
        self.f = None
class DecryptionContext(object):
    """ Holds state of decryption.  Use Curve.decrypt_from """

    def __init__(self, curve, f, privkey, mac_bytes=10):
        self.f = f
        self.mac_bytes = mac_bytes
        # the stream starts with the sender's ephemeral point R
        R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
        key = R._ECIES_decryption(privkey)
        self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        self.cipher = Crypto.Cipher.AES.new(
            key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        # keep mac_bytes of lookahead: the final bytes of the stream are
        # the MAC, not ciphertext
        self.ahead = f.read(mac_bytes)

    def read(self, n=None):
        """Decrypt and return up to `n` bytes (all remaining if None).

        When the end of the stream is reached the trailing MAC is
        verified; IntegrityError is raised on a mismatch.
        """
        if not self.f:
            # already finished.  BUGFIX: return bytes (b''), not str (''),
            # to match the type of the normal return value on Python 3
            # (on Python 2 the two are identical).
            return b''
        if n is None:
            tmp = self.ahead + self.f.read()
        else:
            tmp = self.ahead + self.f.read(n)
        ct = tmp[:-self.mac_bytes]
        self.ahead = tmp[-self.mac_bytes:]
        self.h.update(ct)
        pt = self.cipher.decrypt(ct)
        if n is None or len(ct) < n:
            # end of stream: check the MAC before declaring success
            if self.h.digest()[:self.mac_bytes] != self.ahead:
                raise IntegrityError
            self.f = None
        return pt
# The main Curve objects
# #########################################################
class Curve(object):
    """ Represents a Elliptic Curve """

    @staticmethod
    def by_name_substring(substring):
        """Return the unique curve whose name contains `substring`
        (case-insensitive).  Raises KeyError if zero or several match."""
        substring = substring.lower()
        candidates = []
        for raw_curve in RAW_CURVES:
            if substring in raw_curve[0]:
                candidates.append(raw_curve)
        if len(candidates) != 1:
            raise KeyError
        return Curve(candidates[0])

    @staticmethod
    def by_name(name):
        """Return the curve with exactly this name; KeyError if unknown."""
        for raw_curve in RAW_CURVES:
            if raw_curve[0] == name:
                return Curve(raw_curve)
        raise KeyError

    @staticmethod
    def by_pk_len(pk_len):
        """Return the curve whose compact public keys have length `pk_len`."""
        for raw_curve in RAW_CURVES:
            if raw_curve[8] == pk_len:
                return Curve(raw_curve)
        raise KeyError

    def __init__(self, raw_curve_params):
        """ Initialize a new curve from raw curve parameters.
            Use `Curve.by_pk_len' instead """
        r = raw_curve_parameters(*raw_curve_params)
        # Store domain parameters
        self.name = r.name
        self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
        self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
        self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
        self.order = deserialize_number(
            binascii.unhexlify(r.order), SER_BINARY)
        self.base = AffinePoint(
            curve=self, x=deserialize_number(
                binascii.unhexlify(
                    r.base_x), SER_BINARY), y=deserialize_number(
                binascii.unhexlify(
                    r.base_y), SER_BINARY))
        self.cofactor = r.cofactor
        # Calculate some other parameters
        # public keys serialize x + possibly m (y parity), hence 2*m - 1
        self.pk_len_bin = get_serialized_number_len(
            (2 * self.m) - 1, SER_BINARY)
        self.pk_len_compact = get_serialized_number_len(
            (2 * self.m) - 1, SER_COMPACT)
        assert self.pk_len_compact == r.pk_len_compact
        # signatures are the single integer s*order + r, hence order^2 - 1
        self.sig_len_bin = get_serialized_number_len(
            (self.order * self.order) - 1, SER_BINARY)
        self.sig_len_compact = get_serialized_number_len(
            (self.order * self.order) - 1, SER_COMPACT)
        self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
        self.dh_len_compact = get_serialized_number_len(
            2 ** self.dh_len_bin - 1, SER_COMPACT)
        self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
        self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)

    @property
    def key_bytes(self):
        """ The approximate number of bytes of information in a key. """
        return self.pk_len_bin

    def __repr__(self):
        return "<Curve %s>" % self.name

    def point_from_string(self, s, fmt=SER_BINARY):
        """Deserialize a compressed point: x >= m signals an odd y."""
        x = deserialize_number(s, fmt)
        yflag = x >= self.m
        if yflag:
            x = x - self.m
        assert 0 < x and x <= self.m
        return self._point_decompress(x, yflag)

    def pubkey_from_string(self, s, fmt=SER_BINARY):
        return PubKey(self.point_from_string(s, fmt))

    def _point_decompress(self, x, yflag):
        """Recover y from x by solving y^2 = x^3 + a*x + b; `yflag`
        selects which of the two roots to use."""
        m = self.m
        h = (x * x) % m
        h = (h + self.a) % m
        h = (h * x) % m
        h = (h + self.b) % m
        y = mod_root(h, m)
        if y or not yflag:
            if bool(y.getbit(0)) == yflag:
                return AffinePoint(x=x, y=y, curve=self)
            return AffinePoint(x=x, y=m - y, curve=self)

    def hash_to_exponent(self, h):
        """ Converts a 32 byte hash to an exponent """
        # AES-CTR keyed by the hash stretches it to order_len_bin bytes
        ctr = Crypto.Util.Counter.new(128, initial_value=0)
        cipher = Crypto.Cipher.AES.new(h,
                                       Crypto.Cipher.AES.MODE_CTR, counter=ctr)
        buf = cipher.encrypt(b'\0' * self.order_len_bin)
        return self._buf_to_exponent(buf)

    def _buf_to_exponent(self, buf):
        # map the buffer into the exponent range [1, order - 1]
        a = deserialize_number(buf, SER_BINARY)
        a = (a % (self.order - 1)) + 1
        return a

    def passphrase_to_pubkey(self, passphrase):
        return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)

    def passphrase_to_privkey(self, passphrase):
        """Derive the private key for a (bytes) passphrase on this curve."""
        if isinstance(passphrase, six.text_type):
            raise ValueError(
                "Encode `passphrase` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        h = _passphrase_to_hash(passphrase)
        return PrivKey(self.hash_to_exponent(h), self)

    @contextlib.contextmanager
    def decrypt_from(self, f, privkey, mac_bytes=10):
        """Context manager yielding a decrypting reader over `f`."""
        ctx = DecryptionContext(self, f, privkey, mac_bytes)
        yield ctx
        # drain so the trailing MAC is always verified
        ctx.read()

    def decrypt(self, s, privkey, mac_bytes=10):
        """Decrypt the bytestring `s` with `privkey`."""
        instream = BytesIO(s)
        with self.decrypt_from(instream, privkey, mac_bytes) as f:
            return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
    """Reduce a (bytes) passphrase to a 32-byte SHA-256 digest."""
    return hashlib.sha256(passphrase).digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
    """ Encrypts `s' for public key `pk' """
    # When no curve name is given, infer the curve from the pubkey length.
    curve = (Curve.by_pk_len(len(pk)) if curve is None
             else Curve.by_name(curve))
    p = curve.pubkey_from_string(pk, pk_format)
    return p.encrypt(s, mac_bytes)
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
    """ Decrypts `s' with passphrase `passphrase' """
    curve = Curve.by_name(curve)
    privkey = curve.passphrase_to_privkey(passphrase)
    return privkey.decrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
                 mac_bytes=10, chunk_size=4096, curve=None):
    """ Encrypts `in_file' to `out_file' for pubkey `pk' """
    # Accept open file objects or paths; close only the files we opened.
    close_in, close_out = False, False
    in_file, out_file = in_path_or_file, out_path_or_file
    try:
        if stringlike(in_path_or_file):
            in_file = open(in_path_or_file, 'rb')
            close_in = True
        if stringlike(out_path_or_file):
            out_file = open(out_path_or_file, 'wb')
            close_out = True
        _encrypt_file(in_file, out_file, pk, pk_format, mac_bytes, chunk_size,
                      curve)
    finally:
        if close_out:
            out_file.close()
        if close_in:
            in_file.close()
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
                 curve='secp160r1', mac_bytes=10, chunk_size=4096):
    """ Decrypts `in_file' to `out_file' with passphrase `passphrase' """
    # Accept open file objects or paths; close only the files we opened.
    close_in, close_out = False, False
    in_file, out_file = in_path_or_file, out_path_or_file
    try:
        if stringlike(in_path_or_file):
            in_file = open(in_path_or_file, 'rb')
            close_in = True
        if stringlike(out_path_or_file):
            out_file = open(out_path_or_file, 'wb')
            close_out = True
        _decrypt_file(in_file, out_file, passphrase, curve, mac_bytes,
                      chunk_size)
    finally:
        if close_out:
            out_file.close()
        if close_in:
            in_file.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
                  mac_bytes=10, chunk_size=4096, curve=None):
    # Stream-encrypt in_file to out_file in chunk_size pieces.
    curve = (Curve.by_pk_len(len(pk)) if curve is None
             else Curve.by_name(curve))
    p = curve.pubkey_from_string(pk, pk_format)
    with p.encrypt_to(out_file, mac_bytes) as encrypted_out:
        while True:
            buff = in_file.read(chunk_size)
            if not buff:
                break
            encrypted_out.write(buff)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
                  mac_bytes=10, chunk_size=4096):
    # Stream-decrypt in_file to out_file in chunk_size pieces.
    curve = Curve.by_name(curve)
    privkey = curve.passphrase_to_privkey(passphrase)
    with privkey.decrypt_from(in_file, mac_bytes) as decrypted_in:
        while True:
            buff = decrypted_in.read(chunk_size)
            if not buff:
                break
            out_file.write(buff)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
           curve=None):
    """ Verifies that `sig' is a signature of pubkey `pk' for the
        message `s'. """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    # When no curve name is given, infer the curve from the pubkey length.
    curve = (Curve.by_pk_len(len(pk)) if curve is None
             else Curve.by_name(curve))
    p = curve.pubkey_from_string(pk, pk_format)
    # signatures are made over the SHA-512 digest of the message
    return p.verify(hashlib.sha512(s).digest(), sig, sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
    """Derive the public key belonging to `passphrase` on the named curve."""
    return Curve.by_name(curve).passphrase_to_pubkey(passphrase)
def generate_keypair(curve='secp160r1', randfunc=None):
    """ Convenience function to generate a random
        new keypair (passphrase, pubkey). """
    if randfunc is None:
        # default to PyCrypto's CSPRNG
        randfunc = Crypto.Random.new().read
    curve = Curve.by_name(curve)
    raw_privkey = randfunc(curve.order_len_bin)
    # round-trip through an integer to compact-encode the random bytes
    # into a printable passphrase
    privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
    pubkey = str(passphrase_to_pubkey(privkey))
    return (privkey, pubkey)
|
bwesterb/py-seccure
|
src/__init__.py
|
generate_keypair
|
python
|
def generate_keypair(curve='secp160r1', randfunc=None):
if randfunc is None:
randfunc = Crypto.Random.new().read
curve = Curve.by_name(curve)
raw_privkey = randfunc(curve.order_len_bin)
privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT)
pubkey = str(passphrase_to_pubkey(privkey))
return (privkey, pubkey)
|
Convenience function to generate a random
new keypair (passphrase, pubkey).
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L1013-L1022
|
[
"def serialize_number(x, fmt=SER_BINARY, outlen=None):\n \"\"\" Serializes `x' to a string of length `outlen' in format `fmt' \"\"\"\n ret = b''\n if fmt == SER_BINARY:\n while x:\n x, r = divmod(x, 256)\n ret = six.int2byte(int(r)) + ret\n if outlen is not None:\n assert len(ret) <= outlen\n ret = ret.rjust(outlen, b'\\0')\n return ret\n assert fmt == SER_COMPACT\n while x:\n x, r = divmod(x, len(COMPACT_DIGITS))\n ret = COMPACT_DIGITS[r:r + 1] + ret\n if outlen is not None:\n assert len(ret) <= outlen\n ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])\n return ret\n",
"def deserialize_number(s, fmt=SER_BINARY):\n \"\"\" Deserializes a number from a string `s' in format `fmt' \"\"\"\n ret = gmpy.mpz(0)\n if fmt == SER_BINARY:\n if isinstance(s, six.text_type):\n raise ValueError(\n \"Encode `s` to a bytestring yourself to\" +\n \" prevent problems with different default encodings\")\n for c in s:\n ret *= 256\n ret += byte2int(c)\n return ret\n assert fmt == SER_COMPACT\n if isinstance(s, six.text_type):\n s = s.encode('ascii')\n for c in s:\n ret *= len(COMPACT_DIGITS)\n ret += R_COMPACT_DIGITS[c]\n return ret\n",
"def passphrase_to_pubkey(passphrase, curve='secp160r1'):\n curve = Curve.by_name(curve)\n return curve.passphrase_to_pubkey(passphrase)\n",
"def by_name(name):\n for raw_curve in RAW_CURVES:\n if raw_curve[0] == name:\n return Curve(raw_curve)\n raise KeyError\n"
] |
""" Elliptic Curve cryptography compatible with SECCURE:
http://point-at-infinity.org/seccure/ """
import hmac
import hashlib
import logging
import binascii
import contextlib
import collections
from ._version import __version__ # noqa: F401
# PyCrypto
import Crypto.Util
import Crypto.Cipher.AES
import Crypto.Random.random
# gmpy
import gmpy
# six
import six
# TODO replace with six.byte2int, when it is released
# Python 2/3 compatibility shims: in-memory byte stream, byte-to-int
# conversion and a "is this a path-like string" predicate.
if six.PY3:
    from io import BytesIO as BytesIO

    def byte2int(b): return b

    def stringlike(x): return isinstance(x, (str, bytes))
else:
    from cStringIO import StringIO as BytesIO

    def byte2int(b): return ord(b)

    def stringlike(x): return isinstance(x, basestring)

# module-level logger
l = logging.getLogger(__name__)


class IntegrityError(ValueError):
    """Raised when the MAC of a decrypted message does not match."""
    pass


# Serialization of numbers
# #########################################################
SER_COMPACT = 0  # printable alphabet below (SECCURE's compact format)
SER_BINARY = 1   # big-endian base-256
COMPACT_DIGITS = (b'!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  b'[]^_abcdefghijklmnopqrstuvwxyz{|}~')
# reverse lookup table for COMPACT_DIGITS
R_COMPACT_DIGITS = {}  # TODO is a tuple/list faster?
for i, c in enumerate(COMPACT_DIGITS):
    R_COMPACT_DIGITS[c] = i
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """ Serializes `x' to a string of length `outlen' in format `fmt' """
    # SER_BINARY: big-endian base-256; SER_COMPACT: the printable
    # COMPACT_DIGITS alphabet.  With `outlen` the result is left-padded
    # with the zero digit of the chosen alphabet.
    ret = b''
    if fmt == SER_BINARY:
        while x:
            x, r = divmod(x, 256)
            ret = six.int2byte(int(r)) + ret
        if outlen is not None:
            assert len(ret) <= outlen
            ret = ret.rjust(outlen, b'\0')
        return ret
    assert fmt == SER_COMPACT
    while x:
        x, r = divmod(x, len(COMPACT_DIGITS))
        ret = COMPACT_DIGITS[r:r + 1] + ret
    if outlen is not None:
        assert len(ret) <= outlen
        ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])
    return ret
def deserialize_number(s, fmt=SER_BINARY):
    """ Deserializes a number from a string `s' in format `fmt' """
    # inverse of serialize_number; returns a gmpy.mpz
    ret = gmpy.mpz(0)
    if fmt == SER_BINARY:
        if isinstance(s, six.text_type):
            raise ValueError(
                "Encode `s` to a bytestring yourself to" +
                " prevent problems with different default encodings")
        for c in s:
            ret *= 256
            ret += byte2int(c)
        return ret
    assert fmt == SER_COMPACT
    # compact text may arrive as unicode; the alphabet is pure ASCII
    if isinstance(s, six.text_type):
        s = s.encode('ascii')
    for c in s:
        ret *= len(COMPACT_DIGITS)
        ret += R_COMPACT_DIGITS[c]
    return ret
def get_serialized_number_len(x, fmt=SER_BINARY):
    """Return len(serialize_number(x, fmt)) without building the string."""
    if fmt == SER_BINARY:
        # bytes needed for x's bits (x is a gmpy.mpz; numdigits(2) = bit length)
        return (x.numdigits(2) + 7) // 8
    assert fmt == SER_COMPACT
    # count base-len(COMPACT_DIGITS) digits
    res = 0
    while x != 0:
        x = x // len(COMPACT_DIGITS)
        res += 1
    return res
# Some modular arithmetic
# #########################################################
def mod_issquare(a, p):
    """ Returns whether `a' is a square modulo p """
    if not a:
        # 0 == 0^2 counts as a square
        return True
    p1 = p // 2
    # Euler's criterion: a^((p-1)/2) == 1 (mod p) iff a is a residue
    p2 = pow(a, p1, p)
    return p2 == 1


def mod_root(a, p):
    """ Return a root of `a' modulo p """
    # Tonelli-Shanks.  Generalized: bit tests now use portable shift/mask
    # operations instead of the gmpy-only getbit()/setbit() methods, so
    # plain Python ints work as well (gmpy.mpz still does, since it
    # supports >>, & and <<).
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        raise ValueError
    # find a quadratic non-residue n
    n = 2
    while mod_issquare(n, p):
        n += 1
    # write p - 1 = q * 2^r with q odd
    q = p - 1
    r = 0
    while not ((q >> r) & 1):
        r += 1
    q = q >> r
    y = pow(n, q, p)
    h = q >> 1
    b = pow(a, h, p)
    x = (a * b) % p
    b = (b * x) % p
    while b != 1:
        # find least m with b^(2^m) == 1
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        # h = 2^(r - m - 1)   (was: gmpy.mpz(0).setbit(r - m - 1))
        h = 1 << (r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
# Raw curve parameters
# #########################################################
# Field layout of the RAW_CURVES entries below; pk_len_compact is the
# length of a compact-serialized public key on that curve.
raw_curve_parameters = collections.namedtuple(
    'raw_curve_parameters',
    ('name',
     'a',
     'b',
     'm',
     'base_x',
     'base_y',
     'order',
     'cofactor',
     'pk_len_compact'))
RAW_CURVES = (
("secp112r1",
b"db7c2abf62e35e668076bead2088",
b"659ef8ba043916eede8911702b22",
b"db7c2abf62e35e668076bead208b",
b"09487239995a5ee76b55f9c2f098",
b"a89ce5af8724c0a23e0e0ff77500",
b"db7c2abf62e35e7628dfac6561c5", 1, 18),
("secp128r1",
b"fffffffdfffffffffffffffffffffffc",
b"e87579c11079f43dd824993c2cee5ed3",
b"fffffffdffffffffffffffffffffffff",
b"161ff7528b899b2d0c28607ca52c5b86",
b"cf5ac8395bafeb13c02da292dded7a83",
b"fffffffe0000000075a30d1b9038a115", 1, 20),
("secp160r1",
b"ffffffffffffffffffffffffffffffff7ffffffc",
b"1c97befc54bd7a8b65acf89f81d4d4adc565fa45",
b"ffffffffffffffffffffffffffffffff7fffffff",
b"4a96b5688ef573284664698968c38bb913cbfc82",
b"23a628553168947d59dcc912042351377ac5fb32",
b"0100000000000000000001f4c8f927aed3ca752257", 1, 25),
("secp192r1/nistp192",
b"fffffffffffffffffffffffffffffffefffffffffffffffc",
b"64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1",
b"fffffffffffffffffffffffffffffffeffffffffffffffff",
b"188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012",
b"07192b95ffc8da78631011ed6b24cdd573f977a11e794811",
b"ffffffffffffffffffffffff99def836146bc9b1b4d22831", 1, 30),
("secp224r1/nistp224",
b"fffffffffffffffffffffffffffffffefffffffffffffffffffffffe",
b"b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4",
b"ffffffffffffffffffffffffffffffff000000000000000000000001",
b"b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
b"bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
b"ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", 1, 35),
("secp256r1/nistp256",
b"ffffffff00000001000000000000000000000000fffffffffffffffffffffffc",
b"5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b",
b"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff",
b"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
b"4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5",
b"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
1, 40),
("secp384r1/nistp384",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000fffffffc",
b"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875a"
b"c656398d8a2ed19d2a85c8edd3ec2aef",
b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
b"ffffffff0000000000000000ffffffff",
b"aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a38"
b"5502f25dbf55296c3a545e3872760ab7",
b"3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c0"
b"0a60b1ce1d7e819d7a431d7c90ea0e5f",
b"ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
b"581a0db248b0a77aecec196accc52973", 1, 60),
("secp521r1/nistp521",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffffffc",
b"0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef1"
b"09e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd4"
b"6b503f00",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"ffffffff",
b"00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d"
b"3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31"
b"c2e5bd66",
b"011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e"
b"662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be9476"
b"9fd16650",
b"01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
b"fffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e"
b"91386409", 1, 81),
("brainpoolp160r1",
b"340e7be2a280eb74e2be61bada745d97e8f7c300",
b"1e589a8595423412134faa2dbdec95c8d8675e58",
b"e95e4a5f737059dc60dfc7ad95b3d8139515620f",
b"bed5af16ea3f6a4f62938c4631eb5af7bdbcdbc3",
b"1667cb477a1a8ec338f94741669c976316da6321",
b"e95e4a5f737059dc60df5991d45029409e60fc09", 1, 25),
("brainpoolp192r1",
b"6a91174076b1e0e19c39c031fe8685c1cae040e5c69a28ef",
b"469a28ef7c28cca3dc721d044f4496bcca7ef4146fbf25c9",
b"c302f41d932a36cda7a3463093d18db78fce476de1a86297",
b"c0a0647eaab6a48753b033c56cb0f0900a2f5c4853375fd6",
b"14b690866abd5bb88b5f4828c1490002e6773fa2fa299b8f",
b"c302f41d932a36cda7a3462f9e9e916b5be8f1029ac4acc1", 1, 30),
("brainpoolp224r1",
b"68a5e62ca9ce6c1c299803a6c1530b514e182ad8b0042a59cad29f43",
b"2580f63ccfe44138870713b1a92369e33e2135d266dbb372386c400b",
b"d7c134aa264366862a18302575d1d787b09f075797da89f57ec8c0ff",
b"0d9029ad2c7e5cf4340823b2a87dc68c9e4ce3174c1e6efdee12c07d",
b"58aa56f772c0726f24c6b89e4ecdac24354b9e99caa3f6d3761402cd",
b"d7c134aa264366862a18302575d0fb98d116bc4b6ddebca3a5a7939f",
1, 35),
("brainpoolp256r1",
b"7d5a0975fc2c3057eef67530417affe7fb8055c126dc5c6ce94a4b44f330b5d9",
b"26dc5c6ce94a4b44f330b5d9bbd77cbf958416295cf7e1ce6bccdc18ff8c07b6",
b"a9fb57dba1eea9bc3e660a909d838d726e3bf623d52620282013481d1f6e5377",
b"8bd2aeb9cb7e57cb2c4b482ffc81b7afb9de27e1e3bd23c23a4453bd9ace3262",
b"547ef835c3dac4fd97f8461a14611dc9c27745132ded8e545c1d54c72f046997",
b"a9fb57dba1eea9bc3e660a909d838d718c397aa3b561a6f7901e0e82974856a7",
1, 40),
("brainpoolp320r1",
b"3ee30b568fbab0f883ccebd46d3f3bb8a2a73513f5eb79da66190eb085ffa9f49"
b"2f375a97d860eb4",
b"520883949dfdbc42d3ad198640688a6fe13f41349554b49acc31dccd884539816"
b"f5eb4ac8fb1f1a6",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa6f6f40def4f92b9ec7893ec28f"
b"cd412b1f1b32e27",
b"43bd7e9afb53d8b85289bcc48ee5bfe6f20137d10a087eb6e7871e2a10a599c71"
b"0af8d0d39e20611",
b"14fdd05545ec1cc8ab4093247f77275e0743ffed117182eaa9c77877aaac6ac7d"
b"35245d1692e8ee1",
b"d35e472036bc4fb7e13c785ed201e065f98fcfa5b68f12a32d482ec7ee8658e98"
b"691555b44c59311", 1, 50),
("brainpoolp384r1",
b"7bc382c63d8c150c3c72080ace05afa0c2bea28e4fb22787139165efba91f90f8"
b"aa5814a503ad4eb04a8c7dd22ce2826",
b"04a8c7dd22ce28268b39b55416f0447c2fb77de107dcd2a62e880ea53eeb62d57"
b"cb4390295dbc9943ab78696fa504c11",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b412b1da197fb71123a"
b"cd3a729901d1a71874700133107ec53",
b"1d1c64f068cf45ffa2a63a81b7c13f6b8847a3e77ef14fe3db7fcafe0cbd10e8e"
b"826e03436d646aaef87b2e247d4af1e",
b"8abe1d7520f9c2a45cb1eb8e95cfd55262b70b29feec5864e19c054ff99129280"
b"e4646217791811142820341263c5315",
b"8cb91e82a3386d280f5d6f7e50e641df152f7109ed5456b31f166e6cac0425a7c"
b"f3ab6af6b7fc3103b883202e9046565", 1, 60),
("brainpoolp512r1",
b"7830a3318b603b89e2327145ac234cc594cbdd8d3df91610a83441caea9863bc2"
b"ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72bf2c7b9e7c1ac4d77fc94"
b"ca",
b"3df91610a83441caea9863bc2ded5d5aa8253aa10a2ef1c98b9ac8b57f1117a72"
b"bf2c7b9e7c1ac4d77fc94cadc083e67984050b75ebae5dd2809bd638016f7"
b"23",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308717"
b"d4d9b009bc66842aecda12ae6a380e62881ff2f2d82c68528aa6056583a48"
b"f3",
b"81aee4bdd82ed9645a21322e9c4c6a9385ed9f70b5d916c1b43b62eef4d0098ef"
b"f3b1f78e2d0d48d50d1687b93b97d5f7c6d5047406a5e688b352209bcb9f8"
b"22",
b"7dde385d566332ecc0eabfa9cf7822fdf209f70024a57b1aa000c55b881f8111b"
b"2dcde494a5f485e5bca4bd88a2763aed1ca2b2fa8f0540678cd1e0f3ad808"
b"92",
b"aadd9db8dbe9c48b3fd4e6ae33c9fc07cb308db3b3c9d20ed6639cca703308705"
b"53e5c414ca92619418661197fac10471db1d381085ddaddb58796829ca900"
b"69",
1, 79),
)
# Names of all supported curves.
curves = [r[0] for r in RAW_CURVES]
# Arithmetic on elliptic curves
# #########################################################
class JacobianPoint(object):
    """A curve point in Jacobian projective coordinates (x, y, z).

    z == 0 represents the point at infinity.  Jacobian coordinates avoid
    a modular inversion on every addition/doubling; a single inversion
    happens in to_affine().
    """

    def __init__(self, x, y, z, curve):
        self.x = x
        self.y = y
        self.z = z
        self.curve = curve

    def to_affine(self):
        """Convert back to affine coordinates (one modular inversion)."""
        if self.z == 0:
            return AffinePoint(x=0, y=0, curve=self.curve)
        m = self.curve.m
        h = gmpy.invert(self.z, m)
        # affine x = X / z^2, affine y = Y / z^3
        y = (h * h) % m
        x = (self.x * y) % m
        y = (y * h) % m
        y = (y * self.y) % m
        return AffinePoint(x=x, y=y, curve=self.curve)

    def double(self):
        """Return 2*P in Jacobian coordinates."""
        if not self.z:
            # infinity doubles to itself
            return self
        if not self.y:
            # vertical tangent: result is the point at infinity
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        m = self.curve.m
        a = self.curve.a
        # t1 = 3*x^2 + a*z^4
        t1 = (self.x * self.x) % m
        t2 = (t1 + t1) % m
        t2 = (t2 + t1) % m
        t1 = (self.z * self.z) % m
        t1 = (t1 * t1) % m
        t1 = (t1 * a) % m
        t1 = (t1 + t2) % m
        z = (self.z * self.y) % m
        z = (z + z) % m
        y = (self.y * self.y) % m
        y = (y + y) % m
        t2 = (self.x * y) % m
        t2 = (t2 + t2) % m
        x = (t1 * t1) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        t1 = (t1 * t2) % m
        t2 = (y * y) % m
        t2 = (t2 + t2) % m
        y = (t1 - t2) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)

    def __add__(self, other):
        """Mixed addition: add an AffinePoint `other` to this point."""
        if not isinstance(other, AffinePoint):
            raise NotImplementedError
        if not other:
            # other is the point at infinity: P + O = P
            return self
        if not self.z:
            # self is the point at infinity: O + Q = Q
            return other.to_jacobian()
        m = self.curve.m
        t1 = (self.z * self.z) % m
        t2 = (t1 * other.x) % m
        t1 = (t1 * self.z) % m
        t1 = (t1 * other.y) % m
        if self.x == t2:
            if self.y == t1:
                # same point: fall back to doubling
                return self.double()
            # P + (-P): the point at infinity (z == 0)
            return JacobianPoint(x=self.x, y=self.y, z=0, curve=self.curve)
        x = (self.x - t2) % m
        y = (self.y - t1) % m
        z = (self.z * x) % m
        t3 = (x * x) % m
        t2 = (t2 * t3) % m
        t3 = (t3 * x) % m
        t1 = (t1 * t3) % m
        x = (y * y) % m
        x = (x - t3) % m
        x = (x - t2) % m
        x = (x - t2) % m
        t2 = (t2 - x) % m
        y = (y * t2) % m
        y = (y - t1) % m
        return JacobianPoint(x=x, y=y, z=z, curve=self.curve)

    def __repr__(self):
        return "<JacobianPoint (%s, %s, %s) of %s>" % (
            self.x, self.y, self.z, self.curve.name)
class AffinePoint(object):
def __init__(self, x, y, curve):
self.x = x
self.y = y
self.curve = curve
@property
def on_curve(self):
if not self:
return True
m = self.curve.m
a = self.curve.a
b = self.curve.b
h1 = (self.x * self.x) % m
h1 = (h1 + a) % m
h1 = (h1 * self.x) % m
h1 = (h1 + b) % m
h2 = (self.y * self.y) % m
return h1 == h2
def to_jacobian(self):
if not self:
return JacobianPoint(x=0, y=0, z=0, curve=self.curve)
return JacobianPoint(x=self.x, y=self.y, z=1, curve=self.curve)
def double(self):
if not self.y:
return AffinePoint(x=0, y=0, curve=self.curve)
m = self.curve.m
a = self.curve.a
t2 = (self.x * self.x) % m
t1 = (t2 + t2) % m
t1 = (t1 + t2) % m
t1 = (t1 + a) % m
t2 = (self.y + self.y) % m
t2 = gmpy.invert(t2, m)
t1 = (t1 * t2) % m
t2 = (t1 * t1) % m
t2 = (t2 - self.x) % m
t2 = (t2 - self.x) % m
x = (self.x - t2) % m
t1 = (t1 * x) % m
y = (t1 - self.y) % m
x = t2
return AffinePoint(x=x, y=y, curve=self.curve)
def __mul__(self, exp):
n = exp.numdigits(2)
r = JacobianPoint(x=0, y=0, z=0, curve=self.curve)
while n:
r = r.double()
n -= 1
if exp.getbit(n):
r = r + self
R = r.to_affine()
assert R.on_curve
return R
def __add__(self, other):
if not isinstance(other, AffinePoint):
raise NotImplementedError
if not other:
return self
if not self:
return other
if self.x == other.x:
if self.y == other.y:
return self.double()
return AffinePoint(x=0, y=0, curve=self.curve)
m = self.curve.m
t = (self.y - other.y) % m
y = (self.x - other.x) % m
y = gmpy.invert(y, m)
y = (t * y) % m
t = (y * y) % m
x = (self.x + other.x) % m
x = (t - x) % m
t = (other.x - x) % m
y = (y * t) % m
y = (y - other.y) % m
return AffinePoint(x=x, y=y, curve=self.curve)
def __nonzero__(self):
return bool(self.x or self.y)
__bool__ = __nonzero__
def __repr__(self):
return "<AffinePoint (%s, %s) of %s>" % (
self.x, self.y, self.curve.name)
def __eq__(self, other):
if not isinstance(other, AffinePoint):
return False
return self.x == other.x and self.y == other.y
def __ne__(self, other):
return not (self == other)
def __str__(self):
return self.to_string(SER_COMPACT)
def to_bytes(self, fmt=SER_BINARY):
outlen = (self.curve.pk_len_compact if fmt == SER_COMPACT
else self.curve.pk_len_bin)
if self._point_compress():
return serialize_number(self.x + self.curve.m, fmt, outlen)
return serialize_number(self.x, fmt, outlen)
def to_string(self, fmt=SER_BINARY):
return self.to_bytes(fmt).decode()
def _point_compress(self):
return self.y.getbit(0) == 1
def _ECIES_KDF(self, R):
h = hashlib.sha512()
h.update(serialize_number(self.x, SER_BINARY, self.curve.elem_len_bin))
h.update(serialize_number(R.x, SER_BINARY, self.curve.elem_len_bin))
h.update(serialize_number(R.y, SER_BINARY, self.curve.elem_len_bin))
return h.digest()
def _ECIES_encryption(self):
while True:
k = gmpy.mpz(
Crypto.Random.random.randrange(
0, int(
self.curve.order - 1)))
R = self.curve.base * k
k = k * self.curve.cofactor
Z = self * k
if Z:
break
return (Z._ECIES_KDF(R), R)
def _ECIES_decryption(self, d):
if isinstance(d, PrivKey):
d = d.e
e = d * self.curve.cofactor
if not self.valid_embedded_key:
raise ValueError
Z = self * e
if not Z:
raise ValueError
return Z._ECIES_KDF(self)
def _ECDSA_verify(self, md, sig):
order = self.curve.order
s, r = divmod(sig, order)
if s <= 0 or order <= s or r <= 0 or order <= r:
return False
e = deserialize_number(md, SER_BINARY) % order
s = gmpy.invert(s, order)
e = (e * s) % order
X1 = self.curve.base * e
e = (r * s) % order
X2 = self * e
X1 = X1 + X2
if not X1:
return False
s = X1.x % order
return s == r
@property
def valid_embedded_key(self):
if (self.x < 0 or self.x >= self.curve.m or self.y < 0 or
self.y > self.curve.m):
return False
if not self:
return False
if not self.on_curve:
return False
return True
class PubKey(object):
""" A public affine point """
def __init__(self, p):
self.p = p
def verify(self, h, sig, sig_fmt=SER_BINARY):
""" Verifies that `sig' is a signature for a message with
SHA-512 hash `h'. """
s = deserialize_number(sig, sig_fmt)
return self.p._ECDSA_verify(h, s)
@contextlib.contextmanager
def encrypt_to(self, f, mac_bytes=10):
""" Returns a file like object `ef'. Anything written to `ef'
will be encrypted for this pubkey and written to `f'. """
ctx = EncryptionContext(f, self.p, mac_bytes)
yield ctx
ctx.finish()
def encrypt(self, s, mac_bytes=10):
""" Encrypt `s' for this pubkey. """
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
f.write(s)
return out.getvalue()
def to_bytes(self, fmt=SER_BINARY):
return self.p.to_bytes(fmt)
def to_string(self, fmt=SER_BINARY):
return self.p.to_string(fmt)
def __str__(self):
return self.to_string(SER_COMPACT)
def __repr__(self):
return "<PubKey %s>" % self
class PrivKey(object):
""" A secret exponent """
def __init__(self, e, curve):
self.e = e
self.curve = curve
@contextlib.contextmanager
def decrypt_from(self, f, mac_bytes=10):
""" Decrypts a message from f. """
ctx = DecryptionContext(self.curve, f, self, mac_bytes)
yield ctx
ctx.read()
def decrypt(self, s, mac_bytes=10):
if isinstance(s, six.text_type):
raise ValueError("s should be bytes")
instream = BytesIO(s)
with self.decrypt_from(instream, mac_bytes) as f:
return f.read()
def sign(self, h, sig_format=SER_BINARY):
""" Signs the message with SHA-512 hash `h' with this private key. """
outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
else self.curve.sig_len_bin)
sig = self._ECDSA_sign(h)
return serialize_number(sig, sig_format, outlen)
def __repr__(self):
return "<PrivKey %s>" % self.e
def __str__(self):
return str(self.e)
def _ECDSA_sign(self, md):
# Get the pseudo-random exponent from the messagedigest
# and the private key.
order = self.curve.order
hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
h = hmac.new(hmk, digestmod=hashlib.sha256)
h.update(md)
ctr = Crypto.Util.Counter.new(128, initial_value=0)
cprng = Crypto.Cipher.AES.new(h.digest(),
Crypto.Cipher.AES.MODE_CTR, counter=ctr)
r = 0
s = 0
while s == 0:
while r == 0:
buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
k = self.curve._buf_to_exponent(buf)
p1 = self.curve.base * k
r = p1.x % order
e = deserialize_number(md, SER_BINARY)
e = (e % order)
s = (self.e * r) % order
s = (s + e) % order
e = gmpy.invert(k, order)
s = (s * e) % order
s = s * order
s = s + r
return s
# Encryption and decryption contexts
# #########################################################
class EncryptionContext(object):
""" Holds state of encryption. Use AffinePoint.encrypt_to """
def __init__(self, f, p, mac_bytes=10):
self.f = f
self.mac_bytes = mac_bytes
key, R = p._ECIES_encryption()
self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
f.write(R.to_bytes(SER_BINARY))
ctr = Crypto.Util.Counter.new(128, initial_value=0)
self.cipher = Crypto.Cipher.AES.new(
key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
def write(self, s):
if not self.f:
raise IOError("closed")
ct = self.cipher.encrypt(s)
self.f.write(ct)
self.h.update(ct)
def finish(self):
if not self.f:
raise IOError("closed")
self.f.write(self.h.digest()[:self.mac_bytes])
self.f = None
class DecryptionContext(object):
""" Holds state of decryption. Use Curve.decrypt_from """
def __init__(self, curve, f, privkey, mac_bytes=10):
self.f = f
self.mac_bytes = mac_bytes
R = curve.point_from_string(f.read(curve.pk_len_bin), SER_BINARY)
key = R._ECIES_decryption(privkey)
self.h = hmac.new(key[32:], digestmod=hashlib.sha256)
ctr = Crypto.Util.Counter.new(128, initial_value=0)
self.cipher = Crypto.Cipher.AES.new(
key[:32], Crypto.Cipher.AES.MODE_CTR, counter=ctr)
self.ahead = f.read(mac_bytes)
def read(self, n=None):
if not self.f:
return ''
if n is None:
tmp = self.ahead + self.f.read()
else:
tmp = self.ahead + self.f.read(n)
ct = tmp[:-self.mac_bytes]
self.ahead = tmp[-self.mac_bytes:]
self.h.update(ct)
pt = self.cipher.decrypt(ct)
if n is None or len(ct) < n:
if self.h.digest()[:self.mac_bytes] != self.ahead:
raise IntegrityError
self.f = None
return pt
# The main Curve objects
# #########################################################
class Curve(object):
""" Represents a Elliptic Curve """
@staticmethod
def by_name_substring(substring):
substring = substring.lower()
candidates = []
for raw_curve in RAW_CURVES:
if substring in raw_curve[0]:
candidates.append(raw_curve)
if len(candidates) != 1:
raise KeyError
return Curve(candidates[0])
@staticmethod
def by_name(name):
for raw_curve in RAW_CURVES:
if raw_curve[0] == name:
return Curve(raw_curve)
raise KeyError
@staticmethod
def by_pk_len(pk_len):
for raw_curve in RAW_CURVES:
if raw_curve[8] == pk_len:
return Curve(raw_curve)
raise KeyError
def __init__(self, raw_curve_params):
""" Initialize a new curve from raw curve parameters.
Use `Curve.by_pk_len' instead """
r = raw_curve_parameters(*raw_curve_params)
# Store domain parameters
self.name = r.name
self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
self.order = deserialize_number(
binascii.unhexlify(r.order), SER_BINARY)
self.base = AffinePoint(
curve=self, x=deserialize_number(
binascii.unhexlify(
r.base_x), SER_BINARY), y=deserialize_number(
binascii.unhexlify(
r.base_y), SER_BINARY))
self.cofactor = r.cofactor
# Calculate some other parameters
self.pk_len_bin = get_serialized_number_len(
(2 * self.m) - 1, SER_BINARY)
self.pk_len_compact = get_serialized_number_len(
(2 * self.m) - 1, SER_COMPACT)
assert self.pk_len_compact == r.pk_len_compact
self.sig_len_bin = get_serialized_number_len(
(self.order * self.order) - 1, SER_BINARY)
self.sig_len_compact = get_serialized_number_len(
(self.order * self.order) - 1, SER_COMPACT)
self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
self.dh_len_compact = get_serialized_number_len(
2 ** self.dh_len_bin - 1, SER_COMPACT)
self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)
@property
def key_bytes(self):
""" The approximate number of bytes of information in a key. """
return self.pk_len_bin
def __repr__(self):
return "<Curve %s>" % self.name
def point_from_string(self, s, fmt=SER_BINARY):
x = deserialize_number(s, fmt)
yflag = x >= self.m
if yflag:
x = x - self.m
assert 0 < x and x <= self.m
return self._point_decompress(x, yflag)
def pubkey_from_string(self, s, fmt=SER_BINARY):
return PubKey(self.point_from_string(s, fmt))
def _point_decompress(self, x, yflag):
m = self.m
h = (x * x) % m
h = (h + self.a) % m
h = (h * x) % m
h = (h + self.b) % m
y = mod_root(h, m)
if y or not yflag:
if bool(y.getbit(0)) == yflag:
return AffinePoint(x=x, y=y, curve=self)
return AffinePoint(x=x, y=m - y, curve=self)
def hash_to_exponent(self, h):
""" Converts a 32 byte hash to an exponent """
ctr = Crypto.Util.Counter.new(128, initial_value=0)
cipher = Crypto.Cipher.AES.new(h,
Crypto.Cipher.AES.MODE_CTR, counter=ctr)
buf = cipher.encrypt(b'\0' * self.order_len_bin)
return self._buf_to_exponent(buf)
def _buf_to_exponent(self, buf):
a = deserialize_number(buf, SER_BINARY)
a = (a % (self.order - 1)) + 1
return a
def passphrase_to_pubkey(self, passphrase):
return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)
def passphrase_to_privkey(self, passphrase):
if isinstance(passphrase, six.text_type):
raise ValueError(
"Encode `passphrase` to a bytestring yourself to" +
" prevent problems with different default encodings")
h = _passphrase_to_hash(passphrase)
return PrivKey(self.hash_to_exponent(h), self)
@contextlib.contextmanager
def decrypt_from(self, f, privkey, mac_bytes=10):
ctx = DecryptionContext(self, f, privkey, mac_bytes)
yield ctx
ctx.read()
def decrypt(self, s, privkey, mac_bytes=10):
instream = BytesIO(s)
with self.decrypt_from(instream, privkey, mac_bytes) as f:
return f.read()
# Helpers
# #########################################################
def _passphrase_to_hash(passphrase):
""" Converts a passphrase to a hash. """
return hashlib.sha256(passphrase).digest()
def encrypt(s, pk, pk_format=SER_COMPACT, mac_bytes=10, curve=None):
""" Encrypts `s' for public key `pk' """
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
return p.encrypt(s, mac_bytes)
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):
""" Decrypts `s' with passphrase `passphrase' """
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.decrypt(s, mac_bytes)
def encrypt_file(in_path_or_file, out_path_or_file, pk, pk_format=SER_COMPACT,
mac_bytes=10, chunk_size=4096, curve=None):
""" Encrypts `in_file' to `out_file' for pubkey `pk' """
close_in, close_out = False, False
in_file, out_file = in_path_or_file, out_path_or_file
try:
if stringlike(in_path_or_file):
in_file = open(in_path_or_file, 'rb')
close_in = True
if stringlike(out_path_or_file):
out_file = open(out_path_or_file, 'wb')
close_out = True
_encrypt_file(in_file, out_file, pk, pk_format, mac_bytes, chunk_size,
curve)
finally:
if close_out:
out_file.close()
if close_in:
in_file.close()
def decrypt_file(in_path_or_file, out_path_or_file, passphrase,
curve='secp160r1', mac_bytes=10, chunk_size=4096):
""" Decrypts `in_file' to `out_file' with passphrase `passphrase' """
close_in, close_out = False, False
in_file, out_file = in_path_or_file, out_path_or_file
try:
if stringlike(in_path_or_file):
in_file = open(in_path_or_file, 'rb')
close_in = True
if stringlike(out_path_or_file):
out_file = open(out_path_or_file, 'wb')
close_out = True
_decrypt_file(in_file, out_file, passphrase, curve, mac_bytes,
chunk_size)
finally:
if close_out:
out_file.close()
if close_in:
in_file.close()
def _encrypt_file(in_file, out_file, pk, pk_format=SER_COMPACT,
mac_bytes=10, chunk_size=4096, curve=None):
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
with p.encrypt_to(out_file, mac_bytes) as encrypted_out:
while True:
buff = in_file.read(chunk_size)
if not buff:
break
encrypted_out.write(buff)
def _decrypt_file(in_file, out_file, passphrase, curve='secp160r1',
mac_bytes=10, chunk_size=4096):
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
with privkey.decrypt_from(in_file, mac_bytes) as decrypted_in:
while True:
buff = decrypted_in.read(chunk_size)
if not buff:
break
out_file.write(buff)
def verify(s, sig, pk, sig_format=SER_COMPACT, pk_format=SER_COMPACT,
curve=None):
""" Verifies that `sig' is a signature of pubkey `pk' for the
message `s'. """
if isinstance(s, six.text_type):
raise ValueError("Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
curve = (Curve.by_pk_len(len(pk)) if curve is None
else Curve.by_name(curve))
p = curve.pubkey_from_string(pk, pk_format)
return p.verify(hashlib.sha512(s).digest(), sig, sig_format)
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
""" Signs `s' with passphrase `passphrase' """
if isinstance(s, six.text_type):
raise ValueError("Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.sign(hashlib.sha512(s).digest(), sig_format)
def passphrase_to_pubkey(passphrase, curve='secp160r1'):
curve = Curve.by_name(curve)
return curve.passphrase_to_pubkey(passphrase)
|
bwesterb/py-seccure
|
src/__init__.py
|
PubKey.verify
|
python
|
def verify(self, h, sig, sig_fmt=SER_BINARY):
s = deserialize_number(sig, sig_fmt)
return self.p._ECDSA_verify(h, s)
|
Verifies that `sig' is a signature for a message with
SHA-512 hash `h'.
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L597-L601
|
[
"def deserialize_number(s, fmt=SER_BINARY):\n \"\"\" Deserializes a number from a string `s' in format `fmt' \"\"\"\n ret = gmpy.mpz(0)\n if fmt == SER_BINARY:\n if isinstance(s, six.text_type):\n raise ValueError(\n \"Encode `s` to a bytestring yourself to\" +\n \" prevent problems with different default encodings\")\n for c in s:\n ret *= 256\n ret += byte2int(c)\n return ret\n assert fmt == SER_COMPACT\n if isinstance(s, six.text_type):\n s = s.encode('ascii')\n for c in s:\n ret *= len(COMPACT_DIGITS)\n ret += R_COMPACT_DIGITS[c]\n return ret\n"
] |
class PubKey(object):
""" A public affine point """
def __init__(self, p):
self.p = p
@contextlib.contextmanager
def encrypt_to(self, f, mac_bytes=10):
""" Returns a file like object `ef'. Anything written to `ef'
will be encrypted for this pubkey and written to `f'. """
ctx = EncryptionContext(f, self.p, mac_bytes)
yield ctx
ctx.finish()
def encrypt(self, s, mac_bytes=10):
""" Encrypt `s' for this pubkey. """
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
f.write(s)
return out.getvalue()
def to_bytes(self, fmt=SER_BINARY):
return self.p.to_bytes(fmt)
def to_string(self, fmt=SER_BINARY):
return self.p.to_string(fmt)
def __str__(self):
return self.to_string(SER_COMPACT)
def __repr__(self):
return "<PubKey %s>" % self
|
bwesterb/py-seccure
|
src/__init__.py
|
PubKey.encrypt_to
|
python
|
def encrypt_to(self, f, mac_bytes=10):
ctx = EncryptionContext(f, self.p, mac_bytes)
yield ctx
ctx.finish()
|
Returns a file like object `ef'. Anything written to `ef'
will be encrypted for this pubkey and written to `f'.
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L604-L609
|
[
"def finish(self):\n if not self.f:\n raise IOError(\"closed\")\n self.f.write(self.h.digest()[:self.mac_bytes])\n self.f = None\n"
] |
class PubKey(object):
""" A public affine point """
def __init__(self, p):
self.p = p
def verify(self, h, sig, sig_fmt=SER_BINARY):
""" Verifies that `sig' is a signature for a message with
SHA-512 hash `h'. """
s = deserialize_number(sig, sig_fmt)
return self.p._ECDSA_verify(h, s)
@contextlib.contextmanager
def encrypt(self, s, mac_bytes=10):
""" Encrypt `s' for this pubkey. """
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
f.write(s)
return out.getvalue()
def to_bytes(self, fmt=SER_BINARY):
return self.p.to_bytes(fmt)
def to_string(self, fmt=SER_BINARY):
return self.p.to_string(fmt)
def __str__(self):
return self.to_string(SER_COMPACT)
def __repr__(self):
return "<PubKey %s>" % self
|
bwesterb/py-seccure
|
src/__init__.py
|
PubKey.encrypt
|
python
|
def encrypt(self, s, mac_bytes=10):
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
f.write(s)
return out.getvalue()
|
Encrypt `s' for this pubkey.
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L611-L620
| null |
class PubKey(object):
""" A public affine point """
def __init__(self, p):
self.p = p
def verify(self, h, sig, sig_fmt=SER_BINARY):
""" Verifies that `sig' is a signature for a message with
SHA-512 hash `h'. """
s = deserialize_number(sig, sig_fmt)
return self.p._ECDSA_verify(h, s)
@contextlib.contextmanager
def encrypt_to(self, f, mac_bytes=10):
""" Returns a file like object `ef'. Anything written to `ef'
will be encrypted for this pubkey and written to `f'. """
ctx = EncryptionContext(f, self.p, mac_bytes)
yield ctx
ctx.finish()
def to_bytes(self, fmt=SER_BINARY):
return self.p.to_bytes(fmt)
def to_string(self, fmt=SER_BINARY):
return self.p.to_string(fmt)
def __str__(self):
return self.to_string(SER_COMPACT)
def __repr__(self):
return "<PubKey %s>" % self
|
bwesterb/py-seccure
|
src/__init__.py
|
PrivKey.decrypt_from
|
python
|
def decrypt_from(self, f, mac_bytes=10):
ctx = DecryptionContext(self.curve, f, self, mac_bytes)
yield ctx
ctx.read()
|
Decrypts a message from f.
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L643-L647
|
[
"def read(self, n=None):\n if not self.f:\n return ''\n if n is None:\n tmp = self.ahead + self.f.read()\n else:\n tmp = self.ahead + self.f.read(n)\n ct = tmp[:-self.mac_bytes]\n self.ahead = tmp[-self.mac_bytes:]\n self.h.update(ct)\n pt = self.cipher.decrypt(ct)\n if n is None or len(ct) < n:\n if self.h.digest()[:self.mac_bytes] != self.ahead:\n raise IntegrityError\n self.f = None\n return pt\n"
] |
class PrivKey(object):
""" A secret exponent """
def __init__(self, e, curve):
self.e = e
self.curve = curve
@contextlib.contextmanager
def decrypt(self, s, mac_bytes=10):
if isinstance(s, six.text_type):
raise ValueError("s should be bytes")
instream = BytesIO(s)
with self.decrypt_from(instream, mac_bytes) as f:
return f.read()
def sign(self, h, sig_format=SER_BINARY):
""" Signs the message with SHA-512 hash `h' with this private key. """
outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
else self.curve.sig_len_bin)
sig = self._ECDSA_sign(h)
return serialize_number(sig, sig_format, outlen)
def __repr__(self):
return "<PrivKey %s>" % self.e
def __str__(self):
return str(self.e)
def _ECDSA_sign(self, md):
# Get the pseudo-random exponent from the messagedigest
# and the private key.
order = self.curve.order
hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
h = hmac.new(hmk, digestmod=hashlib.sha256)
h.update(md)
ctr = Crypto.Util.Counter.new(128, initial_value=0)
cprng = Crypto.Cipher.AES.new(h.digest(),
Crypto.Cipher.AES.MODE_CTR, counter=ctr)
r = 0
s = 0
while s == 0:
while r == 0:
buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
k = self.curve._buf_to_exponent(buf)
p1 = self.curve.base * k
r = p1.x % order
e = deserialize_number(md, SER_BINARY)
e = (e % order)
s = (self.e * r) % order
s = (s + e) % order
e = gmpy.invert(k, order)
s = (s * e) % order
s = s * order
s = s + r
return s
|
bwesterb/py-seccure
|
src/__init__.py
|
PrivKey.sign
|
python
|
def sign(self, h, sig_format=SER_BINARY):
outlen = (self.curve.sig_len_compact if sig_format == SER_COMPACT
else self.curve.sig_len_bin)
sig = self._ECDSA_sign(h)
return serialize_number(sig, sig_format, outlen)
|
Signs the message with SHA-512 hash `h' with this private key.
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L656-L661
|
[
"def serialize_number(x, fmt=SER_BINARY, outlen=None):\n \"\"\" Serializes `x' to a string of length `outlen' in format `fmt' \"\"\"\n ret = b''\n if fmt == SER_BINARY:\n while x:\n x, r = divmod(x, 256)\n ret = six.int2byte(int(r)) + ret\n if outlen is not None:\n assert len(ret) <= outlen\n ret = ret.rjust(outlen, b'\\0')\n return ret\n assert fmt == SER_COMPACT\n while x:\n x, r = divmod(x, len(COMPACT_DIGITS))\n ret = COMPACT_DIGITS[r:r + 1] + ret\n if outlen is not None:\n assert len(ret) <= outlen\n ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])\n return ret\n",
"def _ECDSA_sign(self, md):\n # Get the pseudo-random exponent from the messagedigest\n # and the private key.\n order = self.curve.order\n hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)\n h = hmac.new(hmk, digestmod=hashlib.sha256)\n h.update(md)\n ctr = Crypto.Util.Counter.new(128, initial_value=0)\n cprng = Crypto.Cipher.AES.new(h.digest(),\n Crypto.Cipher.AES.MODE_CTR, counter=ctr)\n r = 0\n s = 0\n while s == 0:\n while r == 0:\n buf = cprng.encrypt(b'\\0' * self.curve.order_len_bin)\n k = self.curve._buf_to_exponent(buf)\n p1 = self.curve.base * k\n r = p1.x % order\n e = deserialize_number(md, SER_BINARY)\n e = (e % order)\n s = (self.e * r) % order\n s = (s + e) % order\n e = gmpy.invert(k, order)\n s = (s * e) % order\n s = s * order\n s = s + r\n return s\n"
] |
class PrivKey(object):
""" A secret exponent """
def __init__(self, e, curve):
self.e = e
self.curve = curve
@contextlib.contextmanager
def decrypt_from(self, f, mac_bytes=10):
""" Decrypts a message from f. """
ctx = DecryptionContext(self.curve, f, self, mac_bytes)
yield ctx
ctx.read()
def decrypt(self, s, mac_bytes=10):
if isinstance(s, six.text_type):
raise ValueError("s should be bytes")
instream = BytesIO(s)
with self.decrypt_from(instream, mac_bytes) as f:
return f.read()
def __repr__(self):
return "<PrivKey %s>" % self.e
def __str__(self):
return str(self.e)
def _ECDSA_sign(self, md):
# Get the pseudo-random exponent from the messagedigest
# and the private key.
order = self.curve.order
hmk = serialize_number(self.e, SER_BINARY, self.curve.order_len_bin)
h = hmac.new(hmk, digestmod=hashlib.sha256)
h.update(md)
ctr = Crypto.Util.Counter.new(128, initial_value=0)
cprng = Crypto.Cipher.AES.new(h.digest(),
Crypto.Cipher.AES.MODE_CTR, counter=ctr)
r = 0
s = 0
while s == 0:
while r == 0:
buf = cprng.encrypt(b'\0' * self.curve.order_len_bin)
k = self.curve._buf_to_exponent(buf)
p1 = self.curve.base * k
r = p1.x % order
e = deserialize_number(md, SER_BINARY)
e = (e % order)
s = (self.e * r) % order
s = (s + e) % order
e = gmpy.invert(k, order)
s = (s * e) % order
s = s * order
s = s + r
return s
|
bwesterb/py-seccure
|
src/__init__.py
|
Curve.hash_to_exponent
|
python
|
def hash_to_exponent(self, h):
ctr = Crypto.Util.Counter.new(128, initial_value=0)
cipher = Crypto.Cipher.AES.new(h,
Crypto.Cipher.AES.MODE_CTR, counter=ctr)
buf = cipher.encrypt(b'\0' * self.order_len_bin)
return self._buf_to_exponent(buf)
|
Converts a 32 byte hash to an exponent
|
train
|
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L859-L865
|
[
"def _buf_to_exponent(self, buf):\n a = deserialize_number(buf, SER_BINARY)\n a = (a % (self.order - 1)) + 1\n return a\n"
] |
class Curve(object):
""" Represents a Elliptic Curve """
@staticmethod
def by_name_substring(substring):
substring = substring.lower()
candidates = []
for raw_curve in RAW_CURVES:
if substring in raw_curve[0]:
candidates.append(raw_curve)
if len(candidates) != 1:
raise KeyError
return Curve(candidates[0])
@staticmethod
def by_name(name):
for raw_curve in RAW_CURVES:
if raw_curve[0] == name:
return Curve(raw_curve)
raise KeyError
@staticmethod
def by_pk_len(pk_len):
for raw_curve in RAW_CURVES:
if raw_curve[8] == pk_len:
return Curve(raw_curve)
raise KeyError
def __init__(self, raw_curve_params):
""" Initialize a new curve from raw curve parameters.
Use `Curve.by_pk_len' instead """
r = raw_curve_parameters(*raw_curve_params)
# Store domain parameters
self.name = r.name
self.a = deserialize_number(binascii.unhexlify(r.a), SER_BINARY)
self.b = deserialize_number(binascii.unhexlify(r.b), SER_BINARY)
self.m = deserialize_number(binascii.unhexlify(r.m), SER_BINARY)
self.order = deserialize_number(
binascii.unhexlify(r.order), SER_BINARY)
self.base = AffinePoint(
curve=self, x=deserialize_number(
binascii.unhexlify(
r.base_x), SER_BINARY), y=deserialize_number(
binascii.unhexlify(
r.base_y), SER_BINARY))
self.cofactor = r.cofactor
# Calculate some other parameters
self.pk_len_bin = get_serialized_number_len(
(2 * self.m) - 1, SER_BINARY)
self.pk_len_compact = get_serialized_number_len(
(2 * self.m) - 1, SER_COMPACT)
assert self.pk_len_compact == r.pk_len_compact
self.sig_len_bin = get_serialized_number_len(
(self.order * self.order) - 1, SER_BINARY)
self.sig_len_compact = get_serialized_number_len(
(self.order * self.order) - 1, SER_COMPACT)
self.dh_len_bin = min((self.order.numdigits(2) // 2 + 7) // 8, 32)
self.dh_len_compact = get_serialized_number_len(
2 ** self.dh_len_bin - 1, SER_COMPACT)
self.elem_len_bin = get_serialized_number_len(self.m, SER_BINARY)
self.order_len_bin = get_serialized_number_len(self.order, SER_BINARY)
@property
def key_bytes(self):
""" The approximate number of bytes of information in a key. """
return self.pk_len_bin
def __repr__(self):
return "<Curve %s>" % self.name
def point_from_string(self, s, fmt=SER_BINARY):
x = deserialize_number(s, fmt)
yflag = x >= self.m
if yflag:
x = x - self.m
assert 0 < x and x <= self.m
return self._point_decompress(x, yflag)
def pubkey_from_string(self, s, fmt=SER_BINARY):
return PubKey(self.point_from_string(s, fmt))
def _point_decompress(self, x, yflag):
m = self.m
h = (x * x) % m
h = (h + self.a) % m
h = (h * x) % m
h = (h + self.b) % m
y = mod_root(h, m)
if y or not yflag:
if bool(y.getbit(0)) == yflag:
return AffinePoint(x=x, y=y, curve=self)
return AffinePoint(x=x, y=m - y, curve=self)
def _buf_to_exponent(self, buf):
a = deserialize_number(buf, SER_BINARY)
a = (a % (self.order - 1)) + 1
return a
def passphrase_to_pubkey(self, passphrase):
return PubKey(self.base * self.passphrase_to_privkey(passphrase).e)
def passphrase_to_privkey(self, passphrase):
if isinstance(passphrase, six.text_type):
raise ValueError(
"Encode `passphrase` to a bytestring yourself to" +
" prevent problems with different default encodings")
h = _passphrase_to_hash(passphrase)
return PrivKey(self.hash_to_exponent(h), self)
@contextlib.contextmanager
def decrypt_from(self, f, privkey, mac_bytes=10):
ctx = DecryptionContext(self, f, privkey, mac_bytes)
yield ctx
ctx.read()
def decrypt(self, s, privkey, mac_bytes=10):
instream = BytesIO(s)
with self.decrypt_from(instream, privkey, mac_bytes) as f:
return f.read()
|
bitprophet/releases
|
releases/line_manager.py
|
LineManager.add_family
|
python
|
def add_family(self, major_number):
# Normally, we have separate buckets for bugfixes vs features
keys = ['unreleased_bugfix', 'unreleased_feature']
# But unstable prehistorical releases roll all up into just
# 'unreleased'
if major_number == 0 and self.config.releases_unstable_prehistory:
keys = ['unreleased']
# Either way, the buckets default to an empty list
self[major_number] = {key: [] for key in keys}
|
Expand to a new release line with given ``major_number``.
This will flesh out mandatory buckets like ``unreleased_bugfix`` and do
other necessary bookkeeping.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/line_manager.py#L23-L37
| null |
class LineManager(dict):
"""
Manages multiple release lines/families as well as related config state.
"""
def __init__(self, app):
"""
Initialize new line manager dict.
:param app: The core Sphinx app object. Mostly used for config.
"""
super(LineManager, self).__init__()
self.app = app
@property
def config(self):
"""
Return Sphinx config object.
"""
return self.app.config
@property
def unstable_prehistory(self):
"""
Returns True if 'unstable prehistory' behavior should be applied.
Specifically, checks config & whether any non-0.x releases exist.
"""
return (
self.config.releases_unstable_prehistory and
not self.has_stable_releases
)
@property
def stable_families(self):
"""
Returns release family numbers which aren't 0 (i.e. prehistory).
"""
return [x for x in self if x != 0]
@property
def has_stable_releases(self):
"""
Returns whether stable (post-0.x) releases seem to exist.
"""
nonzeroes = self.stable_families
# Nothing but 0.x releases -> yup we're prehistory
if not nonzeroes:
return False
# Presumably, if there's >1 major family besides 0.x, we're at least
# one release into the 1.0 (or w/e) line.
if len(nonzeroes) > 1:
return True
# If there's only one, we may still be in the space before its N.0.0 as
# well; we can check by testing for existence of bugfix buckets
return any(
x for x in self[nonzeroes[0]] if not x.startswith('unreleased')
)
|
bitprophet/releases
|
releases/line_manager.py
|
LineManager.has_stable_releases
|
python
|
def has_stable_releases(self):
nonzeroes = self.stable_families
# Nothing but 0.x releases -> yup we're prehistory
if not nonzeroes:
return False
# Presumably, if there's >1 major family besides 0.x, we're at least
# one release into the 1.0 (or w/e) line.
if len(nonzeroes) > 1:
return True
# If there's only one, we may still be in the space before its N.0.0 as
# well; we can check by testing for existence of bugfix buckets
return any(
x for x in self[nonzeroes[0]] if not x.startswith('unreleased')
)
|
Returns whether stable (post-0.x) releases seem to exist.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/line_manager.py#L59-L75
| null |
class LineManager(dict):
"""
Manages multiple release lines/families as well as related config state.
"""
def __init__(self, app):
"""
Initialize new line manager dict.
:param app: The core Sphinx app object. Mostly used for config.
"""
super(LineManager, self).__init__()
self.app = app
@property
def config(self):
"""
Return Sphinx config object.
"""
return self.app.config
def add_family(self, major_number):
"""
Expand to a new release line with given ``major_number``.
This will flesh out mandatory buckets like ``unreleased_bugfix`` and do
other necessary bookkeeping.
"""
# Normally, we have separate buckets for bugfixes vs features
keys = ['unreleased_bugfix', 'unreleased_feature']
# But unstable prehistorical releases roll all up into just
# 'unreleased'
if major_number == 0 and self.config.releases_unstable_prehistory:
keys = ['unreleased']
# Either way, the buckets default to an empty list
self[major_number] = {key: [] for key in keys}
@property
def unstable_prehistory(self):
"""
Returns True if 'unstable prehistory' behavior should be applied.
Specifically, checks config & whether any non-0.x releases exist.
"""
return (
self.config.releases_unstable_prehistory and
not self.has_stable_releases
)
@property
def stable_families(self):
"""
Returns release family numbers which aren't 0 (i.e. prehistory).
"""
return [x for x in self if x != 0]
@property
|
bitprophet/releases
|
releases/util.py
|
parse_changelog
|
python
|
def parse_changelog(path, **kwargs):
    """
    Load and parse the changelog file at ``path``, returning data structures.

    Purely introspective: no files on disk are altered. Useful for
    programmatically answering questions like "are there any unreleased
    bugfixes for the 2.3 line?" or "what was included in release 1.2.1?".

    Any extra keyword arguments are forwarded unmodified to an internal
    `get_doctree` call (which then passes them to `make_app`).

    :param str path: A relative or absolute file path string.
    :returns:
        A dict whose keys map to lists of issue objects: full version
        numbers for actual releases, minor-release buckets (e.g. ``"1.2"``)
        for unreleased bugfixes, and ``"unreleased_N_feature"`` per major
        family for unreleased features.
    """
    app, doctree = get_doctree(path, **kwargs)
    # Mirror the main code's 'find first bullet list' step, which is
    # unavoidably side-effect-heavy thanks to Sphinx plugin design.
    first_list = next(
        (node for node in doctree[0] if isinstance(node, bullet_list)), None
    )
    # Initial parse into the structures Releases finds useful internally.
    releases, manager = construct_releases(first_list.children, app)
    result = changelog2dict(releases)
    # Stitch into something an end-user would find better:
    # - drop unreleased_N.N_Y keys; their contents reappear in the per-line
    #   buckets below.
    for key in list(result):
        if key.startswith('unreleased'):
            del result[key]
    for family in manager:
        # - unreleased_bugfix issues are already accounted for in per-line
        #   buckets; no need to store them anywhere.
        manager[family].pop('unreleased_bugfix', None)
        # - surface each major family's unreleased_feature bucket as
        #   unreleased_N_feature.
        pending = manager[family].pop('unreleased_feature', None)
        if pending is not None:
            result['unreleased_{}_feature'.format(family)] = pending
        # - whatever remains in the family bucket is per-line data (not
        #   unreleased_*); flatten it into the result.
        result.update(manager[family])
    return result
|
Load and parse changelog file from ``path``, returning data structures.
This function does not alter any files on disk; it is solely for
introspecting a Releases ``changelog.rst`` and programmatically answering
questions like "are there any unreleased bugfixes for the 2.3 line?" or
"what was included in release 1.2.1?".
For example, answering the above questions is as simple as::
changelog = parse_changelog("/path/to/changelog")
print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
print("Contents of v1.2.1: {}".format(changelog['1.2.1']))
Aside from the documented arguments, any additional keyword arguments are
passed unmodified into an internal `get_doctree` call (which then passes
them to `make_app`).
:param str path: A relative or absolute file path string.
:returns:
A dict whose keys map to lists of ``releases.models.Issue`` objects, as
follows:
- Actual releases are full version number keys, such as ``"1.2.1"`` or
``"2.0.0"``.
- Unreleased bugs (or bug-like issues; see the Releases docs) are
stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
- Unreleased features (or feature-like issues) are found in
``"unreleased_N_feature"``, where ``N`` is one of the major release
families (so, a changelog spanning only 1.x will only have
``unreleased_1_feature``, whereas one with 1.x and 2.x releases will
have ``unreleased_1_feature`` and ``unreleased_2_feature``, etc).
.. versionchanged:: 1.6
Added support for passing kwargs to `get_doctree`/`make_app`.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/util.py#L37-L106
|
[
"def construct_releases(entries, app):\n log = partial(_log, config=app.config)\n # Walk from back to front, consuming entries & copying them into\n # per-release buckets as releases are encountered. Store releases in order.\n releases = []\n # Release lines, to be organized by major releases, then by major+minor,\n # alongside per-major-release 'unreleased' bugfix/feature buckets.\n # NOTE: With exception of unstable_prehistory=True, which triggers use of a\n # separate, undifferentiated 'unreleased' bucket (albeit still within the\n # '0' major line family).\n manager = LineManager(app)\n # Also keep a master hash of issues by number to detect duplicates & assist\n # in explicitly defined release lists.\n issues = {}\n\n reversed_entries = list(reversed(entries))\n # For the lookahead, so we're not doing this stripping O(n) times.\n # TODO: probs just merge the two into e.g. a list of 2-tuples of \"actual\n # entry obj + rest\"?\n stripped_entries = [x[0][0] for x in reversed_entries]\n # Perform an initial lookahead to prime manager with the 1st major release\n handle_first_release_line(stripped_entries, manager)\n # Start crawling...\n for index, obj in enumerate(reversed_entries):\n # Issue object is always found in obj (LI) index 0 (first, often only\n # P) and is the 1st item within that (index 0 again).\n # Preserve all other contents of 'obj'.\n focus = obj[0].pop(0)\n rest = obj\n log(repr(focus))\n # Releases 'eat' the entries in their line's list and get added to the\n # final data structure. They also inform new release-line 'buffers'.\n # Release lines, once the release obj is removed, should be empty or a\n # comma-separated list of issue numbers.\n if isinstance(focus, Release):\n construct_entry_with_release(\n focus, issues, manager, log, releases, rest\n )\n # After each release is handled, look ahead to see if we're\n # entering \"last stretch before a major release\". 
If so,\n # pre-emptively update the line-manager so upcoming features are\n # correctly sorted into that major release by default (re: logic in\n # Release.add_to_manager)\n handle_upcoming_major_release(\n stripped_entries[index + 1:], manager\n )\n\n # Entries get copied into release line buckets as follows:\n # * Features and support go into 'unreleased_feature' for use in new\n # feature releases.\n # * Bugfixes go into all release lines (so they can be printed in >1\n # bugfix release as appropriate) as well as 'unreleased_bugfix' (so\n # they can be displayed prior to release'). Caveats include bugs marked\n # 'major' (they go into unreleased_feature instead) or with 'N.N+'\n # (meaning they only go into release line buckets for that release and\n # up.)\n # * Support/feature entries marked as 'backported' go into all\n # release lines as well, on the assumption that they were released to\n # all active branches.\n # * The 'rest' variable (which here is the bug description, vitally\n # important!) is preserved by stuffing it into the focus (issue)\n # object - it will get unpacked by construct_nodes() later.\n else:\n construct_entry_without_release(focus, issues, manager, log, rest)\n\n if manager.unstable_prehistory:\n releases.append(generate_unreleased_entry(\n header=\"Next release\",\n line=\"unreleased\",\n issues=manager[0]['unreleased'],\n manager=manager,\n app=app,\n ))\n else:\n append_unreleased_entries(app, manager, releases)\n\n reorder_release_entries(releases)\n\n return releases, manager\n",
"def changelog2dict(changelog):\n \"\"\"\n Helper turning internal list-o-releases structure into a dict.\n\n See `parse_changelog` docstring for return value details.\n \"\"\"\n return {r['obj'].number: r['entries'] for r in changelog}\n",
"def get_doctree(path, **kwargs):\n \"\"\"\n Obtain a Sphinx doctree from the RST file at ``path``.\n\n Performs no Releases-specific processing; this code would, ideally, be in\n Sphinx itself, but things there are pretty tightly coupled. So we wrote\n this.\n\n Any additional kwargs are passed unmodified into an internal `make_app`\n call.\n\n :param str path: A relative or absolute file path string.\n\n :returns:\n A two-tuple of the generated ``sphinx.application.Sphinx`` app and the\n doctree (a ``docutils.document`` object).\n\n .. versionchanged:: 1.6\n Added support for passing kwargs to `make_app`.\n \"\"\"\n root, filename = os.path.split(path)\n docname, _ = os.path.splitext(filename)\n # TODO: this only works for top level changelog files (i.e. ones where\n # their dirname is the project/doc root)\n app = make_app(srcdir=root, **kwargs)\n # Create & init a BuildEnvironment. Mm, tasty side effects.\n app._init_env(freshenv=True)\n env = app.env\n # More arity/API changes: Sphinx 1.3/1.4-ish require one to pass in the app\n # obj in BuildEnvironment.update(); modern Sphinx performs that inside\n # Application._init_env() (which we just called above) and so that kwarg is\n # removed from update(). EAFP.\n kwargs = dict(\n config=app.config,\n srcdir=root,\n doctreedir=app.doctreedir,\n app=app,\n )\n try:\n env.update(**kwargs)\n except TypeError:\n # Assume newer Sphinx w/o an app= kwarg\n del kwargs['app']\n env.update(**kwargs)\n # Code taken from sphinx.environment.read_doc; easier to manually call\n # it with a working Environment object, instead of doing more random crap\n # to trick the higher up build system into thinking our single changelog\n # document was \"updated\".\n env.temp_data['docname'] = docname\n env.app = app\n # NOTE: SphinxStandaloneReader API changed in 1.4 :(\n reader_kwargs = {\n 'app': app,\n 'parsers': env.config.source_parsers,\n }\n if sphinx.version_info[:2] < (1, 4):\n del reader_kwargs['app']\n # This monkeypatches (!!!) 
docutils to 'inject' all registered Sphinx\n # domains' roles & so forth. Without this, rendering the doctree lacks\n # almost all Sphinx magic, including things like :ref: and :doc:!\n with sphinx_domains(env):\n try:\n reader = SphinxStandaloneReader(**reader_kwargs)\n except TypeError:\n # If we import from io, this happens automagically, not in API\n del reader_kwargs['parsers']\n reader = SphinxStandaloneReader(**reader_kwargs)\n pub = Publisher(reader=reader,\n writer=SphinxDummyWriter(),\n destination_class=NullOutput)\n pub.set_components(None, 'restructuredtext', None)\n pub.process_programmatic_settings(None, env.settings, None)\n # NOTE: docname derived higher up, from our given path\n src_path = env.doc2path(docname)\n source = SphinxFileInput(\n app,\n env,\n source=None,\n source_path=src_path,\n encoding=env.config.source_encoding,\n )\n pub.source = source\n pub.settings._source = src_path\n pub.set_destination(None, None)\n pub.publish()\n return app, pub.document\n"
] |
"""
Utility functions, such as helpers for standalone changelog parsing.
"""
import logging
import os
from tempfile import mkdtemp
import sphinx
from docutils.core import Publisher
from docutils.io import NullOutput
from docutils.nodes import bullet_list
from sphinx.application import Sphinx # not exposed at top level
try:
from sphinx.io import (
SphinxStandaloneReader, SphinxFileInput, SphinxDummyWriter,
)
except ImportError:
# NOTE: backwards compat with Sphinx 1.3
from sphinx.environment import (
SphinxStandaloneReader, SphinxFileInput, SphinxDummyWriter,
)
# sphinx_domains is only in Sphinx 1.5+, but is presumably necessary from then
# onwards.
try:
from sphinx.util.docutils import sphinx_domains
except ImportError:
# Just dummy it up.
from contextlib import contextmanager
@contextmanager
def sphinx_domains(env):
yield
from . import construct_releases, setup
def get_doctree(path, **kwargs):
    """
    Obtain a Sphinx doctree from the RST file at ``path``.

    Performs no Releases-specific processing; this code would, ideally, be
    in Sphinx itself, but things there are pretty tightly coupled. So we
    wrote this.

    Any additional kwargs are passed unmodified into an internal `make_app`
    call.

    :param str path: A relative or absolute file path string.

    :returns:
        A two-tuple of the generated ``sphinx.application.Sphinx`` app and
        the doctree (a ``docutils.document`` object).

    .. versionchanged:: 1.6
        Added support for passing kwargs to `make_app`.
    """
    root, filename = os.path.split(path)
    docname = os.path.splitext(filename)[0]
    # TODO: only works for top-level changelog files (ones whose dirname is
    # the project/doc root).
    app = make_app(srcdir=root, **kwargs)
    # Create & init a BuildEnvironment. Mm, tasty side effects.
    app._init_env(freshenv=True)
    env = app.env
    # Arity/API churn: Sphinx 1.3/1.4-ish require passing the app into
    # BuildEnvironment.update(); modern Sphinx does that inside
    # Application._init_env() (just called above) and dropped the kwarg.
    # EAFP: try the old signature first.
    update_args = dict(
        config=app.config,
        srcdir=root,
        doctreedir=app.doctreedir,
        app=app,
    )
    try:
        env.update(**update_args)
    except TypeError:
        # Assume newer Sphinx w/o an app= kwarg.
        del update_args['app']
        env.update(**update_args)
    # Borrowed from sphinx.environment.read_doc: easier to drive it by hand
    # with a working Environment object than to trick the higher-up build
    # system into thinking our single changelog document was "updated".
    env.temp_data['docname'] = docname
    env.app = app
    # NOTE: SphinxStandaloneReader API changed in 1.4 :(
    reader_opts = {
        'app': app,
        'parsers': env.config.source_parsers,
    }
    if sphinx.version_info[:2] < (1, 4):
        del reader_opts['app']
    # This monkeypatches (!!!) docutils to 'inject' all registered Sphinx
    # domains' roles & so forth. Without it, rendering the doctree lacks
    # almost all Sphinx magic, including things like :ref: and :doc:!
    with sphinx_domains(env):
        try:
            reader = SphinxStandaloneReader(**reader_opts)
        except TypeError:
            # When imported from sphinx.io this happens automagically, not
            # in the API.
            del reader_opts['parsers']
            reader = SphinxStandaloneReader(**reader_opts)
        pub = Publisher(
            reader=reader,
            writer=SphinxDummyWriter(),
            destination_class=NullOutput,
        )
        pub.set_components(None, 'restructuredtext', None)
        pub.process_programmatic_settings(None, env.settings, None)
        # NOTE: docname derived higher up, from our given path.
        src_path = env.doc2path(docname)
        source = SphinxFileInput(
            app,
            env,
            source=None,
            source_path=src_path,
            encoding=env.config.source_encoding,
        )
        pub.source = source
        pub.settings._source = src_path
        pub.set_destination(None, None)
        pub.publish()
    return app, pub.document
def load_conf(srcdir):
    """
    Load ``conf.py`` from given ``srcdir``.

    The module is executed in a fresh namespace (with ``__file__`` set, as
    Sphinx itself does) rather than imported, so it never lands in
    ``sys.modules``.

    :param str srcdir: Directory containing the ``conf.py`` to load.
    :returns: Dictionary derived from the conf module.
    """
    path = os.path.join(srcdir, 'conf.py')
    mylocals = {'__file__': path}
    # Sphinx defines conf.py files as UTF-8; read explicitly so loading
    # doesn't depend on the platform's default locale encoding.
    with open(path, encoding='utf-8') as fd:
        exec(fd.read(), mylocals)
    return mylocals
def make_app(**kwargs):
    """
    Create a dummy Sphinx app, filling in various hardcoded assumptions.

    For example, Sphinx assumes the existence of various source/dest
    directories, even if you're only calling internals that never generate
    (or sometimes, even read!) on-disk files. This function creates safe
    temp directories for these instances.

    It also neuters Sphinx's internal logging, which otherwise causes
    verbosity in one's own test output and/or debug logs.

    Finally, it does load the given srcdir's ``conf.py``, but only to read
    specific bits like ``extensions`` (if requested); most of it is ignored.

    All args are stored in a single ``**kwargs``. Aside from the params
    listed below (all of which are optional), all kwargs given are turned
    into 'releases_xxx' config settings; e.g. ``make_app(foo='bar')`` is
    like setting ``releases_foo = 'bar'`` in ``conf.py``.

    :param str docname:
        Override the document name used (mostly for internal testing).

    :param str srcdir: Sphinx source directory path.

    :param str dstdir: Sphinx dest directory path.

    :param str doctreedir: Sphinx doctree directory path.

    :param bool load_extensions:
        Whether to load the real ``conf.py`` and setup any extensions it
        configures. Default: ``False``.

    :returns: A Sphinx ``Application`` instance.

    .. versionchanged:: 1.6
        Added the ``load_extensions`` kwarg.
    """
    srcdir = kwargs.pop('srcdir', mkdtemp())
    dstdir = kwargs.pop('dstdir', mkdtemp())
    doctreedir = kwargs.pop('doctreedir', mkdtemp())
    load_extensions = kwargs.pop('load_extensions', False)
    real_conf = None
    try:
        # Mute Sphinx <1.6ish outright...
        Sphinx._log = lambda self, message, wfile, nonl=False: None
        # ...and Sphinx >=1.6ish via stdlib logging. Technically still lets
        # Very Bad Things through, unlike the total muting above, but
        # probably OK.
        # NOTE: used to just be 'sphinx' but that stopped working, even on
        # sphinx 1.6.x. Weird. Unsure why the hierarchy isn't functioning.
        for logger_name in ('sphinx', 'sphinx.sphinx.application'):
            logging.getLogger(logger_name).setLevel(logging.ERROR)
        # The app constructor API seems to work on all versions so far.
        app = Sphinx(
            srcdir=srcdir,
            confdir=None,
            outdir=dstdir,
            doctreedir=doctreedir,
            buildername='html',
        )
        # Might as well read the conf file while we're here.
        if load_extensions:
            real_conf = load_conf(srcdir)
    finally:
        # Only remove empty dirs; non-empty dirs are implicitly something
        # that existed before we ran, and should not be touched.
        for scratch in (srcdir, dstdir, doctreedir):
            try:
                os.rmdir(scratch)
            except OSError:
                pass
    setup(app)
    # Mock out the config within. More assumptions by Sphinx :(
    # TODO: just use real config and overlay what truly needs changing? Is
    # that feasible given the weird ordering above? If so, maybe literally
    # slap this over the return value of load_conf()...
    config = {
        'releases_release_uri': 'foo_%s',
        'releases_issue_uri': 'bar_%s',
        'releases_debug': False,
        'master_doc': 'index',
    }
    # Allow tinkering with the document filename.
    if 'docname' in kwargs:
        app.env.temp_data['docname'] = kwargs.pop('docname')
    # Everything left in kwargs becomes a releases_* config override.
    for name in kwargs:
        config['releases_{}'.format(name)] = kwargs[name]
    # Stitch together as the sphinx app init() usually does w/ real conf
    # files.
    app.config._raw_config = config
    # init_values() requires a 'warn' runner on Sphinx 1.3-1.6, so if we
    # seem to hit arity errors, retry with a dummy callable. Hopefully
    # calling twice doesn't introduce any wacko state issues :(
    try:
        app.config.init_values()
    except TypeError:  # boy I wish Python had an ArityError or w/e
        app.config.init_values(lambda x: x)
    # Initialize extensions (the internal call to this happens at init
    # time, which of course had no valid config yet here...)
    if load_extensions:
        for extension in real_conf.get('extensions', []):
            # But don't set up ourselves again, that causes errors.
            if extension == 'releases':
                continue
            app.setup_extension(extension)
    return app
def changelog2dict(changelog):
    """
    Helper turning internal list-o-releases structure into a dict.

    See `parse_changelog` docstring for return value details.
    """
    result = {}
    for release in changelog:
        result[release['obj'].number] = release['entries']
    return result
|
bitprophet/releases
|
releases/util.py
|
get_doctree
|
python
|
def get_doctree(path, **kwargs):
    """
    Obtain a Sphinx doctree from the RST file at ``path``.

    Performs no Releases-specific processing. Extra kwargs are forwarded
    unmodified to an internal `make_app` call.

    :param str path: A relative or absolute file path string.
    :returns:
        A two-tuple of the generated ``sphinx.application.Sphinx`` app and
        the doctree (a ``docutils.document`` object).
    """
    root, filename = os.path.split(path)
    docname = os.path.splitext(filename)[0]
    # TODO: only works for top-level changelog files (ones whose dirname is
    # the project/doc root).
    app = make_app(srcdir=root, **kwargs)
    # Create & init a BuildEnvironment. Mm, tasty side effects.
    app._init_env(freshenv=True)
    env = app.env
    # Arity/API churn: Sphinx 1.3/1.4-ish require passing the app into
    # BuildEnvironment.update(); modern Sphinx dropped the kwarg. EAFP.
    update_args = dict(
        config=app.config,
        srcdir=root,
        doctreedir=app.doctreedir,
        app=app,
    )
    try:
        env.update(**update_args)
    except TypeError:
        # Assume newer Sphinx w/o an app= kwarg.
        del update_args['app']
        env.update(**update_args)
    # Borrowed from sphinx.environment.read_doc: easier to drive it by hand
    # with a working Environment object than to trick the higher-up build
    # system into thinking our single changelog document was "updated".
    env.temp_data['docname'] = docname
    env.app = app
    # NOTE: SphinxStandaloneReader API changed in 1.4 :(
    reader_opts = {
        'app': app,
        'parsers': env.config.source_parsers,
    }
    if sphinx.version_info[:2] < (1, 4):
        del reader_opts['app']
    # This monkeypatches (!!!) docutils to 'inject' all registered Sphinx
    # domains' roles & so forth. Without it, rendering the doctree lacks
    # almost all Sphinx magic, including things like :ref: and :doc:!
    with sphinx_domains(env):
        try:
            reader = SphinxStandaloneReader(**reader_opts)
        except TypeError:
            # When imported from sphinx.io this happens automagically, not
            # in the API.
            del reader_opts['parsers']
            reader = SphinxStandaloneReader(**reader_opts)
        pub = Publisher(
            reader=reader,
            writer=SphinxDummyWriter(),
            destination_class=NullOutput,
        )
        pub.set_components(None, 'restructuredtext', None)
        pub.process_programmatic_settings(None, env.settings, None)
        # NOTE: docname derived higher up, from our given path.
        src_path = env.doc2path(docname)
        source = SphinxFileInput(
            app,
            env,
            source=None,
            source_path=src_path,
            encoding=env.config.source_encoding,
        )
        pub.source = source
        pub.settings._source = src_path
        pub.set_destination(None, None)
        pub.publish()
    return app, pub.document
|
Obtain a Sphinx doctree from the RST file at ``path``.
Performs no Releases-specific processing; this code would, ideally, be in
Sphinx itself, but things there are pretty tightly coupled. So we wrote
this.
Any additional kwargs are passed unmodified into an internal `make_app`
call.
:param str path: A relative or absolute file path string.
:returns:
A two-tuple of the generated ``sphinx.application.Sphinx`` app and the
doctree (a ``docutils.document`` object).
.. versionchanged:: 1.6
Added support for passing kwargs to `make_app`.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/util.py#L109-L194
|
[
"def make_app(**kwargs):\n \"\"\"\n Create a dummy Sphinx app, filling in various hardcoded assumptions.\n\n For example, Sphinx assumes the existence of various source/dest\n directories, even if you're only calling internals that never generate (or\n sometimes, even read!) on-disk files. This function creates safe temp\n directories for these instances.\n\n It also neuters Sphinx's internal logging, which otherwise causes verbosity\n in one's own test output and/or debug logs.\n\n Finally, it does load the given srcdir's ``conf.py``, but only to read\n specific bits like ``extensions`` (if requested); most of it is ignored.\n\n All args are stored in a single ``**kwargs``. Aside from the params listed\n below (all of which are optional), all kwargs given are turned into\n 'releases_xxx' config settings; e.g. ``make_app(foo='bar')`` is like\n setting ``releases_foo = 'bar'`` in ``conf.py``.\n\n :param str docname:\n Override the document name used (mostly for internal testing).\n\n :param str srcdir:\n Sphinx source directory path.\n\n :param str dstdir:\n Sphinx dest directory path.\n\n :param str doctreedir:\n Sphinx doctree directory path.\n\n :param bool load_extensions:\n Whether to load the real ``conf.py`` and setup any extensions it\n configures. Default: ``False``.\n\n :returns: A Sphinx ``Application`` instance.\n\n .. versionchanged:: 1.6\n Added the ``load_extensions`` kwarg.\n \"\"\"\n srcdir = kwargs.pop('srcdir', mkdtemp())\n dstdir = kwargs.pop('dstdir', mkdtemp())\n doctreedir = kwargs.pop('doctreedir', mkdtemp())\n load_extensions = kwargs.pop('load_extensions', False)\n real_conf = None\n try:\n # Sphinx <1.6ish\n Sphinx._log = lambda self, message, wfile, nonl=False: None\n # Sphinx >=1.6ish. Technically still lets Very Bad Things through,\n # unlike the total muting above, but probably OK.\n # NOTE: used to just do 'sphinx' but that stopped working, even on\n # sphinx 1.6.x. Weird. 
Unsure why hierarchy not functioning.\n for name in ('sphinx', 'sphinx.sphinx.application'):\n logging.getLogger(name).setLevel(logging.ERROR)\n # App API seems to work on all versions so far.\n app = Sphinx(\n srcdir=srcdir,\n confdir=None,\n outdir=dstdir,\n doctreedir=doctreedir,\n buildername='html',\n )\n # Might as well load the conf file here too.\n if load_extensions:\n real_conf = load_conf(srcdir)\n finally:\n for d in (srcdir, dstdir, doctreedir):\n # Only remove empty dirs; non-empty dirs are implicitly something\n # that existed before we ran, and should not be touched.\n try:\n os.rmdir(d)\n except OSError:\n pass\n setup(app)\n # Mock out the config within. More assumptions by Sphinx :(\n # TODO: just use real config and overlay what truly needs changing? is that\n # feasible given the rest of the weird ordering we have to do? If it is,\n # maybe just literally slap this over the return value of load_conf()...\n config = {\n 'releases_release_uri': 'foo_%s',\n 'releases_issue_uri': 'bar_%s',\n 'releases_debug': False,\n 'master_doc': 'index',\n }\n # Allow tinkering with document filename\n if 'docname' in kwargs:\n app.env.temp_data['docname'] = kwargs.pop('docname')\n # Allow config overrides via kwargs\n for name in kwargs:\n config['releases_{}'.format(name)] = kwargs[name]\n # Stitch together as the sphinx app init() usually does w/ real conf files\n app.config._raw_config = config\n # init_values() requires a 'warn' runner on Sphinx 1.3-1.6, so if we seem\n # to be hitting arity errors, give it a dummy such callable. 
Hopefully\n # calling twice doesn't introduce any wacko state issues :(\n try:\n app.config.init_values()\n except TypeError: # boy I wish Python had an ArityError or w/e\n app.config.init_values(lambda x: x)\n # Initialize extensions (the internal call to this happens at init time,\n # which of course had no valid config yet here...)\n if load_extensions:\n for extension in real_conf.get('extensions', []):\n # But don't set up ourselves again, that causes errors\n if extension == 'releases':\n continue\n app.setup_extension(extension)\n return app\n"
] |
"""
Utility functions, such as helpers for standalone changelog parsing.
"""
import logging
import os
from tempfile import mkdtemp
import sphinx
from docutils.core import Publisher
from docutils.io import NullOutput
from docutils.nodes import bullet_list
from sphinx.application import Sphinx # not exposed at top level
try:
from sphinx.io import (
SphinxStandaloneReader, SphinxFileInput, SphinxDummyWriter,
)
except ImportError:
# NOTE: backwards compat with Sphinx 1.3
from sphinx.environment import (
SphinxStandaloneReader, SphinxFileInput, SphinxDummyWriter,
)
# sphinx_domains is only in Sphinx 1.5+, but is presumably necessary from then
# onwards.
try:
from sphinx.util.docutils import sphinx_domains
except ImportError:
# Just dummy it up.
from contextlib import contextmanager
@contextmanager
def sphinx_domains(env):
yield
from . import construct_releases, setup
def parse_changelog(path, **kwargs):
    """
    Load and parse the changelog file at ``path``, returning data structures.

    Purely introspective: no files on disk are altered. Useful for
    programmatically answering questions like "are there any unreleased
    bugfixes for the 2.3 line?" or "what was included in release 1.2.1?".

    Any extra keyword arguments are forwarded unmodified to an internal
    `get_doctree` call (which then passes them to `make_app`).

    :param str path: A relative or absolute file path string.
    :returns:
        A dict whose keys map to lists of issue objects: full version
        numbers for actual releases, minor-release buckets (e.g. ``"1.2"``)
        for unreleased bugfixes, and ``"unreleased_N_feature"`` per major
        family for unreleased features.
    """
    app, doctree = get_doctree(path, **kwargs)
    # Mirror the main code's 'find first bullet list' step, which is
    # unavoidably side-effect-heavy thanks to Sphinx plugin design.
    first_list = next(
        (node for node in doctree[0] if isinstance(node, bullet_list)), None
    )
    # Initial parse into the structures Releases finds useful internally.
    releases, manager = construct_releases(first_list.children, app)
    result = changelog2dict(releases)
    # Stitch into something an end-user would find better:
    # - drop unreleased_N.N_Y keys; their contents reappear in the per-line
    #   buckets below.
    for key in list(result):
        if key.startswith('unreleased'):
            del result[key]
    for family in manager:
        # - unreleased_bugfix issues are already accounted for in per-line
        #   buckets; no need to store them anywhere.
        manager[family].pop('unreleased_bugfix', None)
        # - surface each major family's unreleased_feature bucket as
        #   unreleased_N_feature.
        pending = manager[family].pop('unreleased_feature', None)
        if pending is not None:
            result['unreleased_{}_feature'.format(family)] = pending
        # - whatever remains in the family bucket is per-line data (not
        #   unreleased_*); flatten it into the result.
        result.update(manager[family])
    return result
def load_conf(srcdir):
    """
    Load ``conf.py`` from given ``srcdir``.

    The module is executed in a fresh namespace (with ``__file__`` set, as
    Sphinx itself does) rather than imported, so it never lands in
    ``sys.modules``.

    :param str srcdir: Directory containing the ``conf.py`` to load.
    :returns: Dictionary derived from the conf module.
    """
    path = os.path.join(srcdir, 'conf.py')
    mylocals = {'__file__': path}
    # Sphinx defines conf.py files as UTF-8; read explicitly so loading
    # doesn't depend on the platform's default locale encoding.
    with open(path, encoding='utf-8') as fd:
        exec(fd.read(), mylocals)
    return mylocals
def make_app(**kwargs):
    """
    Create a dummy Sphinx app, filling in various hardcoded assumptions.

    For example, Sphinx assumes the existence of various source/dest
    directories, even if you're only calling internals that never generate
    (or sometimes, even read!) on-disk files. This function creates safe
    temp directories for these instances.

    It also neuters Sphinx's internal logging, which otherwise causes
    verbosity in one's own test output and/or debug logs.

    Finally, it does load the given srcdir's ``conf.py``, but only to read
    specific bits like ``extensions`` (if requested); most of it is ignored.

    All args are stored in a single ``**kwargs``. Aside from the params
    listed below (all of which are optional), all kwargs given are turned
    into 'releases_xxx' config settings; e.g. ``make_app(foo='bar')`` is
    like setting ``releases_foo = 'bar'`` in ``conf.py``.

    :param str docname:
        Override the document name used (mostly for internal testing).

    :param str srcdir: Sphinx source directory path.

    :param str dstdir: Sphinx dest directory path.

    :param str doctreedir: Sphinx doctree directory path.

    :param bool load_extensions:
        Whether to load the real ``conf.py`` and setup any extensions it
        configures. Default: ``False``.

    :returns: A Sphinx ``Application`` instance.

    .. versionchanged:: 1.6
        Added the ``load_extensions`` kwarg.
    """
    srcdir = kwargs.pop('srcdir', mkdtemp())
    dstdir = kwargs.pop('dstdir', mkdtemp())
    doctreedir = kwargs.pop('doctreedir', mkdtemp())
    load_extensions = kwargs.pop('load_extensions', False)
    real_conf = None
    try:
        # Mute Sphinx <1.6ish outright...
        Sphinx._log = lambda self, message, wfile, nonl=False: None
        # ...and Sphinx >=1.6ish via stdlib logging. Technically still lets
        # Very Bad Things through, unlike the total muting above, but
        # probably OK.
        # NOTE: used to just be 'sphinx' but that stopped working, even on
        # sphinx 1.6.x. Weird. Unsure why the hierarchy isn't functioning.
        for logger_name in ('sphinx', 'sphinx.sphinx.application'):
            logging.getLogger(logger_name).setLevel(logging.ERROR)
        # The app constructor API seems to work on all versions so far.
        app = Sphinx(
            srcdir=srcdir,
            confdir=None,
            outdir=dstdir,
            doctreedir=doctreedir,
            buildername='html',
        )
        # Might as well read the conf file while we're here.
        if load_extensions:
            real_conf = load_conf(srcdir)
    finally:
        # Only remove empty dirs; non-empty dirs are implicitly something
        # that existed before we ran, and should not be touched.
        for scratch in (srcdir, dstdir, doctreedir):
            try:
                os.rmdir(scratch)
            except OSError:
                pass
    setup(app)
    # Mock out the config within. More assumptions by Sphinx :(
    # TODO: just use real config and overlay what truly needs changing? Is
    # that feasible given the weird ordering above? If so, maybe literally
    # slap this over the return value of load_conf()...
    config = {
        'releases_release_uri': 'foo_%s',
        'releases_issue_uri': 'bar_%s',
        'releases_debug': False,
        'master_doc': 'index',
    }
    # Allow tinkering with the document filename.
    if 'docname' in kwargs:
        app.env.temp_data['docname'] = kwargs.pop('docname')
    # Everything left in kwargs becomes a releases_* config override.
    for name in kwargs:
        config['releases_{}'.format(name)] = kwargs[name]
    # Stitch together as the sphinx app init() usually does w/ real conf
    # files.
    app.config._raw_config = config
    # init_values() requires a 'warn' runner on Sphinx 1.3-1.6, so if we
    # seem to hit arity errors, retry with a dummy callable. Hopefully
    # calling twice doesn't introduce any wacko state issues :(
    try:
        app.config.init_values()
    except TypeError:  # boy I wish Python had an ArityError or w/e
        app.config.init_values(lambda x: x)
    # Initialize extensions (the internal call to this happens at init
    # time, which of course had no valid config yet here...)
    if load_extensions:
        for extension in real_conf.get('extensions', []):
            # But don't set up ourselves again, that causes errors.
            if extension == 'releases':
                continue
            app.setup_extension(extension)
    return app
def changelog2dict(changelog):
"""
Helper turning internal list-o-releases structure into a dict.
See `parse_changelog` docstring for return value details.
"""
return {r['obj'].number: r['entries'] for r in changelog}
|
bitprophet/releases
|
releases/util.py
|
load_conf
|
python
|
def load_conf(srcdir):
path = os.path.join(srcdir, 'conf.py')
mylocals = {'__file__': path}
with open(path) as fd:
exec(fd.read(), mylocals)
return mylocals
|
Load ``conf.py`` from given ``srcdir``.
:returns: Dictionary derived from the conf module.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/util.py#L197-L207
| null |
"""
Utility functions, such as helpers for standalone changelog parsing.
"""
import logging
import os
from tempfile import mkdtemp
import sphinx
from docutils.core import Publisher
from docutils.io import NullOutput
from docutils.nodes import bullet_list
from sphinx.application import Sphinx # not exposed at top level
try:
from sphinx.io import (
SphinxStandaloneReader, SphinxFileInput, SphinxDummyWriter,
)
except ImportError:
# NOTE: backwards compat with Sphinx 1.3
from sphinx.environment import (
SphinxStandaloneReader, SphinxFileInput, SphinxDummyWriter,
)
# sphinx_domains is only in Sphinx 1.5+, but is presumably necessary from then
# onwards.
try:
from sphinx.util.docutils import sphinx_domains
except ImportError:
# Just dummy it up.
from contextlib import contextmanager
@contextmanager
def sphinx_domains(env):
yield
from . import construct_releases, setup
def parse_changelog(path, **kwargs):
    """
    Load and parse changelog file from ``path``, returning data structures.

    This function does not alter any files on disk; it is solely for
    introspecting a Releases ``changelog.rst`` and programmatically answering
    questions like "are there any unreleased bugfixes for the 2.3 line?" or
    "what was included in release 1.2.1?".

    For example, answering the above questions is as simple as::

        changelog = parse_changelog("/path/to/changelog")
        print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
        print("Contents of v1.2.1: {}".format(changelog['1.2.1']))

    Aside from the documented arguments, any additional keyword arguments are
    passed unmodified into an internal `get_doctree` call (which then passes
    them to `make_app`).

    :param str path: A relative or absolute file path string.

    :returns:
        A dict whose keys map to lists of ``releases.models.Issue`` objects, as
        follows:

        - Actual releases are full version number keys, such as ``"1.2.1"`` or
          ``"2.0.0"``.
        - Unreleased bugs (or bug-like issues; see the Releases docs) are
          stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
        - Unreleased features (or feature-like issues) are found in
          ``"unreleased_N_feature"``, where ``N`` is one of the major release
          families (so, a changelog spanning only 1.x will only have
          ``unreleased_1_feature``, whereas one with 1.x and 2.x releases will
          have ``unreleased_1_feature`` and ``unreleased_2_feature``, etc).

    .. versionchanged:: 1.6
        Added support for passing kwargs to `get_doctree`/`make_app`.
    """
    app, doctree = get_doctree(path, **kwargs)
    # Have to semi-reproduce the 'find first bullet list' bit from main code,
    # which is unfortunately side-effect-heavy (thanks to Sphinx plugin
    # design).
    first_list = None
    for node in doctree[0]:
        if isinstance(node, bullet_list):
            first_list = node
            break
    # NOTE(review): if the document has no bullet list at all, first_list
    # stays None and the next line raises AttributeError — confirm whether
    # that can happen for a valid changelog.
    # Initial parse into the structures Releases finds useful internally
    releases, manager = construct_releases(first_list.children, app)
    ret = changelog2dict(releases)
    # Stitch them together into something an end-user would find better:
    # - nuke unreleased_N.N_Y as their contents will be represented in the
    # per-line buckets
    for key in ret.copy():
        if key.startswith('unreleased'):
            del ret[key]
    for family in manager:
        # - remove unreleased_bugfix, as they are accounted for in the per-line
        # buckets too. No need to store anywhere.
        manager[family].pop('unreleased_bugfix', None)
        # - bring over each major family's unreleased_feature as
        # unreleased_N_feature
        unreleased = manager[family].pop('unreleased_feature', None)
        if unreleased is not None:
            ret['unreleased_{}_feature'.format(family)] = unreleased
        # - bring over all per-line buckets from manager (flattening)
        # Here, all that's left in the per-family bucket should be lines, not
        # unreleased_*
        ret.update(manager[family])
    return ret
def get_doctree(path, **kwargs):
    """
    Obtain a Sphinx doctree from the RST file at ``path``.

    Performs no Releases-specific processing; this code would, ideally, be in
    Sphinx itself, but things there are pretty tightly coupled. So we wrote
    this.

    Any additional kwargs are passed unmodified into an internal `make_app`
    call.

    :param str path: A relative or absolute file path string.

    :returns:
        A two-tuple of the generated ``sphinx.application.Sphinx`` app and the
        doctree (a ``docutils.document`` object).

    .. versionchanged:: 1.6
        Added support for passing kwargs to `make_app`.
    """
    root, filename = os.path.split(path)
    docname, _ = os.path.splitext(filename)
    # TODO: this only works for top level changelog files (i.e. ones where
    # their dirname is the project/doc root)
    app = make_app(srcdir=root, **kwargs)
    # Create & init a BuildEnvironment. Mm, tasty side effects.
    app._init_env(freshenv=True)
    env = app.env
    # More arity/API changes: Sphinx 1.3/1.4-ish require one to pass in the app
    # obj in BuildEnvironment.update(); modern Sphinx performs that inside
    # Application._init_env() (which we just called above) and so that kwarg is
    # removed from update(). EAFP.
    # NOTE: deliberately rebinds this function's own ``kwargs`` name; the
    # caller-supplied kwargs were already consumed by make_app() above.
    kwargs = dict(
        config=app.config,
        srcdir=root,
        doctreedir=app.doctreedir,
        app=app,
    )
    try:
        env.update(**kwargs)
    except TypeError:
        # Assume newer Sphinx w/o an app= kwarg
        del kwargs['app']
        env.update(**kwargs)
    # Code taken from sphinx.environment.read_doc; easier to manually call
    # it with a working Environment object, instead of doing more random crap
    # to trick the higher up build system into thinking our single changelog
    # document was "updated".
    env.temp_data['docname'] = docname
    env.app = app
    # NOTE: SphinxStandaloneReader API changed in 1.4 :(
    reader_kwargs = {
        'app': app,
        'parsers': env.config.source_parsers,
    }
    if sphinx.version_info[:2] < (1, 4):
        del reader_kwargs['app']
    # This monkeypatches (!!!) docutils to 'inject' all registered Sphinx
    # domains' roles & so forth. Without this, rendering the doctree lacks
    # almost all Sphinx magic, including things like :ref: and :doc:!
    with sphinx_domains(env):
        try:
            reader = SphinxStandaloneReader(**reader_kwargs)
        except TypeError:
            # If we import from io, this happens automagically, not in API
            del reader_kwargs['parsers']
            reader = SphinxStandaloneReader(**reader_kwargs)
        pub = Publisher(reader=reader,
                        writer=SphinxDummyWriter(),
                        destination_class=NullOutput)
        pub.set_components(None, 'restructuredtext', None)
        pub.process_programmatic_settings(None, env.settings, None)
        # NOTE: docname derived higher up, from our given path
        src_path = env.doc2path(docname)
        source = SphinxFileInput(
            app,
            env,
            source=None,
            source_path=src_path,
            encoding=env.config.source_encoding,
        )
        pub.source = source
        pub.settings._source = src_path
        pub.set_destination(None, None)
        pub.publish()
        return app, pub.document
def make_app(**kwargs):
    """
    Create a dummy Sphinx app, filling in various hardcoded assumptions.

    For example, Sphinx assumes the existence of various source/dest
    directories, even if you're only calling internals that never generate (or
    sometimes, even read!) on-disk files. This function creates safe temp
    directories for these instances.

    It also neuters Sphinx's internal logging, which otherwise causes verbosity
    in one's own test output and/or debug logs.

    Finally, it does load the given srcdir's ``conf.py``, but only to read
    specific bits like ``extensions`` (if requested); most of it is ignored.

    All args are stored in a single ``**kwargs``. Aside from the params listed
    below (all of which are optional), all kwargs given are turned into
    'releases_xxx' config settings; e.g. ``make_app(foo='bar')`` is like
    setting ``releases_foo = 'bar'`` in ``conf.py``.

    :param str docname:
        Override the document name used (mostly for internal testing).

    :param str srcdir:
        Sphinx source directory path.

    :param str dstdir:
        Sphinx dest directory path.

    :param str doctreedir:
        Sphinx doctree directory path.

    :param bool load_extensions:
        Whether to load the real ``conf.py`` and setup any extensions it
        configures. Default: ``False``.

    :returns: A Sphinx ``Application`` instance.

    .. versionchanged:: 1.6
        Added the ``load_extensions`` kwarg.
    """
    # Default every working dir to a fresh temp dir; empties are removed in
    # the finally block below.
    srcdir = kwargs.pop('srcdir', mkdtemp())
    dstdir = kwargs.pop('dstdir', mkdtemp())
    doctreedir = kwargs.pop('doctreedir', mkdtemp())
    load_extensions = kwargs.pop('load_extensions', False)
    real_conf = None
    try:
        # Sphinx <1.6ish
        Sphinx._log = lambda self, message, wfile, nonl=False: None
        # Sphinx >=1.6ish. Technically still lets Very Bad Things through,
        # unlike the total muting above, but probably OK.
        # NOTE: used to just do 'sphinx' but that stopped working, even on
        # sphinx 1.6.x. Weird. Unsure why hierarchy not functioning.
        for name in ('sphinx', 'sphinx.sphinx.application'):
            logging.getLogger(name).setLevel(logging.ERROR)
        # App API seems to work on all versions so far.
        app = Sphinx(
            srcdir=srcdir,
            confdir=None,
            outdir=dstdir,
            doctreedir=doctreedir,
            buildername='html',
        )
        # Might as well load the conf file here too.
        if load_extensions:
            real_conf = load_conf(srcdir)
    finally:
        for d in (srcdir, dstdir, doctreedir):
            # Only remove empty dirs; non-empty dirs are implicitly something
            # that existed before we ran, and should not be touched.
            try:
                os.rmdir(d)
            except OSError:
                pass
    setup(app)
    # Mock out the config within. More assumptions by Sphinx :(
    # TODO: just use real config and overlay what truly needs changing? is that
    # feasible given the rest of the weird ordering we have to do? If it is,
    # maybe just literally slap this over the return value of load_conf()...
    config = {
        'releases_release_uri': 'foo_%s',
        'releases_issue_uri': 'bar_%s',
        'releases_debug': False,
        'master_doc': 'index',
    }
    # Allow tinkering with document filename
    if 'docname' in kwargs:
        app.env.temp_data['docname'] = kwargs.pop('docname')
    # Allow config overrides via kwargs
    for name in kwargs:
        config['releases_{}'.format(name)] = kwargs[name]
    # Stitch together as the sphinx app init() usually does w/ real conf files
    app.config._raw_config = config
    # init_values() requires a 'warn' runner on Sphinx 1.3-1.6, so if we seem
    # to be hitting arity errors, give it a dummy such callable. Hopefully
    # calling twice doesn't introduce any wacko state issues :(
    try:
        app.config.init_values()
    except TypeError:  # boy I wish Python had an ArityError or w/e
        app.config.init_values(lambda x: x)
    # Initialize extensions (the internal call to this happens at init time,
    # which of course had no valid config yet here...)
    if load_extensions:
        for extension in real_conf.get('extensions', []):
            # But don't set up ourselves again, that causes errors
            if extension == 'releases':
                continue
            app.setup_extension(extension)
    return app
def changelog2dict(changelog):
    """
    Helper turning internal list-o-releases structure into a dict.

    See `parse_changelog` docstring for return value details.
    """
    result = {}
    for release in changelog:
        result[release['obj'].number] = release['entries']
    return result
|
bitprophet/releases
|
releases/util.py
|
make_app
|
python
|
def make_app(**kwargs):
srcdir = kwargs.pop('srcdir', mkdtemp())
dstdir = kwargs.pop('dstdir', mkdtemp())
doctreedir = kwargs.pop('doctreedir', mkdtemp())
load_extensions = kwargs.pop('load_extensions', False)
real_conf = None
try:
# Sphinx <1.6ish
Sphinx._log = lambda self, message, wfile, nonl=False: None
# Sphinx >=1.6ish. Technically still lets Very Bad Things through,
# unlike the total muting above, but probably OK.
# NOTE: used to just do 'sphinx' but that stopped working, even on
# sphinx 1.6.x. Weird. Unsure why hierarchy not functioning.
for name in ('sphinx', 'sphinx.sphinx.application'):
logging.getLogger(name).setLevel(logging.ERROR)
# App API seems to work on all versions so far.
app = Sphinx(
srcdir=srcdir,
confdir=None,
outdir=dstdir,
doctreedir=doctreedir,
buildername='html',
)
# Might as well load the conf file here too.
if load_extensions:
real_conf = load_conf(srcdir)
finally:
for d in (srcdir, dstdir, doctreedir):
# Only remove empty dirs; non-empty dirs are implicitly something
# that existed before we ran, and should not be touched.
try:
os.rmdir(d)
except OSError:
pass
setup(app)
# Mock out the config within. More assumptions by Sphinx :(
# TODO: just use real config and overlay what truly needs changing? is that
# feasible given the rest of the weird ordering we have to do? If it is,
# maybe just literally slap this over the return value of load_conf()...
config = {
'releases_release_uri': 'foo_%s',
'releases_issue_uri': 'bar_%s',
'releases_debug': False,
'master_doc': 'index',
}
# Allow tinkering with document filename
if 'docname' in kwargs:
app.env.temp_data['docname'] = kwargs.pop('docname')
# Allow config overrides via kwargs
for name in kwargs:
config['releases_{}'.format(name)] = kwargs[name]
# Stitch together as the sphinx app init() usually does w/ real conf files
app.config._raw_config = config
# init_values() requires a 'warn' runner on Sphinx 1.3-1.6, so if we seem
# to be hitting arity errors, give it a dummy such callable. Hopefully
# calling twice doesn't introduce any wacko state issues :(
try:
app.config.init_values()
except TypeError: # boy I wish Python had an ArityError or w/e
app.config.init_values(lambda x: x)
# Initialize extensions (the internal call to this happens at init time,
# which of course had no valid config yet here...)
if load_extensions:
for extension in real_conf.get('extensions', []):
# But don't set up ourselves again, that causes errors
if extension == 'releases':
continue
app.setup_extension(extension)
return app
|
Create a dummy Sphinx app, filling in various hardcoded assumptions.
For example, Sphinx assumes the existence of various source/dest
directories, even if you're only calling internals that never generate (or
sometimes, even read!) on-disk files. This function creates safe temp
directories for these instances.
It also neuters Sphinx's internal logging, which otherwise causes verbosity
in one's own test output and/or debug logs.
Finally, it does load the given srcdir's ``conf.py``, but only to read
specific bits like ``extensions`` (if requested); most of it is ignored.
All args are stored in a single ``**kwargs``. Aside from the params listed
below (all of which are optional), all kwargs given are turned into
'releases_xxx' config settings; e.g. ``make_app(foo='bar')`` is like
setting ``releases_foo = 'bar'`` in ``conf.py``.
:param str docname:
Override the document name used (mostly for internal testing).
:param str srcdir:
Sphinx source directory path.
:param str dstdir:
Sphinx dest directory path.
:param str doctreedir:
Sphinx doctree directory path.
:param bool load_extensions:
Whether to load the real ``conf.py`` and setup any extensions it
configures. Default: ``False``.
:returns: A Sphinx ``Application`` instance.
.. versionchanged:: 1.6
Added the ``load_extensions`` kwarg.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/util.py#L210-L318
|
[
"def setup(app):\n for key, default in (\n # Issue base URI setting: releases_issue_uri\n # E.g. 'https://github.com/fabric/fabric/issues/'\n ('issue_uri', None),\n # Release-tag base URI setting: releases_release_uri\n # E.g. 'https://github.com/fabric/fabric/tree/'\n ('release_uri', None),\n # Convenience Github version of above\n ('github_path', None),\n # Which document to use as the changelog\n ('document_name', ['changelog']),\n # Debug output\n ('debug', False),\n # Whether to enable linear history during 0.x release timeline\n # TODO: flip this to True by default in our 2.0 release\n ('unstable_prehistory', False),\n ):\n app.add_config_value(\n name='releases_{}'.format(key), default=default, rebuild='html'\n )\n # if a string is given for `document_name`, convert it to a list\n # done to maintain backwards compatibility\n # https://stackoverflow.com/questions/1303243/how-to-find-out-if-a-python-object-is-a-string\n PY2 = sys.version_info[0] == 2\n if PY2:\n string_types = (basestring,)\n else:\n string_types = (str,)\n\n if isinstance(app.config.releases_document_name, string_types):\n app.config.releases_document_name = [app.config.releases_document_name]\n\n # Register intermediate roles\n for x in list(ISSUE_TYPES) + ['issue']:\n add_role(app, x, issues_role)\n add_role(app, 'release', release_role)\n # Hook in our changelog transmutation at appropriate step\n app.connect('doctree-read', generate_changelog)\n\n # identifies the version of our extension\n return {'version': __version__}\n",
"def load_conf(srcdir):\n \"\"\"\n Load ``conf.py`` from given ``srcdir``.\n\n :returns: Dictionary derived from the conf module.\n \"\"\"\n path = os.path.join(srcdir, 'conf.py')\n mylocals = {'__file__': path}\n with open(path) as fd:\n exec(fd.read(), mylocals)\n return mylocals\n"
] |
"""
Utility functions, such as helpers for standalone changelog parsing.
"""
import logging
import os
from tempfile import mkdtemp
import sphinx
from docutils.core import Publisher
from docutils.io import NullOutput
from docutils.nodes import bullet_list
from sphinx.application import Sphinx # not exposed at top level
try:
from sphinx.io import (
SphinxStandaloneReader, SphinxFileInput, SphinxDummyWriter,
)
except ImportError:
# NOTE: backwards compat with Sphinx 1.3
from sphinx.environment import (
SphinxStandaloneReader, SphinxFileInput, SphinxDummyWriter,
)
# sphinx_domains is only in Sphinx 1.5+, but is presumably necessary from then
# onwards.
try:
from sphinx.util.docutils import sphinx_domains
except ImportError:
# Just dummy it up.
from contextlib import contextmanager
@contextmanager
def sphinx_domains(env):
yield
from . import construct_releases, setup
def parse_changelog(path, **kwargs):
"""
Load and parse changelog file from ``path``, returning data structures.
This function does not alter any files on disk; it is solely for
introspecting a Releases ``changelog.rst`` and programmatically answering
questions like "are there any unreleased bugfixes for the 2.3 line?" or
"what was included in release 1.2.1?".
For example, answering the above questions is as simple as::
changelog = parse_changelog("/path/to/changelog")
print("Unreleased issues for 2.3.x: {}".format(changelog['2.3']))
print("Contents of v1.2.1: {}".format(changelog['1.2.1']))
Aside from the documented arguments, any additional keyword arguments are
passed unmodified into an internal `get_doctree` call (which then passes
them to `make_app`).
:param str path: A relative or absolute file path string.
:returns:
A dict whose keys map to lists of ``releases.models.Issue`` objects, as
follows:
- Actual releases are full version number keys, such as ``"1.2.1"`` or
``"2.0.0"``.
- Unreleased bugs (or bug-like issues; see the Releases docs) are
stored in minor-release buckets, e.g. ``"1.2"`` or ``"2.0"``.
- Unreleased features (or feature-like issues) are found in
``"unreleased_N_feature"``, where ``N`` is one of the major release
families (so, a changelog spanning only 1.x will only have
``unreleased_1_feature``, whereas one with 1.x and 2.x releases will
have ``unreleased_1_feature`` and ``unreleased_2_feature``, etc).
.. versionchanged:: 1.6
Added support for passing kwargs to `get_doctree`/`make_app`.
"""
app, doctree = get_doctree(path, **kwargs)
# Have to semi-reproduce the 'find first bullet list' bit from main code,
# which is unfortunately side-effect-heavy (thanks to Sphinx plugin
# design).
first_list = None
for node in doctree[0]:
if isinstance(node, bullet_list):
first_list = node
break
# Initial parse into the structures Releases finds useful internally
releases, manager = construct_releases(first_list.children, app)
ret = changelog2dict(releases)
# Stitch them together into something an end-user would find better:
# - nuke unreleased_N.N_Y as their contents will be represented in the
# per-line buckets
for key in ret.copy():
if key.startswith('unreleased'):
del ret[key]
for family in manager:
# - remove unreleased_bugfix, as they are accounted for in the per-line
# buckets too. No need to store anywhere.
manager[family].pop('unreleased_bugfix', None)
# - bring over each major family's unreleased_feature as
# unreleased_N_feature
unreleased = manager[family].pop('unreleased_feature', None)
if unreleased is not None:
ret['unreleased_{}_feature'.format(family)] = unreleased
# - bring over all per-line buckets from manager (flattening)
# Here, all that's left in the per-family bucket should be lines, not
# unreleased_*
ret.update(manager[family])
return ret
def get_doctree(path, **kwargs):
"""
Obtain a Sphinx doctree from the RST file at ``path``.
Performs no Releases-specific processing; this code would, ideally, be in
Sphinx itself, but things there are pretty tightly coupled. So we wrote
this.
Any additional kwargs are passed unmodified into an internal `make_app`
call.
:param str path: A relative or absolute file path string.
:returns:
A two-tuple of the generated ``sphinx.application.Sphinx`` app and the
doctree (a ``docutils.document`` object).
.. versionchanged:: 1.6
Added support for passing kwargs to `make_app`.
"""
root, filename = os.path.split(path)
docname, _ = os.path.splitext(filename)
# TODO: this only works for top level changelog files (i.e. ones where
# their dirname is the project/doc root)
app = make_app(srcdir=root, **kwargs)
# Create & init a BuildEnvironment. Mm, tasty side effects.
app._init_env(freshenv=True)
env = app.env
# More arity/API changes: Sphinx 1.3/1.4-ish require one to pass in the app
# obj in BuildEnvironment.update(); modern Sphinx performs that inside
# Application._init_env() (which we just called above) and so that kwarg is
# removed from update(). EAFP.
kwargs = dict(
config=app.config,
srcdir=root,
doctreedir=app.doctreedir,
app=app,
)
try:
env.update(**kwargs)
except TypeError:
# Assume newer Sphinx w/o an app= kwarg
del kwargs['app']
env.update(**kwargs)
# Code taken from sphinx.environment.read_doc; easier to manually call
# it with a working Environment object, instead of doing more random crap
# to trick the higher up build system into thinking our single changelog
# document was "updated".
env.temp_data['docname'] = docname
env.app = app
# NOTE: SphinxStandaloneReader API changed in 1.4 :(
reader_kwargs = {
'app': app,
'parsers': env.config.source_parsers,
}
if sphinx.version_info[:2] < (1, 4):
del reader_kwargs['app']
# This monkeypatches (!!!) docutils to 'inject' all registered Sphinx
# domains' roles & so forth. Without this, rendering the doctree lacks
# almost all Sphinx magic, including things like :ref: and :doc:!
with sphinx_domains(env):
try:
reader = SphinxStandaloneReader(**reader_kwargs)
except TypeError:
# If we import from io, this happens automagically, not in API
del reader_kwargs['parsers']
reader = SphinxStandaloneReader(**reader_kwargs)
pub = Publisher(reader=reader,
writer=SphinxDummyWriter(),
destination_class=NullOutput)
pub.set_components(None, 'restructuredtext', None)
pub.process_programmatic_settings(None, env.settings, None)
# NOTE: docname derived higher up, from our given path
src_path = env.doc2path(docname)
source = SphinxFileInput(
app,
env,
source=None,
source_path=src_path,
encoding=env.config.source_encoding,
)
pub.source = source
pub.settings._source = src_path
pub.set_destination(None, None)
pub.publish()
return app, pub.document
def load_conf(srcdir):
    """
    Load ``conf.py`` from given ``srcdir``.

    :returns: Dictionary derived from the conf module.
    """
    conf_path = os.path.join(srcdir, 'conf.py')
    # Seed the namespace with __file__ so conf.py code relying on it works.
    namespace = {'__file__': conf_path}
    with open(conf_path) as handle:
        exec(handle.read(), namespace)
    return namespace
def changelog2dict(changelog):
"""
Helper turning internal list-o-releases structure into a dict.
See `parse_changelog` docstring for return value details.
"""
return {r['obj'].number: r['entries'] for r in changelog}
|
bitprophet/releases
|
releases/__init__.py
|
_log
|
python
|
def _log(txt, config):
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush()
|
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L15-L23
| null |
import itertools
import re
import sys
from functools import partial
from docutils import nodes, utils
from docutils.parsers.rst import roles
import six
from .models import Issue, ISSUE_TYPES, Release, Version, Spec
from .line_manager import LineManager
from ._version import __version__
def issue_nodelist(name, identifier=None):
    """
    Return a list of docutils nodes rendering an issue signifier.

    E.g. a colored ``[Bug]`` label, optionally followed by the ``identifier``
    node (typically a ``#NN`` reference), then a colon.
    """
    # Color for the [Type] label comes from the ISSUE_TYPES mapping.
    which = '[<span style="color: #%s;">%s</span>]' % (
        ISSUE_TYPES[name], name.capitalize()
    )
    signifier = [nodes.raw(text=which, format='html')]
    # Space + issue-number node only when an identifier was given.
    id_nodelist = [nodes.inline(text=" "), identifier] if identifier else []
    # Without an identifier, pad after the colon instead.
    trail = [] if identifier else [nodes.inline(text=" ")]
    return signifier + id_nodelist + [nodes.inline(text=":")] + trail
release_line_re = re.compile(r'^(\d+\.\d+)\+$')  # e.g. '1.2+'


def scan_for_spec(keyword):
    """
    Attempt to return some sort of Spec from given keyword value.

    Returns None if one could not be derived.
    """
    # Both 'spec' formats are wrapped in parens, discard
    keyword = keyword.lstrip('(').rstrip(')')
    # First, test for intermediate '1.2+' style
    matches = release_line_re.findall(keyword)
    if matches:
        # '1.2+' is shorthand for '>=1.2'.
        return Spec(">={}".format(matches[0]))
    # Failing that, see if Spec can make sense of it
    try:
        return Spec(keyword)
    # I've only ever seen Spec fail with ValueError.
    except ValueError:
        return None
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Use: :issue|bug|feature|support:`ticket_number`

    When invoked as :issue:, turns into just a "#NN" hyperlink to
    `releases_issue_uri`.

    When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".

    Spaces present in the "ticket number" are used as fields for keywords
    (major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
    used when constructing the object.

    May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.

    .. note::
        The mutable ``options={}``/``content=[]`` defaults follow the
        docutils role API convention; neither is mutated here.
    """
    parts = utils.unescape(text).split()
    issue_no = parts.pop(0)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    if issue_no not in ('-', '0'):
        ref = None
        if config.releases_issue_uri:
            # TODO: deal with % vs .format()
            ref = config.releases_issue_uri % issue_no
        elif config.releases_github_path:
            ref = "https://github.com/{}/issues/{}".format(
                config.releases_github_path, issue_no)
        # Only generate a reference/link if we were able to make a URI
        if ref:
            identifier = nodes.reference(
                rawtext, '#' + issue_no, refuri=ref, **options
            )
        # Otherwise, just make it regular text
        else:
            identifier = nodes.raw(
                rawtext=rawtext, text='#' + issue_no, format='html',
                **options
            )
    else:
        identifier = None
        issue_no = None  # So it doesn't gum up dupe detection later
    # Additional 'new-style changelog' stuff
    if name in ISSUE_TYPES:
        nodelist = issue_nodelist(name, identifier)
        spec = None
        keyword = None
        # TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
        # instance of specs, etc.
        for part in parts:
            maybe_spec = scan_for_spec(part)
            if maybe_spec:
                spec = maybe_spec
            else:
                if part in ('backported', 'major'):
                    keyword = part
                else:
                    # BUGFIX: report the actual offending token. Previously
                    # this formatted ``keyword`` (still None, or a previously
                    # accepted value) instead of ``part``, producing
                    # misleading error messages.
                    err = "Gave unknown keyword {!r} for issue no. {}"
                    raise ValueError(err.format(part, issue_no))
        # Create temporary node w/ data & final nodes to publish
        node = Issue(
            number=issue_no,
            type_=name,
            nodelist=nodelist,
            backported=(keyword == 'backported'),
            major=(keyword == 'major'),
            spec=spec,
        )
        return [node], []
    # Return old style info for 'issue' for older changelog entries
    else:
        return [identifier], []
def release_nodes(text, slug, date, config):
    """
    Build a raw-HTML section header node for release ``text``.

    Links to ``slug`` (tag/branch) when a release URI is configured via
    ``config``, and appends ``date`` in smaller type when given.
    """
    # Doesn't seem possible to do this "cleanly" (i.e. just say "make me a
    # title and give it these HTML attributes during render time) so...fuckit.
    # We were already doing fully raw elements elsewhere anyway. And who cares
    # about a PDF of a changelog? :x
    uri = None
    if config.releases_release_uri:
        # TODO: % vs .format()
        uri = config.releases_release_uri % slug
    elif config.releases_github_path:
        uri = "https://github.com/{}/tree/{}".format(
            config.releases_github_path, slug)
    # Only construct link tag if user actually configured release URIs somehow
    if uri:
        link = '<a class="reference external" href="{}">{}</a>'.format(
            uri, text,
        )
    else:
        link = text
    datespan = ''
    if date:
        datespan = ' <span style="font-size: 75%;">{}</span>'.format(date)
    header = '<h2 style="margin-bottom: 0.3em;">{}{}</h2>'.format(
        link, datespan)
    # Section id is the release number so intra-doc anchors work.
    return nodes.section('',
        nodes.raw(rawtext='', text=header, format='html'),
        ids=[text]
    )
# Matches 'N.N.N <YYYY-MM-DD>'; the (?<!\x00) presumably guards against
# docutils' escape marker before '<' — TODO confirm.
year_arg_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)


def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Invoked as :release:`N.N.N <YYYY-MM-DD>`.

    Turns into useful release header + link to GH tree for the tag.
    """
    # Make sure year has been specified
    match = year_arg_re.match(text)
    if not match:
        msg = inliner.reporter.error("Must specify release date!")
        return [inliner.problematic(rawtext, rawtext, msg)], [msg]
    number, date = match.group(1), match.group(2)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    nodelist = [release_nodes(number, number, date, config)]
    # Return intermediate node
    node = Release(number=number, date=date, nodelist=nodelist)
    return [node], []
def generate_unreleased_entry(header, line, issues, manager, app):
    """
    Build a single faux-release dict for not-yet-released ``issues``.

    :param str header: Human-facing title, e.g. "Next 1.x bugfix release".
    :param str line: Internal release-line key, e.g. "unreleased_1.x_bugfix".
    :param list issues: Issue objects belonging to this faux release.
    :param manager: Currently unused here; kept for call-signature parity.
    :param app: Sphinx application; supplies config for logging & link URIs.
    :returns: Dict with 'obj' (a Release) and 'entries' (the issues).
    """
    log = partial(_log, config=app.config)
    nodelist = [release_nodes(
        header,
        # TODO: should link to master for newest family and...what
        # exactly, for the others? Expectation isn't necessarily to
        # have a branch per family? Or is there? Maybe there must be..
        'master',
        None,
        app.config
    )]
    log("Creating {!r} faux-release with {!r}".format(line, issues))
    return {
        'obj': Release(number=line, date=None, nodelist=nodelist),
        'entries': issues,
    }
def append_unreleased_entries(app, manager, releases):
    """
    Generate new abstract 'releases' for unreleased issues.

    There's one for each combination of bug-vs-feature & major release line.
    When only one major release line exists, that dimension is ignored.
    """
    for family, lines in six.iteritems(manager):
        for type_ in ('bugfix', 'feature'):
            bucket = 'unreleased_{}'.format(type_)
            if bucket not in lines:  # Implies unstable prehistory + 0.x fam
                continue
            issues = lines[bucket]
            # Only prefix the header with the family (e.g. "1.x ") when the
            # changelog spans more than one major version family.
            fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
            header = "Next {}{} release".format(fam_prefix, type_)
            line = "unreleased_{}.x_{}".format(family, type_)
            releases.append(
                generate_unreleased_entry(header, line, issues, manager, app)
            )
def reorder_release_entries(releases):
    """
    Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
    """
    # Features first, then bugs, then support items; sort is stable so
    # relative order within each category is preserved.
    priority = {'feature': 0, 'bug': 1, 'support': 2}
    for release in releases:
        release['entries'] = sorted(
            release['entries'], key=lambda entry: priority[entry.type]
        )
def construct_entry_with_release(focus, issues, manager, log, releases, rest):
    """
    Releases 'eat' the entries in their line's list and get added to the
    final data structure. They also inform new release-line 'buffers'.

    Release lines, once the release obj is removed, should be empty or a
    comma-separated list of issue numbers.

    :param focus: the ``Release`` object heading this changelog line item.
    :param issues: master mapping of issue number -> list of issue objects.
    :param manager: ``LineManager`` holding per-family release-line buckets.
    :param log: debug logging callable (already partial'd with app config).
    :param releases: output list of ``{'obj': Release, 'entries': [...]}``.
    :param rest: remaining doctree content of the release's list item; its
        first child may hold an explicit comma-separated issue list.
    """
    log("release for line %r" % focus.minor)
    # Check for explicitly listed issues first
    explicit = None
    if rest[0].children:
        explicit = [x.strip() for x in rest[0][0].split(',')]
    # Do those by themselves since they override all other logic
    if explicit:
        log("Explicit issues requested: %r" % (explicit,))
        # First scan global issue dict, dying if not found
        missing = [i for i in explicit if i not in issues]
        if missing:
            raise ValueError(
                "Couldn't find issue(s) #{} in the changelog!".format(
                    ', '.join(missing)))
        # Obtain the explicitly named issues from global list
        entries = []
        for i in explicit:
            for flattened_issue_item in itertools.chain(issues[i]):
                entries.append(flattened_issue_item)
        # Create release
        log("entries in this release: %r" % (entries,))
        releases.append({
            'obj': focus,
            'entries': entries,
        })
        # Introspect these entries to determine which buckets they should get
        # removed from (it's not "all of them"!)
        for obj in entries:
            if obj.type == 'bug':
                # Major bugfix: remove from unreleased_feature
                if obj.major:
                    log("Removing #%s from unreleased" % obj.number)
                    # TODO: consider making a LineManager method somehow
                    manager[focus.family]['unreleased_feature'].remove(obj)
                # Regular bugfix: remove from bucket for this release's
                # line + unreleased_bugfix
                else:
                    if obj in manager[focus.family]['unreleased_bugfix']:
                        log("Removing #%s from unreleased" % obj.number)
                        manager[focus.family]['unreleased_bugfix'].remove(obj)
                    if obj in manager[focus.family][focus.minor]:
                        log("Removing #%s from %s" % (obj.number, focus.minor))
                        manager[focus.family][focus.minor].remove(obj)
            # Regular feature/support: remove from unreleased_feature
            # Backported feature/support: remove from bucket for this
            # release's line (if applicable) + unreleased_feature
            else:
                log("Removing #%s from unreleased" % obj.number)
                manager[focus.family]['unreleased_feature'].remove(obj)
                if obj in manager[focus.family].get(focus.minor, []):
                    manager[focus.family][focus.minor].remove(obj)
    # Implicit behavior otherwise
    else:
        # Unstable prehistory -> just dump 'unreleased' and continue
        if manager.unstable_prehistory:
            # TODO: need to continue making LineManager actually OO, i.e. do
            # away with the subdicts + keys, move to sub-objects with methods
            # answering questions like "what should I give you for a release"
            # or whatever
            log("in unstable prehistory, dumping 'unreleased'")
            releases.append({
                'obj': focus,
                # NOTE: explicitly dumping 0, not focus.family, since this
                # might be the last pre-historical release and thus not 0.x
                'entries': manager[0]['unreleased'][:],
            })
            manager[0]['unreleased'] = []
            # If this isn't a 0.x release, it signals end of prehistory, make a
            # new release bucket (as is also done below in regular behavior).
            # Also acts like a sentinel that prehistory is over.
            if focus.family != 0:
                manager[focus.family][focus.minor] = []
        # Regular behavior from here
        else:
            # New release line/branch detected. Create it & dump unreleased
            # features.
            if focus.minor not in manager[focus.family]:
                log("not seen prior, making feature release & bugfix bucket")
                manager[focus.family][focus.minor] = []
                # TODO: this used to explicitly say "go over everything in
                # unreleased_feature and dump if it's feature, support or major
                # bug". But what the hell else would BE in unreleased_feature?
                # Why not just dump the whole thing??
                #
                # Dump only the items in the bucket whose family this release
                # object belongs to, i.e. 1.5.0 should only nab the 1.0
                # family's unreleased feature items.
                releases.append({
                    'obj': focus,
                    'entries': manager[focus.family]['unreleased_feature'][:],
                })
                manager[focus.family]['unreleased_feature'] = []
            # Existing line -> empty out its bucket into new release.
            # Skip 'major' bugs as those "belong" to the next release (and will
            # also be in 'unreleased_feature' - so safe to nuke the entire
            # line)
            else:
                log("pre-existing, making bugfix release")
                # TODO: as in other branch, I don't get why this wasn't just
                # dumping the whole thing - why would major bugs be in the
                # regular bugfix buckets?
                entries = manager[focus.family][focus.minor][:]
                releases.append({'obj': focus, 'entries': entries})
                manager[focus.family][focus.minor] = []
                # Clean out the items we just released from
                # 'unreleased_bugfix'. (Can't nuke it because there might
                # be some unreleased bugs for other release lines.)
                for x in entries:
                    if x in manager[focus.family]['unreleased_bugfix']:
                        manager[focus.family]['unreleased_bugfix'].remove(x)
def construct_entry_without_release(focus, issues, manager, log, rest):
    """
    File a non-release changelog entry (normally an ``Issue``) into buckets.

    Bare line items with no issue role attached are treated as their own
    description and wrapped in a synthetic 'bug' Issue. The entry is then
    recorded in the global ``issues`` dict and handed to the line ``manager``
    (or the undifferentiated 0.x 'unreleased' bucket during unstable
    prehistory).
    """
    # Handle rare-but-valid non-issue-attached line items, which are
    # always bugs. (They are their own description.)
    if not isinstance(focus, Issue):
        # First, sanity check for potential mistakes resulting in an issue node
        # being buried within something else.
        buried = focus.traverse(Issue)
        if buried:
            msg = """
Found issue node ({!r}) buried inside another node:

{}

Please double-check your ReST syntax! There is probably text in the above
output that will show you which part of your changelog to look at.

For example, indentation problems can accidentally generate nested definition
lists.
"""
            raise ValueError(msg.format(buried[0], str(buried[0].parent)))
        # OK, it looks legit - make it a bug.
        log("Found line item w/ no real issue object, creating bug")
        nodelist = issue_nodelist('bug')
        # Skip nodelist entirely if we're in unstable prehistory -
        # classification doesn't matter there.
        if manager.unstable_prehistory:
            nodelist = []
        # Undo the 'pop' from outer scope. TODO: rework things so we don't have
        # to do this dumb shit uggggh
        rest[0].insert(0, focus)
        focus = Issue(
            type_='bug',
            nodelist=nodelist,
            description=rest,
        )
    else:
        focus.attributes['description'] = rest
    # Add to global list (for use by explicit releases) or die trying
    issues[focus.number] = issues.get(focus.number, []) + [focus]
    # Add to per-release bugfix lines and/or unreleased bug/feature buckets, as
    # necessary.
    # TODO: suspect all of add_to_manager can now live in the manager; most of
    # Release's methods should probably go that way
    if manager.unstable_prehistory:
        log("Unstable prehistory -> adding to 0.x unreleased bucket")
        manager[0]['unreleased'].append(focus)
    else:
        log("Adding to release line manager")
        focus.add_to_manager(manager)
def handle_upcoming_major_release(entries, manager):
    """
    Look ahead in ``entries`` for the next block of releases; if any is a
    major (X.0.0) release, register its family with the line ``manager``.
    """
    # Nothing ahead -> nothing to do.
    if not entries:
        return
    # If the very next entry is itself a Release, we're mid release-block;
    # only the last release before a run of issues should take action.
    if isinstance(entries[0], Release):
        return
    # Collect the next contiguous run of Release objects.
    upcoming = []
    for entry in entries:
        if isinstance(entry, Release):
            upcoming.append(entry)
        elif upcoming:
            # Hit a non-release after collecting some -> the block is over.
            break
    # Any X.0.0 in that run opens a new major family.
    for rel in upcoming:
        # TODO: update when Release gets tied closer w/ Version
        version = Version(rel.number)
        if version.minor == 0 and version.patch == 0:
            manager.add_family(rel.family)
def handle_first_release_line(entries, manager):
    """
    Set up initial line-manager entry for first encountered release line.

    To be called at start of overall process; afterwards, subsequent major
    lines are generated by `handle_upcoming_major_release`.

    :param entries: flat list of parsed changelog objects (as handed over by
        ``construct_releases``); may contain ``Release`` and issue nodes.
    :param manager: the ``LineManager`` to prime with the first family.
    """
    # It's remotely possible the changelog is totally empty...
    if not entries:
        return
    # Obtain (short-circuiting) first Release obj.
    first_release = None
    for obj in entries:
        if isinstance(obj, Release):
            first_release = obj
            break
    # It's also possible it's non-empty but has no releases yet.
    if first_release is not None:
        # FIX: previously read `obj.family`, relying on the leaked loop
        # variable still pointing at the found release; reference the found
        # release explicitly so future edits can't silently break this.
        manager.add_family(first_release.family)
    # If God did not exist, man would be forced to invent him.
    else:
        manager.add_family(0)
def construct_releases(entries, app):
    """
    Walk changelog ``entries`` and organize them into per-release buckets.

    :param entries: the children of the changelog's top-level bullet list.
    :param app: the Sphinx application (used for config/debug logging).
    :returns: two-tuple of ``(releases, manager)`` where ``releases`` is an
        ordered list of ``{'obj': Release, 'entries': [...]}`` dicts
        (including faux 'unreleased' entries) and ``manager`` is the
        ``LineManager`` used to track release-line state during the walk.
    """
    log = partial(_log, config=app.config)
    # Walk from back to front, consuming entries & copying them into
    # per-release buckets as releases are encountered. Store releases in order.
    releases = []
    # Release lines, to be organized by major releases, then by major+minor,
    # alongside per-major-release 'unreleased' bugfix/feature buckets.
    # NOTE: With exception of unstable_prehistory=True, which triggers use of a
    # separate, undifferentiated 'unreleased' bucket (albeit still within the
    # '0' major line family).
    manager = LineManager(app)
    # Also keep a master hash of issues by number to detect duplicates & assist
    # in explicitly defined release lists.
    issues = {}
    reversed_entries = list(reversed(entries))
    # For the lookahead, so we're not doing this stripping O(n) times.
    # TODO: probs just merge the two into e.g. a list of 2-tuples of "actual
    # entry obj + rest"?
    stripped_entries = [x[0][0] for x in reversed_entries]
    # Perform an initial lookahead to prime manager with the 1st major release
    handle_first_release_line(stripped_entries, manager)
    # Start crawling...
    for index, obj in enumerate(reversed_entries):
        # Issue object is always found in obj (LI) index 0 (first, often only
        # P) and is the 1st item within that (index 0 again).
        # Preserve all other contents of 'obj'.
        focus = obj[0].pop(0)
        rest = obj
        log(repr(focus))
        # Releases 'eat' the entries in their line's list and get added to the
        # final data structure. They also inform new release-line 'buffers'.
        # Release lines, once the release obj is removed, should be empty or a
        # comma-separated list of issue numbers.
        if isinstance(focus, Release):
            construct_entry_with_release(
                focus, issues, manager, log, releases, rest
            )
            # After each release is handled, look ahead to see if we're
            # entering "last stretch before a major release". If so,
            # pre-emptively update the line-manager so upcoming features are
            # correctly sorted into that major release by default (re: logic in
            # Release.add_to_manager)
            handle_upcoming_major_release(
                stripped_entries[index + 1:], manager
            )
        # Entries get copied into release line buckets as follows:
        # * Features and support go into 'unreleased_feature' for use in new
        # feature releases.
        # * Bugfixes go into all release lines (so they can be printed in >1
        # bugfix release as appropriate) as well as 'unreleased_bugfix' (so
        # they can be displayed prior to release'). Caveats include bugs marked
        # 'major' (they go into unreleased_feature instead) or with 'N.N+'
        # (meaning they only go into release line buckets for that release and
        # up.)
        # * Support/feature entries marked as 'backported' go into all
        # release lines as well, on the assumption that they were released to
        # all active branches.
        # * The 'rest' variable (which here is the bug description, vitally
        # important!) is preserved by stuffing it into the focus (issue)
        # object - it will get unpacked by construct_nodes() later.
        else:
            construct_entry_without_release(focus, issues, manager, log, rest)
    if manager.unstable_prehistory:
        releases.append(generate_unreleased_entry(
            header="Next release",
            line="unreleased",
            issues=manager[0]['unreleased'],
            manager=manager,
            app=app,
        ))
    else:
        append_unreleased_entries(app, manager, releases)
    reorder_release_entries(releases)
    return releases, manager
def construct_nodes(releases):
    """
    Render organized ``releases`` dicts into a flat list of docutils nodes.

    Empty releases are skipped; the rest are emitted newest-first, each as a
    header section followed by a bullet list of its entries.
    """
    result = []
    # Reverse the list again so the final display is newest on top
    for d in reversed(releases):
        if not d['entries']:
            continue
        obj = d['obj']
        entries = []
        for entry in d['entries']:
            # Use nodes.Node.deepcopy to deepcopy the description
            # node. If this is not done, multiple references to the same
            # object (e.g. a reference object in the description of #649, which
            # is then copied into 2 different release lists) will end up in the
            # doctree, which makes subsequent parse steps very angry (index()
            # errors).
            desc = entry['description'].deepcopy()
            # Additionally, expand any other issue roles found in the
            # description - sometimes we refer to related issues inline. (They
            # can't be left as issue() objects at render time since that's
            # undefined.)
            # Use [:] slicing to avoid mutation during the loops.
            for index, node in enumerate(desc[:]):
                for subindex, subnode in enumerate(node[:]):
                    if isinstance(subnode, Issue):
                        lst = subnode['nodelist']
                        desc[index][subindex:subindex + 1] = lst
            # Rework this entry to insert the now-rendered issue nodes in front
            # of the 1st paragraph of the 'description' nodes (which should be
            # the preserved LI + nested paragraph-or-more from original
            # markup.)
            # FIXME: why is there no "prepend a list" method?
            for node in reversed(entry['nodelist']):
                desc[0].insert(0, node)
            entries.append(desc)
        # Entry list
        list_ = nodes.bullet_list('', *entries)
        # Insert list into release nodelist (as it's a section)
        obj['nodelist'][0].append(list_)
        # Release header
        header = nodes.paragraph('', '', *obj['nodelist'])
        result.extend(header)
    return result
class BulletListVisitor(nodes.NodeVisitor):
    """
    Docutils visitor that replaces the first top-level bullet list found
    (assumed to be the changelog) with organized release sections.
    """

    def __init__(self, document, app):
        nodes.NodeVisitor.__init__(self, document)
        # Guard flag so only the first bullet list is transmuted.
        self.found_changelog = False
        self.app = app

    def visit_bullet_list(self, node):
        # The first found bullet list (which should be the first one at the top
        # level of the document) is the changelog.
        if not self.found_changelog:
            self.found_changelog = True
            # Walk + parse into release mapping
            releases, _ = construct_releases(node.children, self.app)
            # Construct new set of nodes to replace the old, and we're done
            node.replace_self(construct_nodes(releases))

    def unknown_visit(self, node):
        # Required NodeVisitor hook; all other node types are ignored.
        pass
def generate_changelog(app, doctree):
    """
    'doctree-read' hook: transmute the changelog's bullet list into releases.
    """
    # Only touch documents configured as changelogs (default: ['changelog']).
    if app.env.docname not in app.config.releases_document_name:
        return
    # Walk the doctree; the visitor replaces the first bullet list in place.
    visitor = BulletListVisitor(doctree, app)
    doctree.walk(visitor)
def setup(app):
    """
    Sphinx extension entry point.

    Registers the ``releases_*`` config values, the issue/release roles, and
    the doctree-read hook that rewrites the changelog document.

    :returns: extension metadata dict identifying our version.
    """
    for key, default in (
        # Issue base URI setting: releases_issue_uri
        # E.g. 'https://github.com/fabric/fabric/issues/'
        ('issue_uri', None),
        # Release-tag base URI setting: releases_release_uri
        # E.g. 'https://github.com/fabric/fabric/tree/'
        ('release_uri', None),
        # Convenience Github version of above
        ('github_path', None),
        # Which document to use as the changelog
        ('document_name', ['changelog']),
        # Debug output
        ('debug', False),
        # Whether to enable linear history during 0.x release timeline
        # TODO: flip this to True by default in our 2.0 release
        ('unstable_prehistory', False),
    ):
        app.add_config_value(
            name='releases_{}'.format(key), default=default, rebuild='html'
        )
    # If a string is given for `document_name`, convert it to a list (done to
    # maintain backwards compatibility with the old scalar setting).
    # CONSISTENCY: use six.string_types instead of a hand-rolled PY2
    # basestring check - the module already depends on six elsewhere.
    if isinstance(app.config.releases_document_name, six.string_types):
        app.config.releases_document_name = [app.config.releases_document_name]
    # Register intermediate roles
    for x in list(ISSUE_TYPES) + ['issue']:
        add_role(app, x, issues_role)
    add_role(app, 'release', release_role)
    # Hook in our changelog transmutation at appropriate step
    app.connect('doctree-read', generate_changelog)
    # identifies the version of our extension
    return {'version': __version__}
def add_role(app, name, role_obj):
    """
    Register ``role_obj`` under ``name``, unless it is already registered.

    This (introspecting docutils.parser.rst.roles._roles) is the same trick
    Sphinx uses to emit warnings about double-registering; it's a PITA to try
    and configure the app early on so it doesn't emit those warnings, so we
    instead just...don't double-register. Meh.
    """
    if name in roles._roles:
        return
    app.add_role(name, role_obj)
|
bitprophet/releases
|
releases/__init__.py
|
scan_for_spec
|
python
|
def scan_for_spec(keyword):
    """
    Attempt to return some sort of Spec from given keyword value.

    Returns None if one could not be derived.
    """
    # Both 'spec' formats are wrapped in parens, discard
    cleaned = keyword.lstrip('(').rstrip(')')
    # First, test for intermediate '1.2+' style
    found = release_line_re.findall(cleaned)
    if found:
        return Spec(">={}".format(found[0]))
    # Failing that, see if Spec can make sense of it
    try:
        return Spec(cleaned)
    # I've only ever seen Spec fail with ValueError.
    except ValueError:
        return None
|
Attempt to return some sort of Spec from given keyword value.
Returns None if one could not be derived.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L38-L55
| null |
import itertools
import re
import sys
from functools import partial
from docutils import nodes, utils
from docutils.parsers.rst import roles
import six
from .models import Issue, ISSUE_TYPES, Release, Version, Spec
from .line_manager import LineManager
from ._version import __version__
def _log(txt, config):
"""
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
"""
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush()
def issue_nodelist(name, identifier=None):
    """
    Return docutils nodes rendering an issue badge, e.g. ``[Bug] #12:``.

    ``identifier`` is an optional reference node for the ticket link; when
    omitted, a trailing space follows the colon instead.
    """
    badge = '[<span style="color: #%s;">%s</span>]' % (
        ISSUE_TYPES[name], name.capitalize()
    )
    nodelist = [nodes.raw(text=badge, format='html')]
    if identifier:
        nodelist.append(nodes.inline(text=" "))
        nodelist.append(identifier)
    nodelist.append(nodes.inline(text=":"))
    if not identifier:
        nodelist.append(nodes.inline(text=" "))
    return nodelist
release_line_re = re.compile(r'^(\d+\.\d+)\+$') # e.g. '1.2+'
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Use: :issue|bug|feature|support:`ticket_number`

    When invoked as :issue:, turns into just a "#NN" hyperlink to
    `releases_issue_uri`.

    When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".

    Spaces present in the "ticket number" are used as fields for keywords
    (major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
    used when constructing the object.

    May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
    """
    parts = utils.unescape(text).split()
    issue_no = parts.pop(0)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    if issue_no not in ('-', '0'):
        ref = None
        if config.releases_issue_uri:
            # TODO: deal with % vs .format()
            ref = config.releases_issue_uri % issue_no
        elif config.releases_github_path:
            ref = "https://github.com/{}/issues/{}".format(
                config.releases_github_path, issue_no)
        # Only generate a reference/link if we were able to make a URI
        if ref:
            identifier = nodes.reference(
                rawtext, '#' + issue_no, refuri=ref, **options
            )
        # Otherwise, just make it regular text
        else:
            identifier = nodes.raw(
                rawtext=rawtext, text='#' + issue_no, format='html',
                **options
            )
    else:
        identifier = None
        issue_no = None # So it doesn't gum up dupe detection later
    # Additional 'new-style changelog' stuff
    if name in ISSUE_TYPES:
        nodelist = issue_nodelist(name, identifier)
        spec = None
        keyword = None
        # TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
        # instance of specs, etc.
        for part in parts:
            maybe_spec = scan_for_spec(part)
            if maybe_spec:
                spec = maybe_spec
            else:
                if part in ('backported', 'major'):
                    keyword = part
                else:
                    # BUGFIX: report the actual offending token ('part'), not
                    # 'keyword' - which at this point is either None or a
                    # previously-seen valid keyword, producing a useless or
                    # misleading error message.
                    err = "Gave unknown keyword {!r} for issue no. {}"
                    raise ValueError(err.format(part, issue_no))
        # Create temporary node w/ data & final nodes to publish
        node = Issue(
            number=issue_no,
            type_=name,
            nodelist=nodelist,
            backported=(keyword == 'backported'),
            major=(keyword == 'major'),
            spec=spec,
        )
        return [node], []
    # Return old style info for 'issue' for older changelog entries
    else:
        return [identifier], []
def release_nodes(text, slug, date, config):
    """
    Build the docutils section rendering a release header (optionally linked).

    Doesn't seem possible to do this "cleanly" (i.e. just say "make me a
    title and give it these HTML attributes during render time) so...fuckit.
    We were already doing fully raw elements elsewhere anyway. And who cares
    about a PDF of a changelog? :x
    """
    # Work out the tag/tree URI, preferring the explicit setting over the
    # GitHub convenience one.
    if config.releases_release_uri:
        # TODO: % vs .format()
        target = config.releases_release_uri % slug
    elif config.releases_github_path:
        target = "https://github.com/{}/tree/{}".format(
            config.releases_github_path, slug)
    else:
        target = None
    # Only construct link tag if user actually configured release URIs somehow
    if target:
        anchor = '<a class="reference external" href="{}">{}</a>'.format(
            target, text)
    else:
        anchor = text
    datespan = (
        ' <span style="font-size: 75%;">{}</span>'.format(date) if date else ''
    )
    markup = '<h2 style="margin-bottom: 0.3em;">{}{}</h2>'.format(
        anchor, datespan)
    return nodes.section(
        '',
        nodes.raw(rawtext='', text=markup, format='html'),
        ids=[text],
    )
year_arg_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Invoked as :release:`N.N.N <YYYY-MM-DD>`.

    Turns into useful release header + link to GH tree for the tag.
    """
    # A release date is mandatory; bail out with a parse error otherwise.
    match = year_arg_re.match(text)
    if match is None:
        msg = inliner.reporter.error("Must specify release date!")
        return [inliner.problematic(rawtext, rawtext, msg)], [msg]
    number, date = match.groups()
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    section = release_nodes(number, number, date, config)
    # Return intermediate node
    return [Release(number=number, date=date, nodelist=[section])], []
def generate_unreleased_entry(header, line, issues, manager, app):
    """
    Build a faux 'release' dict for not-yet-released issues.

    Returns ``{'obj': Release, 'entries': issues}`` where the Release is
    dateless and its header section points at 'master'.
    """
    log = partial(_log, config=app.config)
    # TODO: should link to master for newest family and...what
    # exactly, for the others? Expectation isn't necessarily to
    # have a branch per family? Or is there? Maybe there must be..
    section = release_nodes(header, 'master', None, app.config)
    log("Creating {!r} faux-release with {!r}".format(line, issues))
    release = Release(number=line, date=None, nodelist=[section])
    return {'obj': release, 'entries': issues}
def append_unreleased_entries(app, manager, releases):
    """
    Generate new abstract 'releases' for unreleased issues.

    There's one for each combination of bug-vs-feature & major release line.
    When only one major release line exists, that dimension is ignored.
    """
    multiple_families = len(manager) > 1
    for family, lines in six.iteritems(manager):
        for kind in ('bugfix', 'feature'):
            bucket = 'unreleased_{}'.format(kind)
            # A missing bucket implies unstable prehistory + the 0.x family.
            if bucket not in lines:
                continue
            prefix = "{}.x ".format(family) if multiple_families else ""
            entry = generate_unreleased_entry(
                header="Next {}{} release".format(prefix, kind),
                line="unreleased_{}.x_{}".format(family, kind),
                issues=lines[bucket],
                manager=manager,
                app=app,
            )
            releases.append(entry)
def reorder_release_entries(releases):
    """
    Mutate ``releases`` so the entry list in each is ordered by type.

    Features sort first, then bugs, then support items.
    """
    def _rank(entry):
        # Map entry type to its display priority.
        return {'feature': 0, 'bug': 1, 'support': 2}[entry.type]

    for rel in releases:
        rel['entries'] = sorted(rel['entries'], key=_rank)
def construct_entry_with_release(focus, issues, manager, log, releases, rest):
    """
    Releases 'eat' the entries in their line's list and get added to the
    final data structure. They also inform new release-line 'buffers'.

    Release lines, once the release obj is removed, should be empty or a
    comma-separated list of issue numbers.

    :param focus: the ``Release`` object heading this changelog line item.
    :param issues: master mapping of issue number -> list of issue objects.
    :param manager: ``LineManager`` holding per-family release-line buckets.
    :param log: debug logging callable (already partial'd with app config).
    :param releases: output list of ``{'obj': Release, 'entries': [...]}``.
    :param rest: remaining doctree content of the release's list item; its
        first child may hold an explicit comma-separated issue list.
    """
    log("release for line %r" % focus.minor)
    # Check for explicitly listed issues first
    explicit = None
    if rest[0].children:
        explicit = [x.strip() for x in rest[0][0].split(',')]
    # Do those by themselves since they override all other logic
    if explicit:
        log("Explicit issues requested: %r" % (explicit,))
        # First scan global issue dict, dying if not found
        missing = [i for i in explicit if i not in issues]
        if missing:
            raise ValueError(
                "Couldn't find issue(s) #{} in the changelog!".format(
                    ', '.join(missing)))
        # Obtain the explicitly named issues from global list
        entries = []
        for i in explicit:
            for flattened_issue_item in itertools.chain(issues[i]):
                entries.append(flattened_issue_item)
        # Create release
        log("entries in this release: %r" % (entries,))
        releases.append({
            'obj': focus,
            'entries': entries,
        })
        # Introspect these entries to determine which buckets they should get
        # removed from (it's not "all of them"!)
        for obj in entries:
            if obj.type == 'bug':
                # Major bugfix: remove from unreleased_feature
                if obj.major:
                    log("Removing #%s from unreleased" % obj.number)
                    # TODO: consider making a LineManager method somehow
                    manager[focus.family]['unreleased_feature'].remove(obj)
                # Regular bugfix: remove from bucket for this release's
                # line + unreleased_bugfix
                else:
                    if obj in manager[focus.family]['unreleased_bugfix']:
                        log("Removing #%s from unreleased" % obj.number)
                        manager[focus.family]['unreleased_bugfix'].remove(obj)
                    if obj in manager[focus.family][focus.minor]:
                        log("Removing #%s from %s" % (obj.number, focus.minor))
                        manager[focus.family][focus.minor].remove(obj)
            # Regular feature/support: remove from unreleased_feature
            # Backported feature/support: remove from bucket for this
            # release's line (if applicable) + unreleased_feature
            else:
                log("Removing #%s from unreleased" % obj.number)
                manager[focus.family]['unreleased_feature'].remove(obj)
                if obj in manager[focus.family].get(focus.minor, []):
                    manager[focus.family][focus.minor].remove(obj)
    # Implicit behavior otherwise
    else:
        # Unstable prehistory -> just dump 'unreleased' and continue
        if manager.unstable_prehistory:
            # TODO: need to continue making LineManager actually OO, i.e. do
            # away with the subdicts + keys, move to sub-objects with methods
            # answering questions like "what should I give you for a release"
            # or whatever
            log("in unstable prehistory, dumping 'unreleased'")
            releases.append({
                'obj': focus,
                # NOTE: explicitly dumping 0, not focus.family, since this
                # might be the last pre-historical release and thus not 0.x
                'entries': manager[0]['unreleased'][:],
            })
            manager[0]['unreleased'] = []
            # If this isn't a 0.x release, it signals end of prehistory, make a
            # new release bucket (as is also done below in regular behavior).
            # Also acts like a sentinel that prehistory is over.
            if focus.family != 0:
                manager[focus.family][focus.minor] = []
        # Regular behavior from here
        else:
            # New release line/branch detected. Create it & dump unreleased
            # features.
            if focus.minor not in manager[focus.family]:
                log("not seen prior, making feature release & bugfix bucket")
                manager[focus.family][focus.minor] = []
                # TODO: this used to explicitly say "go over everything in
                # unreleased_feature and dump if it's feature, support or major
                # bug". But what the hell else would BE in unreleased_feature?
                # Why not just dump the whole thing??
                #
                # Dump only the items in the bucket whose family this release
                # object belongs to, i.e. 1.5.0 should only nab the 1.0
                # family's unreleased feature items.
                releases.append({
                    'obj': focus,
                    'entries': manager[focus.family]['unreleased_feature'][:],
                })
                manager[focus.family]['unreleased_feature'] = []
            # Existing line -> empty out its bucket into new release.
            # Skip 'major' bugs as those "belong" to the next release (and will
            # also be in 'unreleased_feature' - so safe to nuke the entire
            # line)
            else:
                log("pre-existing, making bugfix release")
                # TODO: as in other branch, I don't get why this wasn't just
                # dumping the whole thing - why would major bugs be in the
                # regular bugfix buckets?
                entries = manager[focus.family][focus.minor][:]
                releases.append({'obj': focus, 'entries': entries})
                manager[focus.family][focus.minor] = []
                # Clean out the items we just released from
                # 'unreleased_bugfix'. (Can't nuke it because there might
                # be some unreleased bugs for other release lines.)
                for x in entries:
                    if x in manager[focus.family]['unreleased_bugfix']:
                        manager[focus.family]['unreleased_bugfix'].remove(x)
def construct_entry_without_release(focus, issues, manager, log, rest):
    """
    File a non-release changelog entry (normally an ``Issue``) into buckets.

    Bare line items with no issue role attached are treated as their own
    description and wrapped in a synthetic 'bug' Issue. The entry is then
    recorded in the global ``issues`` dict and handed to the line ``manager``
    (or the undifferentiated 0.x 'unreleased' bucket during unstable
    prehistory).
    """
    # Handle rare-but-valid non-issue-attached line items, which are
    # always bugs. (They are their own description.)
    if not isinstance(focus, Issue):
        # First, sanity check for potential mistakes resulting in an issue node
        # being buried within something else.
        buried = focus.traverse(Issue)
        if buried:
            msg = """
Found issue node ({!r}) buried inside another node:

{}

Please double-check your ReST syntax! There is probably text in the above
output that will show you which part of your changelog to look at.

For example, indentation problems can accidentally generate nested definition
lists.
"""
            raise ValueError(msg.format(buried[0], str(buried[0].parent)))
        # OK, it looks legit - make it a bug.
        log("Found line item w/ no real issue object, creating bug")
        nodelist = issue_nodelist('bug')
        # Skip nodelist entirely if we're in unstable prehistory -
        # classification doesn't matter there.
        if manager.unstable_prehistory:
            nodelist = []
        # Undo the 'pop' from outer scope. TODO: rework things so we don't have
        # to do this dumb shit uggggh
        rest[0].insert(0, focus)
        focus = Issue(
            type_='bug',
            nodelist=nodelist,
            description=rest,
        )
    else:
        focus.attributes['description'] = rest
    # Add to global list (for use by explicit releases) or die trying
    issues[focus.number] = issues.get(focus.number, []) + [focus]
    # Add to per-release bugfix lines and/or unreleased bug/feature buckets, as
    # necessary.
    # TODO: suspect all of add_to_manager can now live in the manager; most of
    # Release's methods should probably go that way
    if manager.unstable_prehistory:
        log("Unstable prehistory -> adding to 0.x unreleased bucket")
        manager[0]['unreleased'].append(focus)
    else:
        log("Adding to release line manager")
        focus.add_to_manager(manager)
def handle_upcoming_major_release(entries, manager):
    """
    Look ahead in ``entries`` for the next block of releases; if any is a
    major (X.0.0) release, register its family with the line ``manager``.
    """
    # Nothing ahead -> nothing to do.
    if not entries:
        return
    # If the very next entry is itself a Release, we're mid release-block;
    # only the last release before a run of issues should take action.
    if isinstance(entries[0], Release):
        return
    # Collect the next contiguous run of Release objects.
    upcoming = []
    for entry in entries:
        if isinstance(entry, Release):
            upcoming.append(entry)
        elif upcoming:
            # Hit a non-release after collecting some -> the block is over.
            break
    # Any X.0.0 in that run opens a new major family.
    for rel in upcoming:
        # TODO: update when Release gets tied closer w/ Version
        version = Version(rel.number)
        if version.minor == 0 and version.patch == 0:
            manager.add_family(rel.family)
def handle_first_release_line(entries, manager):
    """
    Set up initial line-manager entry for first encountered release line.

    To be called at start of overall process; afterwards, subsequent major
    lines are generated by `handle_upcoming_major_release`.

    :param entries: flat list of parsed changelog objects (as handed over by
        ``construct_releases``); may contain ``Release`` and issue nodes.
    :param manager: the ``LineManager`` to prime with the first family.
    """
    # It's remotely possible the changelog is totally empty...
    if not entries:
        return
    # Obtain (short-circuiting) first Release obj.
    first_release = None
    for obj in entries:
        if isinstance(obj, Release):
            first_release = obj
            break
    # It's also possible it's non-empty but has no releases yet.
    if first_release is not None:
        # FIX: previously read `obj.family`, relying on the leaked loop
        # variable still pointing at the found release; reference the found
        # release explicitly so future edits can't silently break this.
        manager.add_family(first_release.family)
    # If God did not exist, man would be forced to invent him.
    else:
        manager.add_family(0)
def construct_releases(entries, app):
    """
    Organize changelog bullet-list ``entries`` into per-release buckets.

    :param entries: docutils list-item nodes, newest first (as authored).
    :param app: the Sphinx app object (used for config & debug logging).
    :returns: 2-tuple of (ordered list of release dicts, the LineManager).
    """
    log = partial(_log, config=app.config)
    # Walk from back to front, consuming entries & copying them into
    # per-release buckets as releases are encountered. Store releases in order.
    releases = []
    # Release lines, to be organized by major releases, then by major+minor,
    # alongside per-major-release 'unreleased' bugfix/feature buckets.
    # NOTE: With exception of unstable_prehistory=True, which triggers use of a
    # separate, undifferentiated 'unreleased' bucket (albeit still within the
    # '0' major line family).
    manager = LineManager(app)
    # Also keep a master hash of issues by number to detect duplicates & assist
    # in explicitly defined release lists.
    issues = {}
    reversed_entries = list(reversed(entries))
    # For the lookahead, so we're not doing this stripping O(n) times.
    # TODO: probs just merge the two into e.g. a list of 2-tuples of "actual
    # entry obj + rest"?
    stripped_entries = [x[0][0] for x in reversed_entries]
    # Perform an initial lookahead to prime manager with the 1st major release
    handle_first_release_line(stripped_entries, manager)
    # Start crawling...
    for index, obj in enumerate(reversed_entries):
        # Issue object is always found in obj (LI) index 0 (first, often only
        # P) and is the 1st item within that (index 0 again).
        # Preserve all other contents of 'obj'.
        focus = obj[0].pop(0)
        rest = obj
        log(repr(focus))
        # Releases 'eat' the entries in their line's list and get added to the
        # final data structure. They also inform new release-line 'buffers'.
        # Release lines, once the release obj is removed, should be empty or a
        # comma-separated list of issue numbers.
        if isinstance(focus, Release):
            construct_entry_with_release(
                focus, issues, manager, log, releases, rest
            )
            # After each release is handled, look ahead to see if we're
            # entering "last stretch before a major release". If so,
            # pre-emptively update the line-manager so upcoming features are
            # correctly sorted into that major release by default (re: logic in
            # Release.add_to_manager)
            handle_upcoming_major_release(
                stripped_entries[index + 1:], manager
            )
        # Entries get copied into release line buckets as follows:
        # * Features and support go into 'unreleased_feature' for use in new
        # feature releases.
        # * Bugfixes go into all release lines (so they can be printed in >1
        # bugfix release as appropriate) as well as 'unreleased_bugfix' (so
        # they can be displayed prior to release'). Caveats include bugs marked
        # 'major' (they go into unreleased_feature instead) or with 'N.N+'
        # (meaning they only go into release line buckets for that release and
        # up.)
        # * Support/feature entries marked as 'backported' go into all
        # release lines as well, on the assumption that they were released to
        # all active branches.
        # * The 'rest' variable (which here is the bug description, vitally
        # important!) is preserved by stuffing it into the focus (issue)
        # object - it will get unpacked by construct_nodes() later.
        else:
            construct_entry_without_release(focus, issues, manager, log, rest)
    if manager.unstable_prehistory:
        # Single undifferentiated "Next release" bucket for 0.x prehistory.
        releases.append(generate_unreleased_entry(
            header="Next release",
            line="unreleased",
            issues=manager[0]['unreleased'],
            manager=manager,
            app=app,
        ))
    else:
        append_unreleased_entries(app, manager, releases)
    reorder_release_entries(releases)
    return releases, manager
def construct_nodes(releases):
    """
    Render organized ``releases`` dicts into a flat list of docutils nodes.

    Newest releases end up first in the output; releases with no entries are
    skipped entirely.
    """
    result = []
    # Reverse the list again so the final display is newest on top
    for d in reversed(releases):
        if not d['entries']:
            continue
        obj = d['obj']
        entries = []
        for entry in d['entries']:
            # Use nodes.Node.deepcopy to deepcopy the description
            # node. If this is not done, multiple references to the same
            # object (e.g. a reference object in the description of #649, which
            # is then copied into 2 different release lists) will end up in the
            # doctree, which makes subsequent parse steps very angry (index()
            # errors).
            desc = entry['description'].deepcopy()
            # Additionally, expand any other issue roles found in the
            # description - sometimes we refer to related issues inline. (They
            # can't be left as issue() objects at render time since that's
            # undefined.)
            # Use [:] slicing to avoid mutation during the loops.
            for index, node in enumerate(desc[:]):
                for subindex, subnode in enumerate(node[:]):
                    if isinstance(subnode, Issue):
                        lst = subnode['nodelist']
                        # Slice-assign to splice the rendered nodes in place.
                        desc[index][subindex:subindex + 1] = lst
            # Rework this entry to insert the now-rendered issue nodes in front
            # of the 1st paragraph of the 'description' nodes (which should be
            # the preserved LI + nested paragraph-or-more from original
            # markup.)
            # FIXME: why is there no "prepend a list" method?
            for node in reversed(entry['nodelist']):
                desc[0].insert(0, node)
            entries.append(desc)
        # Entry list
        list_ = nodes.bullet_list('', *entries)
        # Insert list into release nodelist (as it's a section)
        obj['nodelist'][0].append(list_)
        # Release header
        header = nodes.paragraph('', '', *obj['nodelist'])
        result.extend(header)
    return result
class BulletListVisitor(nodes.NodeVisitor):
    """
    docutils visitor replacing the first top-level bullet list (assumed to be
    the changelog) with fully parsed & organized release nodes.
    """
    def __init__(self, document, app):
        nodes.NodeVisitor.__init__(self, document)
        # Flag: only the first bullet list encountered is the changelog.
        self.found_changelog = False
        self.app = app

    def visit_bullet_list(self, node):
        # The first found bullet list (which should be the first one at the top
        # level of the document) is the changelog.
        if not self.found_changelog:
            self.found_changelog = True
            # Walk + parse into release mapping
            releases, _ = construct_releases(node.children, self.app)
            # Construct new set of nodes to replace the old, and we're done
            node.replace_self(construct_nodes(releases))

    def unknown_visit(self, node):
        # Ignore every other node type.
        pass
def generate_changelog(app, doctree):
    """
    Sphinx ``doctree-read`` handler: parse & rewrite the changelog document.
    """
    # Don't scan/mutate documents that don't match the configured document name
    # (which by default is ['changelog.rst', ]).
    if app.env.docname not in app.config.releases_document_name:
        return
    # Find the first bullet-list node & replace it with our organized/parsed
    # elements.
    changelog_visitor = BulletListVisitor(doctree, app)
    doctree.walk(changelog_visitor)
def setup(app):
    """
    Sphinx extension entry point: register config values, roles & hooks.

    :returns: extension metadata dict (currently just its version).
    """
    for key, default in (
        # Issue base URI setting: releases_issue_uri
        # E.g. 'https://github.com/fabric/fabric/issues/'
        ('issue_uri', None),
        # Release-tag base URI setting: releases_release_uri
        # E.g. 'https://github.com/fabric/fabric/tree/'
        ('release_uri', None),
        # Convenience Github version of above
        ('github_path', None),
        # Which document to use as the changelog
        ('document_name', ['changelog']),
        # Debug output
        ('debug', False),
        # Whether to enable linear history during 0.x release timeline
        # TODO: flip this to True by default in our 2.0 release
        ('unstable_prehistory', False),
    ):
        app.add_config_value(
            name='releases_{}'.format(key), default=default, rebuild='html'
        )
    # if a string is given for `document_name`, convert it to a list
    # done to maintain backwards compatibility
    # https://stackoverflow.com/questions/1303243/how-to-find-out-if-a-python-object-is-a-string
    PY2 = sys.version_info[0] == 2
    if PY2:
        string_types = (basestring,)
    else:
        string_types = (str,)
    if isinstance(app.config.releases_document_name, string_types):
        app.config.releases_document_name = [app.config.releases_document_name]
    # Register intermediate roles
    for x in list(ISSUE_TYPES) + ['issue']:
        add_role(app, x, issues_role)
    add_role(app, 'release', release_role)
    # Hook in our changelog transmutation at appropriate step
    app.connect('doctree-read', generate_changelog)
    # identifies the version of our extension
    return {'version': __version__}
def add_role(app, name, role_obj):
    """
    Register ``role_obj`` as role ``name``, skipping if already registered.
    """
    # This (introspecting docutils.parser.rst.roles._roles) is the same trick
    # Sphinx uses to emit warnings about double-registering; it's a PITA to try
    # and configure the app early on so it doesn't emit those warnings, so we
    # instead just...don't double-register. Meh.
    if name not in roles._roles:
        app.add_role(name, role_obj)
|
bitprophet/releases
|
releases/__init__.py
|
issues_role
|
python
|
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
parts = utils.unescape(text).split()
issue_no = parts.pop(0)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
if issue_no not in ('-', '0'):
ref = None
if config.releases_issue_uri:
# TODO: deal with % vs .format()
ref = config.releases_issue_uri % issue_no
elif config.releases_github_path:
ref = "https://github.com/{}/issues/{}".format(
config.releases_github_path, issue_no)
# Only generate a reference/link if we were able to make a URI
if ref:
identifier = nodes.reference(
rawtext, '#' + issue_no, refuri=ref, **options
)
# Otherwise, just make it regular text
else:
identifier = nodes.raw(
rawtext=rawtext, text='#' + issue_no, format='html',
**options
)
else:
identifier = None
issue_no = None # So it doesn't gum up dupe detection later
# Additional 'new-style changelog' stuff
if name in ISSUE_TYPES:
nodelist = issue_nodelist(name, identifier)
spec = None
keyword = None
# TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
# instance of specs, etc.
for part in parts:
maybe_spec = scan_for_spec(part)
if maybe_spec:
spec = maybe_spec
else:
if part in ('backported', 'major'):
keyword = part
else:
err = "Gave unknown keyword {!r} for issue no. {}"
raise ValueError(err.format(keyword, issue_no))
# Create temporary node w/ data & final nodes to publish
node = Issue(
number=issue_no,
type_=name,
nodelist=nodelist,
backported=(keyword == 'backported'),
major=(keyword == 'major'),
spec=spec,
)
return [node], []
# Return old style info for 'issue' for older changelog entries
else:
return [identifier], []
|
Use: :issue|bug|feature|support:`ticket_number`
When invoked as :issue:, turns into just a "#NN" hyperlink to
`releases_issue_uri`.
When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".
Spaces present in the "ticket number" are used as fields for keywords
(major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
used when constructing the object.
May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L58-L128
|
[
"def issue_nodelist(name, identifier=None):\n which = '[<span style=\"color: #%s;\">%s</span>]' % (\n ISSUE_TYPES[name], name.capitalize()\n )\n signifier = [nodes.raw(text=which, format='html')]\n id_nodelist = [nodes.inline(text=\" \"), identifier] if identifier else []\n trail = [] if identifier else [nodes.inline(text=\" \")]\n return signifier + id_nodelist + [nodes.inline(text=\":\")] + trail\n",
"def scan_for_spec(keyword):\n \"\"\"\n Attempt to return some sort of Spec from given keyword value.\n\n Returns None if one could not be derived.\n \"\"\"\n # Both 'spec' formats are wrapped in parens, discard\n keyword = keyword.lstrip('(').rstrip(')')\n # First, test for intermediate '1.2+' style\n matches = release_line_re.findall(keyword)\n if matches:\n return Spec(\">={}\".format(matches[0]))\n # Failing that, see if Spec can make sense of it\n try:\n return Spec(keyword)\n # I've only ever seen Spec fail with ValueError.\n except ValueError:\n return None\n"
] |
import itertools
import re
import sys
from functools import partial
from docutils import nodes, utils
from docutils.parsers.rst import roles
import six
from .models import Issue, ISSUE_TYPES, Release, Version, Spec
from .line_manager import LineManager
from ._version import __version__
def _log(txt, config):
"""
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
"""
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush()
def issue_nodelist(name, identifier=None):
    """
    Build the docutils node list that renders an issue of type ``name``.

    The result is a colored "[Type]" raw-HTML signifier, optionally followed
    by ``identifier`` (e.g. a reference node for the ticket number), then a
    colon. When no identifier is given, a trailing space follows the colon
    instead.
    """
    which = '[<span style="color: #%s;">%s</span>]' % (
        ISSUE_TYPES[name], name.capitalize()
    )
    result = [nodes.raw(text=which, format='html')]
    if identifier:
        result.append(nodes.inline(text=" "))
        result.append(identifier)
    result.append(nodes.inline(text=":"))
    if not identifier:
        result.append(nodes.inline(text=" "))
    return result
# Matches intermediate "N.N+" release-line specs (whole string only).
release_line_re = re.compile(r'^(\d+\.\d+)\+$')  # e.g. '1.2+'


def scan_for_spec(keyword):
    """
    Attempt to return some sort of Spec from given keyword value.

    Returns None if one could not be derived.
    """
    # Both 'spec' formats are wrapped in parens, discard
    keyword = keyword.lstrip('(').rstrip(')')
    # First, test for intermediate '1.2+' style
    matches = release_line_re.findall(keyword)
    if matches:
        # Translate '1.2+' into the equivalent '>=1.2' spec.
        return Spec(">={}".format(matches[0]))
    # Failing that, see if Spec can make sense of it
    try:
        return Spec(keyword)
    # I've only ever seen Spec fail with ValueError.
    except ValueError:
        return None
def release_nodes(text, slug, date, config):
    """
    Build a raw-HTML section node acting as a release header.

    :param text: visible header text (usually the version number).
    :param slug: tag/tree name substituted into the release URI.
    :param date: optional date string, displayed smaller after the link.
    :param config: Sphinx config object (read for the URI settings).
    """
    # Doesn't seem possible to do this "cleanly" (i.e. just say "make me a
    # title and give it these HTML attributes during render time) so...fuckit.
    # We were already doing fully raw elements elsewhere anyway. And who cares
    # about a PDF of a changelog? :x
    uri = None
    if config.releases_release_uri:
        # TODO: % vs .format()
        uri = config.releases_release_uri % slug
    elif config.releases_github_path:
        uri = "https://github.com/{}/tree/{}".format(
            config.releases_github_path, slug)
    # Only construct link tag if user actually configured release URIs somehow
    if uri:
        link = '<a class="reference external" href="{}">{}</a>'.format(
            uri, text,
        )
    else:
        link = text
    datespan = ''
    if date:
        datespan = ' <span style="font-size: 75%;">{}</span>'.format(date)
    header = '<h2 style="margin-bottom: 0.3em;">{}{}</h2>'.format(
        link, datespan)
    return nodes.section('',
        nodes.raw(rawtext='', text=header, format='html'),
        ids=[text]
    )
# Captures ":release:" role text "N.N.N <YYYY-MM-DD>": group 1 is the version,
# group 2 the date. NOTE(review): the (?<!\x00) look-behind presumably skips
# docutils-escaped '<' characters - confirm against docutils escaping.
year_arg_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)


def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Invoked as :release:`N.N.N <YYYY-MM-DD>`.

    Turns into useful release header + link to GH tree for the tag.
    Emits a docutils error (via ``inliner.reporter``) if no date is given.
    """
    # Make sure year has been specified
    match = year_arg_re.match(text)
    if not match:
        msg = inliner.reporter.error("Must specify release date!")
        return [inliner.problematic(rawtext, rawtext, msg)], [msg]
    number, date = match.group(1), match.group(2)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    nodelist = [release_nodes(number, number, date, config)]
    # Return intermediate node
    node = Release(number=number, date=date, nodelist=nodelist)
    return [node], []
def generate_unreleased_entry(header, line, issues, manager, app):
    """
    Build a faux-release dict for not-yet-released ``issues``.

    ``header`` is the displayed title and ``line`` the pseudo version number;
    the result has the same ``{'obj': Release, 'entries': [...]}`` shape as
    real releases.
    """
    log = partial(_log, config=app.config)
    nodelist = [release_nodes(
        header,
        # TODO: should link to master for newest family and...what
        # exactly, for the others? Expectation isn't necessarily to
        # have a branch per family? Or is there? Maybe there must be..
        'master',
        None,
        app.config
    )]
    log("Creating {!r} faux-release with {!r}".format(line, issues))
    return {
        'obj': Release(number=line, date=None, nodelist=nodelist),
        'entries': issues,
    }
def append_unreleased_entries(app, manager, releases):
    """
    Generate new abstract 'releases' for unreleased issues.

    There's one for each combination of bug-vs-feature & major release line.
    When only one major release line exists, that dimension is ignored.
    """
    for family, lines in six.iteritems(manager):
        for type_ in ('bugfix', 'feature'):
            bucket = 'unreleased_{}'.format(type_)
            if bucket not in lines:  # Implies unstable prehistory + 0.x fam
                continue
            issues = lines[bucket]
            # Only prefix header with "N.x " when >1 major line exists.
            fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
            header = "Next {}{} release".format(fam_prefix, type_)
            line = "unreleased_{}.x_{}".format(family, type_)
            releases.append(
                generate_unreleased_entry(header, line, issues, manager, app)
            )
def reorder_release_entries(releases):
    """
    Mutate ``releases`` so the entry list in each is ordered by type.

    Entries are sorted features first, then bugs, then support items; the
    sort is stable, so entries of the same type keep their relative order.

    :param releases:
        List of release dicts, each holding an ``'entries'`` list of objects
        exposing a ``.type`` attribute ('feature', 'bug' or 'support').
    :raises KeyError: if an entry has an unrecognized ``.type``.
    """
    order = {'feature': 0, 'bug': 1, 'support': 2}
    for release in releases:
        # sorted() already returns a new list, so the previous defensive
        # slice-copy of release['entries'] was redundant.
        release['entries'] = sorted(
            release['entries'], key=lambda entry: order[entry.type]
        )
def construct_entry_with_release(focus, issues, manager, log, releases, rest):
    """
    Releases 'eat' the entries in their line's list and get added to the
    final data structure. They also inform new release-line 'buffers'.
    Release lines, once the release obj is removed, should be empty or a
    comma-separated list of issue numbers.

    :param focus: the Release node just popped off the changelog line item.
    :param issues: global issue-number -> list-of-Issue map.
    :param manager: the LineManager tracking per-line buckets.
    :param log: a config-bound logging callable.
    :param releases: output list of release dicts being built (mutated).
    :param rest: remaining node contents of the changelog line item.
    """
    log("release for line %r" % focus.minor)
    # Check for explicitly listed issues first
    explicit = None
    if rest[0].children:
        explicit = [x.strip() for x in rest[0][0].split(',')]
    # Do those by themselves since they override all other logic
    if explicit:
        log("Explicit issues requested: %r" % (explicit,))
        # First scan global issue dict, dying if not found
        missing = [i for i in explicit if i not in issues]
        if missing:
            raise ValueError(
                "Couldn't find issue(s) #{} in the changelog!".format(
                    ', '.join(missing)))
        # Obtain the explicitly named issues from global list
        entries = []
        for i in explicit:
            for flattened_issue_item in itertools.chain(issues[i]):
                entries.append(flattened_issue_item)
        # Create release
        log("entries in this release: %r" % (entries,))
        releases.append({
            'obj': focus,
            'entries': entries,
        })
        # Introspect these entries to determine which buckets they should get
        # removed from (it's not "all of them"!)
        for obj in entries:
            if obj.type == 'bug':
                # Major bugfix: remove from unreleased_feature
                if obj.major:
                    log("Removing #%s from unreleased" % obj.number)
                    # TODO: consider making a LineManager method somehow
                    manager[focus.family]['unreleased_feature'].remove(obj)
                # Regular bugfix: remove from bucket for this release's
                # line + unreleased_bugfix
                else:
                    if obj in manager[focus.family]['unreleased_bugfix']:
                        log("Removing #%s from unreleased" % obj.number)
                        manager[focus.family]['unreleased_bugfix'].remove(obj)
                    if obj in manager[focus.family][focus.minor]:
                        log("Removing #%s from %s" % (obj.number, focus.minor))
                        manager[focus.family][focus.minor].remove(obj)
            # Regular feature/support: remove from unreleased_feature
            # Backported feature/support: remove from bucket for this
            # release's line (if applicable) + unreleased_feature
            else:
                log("Removing #%s from unreleased" % obj.number)
                manager[focus.family]['unreleased_feature'].remove(obj)
                if obj in manager[focus.family].get(focus.minor, []):
                    manager[focus.family][focus.minor].remove(obj)
    # Implicit behavior otherwise
    else:
        # Unstable prehistory -> just dump 'unreleased' and continue
        if manager.unstable_prehistory:
            # TODO: need to continue making LineManager actually OO, i.e. do
            # away with the subdicts + keys, move to sub-objects with methods
            # answering questions like "what should I give you for a release"
            # or whatever
            log("in unstable prehistory, dumping 'unreleased'")
            releases.append({
                'obj': focus,
                # NOTE: explicitly dumping 0, not focus.family, since this
                # might be the last pre-historical release and thus not 0.x
                'entries': manager[0]['unreleased'][:],
            })
            manager[0]['unreleased'] = []
            # If this isn't a 0.x release, it signals end of prehistory, make a
            # new release bucket (as is also done below in regular behavior).
            # Also acts like a sentinel that prehistory is over.
            if focus.family != 0:
                manager[focus.family][focus.minor] = []
        # Regular behavior from here
        else:
            # New release line/branch detected. Create it & dump unreleased
            # features.
            if focus.minor not in manager[focus.family]:
                log("not seen prior, making feature release & bugfix bucket")
                manager[focus.family][focus.minor] = []
                # TODO: this used to explicitly say "go over everything in
                # unreleased_feature and dump if it's feature, support or major
                # bug". But what the hell else would BE in unreleased_feature?
                # Why not just dump the whole thing??
                #
                # Dump only the items in the bucket whose family this release
                # object belongs to, i.e. 1.5.0 should only nab the 1.0
                # family's unreleased feature items.
                releases.append({
                    'obj': focus,
                    'entries': manager[focus.family]['unreleased_feature'][:],
                })
                manager[focus.family]['unreleased_feature'] = []
            # Existing line -> empty out its bucket into new release.
            # Skip 'major' bugs as those "belong" to the next release (and will
            # also be in 'unreleased_feature' - so safe to nuke the entire
            # line)
            else:
                log("pre-existing, making bugfix release")
                # TODO: as in other branch, I don't get why this wasn't just
                # dumping the whole thing - why would major bugs be in the
                # regular bugfix buckets?
                entries = manager[focus.family][focus.minor][:]
                releases.append({'obj': focus, 'entries': entries})
                manager[focus.family][focus.minor] = []
                # Clean out the items we just released from
                # 'unreleased_bugfix'. (Can't nuke it because there might
                # be some unreleased bugs for other release lines.)
                for x in entries:
                    if x in manager[focus.family]['unreleased_bugfix']:
                        manager[focus.family]['unreleased_bugfix'].remove(x)
def construct_entry_without_release(focus, issues, manager, log, rest):
    """
    File a non-release changelog line item into the issue buckets.

    ``focus`` is usually an Issue node; bare line items with no issue role are
    wrapped into an implicit 'bug' Issue. Populates the global ``issues`` map
    and the appropriate LineManager buckets.
    """
    # Handle rare-but-valid non-issue-attached line items, which are
    # always bugs. (They are their own description.)
    if not isinstance(focus, Issue):
        # First, sanity check for potential mistakes resulting in an issue node
        # being buried within something else.
        buried = focus.traverse(Issue)
        if buried:
            msg = """
Found issue node ({!r}) buried inside another node:
{}
Please double-check your ReST syntax! There is probably text in the above
output that will show you which part of your changelog to look at.
For example, indentation problems can accidentally generate nested definition
lists.
"""
            raise ValueError(msg.format(buried[0], str(buried[0].parent)))
        # OK, it looks legit - make it a bug.
        log("Found line item w/ no real issue object, creating bug")
        nodelist = issue_nodelist('bug')
        # Skip nodelist entirely if we're in unstable prehistory -
        # classification doesn't matter there.
        if manager.unstable_prehistory:
            nodelist = []
        # Undo the 'pop' from outer scope. TODO: rework things so we don't have
        # to do this dumb shit uggggh
        rest[0].insert(0, focus)
        focus = Issue(
            type_='bug',
            nodelist=nodelist,
            description=rest,
        )
    else:
        focus.attributes['description'] = rest
    # Add to global list (for use by explicit releases) or die trying
    issues[focus.number] = issues.get(focus.number, []) + [focus]
    # Add to per-release bugfix lines and/or unreleased bug/feature buckets, as
    # necessary.
    # TODO: suspect all of add_to_manager can now live in the manager; most of
    # Release's methods should probably go that way
    if manager.unstable_prehistory:
        log("Unstable prehistory -> adding to 0.x unreleased bucket")
        manager[0]['unreleased'].append(focus)
    else:
        log("Adding to release line manager")
        focus.add_to_manager(manager)
def handle_upcoming_major_release(entries, manager):
    """
    Peek ahead at ``entries`` for the next contiguous run of Release objects
    and, if a major (x.0.0) release is among them, register its family with
    ``manager``.
    """
    # Nothing ahead of us -> nothing to do.
    if not entries:
        return
    # A Release at the head means we're mid-way through a run of releases;
    # only the final release before a batch of issues should act.
    if isinstance(entries[0], Release):
        return
    # Gather the next contiguous run of Release objects.
    upcoming = []
    for candidate in entries:
        if isinstance(candidate, Release):
            upcoming.append(candidate)
        elif upcoming:
            # First non-release after at least one release ends the run.
            break
    # Register the family of any major (x.0.0) release found in the run.
    for candidate in upcoming:
        # TODO: update when Release gets tied closer w/ Version
        version = Version(candidate.number)
        if version.minor == 0 and version.patch == 0:
            manager.add_family(candidate.family)
def handle_first_release_line(entries, manager):
    """
    Prime the line manager with the family of the first Release found in
    ``entries`` (or family 0 when no release exists yet).

    Called once at the start of processing; later major lines are added by
    `handle_upcoming_major_release`.
    """
    # An entirely empty changelog: nothing to prime.
    if not entries:
        return
    # Locate the first Release object, if any (short-circuits on first hit).
    first_release = next(
        (entry for entry in entries if isinstance(entry, Release)), None
    )
    if first_release:
        manager.add_family(first_release.family)
    else:
        # Non-empty changelog, but no releases yet: assume the 0.x family.
        manager.add_family(0)
def construct_releases(entries, app):
    """
    Organize changelog bullet-list ``entries`` into per-release buckets.

    :param entries: docutils list-item nodes, newest first (as authored).
    :param app: the Sphinx app object (used for config & debug logging).
    :returns: 2-tuple of (ordered list of release dicts, the LineManager).
    """
    log = partial(_log, config=app.config)
    # Walk from back to front, consuming entries & copying them into
    # per-release buckets as releases are encountered. Store releases in order.
    releases = []
    # Release lines, to be organized by major releases, then by major+minor,
    # alongside per-major-release 'unreleased' bugfix/feature buckets.
    # NOTE: With exception of unstable_prehistory=True, which triggers use of a
    # separate, undifferentiated 'unreleased' bucket (albeit still within the
    # '0' major line family).
    manager = LineManager(app)
    # Also keep a master hash of issues by number to detect duplicates & assist
    # in explicitly defined release lists.
    issues = {}
    reversed_entries = list(reversed(entries))
    # For the lookahead, so we're not doing this stripping O(n) times.
    # TODO: probs just merge the two into e.g. a list of 2-tuples of "actual
    # entry obj + rest"?
    stripped_entries = [x[0][0] for x in reversed_entries]
    # Perform an initial lookahead to prime manager with the 1st major release
    handle_first_release_line(stripped_entries, manager)
    # Start crawling...
    for index, obj in enumerate(reversed_entries):
        # Issue object is always found in obj (LI) index 0 (first, often only
        # P) and is the 1st item within that (index 0 again).
        # Preserve all other contents of 'obj'.
        focus = obj[0].pop(0)
        rest = obj
        log(repr(focus))
        # Releases 'eat' the entries in their line's list and get added to the
        # final data structure. They also inform new release-line 'buffers'.
        # Release lines, once the release obj is removed, should be empty or a
        # comma-separated list of issue numbers.
        if isinstance(focus, Release):
            construct_entry_with_release(
                focus, issues, manager, log, releases, rest
            )
            # After each release is handled, look ahead to see if we're
            # entering "last stretch before a major release". If so,
            # pre-emptively update the line-manager so upcoming features are
            # correctly sorted into that major release by default (re: logic in
            # Release.add_to_manager)
            handle_upcoming_major_release(
                stripped_entries[index + 1:], manager
            )
        # Entries get copied into release line buckets as follows:
        # * Features and support go into 'unreleased_feature' for use in new
        # feature releases.
        # * Bugfixes go into all release lines (so they can be printed in >1
        # bugfix release as appropriate) as well as 'unreleased_bugfix' (so
        # they can be displayed prior to release'). Caveats include bugs marked
        # 'major' (they go into unreleased_feature instead) or with 'N.N+'
        # (meaning they only go into release line buckets for that release and
        # up.)
        # * Support/feature entries marked as 'backported' go into all
        # release lines as well, on the assumption that they were released to
        # all active branches.
        # * The 'rest' variable (which here is the bug description, vitally
        # important!) is preserved by stuffing it into the focus (issue)
        # object - it will get unpacked by construct_nodes() later.
        else:
            construct_entry_without_release(focus, issues, manager, log, rest)
    if manager.unstable_prehistory:
        # Single undifferentiated "Next release" bucket for 0.x prehistory.
        releases.append(generate_unreleased_entry(
            header="Next release",
            line="unreleased",
            issues=manager[0]['unreleased'],
            manager=manager,
            app=app,
        ))
    else:
        append_unreleased_entries(app, manager, releases)
    reorder_release_entries(releases)
    return releases, manager
def construct_nodes(releases):
    """
    Render organized ``releases`` dicts into a flat list of docutils nodes.

    Newest releases end up first in the output; releases with no entries are
    skipped entirely.
    """
    result = []
    # Reverse the list again so the final display is newest on top
    for d in reversed(releases):
        if not d['entries']:
            continue
        obj = d['obj']
        entries = []
        for entry in d['entries']:
            # Use nodes.Node.deepcopy to deepcopy the description
            # node. If this is not done, multiple references to the same
            # object (e.g. a reference object in the description of #649, which
            # is then copied into 2 different release lists) will end up in the
            # doctree, which makes subsequent parse steps very angry (index()
            # errors).
            desc = entry['description'].deepcopy()
            # Additionally, expand any other issue roles found in the
            # description - sometimes we refer to related issues inline. (They
            # can't be left as issue() objects at render time since that's
            # undefined.)
            # Use [:] slicing to avoid mutation during the loops.
            for index, node in enumerate(desc[:]):
                for subindex, subnode in enumerate(node[:]):
                    if isinstance(subnode, Issue):
                        lst = subnode['nodelist']
                        # Slice-assign to splice the rendered nodes in place.
                        desc[index][subindex:subindex + 1] = lst
            # Rework this entry to insert the now-rendered issue nodes in front
            # of the 1st paragraph of the 'description' nodes (which should be
            # the preserved LI + nested paragraph-or-more from original
            # markup.)
            # FIXME: why is there no "prepend a list" method?
            for node in reversed(entry['nodelist']):
                desc[0].insert(0, node)
            entries.append(desc)
        # Entry list
        list_ = nodes.bullet_list('', *entries)
        # Insert list into release nodelist (as it's a section)
        obj['nodelist'][0].append(list_)
        # Release header
        header = nodes.paragraph('', '', *obj['nodelist'])
        result.extend(header)
    return result
class BulletListVisitor(nodes.NodeVisitor):
    """
    docutils visitor replacing the first top-level bullet list (assumed to be
    the changelog) with fully parsed & organized release nodes.
    """
    def __init__(self, document, app):
        nodes.NodeVisitor.__init__(self, document)
        # Flag: only the first bullet list encountered is the changelog.
        self.found_changelog = False
        self.app = app

    def visit_bullet_list(self, node):
        # The first found bullet list (which should be the first one at the top
        # level of the document) is the changelog.
        if not self.found_changelog:
            self.found_changelog = True
            # Walk + parse into release mapping
            releases, _ = construct_releases(node.children, self.app)
            # Construct new set of nodes to replace the old, and we're done
            node.replace_self(construct_nodes(releases))

    def unknown_visit(self, node):
        # Ignore every other node type.
        pass
def generate_changelog(app, doctree):
    """
    Sphinx ``doctree-read`` hook: transmute the changelog bullet list.

    Documents whose name is not listed in ``releases_document_name`` (default
    ``['changelog']``) are left untouched.
    """
    # Only operate on the configured changelog document(s).
    if app.env.docname in app.config.releases_document_name:
        # Replace the first top-level bullet list with organized/parsed
        # release elements.
        doctree.walk(BulletListVisitor(doctree, app))
def setup(app):
    """
    Sphinx extension entry point: register config values, roles & hooks.

    :returns: extension metadata dict (currently just its version).
    """
    for key, default in (
        # Issue base URI setting: releases_issue_uri
        # E.g. 'https://github.com/fabric/fabric/issues/'
        ('issue_uri', None),
        # Release-tag base URI setting: releases_release_uri
        # E.g. 'https://github.com/fabric/fabric/tree/'
        ('release_uri', None),
        # Convenience Github version of above
        ('github_path', None),
        # Which document to use as the changelog
        ('document_name', ['changelog']),
        # Debug output
        ('debug', False),
        # Whether to enable linear history during 0.x release timeline
        # TODO: flip this to True by default in our 2.0 release
        ('unstable_prehistory', False),
    ):
        app.add_config_value(
            name='releases_{}'.format(key), default=default, rebuild='html'
        )
    # if a string is given for `document_name`, convert it to a list
    # done to maintain backwards compatibility
    # https://stackoverflow.com/questions/1303243/how-to-find-out-if-a-python-object-is-a-string
    PY2 = sys.version_info[0] == 2
    if PY2:
        string_types = (basestring,)
    else:
        string_types = (str,)
    if isinstance(app.config.releases_document_name, string_types):
        app.config.releases_document_name = [app.config.releases_document_name]
    # Register intermediate roles
    for x in list(ISSUE_TYPES) + ['issue']:
        add_role(app, x, issues_role)
    add_role(app, 'release', release_role)
    # Hook in our changelog transmutation at appropriate step
    app.connect('doctree-read', generate_changelog)
    # identifies the version of our extension
    return {'version': __version__}
def add_role(app, name, role_obj):
    """
    Register ``role_obj`` under role ``name`` unless docutils already has it.

    Introspecting ``docutils.parsers.rst.roles._roles`` is the same trick
    Sphinx uses to warn about double registration; skipping re-registration
    avoids those warnings without having to configure the app early.
    """
    already_registered = name in roles._roles
    if already_registered:
        return
    app.add_role(name, role_obj)
|
bitprophet/releases
|
releases/__init__.py
|
release_role
|
python
|
def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
# Make sure year has been specified
match = year_arg_re.match(text)
if not match:
msg = inliner.reporter.error("Must specify release date!")
return [inliner.problematic(rawtext, rawtext, msg)], [msg]
number, date = match.group(1), match.group(2)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
nodelist = [release_nodes(number, number, date, config)]
# Return intermediate node
node = Release(number=number, date=date, nodelist=nodelist)
return [node], []
|
Invoked as :release:`N.N.N <YYYY-MM-DD>`.
Turns into useful release header + link to GH tree for the tag.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L164-L181
|
[
"def release_nodes(text, slug, date, config):\n # Doesn't seem possible to do this \"cleanly\" (i.e. just say \"make me a\n # title and give it these HTML attributes during render time) so...fuckit.\n # We were already doing fully raw elements elsewhere anyway. And who cares\n # about a PDF of a changelog? :x\n uri = None\n if config.releases_release_uri:\n # TODO: % vs .format()\n uri = config.releases_release_uri % slug\n elif config.releases_github_path:\n uri = \"https://github.com/{}/tree/{}\".format(\n config.releases_github_path, slug)\n # Only construct link tag if user actually configured release URIs somehow\n if uri:\n link = '<a class=\"reference external\" href=\"{}\">{}</a>'.format(\n uri, text,\n )\n else:\n link = text\n datespan = ''\n if date:\n datespan = ' <span style=\"font-size: 75%;\">{}</span>'.format(date)\n header = '<h2 style=\"margin-bottom: 0.3em;\">{}{}</h2>'.format(\n link, datespan)\n return nodes.section('',\n nodes.raw(rawtext='', text=header, format='html'),\n ids=[text]\n )\n"
] |
import itertools
import re
import sys
from functools import partial
from docutils import nodes, utils
from docutils.parsers.rst import roles
import six
from .models import Issue, ISSUE_TYPES, Release, Version, Spec
from .line_manager import LineManager
from ._version import __version__
def _log(txt, config):
"""
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
"""
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush()
def issue_nodelist(name, identifier=None):
which = '[<span style="color: #%s;">%s</span>]' % (
ISSUE_TYPES[name], name.capitalize()
)
signifier = [nodes.raw(text=which, format='html')]
id_nodelist = [nodes.inline(text=" "), identifier] if identifier else []
trail = [] if identifier else [nodes.inline(text=" ")]
return signifier + id_nodelist + [nodes.inline(text=":")] + trail
release_line_re = re.compile(r'^(\d+\.\d+)\+$') # e.g. '1.2+'
def scan_for_spec(keyword):
"""
Attempt to return some sort of Spec from given keyword value.
Returns None if one could not be derived.
"""
# Both 'spec' formats are wrapped in parens, discard
keyword = keyword.lstrip('(').rstrip(')')
# First, test for intermediate '1.2+' style
matches = release_line_re.findall(keyword)
if matches:
return Spec(">={}".format(matches[0]))
# Failing that, see if Spec can make sense of it
try:
return Spec(keyword)
# I've only ever seen Spec fail with ValueError.
except ValueError:
return None
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Use: :issue|bug|feature|support:`ticket_number`
When invoked as :issue:, turns into just a "#NN" hyperlink to
`releases_issue_uri`.
When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".
Spaces present in the "ticket number" are used as fields for keywords
(major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
used when constructing the object.
May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
"""
parts = utils.unescape(text).split()
issue_no = parts.pop(0)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
if issue_no not in ('-', '0'):
ref = None
if config.releases_issue_uri:
# TODO: deal with % vs .format()
ref = config.releases_issue_uri % issue_no
elif config.releases_github_path:
ref = "https://github.com/{}/issues/{}".format(
config.releases_github_path, issue_no)
# Only generate a reference/link if we were able to make a URI
if ref:
identifier = nodes.reference(
rawtext, '#' + issue_no, refuri=ref, **options
)
# Otherwise, just make it regular text
else:
identifier = nodes.raw(
rawtext=rawtext, text='#' + issue_no, format='html',
**options
)
else:
identifier = None
issue_no = None # So it doesn't gum up dupe detection later
# Additional 'new-style changelog' stuff
if name in ISSUE_TYPES:
nodelist = issue_nodelist(name, identifier)
spec = None
keyword = None
# TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
# instance of specs, etc.
for part in parts:
maybe_spec = scan_for_spec(part)
if maybe_spec:
spec = maybe_spec
else:
if part in ('backported', 'major'):
keyword = part
else:
err = "Gave unknown keyword {!r} for issue no. {}"
raise ValueError(err.format(keyword, issue_no))
# Create temporary node w/ data & final nodes to publish
node = Issue(
number=issue_no,
type_=name,
nodelist=nodelist,
backported=(keyword == 'backported'),
major=(keyword == 'major'),
spec=spec,
)
return [node], []
# Return old style info for 'issue' for older changelog entries
else:
return [identifier], []
def release_nodes(text, slug, date, config):
# Doesn't seem possible to do this "cleanly" (i.e. just say "make me a
# title and give it these HTML attributes during render time) so...fuckit.
# We were already doing fully raw elements elsewhere anyway. And who cares
# about a PDF of a changelog? :x
uri = None
if config.releases_release_uri:
# TODO: % vs .format()
uri = config.releases_release_uri % slug
elif config.releases_github_path:
uri = "https://github.com/{}/tree/{}".format(
config.releases_github_path, slug)
# Only construct link tag if user actually configured release URIs somehow
if uri:
link = '<a class="reference external" href="{}">{}</a>'.format(
uri, text,
)
else:
link = text
datespan = ''
if date:
datespan = ' <span style="font-size: 75%;">{}</span>'.format(date)
header = '<h2 style="margin-bottom: 0.3em;">{}{}</h2>'.format(
link, datespan)
return nodes.section('',
nodes.raw(rawtext='', text=header, format='html'),
ids=[text]
)
year_arg_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
def generate_unreleased_entry(header, line, issues, manager, app):
log = partial(_log, config=app.config)
nodelist = [release_nodes(
header,
# TODO: should link to master for newest family and...what
# exactly, for the others? Expectation isn't necessarily to
# have a branch per family? Or is there? Maybe there must be..
'master',
None,
app.config
)]
log("Creating {!r} faux-release with {!r}".format(line, issues))
return {
'obj': Release(number=line, date=None, nodelist=nodelist),
'entries': issues,
}
def append_unreleased_entries(app, manager, releases):
"""
Generate new abstract 'releases' for unreleased issues.
There's one for each combination of bug-vs-feature & major release line.
When only one major release line exists, that dimension is ignored.
"""
for family, lines in six.iteritems(manager):
for type_ in ('bugfix', 'feature'):
bucket = 'unreleased_{}'.format(type_)
if bucket not in lines: # Implies unstable prehistory + 0.x fam
continue
issues = lines[bucket]
fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
header = "Next {}{} release".format(fam_prefix, type_)
line = "unreleased_{}.x_{}".format(family, type_)
releases.append(
generate_unreleased_entry(header, line, issues, manager, app)
)
def reorder_release_entries(releases):
"""
Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
"""
order = {'feature': 0, 'bug': 1, 'support': 2}
for release in releases:
entries = release['entries'][:]
release['entries'] = sorted(entries, key=lambda x: order[x.type])
def construct_entry_with_release(focus, issues, manager, log, releases, rest):
"""
Releases 'eat' the entries in their line's list and get added to the
final data structure. They also inform new release-line 'buffers'.
Release lines, once the release obj is removed, should be empty or a
comma-separated list of issue numbers.
"""
log("release for line %r" % focus.minor)
# Check for explicitly listed issues first
explicit = None
if rest[0].children:
explicit = [x.strip() for x in rest[0][0].split(',')]
# Do those by themselves since they override all other logic
if explicit:
log("Explicit issues requested: %r" % (explicit,))
# First scan global issue dict, dying if not found
missing = [i for i in explicit if i not in issues]
if missing:
raise ValueError(
"Couldn't find issue(s) #{} in the changelog!".format(
', '.join(missing)))
# Obtain the explicitly named issues from global list
entries = []
for i in explicit:
for flattened_issue_item in itertools.chain(issues[i]):
entries.append(flattened_issue_item)
# Create release
log("entries in this release: %r" % (entries,))
releases.append({
'obj': focus,
'entries': entries,
})
# Introspect these entries to determine which buckets they should get
# removed from (it's not "all of them"!)
for obj in entries:
if obj.type == 'bug':
# Major bugfix: remove from unreleased_feature
if obj.major:
log("Removing #%s from unreleased" % obj.number)
# TODO: consider making a LineManager method somehow
manager[focus.family]['unreleased_feature'].remove(obj)
# Regular bugfix: remove from bucket for this release's
# line + unreleased_bugfix
else:
if obj in manager[focus.family]['unreleased_bugfix']:
log("Removing #%s from unreleased" % obj.number)
manager[focus.family]['unreleased_bugfix'].remove(obj)
if obj in manager[focus.family][focus.minor]:
log("Removing #%s from %s" % (obj.number, focus.minor))
manager[focus.family][focus.minor].remove(obj)
# Regular feature/support: remove from unreleased_feature
# Backported feature/support: remove from bucket for this
# release's line (if applicable) + unreleased_feature
else:
log("Removing #%s from unreleased" % obj.number)
manager[focus.family]['unreleased_feature'].remove(obj)
if obj in manager[focus.family].get(focus.minor, []):
manager[focus.family][focus.minor].remove(obj)
# Implicit behavior otherwise
else:
# Unstable prehistory -> just dump 'unreleased' and continue
if manager.unstable_prehistory:
# TODO: need to continue making LineManager actually OO, i.e. do
# away with the subdicts + keys, move to sub-objects with methods
# answering questions like "what should I give you for a release"
# or whatever
log("in unstable prehistory, dumping 'unreleased'")
releases.append({
'obj': focus,
# NOTE: explicitly dumping 0, not focus.family, since this
# might be the last pre-historical release and thus not 0.x
'entries': manager[0]['unreleased'][:],
})
manager[0]['unreleased'] = []
# If this isn't a 0.x release, it signals end of prehistory, make a
# new release bucket (as is also done below in regular behavior).
# Also acts like a sentinel that prehistory is over.
if focus.family != 0:
manager[focus.family][focus.minor] = []
# Regular behavior from here
else:
# New release line/branch detected. Create it & dump unreleased
# features.
if focus.minor not in manager[focus.family]:
log("not seen prior, making feature release & bugfix bucket")
manager[focus.family][focus.minor] = []
# TODO: this used to explicitly say "go over everything in
# unreleased_feature and dump if it's feature, support or major
# bug". But what the hell else would BE in unreleased_feature?
# Why not just dump the whole thing??
#
# Dump only the items in the bucket whose family this release
# object belongs to, i.e. 1.5.0 should only nab the 1.0
# family's unreleased feature items.
releases.append({
'obj': focus,
'entries': manager[focus.family]['unreleased_feature'][:],
})
manager[focus.family]['unreleased_feature'] = []
# Existing line -> empty out its bucket into new release.
# Skip 'major' bugs as those "belong" to the next release (and will
# also be in 'unreleased_feature' - so safe to nuke the entire
# line)
else:
log("pre-existing, making bugfix release")
# TODO: as in other branch, I don't get why this wasn't just
# dumping the whole thing - why would major bugs be in the
# regular bugfix buckets?
entries = manager[focus.family][focus.minor][:]
releases.append({'obj': focus, 'entries': entries})
manager[focus.family][focus.minor] = []
# Clean out the items we just released from
# 'unreleased_bugfix'. (Can't nuke it because there might
# be some unreleased bugs for other release lines.)
for x in entries:
if x in manager[focus.family]['unreleased_bugfix']:
manager[focus.family]['unreleased_bugfix'].remove(x)
def construct_entry_without_release(focus, issues, manager, log, rest):
# Handle rare-but-valid non-issue-attached line items, which are
# always bugs. (They are their own description.)
if not isinstance(focus, Issue):
# First, sanity check for potential mistakes resulting in an issue node
# being buried within something else.
buried = focus.traverse(Issue)
if buried:
msg = """
Found issue node ({!r}) buried inside another node:
{}
Please double-check your ReST syntax! There is probably text in the above
output that will show you which part of your changelog to look at.
For example, indentation problems can accidentally generate nested definition
lists.
"""
raise ValueError(msg.format(buried[0], str(buried[0].parent)))
# OK, it looks legit - make it a bug.
log("Found line item w/ no real issue object, creating bug")
nodelist = issue_nodelist('bug')
# Skip nodelist entirely if we're in unstable prehistory -
# classification doesn't matter there.
if manager.unstable_prehistory:
nodelist = []
# Undo the 'pop' from outer scope. TODO: rework things so we don't have
# to do this dumb shit uggggh
rest[0].insert(0, focus)
focus = Issue(
type_='bug',
nodelist=nodelist,
description=rest,
)
else:
focus.attributes['description'] = rest
# Add to global list (for use by explicit releases) or die trying
issues[focus.number] = issues.get(focus.number, []) + [focus]
# Add to per-release bugfix lines and/or unreleased bug/feature buckets, as
# necessary.
# TODO: suspect all of add_to_manager can now live in the manager; most of
# Release's methods should probably go that way
if manager.unstable_prehistory:
log("Unstable prehistory -> adding to 0.x unreleased bucket")
manager[0]['unreleased'].append(focus)
else:
log("Adding to release line manager")
focus.add_to_manager(manager)
def handle_upcoming_major_release(entries, manager):
# Short-circuit if the future holds nothing for us
if not entries:
return
# Short-circuit if we're in the middle of a block of releases, only the
# last release before a bunch of issues, should be taking any action.
if isinstance(entries[0], Release):
return
# Iterate through entries til we find the next Release or set of Releases
next_releases = []
for index, obj in enumerate(entries):
if isinstance(obj, Release):
next_releases.append(obj)
# Non-empty next_releases + encountered a non-release = done w/ release
# block.
elif next_releases:
break
# Examine result: is a major release present? If so, add its major number
# to the line manager!
for obj in next_releases:
# TODO: update when Release gets tied closer w/ Version
version = Version(obj.number)
if version.minor == 0 and version.patch == 0:
manager.add_family(obj.family)
def handle_first_release_line(entries, manager):
"""
Set up initial line-manager entry for first encountered release line.
To be called at start of overall process; afterwards, subsequent major
lines are generated by `handle_upcoming_major_release`.
"""
# It's remotely possible the changelog is totally empty...
if not entries:
return
# Obtain (short-circuiting) first Release obj.
first_release = None
for obj in entries:
if isinstance(obj, Release):
first_release = obj
break
# It's also possible it's non-empty but has no releases yet.
if first_release:
manager.add_family(obj.family)
# If God did not exist, man would be forced to invent him.
else:
manager.add_family(0)
def construct_releases(entries, app):
log = partial(_log, config=app.config)
# Walk from back to front, consuming entries & copying them into
# per-release buckets as releases are encountered. Store releases in order.
releases = []
# Release lines, to be organized by major releases, then by major+minor,
# alongside per-major-release 'unreleased' bugfix/feature buckets.
# NOTE: With exception of unstable_prehistory=True, which triggers use of a
# separate, undifferentiated 'unreleased' bucket (albeit still within the
# '0' major line family).
manager = LineManager(app)
# Also keep a master hash of issues by number to detect duplicates & assist
# in explicitly defined release lists.
issues = {}
reversed_entries = list(reversed(entries))
# For the lookahead, so we're not doing this stripping O(n) times.
# TODO: probs just merge the two into e.g. a list of 2-tuples of "actual
# entry obj + rest"?
stripped_entries = [x[0][0] for x in reversed_entries]
# Perform an initial lookahead to prime manager with the 1st major release
handle_first_release_line(stripped_entries, manager)
# Start crawling...
for index, obj in enumerate(reversed_entries):
# Issue object is always found in obj (LI) index 0 (first, often only
# P) and is the 1st item within that (index 0 again).
# Preserve all other contents of 'obj'.
focus = obj[0].pop(0)
rest = obj
log(repr(focus))
# Releases 'eat' the entries in their line's list and get added to the
# final data structure. They also inform new release-line 'buffers'.
# Release lines, once the release obj is removed, should be empty or a
# comma-separated list of issue numbers.
if isinstance(focus, Release):
construct_entry_with_release(
focus, issues, manager, log, releases, rest
)
# After each release is handled, look ahead to see if we're
# entering "last stretch before a major release". If so,
# pre-emptively update the line-manager so upcoming features are
# correctly sorted into that major release by default (re: logic in
# Release.add_to_manager)
handle_upcoming_major_release(
stripped_entries[index + 1:], manager
)
# Entries get copied into release line buckets as follows:
# * Features and support go into 'unreleased_feature' for use in new
# feature releases.
# * Bugfixes go into all release lines (so they can be printed in >1
# bugfix release as appropriate) as well as 'unreleased_bugfix' (so
# they can be displayed prior to release'). Caveats include bugs marked
# 'major' (they go into unreleased_feature instead) or with 'N.N+'
# (meaning they only go into release line buckets for that release and
# up.)
# * Support/feature entries marked as 'backported' go into all
# release lines as well, on the assumption that they were released to
# all active branches.
# * The 'rest' variable (which here is the bug description, vitally
# important!) is preserved by stuffing it into the focus (issue)
# object - it will get unpacked by construct_nodes() later.
else:
construct_entry_without_release(focus, issues, manager, log, rest)
if manager.unstable_prehistory:
releases.append(generate_unreleased_entry(
header="Next release",
line="unreleased",
issues=manager[0]['unreleased'],
manager=manager,
app=app,
))
else:
append_unreleased_entries(app, manager, releases)
reorder_release_entries(releases)
return releases, manager
def construct_nodes(releases):
result = []
# Reverse the list again so the final display is newest on top
for d in reversed(releases):
if not d['entries']:
continue
obj = d['obj']
entries = []
for entry in d['entries']:
# Use nodes.Node.deepcopy to deepcopy the description
# node. If this is not done, multiple references to the same
# object (e.g. a reference object in the description of #649, which
# is then copied into 2 different release lists) will end up in the
# doctree, which makes subsequent parse steps very angry (index()
# errors).
desc = entry['description'].deepcopy()
# Additionally, expand any other issue roles found in the
# description - sometimes we refer to related issues inline. (They
# can't be left as issue() objects at render time since that's
# undefined.)
# Use [:] slicing to avoid mutation during the loops.
for index, node in enumerate(desc[:]):
for subindex, subnode in enumerate(node[:]):
if isinstance(subnode, Issue):
lst = subnode['nodelist']
desc[index][subindex:subindex + 1] = lst
# Rework this entry to insert the now-rendered issue nodes in front
# of the 1st paragraph of the 'description' nodes (which should be
# the preserved LI + nested paragraph-or-more from original
# markup.)
# FIXME: why is there no "prepend a list" method?
for node in reversed(entry['nodelist']):
desc[0].insert(0, node)
entries.append(desc)
# Entry list
list_ = nodes.bullet_list('', *entries)
# Insert list into release nodelist (as it's a section)
obj['nodelist'][0].append(list_)
# Release header
header = nodes.paragraph('', '', *obj['nodelist'])
result.extend(header)
return result
class BulletListVisitor(nodes.NodeVisitor):
def __init__(self, document, app):
nodes.NodeVisitor.__init__(self, document)
self.found_changelog = False
self.app = app
def visit_bullet_list(self, node):
# The first found bullet list (which should be the first one at the top
# level of the document) is the changelog.
if not self.found_changelog:
self.found_changelog = True
# Walk + parse into release mapping
releases, _ = construct_releases(node.children, self.app)
# Construct new set of nodes to replace the old, and we're done
node.replace_self(construct_nodes(releases))
def unknown_visit(self, node):
pass
def generate_changelog(app, doctree):
# Don't scan/mutate documents that don't match the configured document name
# (which by default is ['changelog.rst', ]).
if app.env.docname not in app.config.releases_document_name:
return
# Find the first bullet-list node & replace it with our organized/parsed
# elements.
changelog_visitor = BulletListVisitor(doctree, app)
doctree.walk(changelog_visitor)
def setup(app):
for key, default in (
# Issue base URI setting: releases_issue_uri
# E.g. 'https://github.com/fabric/fabric/issues/'
('issue_uri', None),
# Release-tag base URI setting: releases_release_uri
# E.g. 'https://github.com/fabric/fabric/tree/'
('release_uri', None),
# Convenience Github version of above
('github_path', None),
# Which document to use as the changelog
('document_name', ['changelog']),
# Debug output
('debug', False),
# Whether to enable linear history during 0.x release timeline
# TODO: flip this to True by default in our 2.0 release
('unstable_prehistory', False),
):
app.add_config_value(
name='releases_{}'.format(key), default=default, rebuild='html'
)
# if a string is given for `document_name`, convert it to a list
# done to maintain backwards compatibility
# https://stackoverflow.com/questions/1303243/how-to-find-out-if-a-python-object-is-a-string
PY2 = sys.version_info[0] == 2
if PY2:
string_types = (basestring,)
else:
string_types = (str,)
if isinstance(app.config.releases_document_name, string_types):
app.config.releases_document_name = [app.config.releases_document_name]
# Register intermediate roles
for x in list(ISSUE_TYPES) + ['issue']:
add_role(app, x, issues_role)
add_role(app, 'release', release_role)
# Hook in our changelog transmutation at appropriate step
app.connect('doctree-read', generate_changelog)
# identifies the version of our extension
return {'version': __version__}
def add_role(app, name, role_obj):
# This (introspecting docutils.parser.rst.roles._roles) is the same trick
# Sphinx uses to emit warnings about double-registering; it's a PITA to try
# and configure the app early on so it doesn't emit those warnings, so we
# instead just...don't double-register. Meh.
if name not in roles._roles:
app.add_role(name, role_obj)
|
bitprophet/releases
|
releases/__init__.py
|
append_unreleased_entries
|
python
|
def append_unreleased_entries(app, manager, releases):
for family, lines in six.iteritems(manager):
for type_ in ('bugfix', 'feature'):
bucket = 'unreleased_{}'.format(type_)
if bucket not in lines: # Implies unstable prehistory + 0.x fam
continue
issues = lines[bucket]
fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
header = "Next {}{} release".format(fam_prefix, type_)
line = "unreleased_{}.x_{}".format(family, type_)
releases.append(
generate_unreleased_entry(header, line, issues, manager, app)
)
|
Generate new abstract 'releases' for unreleased issues.
There's one for each combination of bug-vs-feature & major release line.
When only one major release line exists, that dimension is ignored.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L202-L221
|
[
"def generate_unreleased_entry(header, line, issues, manager, app):\n log = partial(_log, config=app.config)\n nodelist = [release_nodes(\n header,\n # TODO: should link to master for newest family and...what\n # exactly, for the others? Expectation isn't necessarily to\n # have a branch per family? Or is there? Maybe there must be..\n 'master',\n None,\n app.config\n )]\n log(\"Creating {!r} faux-release with {!r}\".format(line, issues))\n return {\n 'obj': Release(number=line, date=None, nodelist=nodelist),\n 'entries': issues,\n }\n"
] |
import itertools
import re
import sys
from functools import partial
from docutils import nodes, utils
from docutils.parsers.rst import roles
import six
from .models import Issue, ISSUE_TYPES, Release, Version, Spec
from .line_manager import LineManager
from ._version import __version__
def _log(txt, config):
"""
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
"""
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush()
def issue_nodelist(name, identifier=None):
which = '[<span style="color: #%s;">%s</span>]' % (
ISSUE_TYPES[name], name.capitalize()
)
signifier = [nodes.raw(text=which, format='html')]
id_nodelist = [nodes.inline(text=" "), identifier] if identifier else []
trail = [] if identifier else [nodes.inline(text=" ")]
return signifier + id_nodelist + [nodes.inline(text=":")] + trail
release_line_re = re.compile(r'^(\d+\.\d+)\+$') # e.g. '1.2+'
def scan_for_spec(keyword):
"""
Attempt to return some sort of Spec from given keyword value.
Returns None if one could not be derived.
"""
# Both 'spec' formats are wrapped in parens, discard
keyword = keyword.lstrip('(').rstrip(')')
# First, test for intermediate '1.2+' style
matches = release_line_re.findall(keyword)
if matches:
return Spec(">={}".format(matches[0]))
# Failing that, see if Spec can make sense of it
try:
return Spec(keyword)
# I've only ever seen Spec fail with ValueError.
except ValueError:
return None
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Use: :issue|bug|feature|support:`ticket_number`
When invoked as :issue:, turns into just a "#NN" hyperlink to
`releases_issue_uri`.
When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".
Spaces present in the "ticket number" are used as fields for keywords
(major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
used when constructing the object.
May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
"""
parts = utils.unescape(text).split()
issue_no = parts.pop(0)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
if issue_no not in ('-', '0'):
ref = None
if config.releases_issue_uri:
# TODO: deal with % vs .format()
ref = config.releases_issue_uri % issue_no
elif config.releases_github_path:
ref = "https://github.com/{}/issues/{}".format(
config.releases_github_path, issue_no)
# Only generate a reference/link if we were able to make a URI
if ref:
identifier = nodes.reference(
rawtext, '#' + issue_no, refuri=ref, **options
)
# Otherwise, just make it regular text
else:
identifier = nodes.raw(
rawtext=rawtext, text='#' + issue_no, format='html',
**options
)
else:
identifier = None
issue_no = None # So it doesn't gum up dupe detection later
# Additional 'new-style changelog' stuff
if name in ISSUE_TYPES:
nodelist = issue_nodelist(name, identifier)
spec = None
keyword = None
# TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
# instance of specs, etc.
for part in parts:
maybe_spec = scan_for_spec(part)
if maybe_spec:
spec = maybe_spec
else:
if part in ('backported', 'major'):
keyword = part
else:
err = "Gave unknown keyword {!r} for issue no. {}"
raise ValueError(err.format(keyword, issue_no))
# Create temporary node w/ data & final nodes to publish
node = Issue(
number=issue_no,
type_=name,
nodelist=nodelist,
backported=(keyword == 'backported'),
major=(keyword == 'major'),
spec=spec,
)
return [node], []
# Return old style info for 'issue' for older changelog entries
else:
return [identifier], []
def release_nodes(text, slug, date, config):
# Doesn't seem possible to do this "cleanly" (i.e. just say "make me a
# title and give it these HTML attributes during render time) so...fuckit.
# We were already doing fully raw elements elsewhere anyway. And who cares
# about a PDF of a changelog? :x
uri = None
if config.releases_release_uri:
# TODO: % vs .format()
uri = config.releases_release_uri % slug
elif config.releases_github_path:
uri = "https://github.com/{}/tree/{}".format(
config.releases_github_path, slug)
# Only construct link tag if user actually configured release URIs somehow
if uri:
link = '<a class="reference external" href="{}">{}</a>'.format(
uri, text,
)
else:
link = text
datespan = ''
if date:
datespan = ' <span style="font-size: 75%;">{}</span>'.format(date)
header = '<h2 style="margin-bottom: 0.3em;">{}{}</h2>'.format(
link, datespan)
return nodes.section('',
nodes.raw(rawtext='', text=header, format='html'),
ids=[text]
)
year_arg_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Invoked as :release:`N.N.N <YYYY-MM-DD>`.
Turns into useful release header + link to GH tree for the tag.
"""
# Make sure year has been specified
match = year_arg_re.match(text)
if not match:
msg = inliner.reporter.error("Must specify release date!")
return [inliner.problematic(rawtext, rawtext, msg)], [msg]
number, date = match.group(1), match.group(2)
# Lol @ access back to Sphinx
config = inliner.document.settings.env.app.config
nodelist = [release_nodes(number, number, date, config)]
# Return intermediate node
node = Release(number=number, date=date, nodelist=nodelist)
return [node], []
def generate_unreleased_entry(header, line, issues, manager, app):
log = partial(_log, config=app.config)
nodelist = [release_nodes(
header,
# TODO: should link to master for newest family and...what
# exactly, for the others? Expectation isn't necessarily to
# have a branch per family? Or is there? Maybe there must be..
'master',
None,
app.config
)]
log("Creating {!r} faux-release with {!r}".format(line, issues))
return {
'obj': Release(number=line, date=None, nodelist=nodelist),
'entries': issues,
}
def reorder_release_entries(releases):
"""
Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
"""
order = {'feature': 0, 'bug': 1, 'support': 2}
for release in releases:
entries = release['entries'][:]
release['entries'] = sorted(entries, key=lambda x: order[x.type])
def construct_entry_with_release(focus, issues, manager, log, releases, rest):
    """
    Releases 'eat' the entries in their line's list and get added to the
    final data structure. They also inform new release-line 'buffers'.
    Release lines, once the release obj is removed, should be empty or a
    comma-separated list of issue numbers.

    :param focus: the Release node being processed.
    :param issues: global mapping of issue number -> list of issue nodes.
    :param manager: the LineManager holding per-family/per-line buckets.
    :param log: logging callable (already partial'd with config).
    :param releases: output list of release dicts; appended to in place.
    :param rest: remainder of the original list-item node; its first child's
        text, if any, is a comma-separated explicit issue list.
    """
    log("release for line %r" % focus.minor)
    # Check for explicitly listed issues first
    explicit = None
    if rest[0].children:
        explicit = [x.strip() for x in rest[0][0].split(',')]
    # Do those by themselves since they override all other logic
    if explicit:
        log("Explicit issues requested: %r" % (explicit,))
        # First scan global issue dict, dying if not found
        missing = [i for i in explicit if i not in issues]
        if missing:
            raise ValueError(
                "Couldn't find issue(s) #{} in the changelog!".format(
                    ', '.join(missing)))
        # Obtain the explicitly named issues from global list
        entries = []
        for i in explicit:
            for flattened_issue_item in itertools.chain(issues[i]):
                entries.append(flattened_issue_item)
        # Create release
        log("entries in this release: %r" % (entries,))
        releases.append({
            'obj': focus,
            'entries': entries,
        })
        # Introspect these entries to determine which buckets they should get
        # removed from (it's not "all of them"!)
        for obj in entries:
            if obj.type == 'bug':
                # Major bugfix: remove from unreleased_feature
                if obj.major:
                    log("Removing #%s from unreleased" % obj.number)
                    # TODO: consider making a LineManager method somehow
                    manager[focus.family]['unreleased_feature'].remove(obj)
                # Regular bugfix: remove from bucket for this release's
                # line + unreleased_bugfix
                else:
                    if obj in manager[focus.family]['unreleased_bugfix']:
                        log("Removing #%s from unreleased" % obj.number)
                        manager[focus.family]['unreleased_bugfix'].remove(obj)
                    if obj in manager[focus.family][focus.minor]:
                        log("Removing #%s from %s" % (obj.number, focus.minor))
                        manager[focus.family][focus.minor].remove(obj)
            # Regular feature/support: remove from unreleased_feature
            # Backported feature/support: remove from bucket for this
            # release's line (if applicable) + unreleased_feature
            else:
                log("Removing #%s from unreleased" % obj.number)
                manager[focus.family]['unreleased_feature'].remove(obj)
                if obj in manager[focus.family].get(focus.minor, []):
                    manager[focus.family][focus.minor].remove(obj)
    # Implicit behavior otherwise
    else:
        # Unstable prehistory -> just dump 'unreleased' and continue
        if manager.unstable_prehistory:
            # TODO: need to continue making LineManager actually OO, i.e. do
            # away with the subdicts + keys, move to sub-objects with methods
            # answering questions like "what should I give you for a release"
            # or whatever
            log("in unstable prehistory, dumping 'unreleased'")
            releases.append({
                'obj': focus,
                # NOTE: explicitly dumping 0, not focus.family, since this
                # might be the last pre-historical release and thus not 0.x
                'entries': manager[0]['unreleased'][:],
            })
            manager[0]['unreleased'] = []
            # If this isn't a 0.x release, it signals end of prehistory, make a
            # new release bucket (as is also done below in regular behavior).
            # Also acts like a sentinel that prehistory is over.
            if focus.family != 0:
                manager[focus.family][focus.minor] = []
        # Regular behavior from here
        else:
            # New release line/branch detected. Create it & dump unreleased
            # features.
            if focus.minor not in manager[focus.family]:
                log("not seen prior, making feature release & bugfix bucket")
                manager[focus.family][focus.minor] = []
                # TODO: this used to explicitly say "go over everything in
                # unreleased_feature and dump if it's feature, support or major
                # bug". But what the hell else would BE in unreleased_feature?
                # Why not just dump the whole thing??
                #
                # Dump only the items in the bucket whose family this release
                # object belongs to, i.e. 1.5.0 should only nab the 1.0
                # family's unreleased feature items.
                releases.append({
                    'obj': focus,
                    'entries': manager[focus.family]['unreleased_feature'][:],
                })
                manager[focus.family]['unreleased_feature'] = []
            # Existing line -> empty out its bucket into new release.
            # Skip 'major' bugs as those "belong" to the next release (and will
            # also be in 'unreleased_feature' - so safe to nuke the entire
            # line)
            else:
                log("pre-existing, making bugfix release")
                # TODO: as in other branch, I don't get why this wasn't just
                # dumping the whole thing - why would major bugs be in the
                # regular bugfix buckets?
                entries = manager[focus.family][focus.minor][:]
                releases.append({'obj': focus, 'entries': entries})
                manager[focus.family][focus.minor] = []
                # Clean out the items we just released from
                # 'unreleased_bugfix'. (Can't nuke it because there might
                # be some unreleased bugs for other release lines.)
                for x in entries:
                    if x in manager[focus.family]['unreleased_bugfix']:
                        manager[focus.family]['unreleased_bugfix'].remove(x)
def construct_entry_without_release(focus, issues, manager, log, rest):
    """
    File one changelog entry (an issue node, or bare text treated as a bug).

    Wraps non-Issue line items as bug Issues, records the result in the
    global ``issues`` mapping, then buckets it via the line ``manager``.
    """
    # Handle rare-but-valid non-issue-attached line items, which are
    # always bugs. (They are their own description.)
    if not isinstance(focus, Issue):
        # First, sanity check for potential mistakes resulting in an issue node
        # being buried within something else.
        buried = focus.traverse(Issue)
        if buried:
            msg = """
Found issue node ({!r}) buried inside another node:
{}
Please double-check your ReST syntax! There is probably text in the above
output that will show you which part of your changelog to look at.
For example, indentation problems can accidentally generate nested definition
lists.
"""
            raise ValueError(msg.format(buried[0], str(buried[0].parent)))
        # OK, it looks legit - make it a bug.
        log("Found line item w/ no real issue object, creating bug")
        nodelist = issue_nodelist('bug')
        # Skip nodelist entirely if we're in unstable prehistory -
        # classification doesn't matter there.
        if manager.unstable_prehistory:
            nodelist = []
        # Undo the 'pop' from outer scope. TODO: rework things so we don't have
        # to do this dumb shit uggggh
        rest[0].insert(0, focus)
        focus = Issue(
            type_='bug',
            nodelist=nodelist,
            description=rest,
        )
    else:
        focus.attributes['description'] = rest
    # Add to global list (for use by explicit releases) or die trying
    issues[focus.number] = issues.get(focus.number, []) + [focus]
    # Add to per-release bugfix lines and/or unreleased bug/feature buckets, as
    # necessary.
    # TODO: suspect all of add_to_manager can now live in the manager; most of
    # Release's methods should probably go that way
    if manager.unstable_prehistory:
        log("Unstable prehistory -> adding to 0.x unreleased bucket")
        manager[0]['unreleased'].append(focus)
    else:
        log("Adding to release line manager")
        focus.add_to_manager(manager)
def handle_upcoming_major_release(entries, manager):
    """
    Pre-register a major family when an X.0.0 release lies just ahead.

    Should only take effect when called right after the final release in a
    contiguous block of releases.
    """
    # Nothing ahead of us -> nothing to do.
    if not entries:
        return
    # If the very next entry is itself a release, we're still inside a block
    # of releases; only the last release before a run of issues acts.
    if isinstance(entries[0], Release):
        return
    # Collect the next contiguous run of Release objects.
    upcoming = []
    for candidate in entries:
        if isinstance(candidate, Release):
            upcoming.append(candidate)
        elif upcoming:
            # Hit a non-release after having seen releases: the run is over.
            break
    # If any of those is a major (X.0.0) release, register its family.
    for candidate in upcoming:
        # TODO: update when Release gets tied closer w/ Version
        version = Version(candidate.number)
        if version.minor == 0 and version.patch == 0:
            manager.add_family(candidate.family)
def handle_first_release_line(entries, manager):
    """
    Set up initial line-manager entry for first encountered release line.

    To be called at start of overall process; afterwards, subsequent major
    lines are generated by `handle_upcoming_major_release`.
    """
    # It's remotely possible the changelog is totally empty...
    if not entries:
        return
    # Obtain (short-circuiting) first Release obj.
    first_release = None
    for obj in entries:
        if isinstance(obj, Release):
            first_release = obj
            break
    # It's also possible it's non-empty but has no releases yet.
    if first_release:
        # Use first_release explicitly; the old code read the loop variable
        # 'obj', which only happened to equal first_release because of the
        # break above — fragile if the loop logic ever changes.
        manager.add_family(first_release.family)
    # If God did not exist, man would be forced to invent him.
    else:
        manager.add_family(0)
def construct_releases(entries, app):
    """
    Walk changelog ``entries`` into ordered per-release buckets.

    :param entries: bullet-list child nodes from the changelog document.
    :param app: the Sphinx application (supplies config for logging etc.).
    :returns: 2-tuple of (list of release dicts, the LineManager used).
    """
    log = partial(_log, config=app.config)
    # Walk from back to front, consuming entries & copying them into
    # per-release buckets as releases are encountered. Store releases in order.
    releases = []
    # Release lines, to be organized by major releases, then by major+minor,
    # alongside per-major-release 'unreleased' bugfix/feature buckets.
    # NOTE: With exception of unstable_prehistory=True, which triggers use of a
    # separate, undifferentiated 'unreleased' bucket (albeit still within the
    # '0' major line family).
    manager = LineManager(app)
    # Also keep a master hash of issues by number to detect duplicates & assist
    # in explicitly defined release lists.
    issues = {}
    reversed_entries = list(reversed(entries))
    # For the lookahead, so we're not doing this stripping O(n) times.
    # TODO: probs just merge the two into e.g. a list of 2-tuples of "actual
    # entry obj + rest"?
    stripped_entries = [x[0][0] for x in reversed_entries]
    # Perform an initial lookahead to prime manager with the 1st major release
    handle_first_release_line(stripped_entries, manager)
    # Start crawling...
    for index, obj in enumerate(reversed_entries):
        # Issue object is always found in obj (LI) index 0 (first, often only
        # P) and is the 1st item within that (index 0 again).
        # Preserve all other contents of 'obj'.
        focus = obj[0].pop(0)
        rest = obj
        log(repr(focus))
        # Releases 'eat' the entries in their line's list and get added to the
        # final data structure. They also inform new release-line 'buffers'.
        # Release lines, once the release obj is removed, should be empty or a
        # comma-separated list of issue numbers.
        if isinstance(focus, Release):
            construct_entry_with_release(
                focus, issues, manager, log, releases, rest
            )
            # After each release is handled, look ahead to see if we're
            # entering "last stretch before a major release". If so,
            # pre-emptively update the line-manager so upcoming features are
            # correctly sorted into that major release by default (re: logic in
            # Release.add_to_manager)
            handle_upcoming_major_release(
                stripped_entries[index + 1:], manager
            )
        # Entries get copied into release line buckets as follows:
        # * Features and support go into 'unreleased_feature' for use in new
        # feature releases.
        # * Bugfixes go into all release lines (so they can be printed in >1
        # bugfix release as appropriate) as well as 'unreleased_bugfix' (so
        # they can be displayed prior to release'). Caveats include bugs marked
        # 'major' (they go into unreleased_feature instead) or with 'N.N+'
        # (meaning they only go into release line buckets for that release and
        # up.)
        # * Support/feature entries marked as 'backported' go into all
        # release lines as well, on the assumption that they were released to
        # all active branches.
        # * The 'rest' variable (which here is the bug description, vitally
        # important!) is preserved by stuffing it into the focus (issue)
        # object - it will get unpacked by construct_nodes() later.
        else:
            construct_entry_without_release(focus, issues, manager, log, rest)
    if manager.unstable_prehistory:
        releases.append(generate_unreleased_entry(
            header="Next release",
            line="unreleased",
            issues=manager[0]['unreleased'],
            manager=manager,
            app=app,
        ))
    else:
        append_unreleased_entries(app, manager, releases)
    reorder_release_entries(releases)
    return releases, manager
def construct_nodes(releases):
    """
    Render parsed ``releases`` dicts back into docutils nodes for display.

    Newest release ends up on top; releases with no entries are skipped.
    """
    result = []
    # Reverse the list again so the final display is newest on top
    for d in reversed(releases):
        if not d['entries']:
            continue
        obj = d['obj']
        entries = []
        for entry in d['entries']:
            # Use nodes.Node.deepcopy to deepcopy the description
            # node. If this is not done, multiple references to the same
            # object (e.g. a reference object in the description of #649, which
            # is then copied into 2 different release lists) will end up in the
            # doctree, which makes subsequent parse steps very angry (index()
            # errors).
            desc = entry['description'].deepcopy()
            # Additionally, expand any other issue roles found in the
            # description - sometimes we refer to related issues inline. (They
            # can't be left as issue() objects at render time since that's
            # undefined.)
            # Use [:] slicing to avoid mutation during the loops.
            for index, node in enumerate(desc[:]):
                for subindex, subnode in enumerate(node[:]):
                    if isinstance(subnode, Issue):
                        lst = subnode['nodelist']
                        desc[index][subindex:subindex + 1] = lst
            # Rework this entry to insert the now-rendered issue nodes in front
            # of the 1st paragraph of the 'description' nodes (which should be
            # the preserved LI + nested paragraph-or-more from original
            # markup.)
            # FIXME: why is there no "prepend a list" method?
            for node in reversed(entry['nodelist']):
                desc[0].insert(0, node)
            entries.append(desc)
        # Entry list
        list_ = nodes.bullet_list('', *entries)
        # Insert list into release nodelist (as it's a section)
        obj['nodelist'][0].append(list_)
        # Release header
        header = nodes.paragraph('', '', *obj['nodelist'])
        result.extend(header)
    return result
class BulletListVisitor(nodes.NodeVisitor):
    """
    Doctree visitor that transmutes the first top-level bullet list.

    The first bullet list encountered is assumed to be the changelog; it is
    parsed into releases and replaced in place. All other nodes are ignored.
    """
    def __init__(self, document, app):
        nodes.NodeVisitor.__init__(self, document)
        # Flag ensuring only the first bullet list found is transmuted.
        self.found_changelog = False
        self.app = app
    def visit_bullet_list(self, node):
        # The first found bullet list (which should be the first one at the top
        # level of the document) is the changelog.
        if not self.found_changelog:
            self.found_changelog = True
            # Walk + parse into release mapping
            releases, _ = construct_releases(node.children, self.app)
            # Construct new set of nodes to replace the old, and we're done
            node.replace_self(construct_nodes(releases))
    def unknown_visit(self, node):
        # Deliberately ignore every other node type.
        pass
def generate_changelog(app, doctree):
    """
    Sphinx 'doctree-read' hook: transmute the changelog bullet list.

    Only documents whose name matches ``releases_document_name`` (default:
    ``['changelog']``) are touched.
    """
    if app.env.docname not in app.config.releases_document_name:
        return
    # Replace the first top-level bullet list with organized/parsed
    # release sections.
    visitor = BulletListVisitor(doctree, app)
    doctree.walk(visitor)
def setup(app):
    """
    Sphinx extension entry point: register config values, roles and hooks.

    Returns the metadata dict Sphinx expects (extension version).
    """
    for key, default in (
        # Issue base URI setting: releases_issue_uri
        # E.g. 'https://github.com/fabric/fabric/issues/'
        ('issue_uri', None),
        # Release-tag base URI setting: releases_release_uri
        # E.g. 'https://github.com/fabric/fabric/tree/'
        ('release_uri', None),
        # Convenience Github version of above
        ('github_path', None),
        # Which document to use as the changelog
        ('document_name', ['changelog']),
        # Debug output
        ('debug', False),
        # Whether to enable linear history during 0.x release timeline
        # TODO: flip this to True by default in our 2.0 release
        ('unstable_prehistory', False),
    ):
        app.add_config_value(
            name='releases_{}'.format(key), default=default, rebuild='html'
        )
    # if a string is given for `document_name`, convert it to a list
    # done to maintain backwards compatibility
    # https://stackoverflow.com/questions/1303243/how-to-find-out-if-a-python-object-is-a-string
    PY2 = sys.version_info[0] == 2
    if PY2:
        string_types = (basestring,)
    else:
        string_types = (str,)
    if isinstance(app.config.releases_document_name, string_types):
        app.config.releases_document_name = [app.config.releases_document_name]
    # Register intermediate roles
    for x in list(ISSUE_TYPES) + ['issue']:
        add_role(app, x, issues_role)
    add_role(app, 'release', release_role)
    # Hook in our changelog transmutation at appropriate step
    app.connect('doctree-read', generate_changelog)
    # identifies the version of our extension
    return {'version': __version__}
def add_role(app, name, role_obj):
    """
    Register ``role_obj`` under ``name`` unless it already exists.

    Introspecting docutils.parsers.rst.roles._roles is the same trick Sphinx
    uses to emit warnings about double-registering; configuring the app early
    enough to avoid those warnings is a pain, so we simply skip the
    re-registration instead. Meh.
    """
    already_registered = name in roles._roles
    if not already_registered:
        app.add_role(name, role_obj)
|
bitprophet/releases
|
releases/__init__.py
|
reorder_release_entries
|
python
|
def reorder_release_entries(releases):
order = {'feature': 0, 'bug': 1, 'support': 2}
for release in releases:
entries = release['entries'][:]
release['entries'] = sorted(entries, key=lambda x: order[x.type])
|
Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L224-L231
| null |
import itertools
import re
import sys
from functools import partial
from docutils import nodes, utils
from docutils.parsers.rst import roles
import six
from .models import Issue, ISSUE_TYPES, Release, Version, Spec
from .line_manager import LineManager
from ._version import __version__
def _log(txt, config):
"""
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
"""
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush()
def issue_nodelist(name, identifier=None):
    """
    Return nodes rendering a "[Type]" label, optional issue link, and ":".
    """
    label = '[<span style="color: #%s;">%s</span>]' % (
        ISSUE_TYPES[name], name.capitalize()
    )
    nodelist = [nodes.raw(text=label, format='html')]
    # With an identifier: "[Type] #NN:" — without: "[Type]: " (trailing space).
    if identifier:
        nodelist.append(nodes.inline(text=" "))
        nodelist.append(identifier)
    nodelist.append(nodes.inline(text=":"))
    if not identifier:
        nodelist.append(nodes.inline(text=" "))
    return nodelist
# Matches intermediate "N.N+" release-line specifiers (version family and up).
release_line_re = re.compile(r'^(\d+\.\d+)\+$')  # e.g. '1.2+'
def scan_for_spec(keyword):
    """
    Attempt to return some sort of Spec from given keyword value.

    Returns None if one could not be derived.
    """
    # Both 'spec' formats are wrapped in parens; discard those first.
    stripped = keyword.lstrip('(').rstrip(')')
    # Test for the intermediate '1.2+' style, which maps to '>=1.2'.
    found = release_line_re.findall(stripped)
    if found:
        return Spec(">={}".format(found[0]))
    # Otherwise, see whether Spec itself can make sense of the value.
    # (Spec has only ever been observed to fail with ValueError.)
    try:
        return Spec(stripped)
    except ValueError:
        return None
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Use: :issue|bug|feature|support:`ticket_number`

    When invoked as :issue:, turns into just a "#NN" hyperlink to
    `releases_issue_uri`.

    When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".

    Spaces present in the "ticket number" are used as fields for keywords
    (major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
    used when constructing the object.

    May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.
    """
    parts = utils.unescape(text).split()
    issue_no = parts.pop(0)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    if issue_no not in ('-', '0'):
        ref = None
        if config.releases_issue_uri:
            # TODO: deal with % vs .format()
            ref = config.releases_issue_uri % issue_no
        elif config.releases_github_path:
            ref = "https://github.com/{}/issues/{}".format(
                config.releases_github_path, issue_no)
        # Only generate a reference/link if we were able to make a URI
        if ref:
            identifier = nodes.reference(
                rawtext, '#' + issue_no, refuri=ref, **options
            )
        # Otherwise, just make it regular text
        else:
            identifier = nodes.raw(
                rawtext=rawtext, text='#' + issue_no, format='html',
                **options
            )
    else:
        identifier = None
        issue_no = None  # So it doesn't gum up dupe detection later
    # Additional 'new-style changelog' stuff
    if name in ISSUE_TYPES:
        nodelist = issue_nodelist(name, identifier)
        spec = None
        keyword = None
        # TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
        # instance of specs, etc.
        for part in parts:
            maybe_spec = scan_for_spec(part)
            if maybe_spec:
                spec = maybe_spec
            else:
                if part in ('backported', 'major'):
                    keyword = part
                else:
                    # BUGFIX: report the actual offending token ('part'), not
                    # 'keyword', which at this point is often still None.
                    err = "Gave unknown keyword {!r} for issue no. {}"
                    raise ValueError(err.format(part, issue_no))
        # Create temporary node w/ data & final nodes to publish
        node = Issue(
            number=issue_no,
            type_=name,
            nodelist=nodelist,
            backported=(keyword == 'backported'),
            major=(keyword == 'major'),
            spec=spec,
        )
        return [node], []
    # Return old style info for 'issue' for older changelog entries
    else:
        return [identifier], []
def release_nodes(text, slug, date, config):
    """
    Build a raw-HTML section header for one release, optionally hyperlinked.

    Doesn't seem possible to do this "cleanly" (i.e. just say "make me a
    title and give it these HTML attributes during render time) so...fuckit.
    We were already doing fully raw elements elsewhere anyway. And who cares
    about a PDF of a changelog? :x
    """
    uri = None
    if config.releases_release_uri:
        # TODO: % vs .format()
        uri = config.releases_release_uri % slug
    elif config.releases_github_path:
        uri = "https://github.com/{}/tree/{}".format(
            config.releases_github_path, slug)
    # No URI configured by the user -> plain text instead of a link tag.
    if not uri:
        link = text
    else:
        link = '<a class="reference external" href="{}">{}</a>'.format(
            uri, text,
        )
    if date:
        datespan = ' <span style="font-size: 75%;">{}</span>'.format(date)
    else:
        datespan = ''
    header = '<h2 style="margin-bottom: 0.3em;">{}{}</h2>'.format(
        link, datespan)
    return nodes.section(
        '',
        nodes.raw(rawtext='', text=header, format='html'),
        ids=[text],
    )
# Parses ':release:`1.0.2 <2015-01-01>`'-style text: group(1) is the version
# string, group(2) the bracketed date. The (?<!\x00) lookbehind presumably
# skips docutils-escaped '<' characters (null-prefixed) — TODO confirm.
year_arg_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Invoked as :release:`N.N.N <YYYY-MM-DD>`.
    Turns into useful release header + link to GH tree for the tag.
    """
    # Make sure year has been specified
    match = year_arg_re.match(text)
    if not match:
        msg = inliner.reporter.error("Must specify release date!")
        return [inliner.problematic(rawtext, rawtext, msg)], [msg]
    # group(1) = version number, group(2) = the bracketed date string.
    number, date = match.group(1), match.group(2)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    nodelist = [release_nodes(number, number, date, config)]
    # Return intermediate node
    node = Release(number=number, date=date, nodelist=nodelist)
    return [node], []
def generate_unreleased_entry(header, line, issues, manager, app):
    """
    Build a faux-release dict (linked to 'master') for unreleased ``issues``.
    """
    log = partial(_log, config=app.config)
    nodelist = [release_nodes(
        header,
        # TODO: should link to master for newest family and...what
        # exactly, for the others? Expectation isn't necessarily to
        # have a branch per family? Or is there? Maybe there must be..
        'master',
        None,
        app.config
    )]
    log("Creating {!r} faux-release with {!r}".format(line, issues))
    return {
        'obj': Release(number=line, date=None, nodelist=nodelist),
        'entries': issues,
    }
def append_unreleased_entries(app, manager, releases):
    """
    Generate new abstract 'releases' for unreleased issues.
    There's one for each combination of bug-vs-feature & major release line.
    When only one major release line exists, that dimension is ignored.
    """
    for family, lines in six.iteritems(manager):
        for type_ in ('bugfix', 'feature'):
            bucket = 'unreleased_{}'.format(type_)
            if bucket not in lines:  # Implies unstable prehistory + 0.x fam
                continue
            issues = lines[bucket]
            # Only mention the family in the header when more than one exists.
            fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
            header = "Next {}{} release".format(fam_prefix, type_)
            # e.g. 'unreleased_1.x_feature'
            line = "unreleased_{}.x_{}".format(family, type_)
            releases.append(
                generate_unreleased_entry(header, line, issues, manager, app)
            )
def construct_entry_with_release(focus, issues, manager, log, releases, rest):
    """
    Releases 'eat' the entries in their line's list and get added to the
    final data structure. They also inform new release-line 'buffers'.
    Release lines, once the release obj is removed, should be empty or a
    comma-separated list of issue numbers.

    :param focus: the Release node being processed.
    :param issues: global mapping of issue number -> list of issue nodes.
    :param manager: the LineManager holding per-family/per-line buckets.
    :param log: logging callable (already partial'd with config).
    :param releases: output list of release dicts; appended to in place.
    :param rest: remainder of the original list-item node; its first child's
        text, if any, is a comma-separated explicit issue list.
    """
    log("release for line %r" % focus.minor)
    # Check for explicitly listed issues first
    explicit = None
    if rest[0].children:
        explicit = [x.strip() for x in rest[0][0].split(',')]
    # Do those by themselves since they override all other logic
    if explicit:
        log("Explicit issues requested: %r" % (explicit,))
        # First scan global issue dict, dying if not found
        missing = [i for i in explicit if i not in issues]
        if missing:
            raise ValueError(
                "Couldn't find issue(s) #{} in the changelog!".format(
                    ', '.join(missing)))
        # Obtain the explicitly named issues from global list
        entries = []
        for i in explicit:
            for flattened_issue_item in itertools.chain(issues[i]):
                entries.append(flattened_issue_item)
        # Create release
        log("entries in this release: %r" % (entries,))
        releases.append({
            'obj': focus,
            'entries': entries,
        })
        # Introspect these entries to determine which buckets they should get
        # removed from (it's not "all of them"!)
        for obj in entries:
            if obj.type == 'bug':
                # Major bugfix: remove from unreleased_feature
                if obj.major:
                    log("Removing #%s from unreleased" % obj.number)
                    # TODO: consider making a LineManager method somehow
                    manager[focus.family]['unreleased_feature'].remove(obj)
                # Regular bugfix: remove from bucket for this release's
                # line + unreleased_bugfix
                else:
                    if obj in manager[focus.family]['unreleased_bugfix']:
                        log("Removing #%s from unreleased" % obj.number)
                        manager[focus.family]['unreleased_bugfix'].remove(obj)
                    if obj in manager[focus.family][focus.minor]:
                        log("Removing #%s from %s" % (obj.number, focus.minor))
                        manager[focus.family][focus.minor].remove(obj)
            # Regular feature/support: remove from unreleased_feature
            # Backported feature/support: remove from bucket for this
            # release's line (if applicable) + unreleased_feature
            else:
                log("Removing #%s from unreleased" % obj.number)
                manager[focus.family]['unreleased_feature'].remove(obj)
                if obj in manager[focus.family].get(focus.minor, []):
                    manager[focus.family][focus.minor].remove(obj)
    # Implicit behavior otherwise
    else:
        # Unstable prehistory -> just dump 'unreleased' and continue
        if manager.unstable_prehistory:
            # TODO: need to continue making LineManager actually OO, i.e. do
            # away with the subdicts + keys, move to sub-objects with methods
            # answering questions like "what should I give you for a release"
            # or whatever
            log("in unstable prehistory, dumping 'unreleased'")
            releases.append({
                'obj': focus,
                # NOTE: explicitly dumping 0, not focus.family, since this
                # might be the last pre-historical release and thus not 0.x
                'entries': manager[0]['unreleased'][:],
            })
            manager[0]['unreleased'] = []
            # If this isn't a 0.x release, it signals end of prehistory, make a
            # new release bucket (as is also done below in regular behavior).
            # Also acts like a sentinel that prehistory is over.
            if focus.family != 0:
                manager[focus.family][focus.minor] = []
        # Regular behavior from here
        else:
            # New release line/branch detected. Create it & dump unreleased
            # features.
            if focus.minor not in manager[focus.family]:
                log("not seen prior, making feature release & bugfix bucket")
                manager[focus.family][focus.minor] = []
                # TODO: this used to explicitly say "go over everything in
                # unreleased_feature and dump if it's feature, support or major
                # bug". But what the hell else would BE in unreleased_feature?
                # Why not just dump the whole thing??
                #
                # Dump only the items in the bucket whose family this release
                # object belongs to, i.e. 1.5.0 should only nab the 1.0
                # family's unreleased feature items.
                releases.append({
                    'obj': focus,
                    'entries': manager[focus.family]['unreleased_feature'][:],
                })
                manager[focus.family]['unreleased_feature'] = []
            # Existing line -> empty out its bucket into new release.
            # Skip 'major' bugs as those "belong" to the next release (and will
            # also be in 'unreleased_feature' - so safe to nuke the entire
            # line)
            else:
                log("pre-existing, making bugfix release")
                # TODO: as in other branch, I don't get why this wasn't just
                # dumping the whole thing - why would major bugs be in the
                # regular bugfix buckets?
                entries = manager[focus.family][focus.minor][:]
                releases.append({'obj': focus, 'entries': entries})
                manager[focus.family][focus.minor] = []
                # Clean out the items we just released from
                # 'unreleased_bugfix'. (Can't nuke it because there might
                # be some unreleased bugs for other release lines.)
                for x in entries:
                    if x in manager[focus.family]['unreleased_bugfix']:
                        manager[focus.family]['unreleased_bugfix'].remove(x)
def construct_entry_without_release(focus, issues, manager, log, rest):
    """
    File one changelog entry (an issue node, or bare text treated as a bug).

    Wraps non-Issue line items as bug Issues, records the result in the
    global ``issues`` mapping, then buckets it via the line ``manager``.
    """
    # Handle rare-but-valid non-issue-attached line items, which are
    # always bugs. (They are their own description.)
    if not isinstance(focus, Issue):
        # First, sanity check for potential mistakes resulting in an issue node
        # being buried within something else.
        buried = focus.traverse(Issue)
        if buried:
            msg = """
Found issue node ({!r}) buried inside another node:
{}
Please double-check your ReST syntax! There is probably text in the above
output that will show you which part of your changelog to look at.
For example, indentation problems can accidentally generate nested definition
lists.
"""
            raise ValueError(msg.format(buried[0], str(buried[0].parent)))
        # OK, it looks legit - make it a bug.
        log("Found line item w/ no real issue object, creating bug")
        nodelist = issue_nodelist('bug')
        # Skip nodelist entirely if we're in unstable prehistory -
        # classification doesn't matter there.
        if manager.unstable_prehistory:
            nodelist = []
        # Undo the 'pop' from outer scope. TODO: rework things so we don't have
        # to do this dumb shit uggggh
        rest[0].insert(0, focus)
        focus = Issue(
            type_='bug',
            nodelist=nodelist,
            description=rest,
        )
    else:
        focus.attributes['description'] = rest
    # Add to global list (for use by explicit releases) or die trying
    issues[focus.number] = issues.get(focus.number, []) + [focus]
    # Add to per-release bugfix lines and/or unreleased bug/feature buckets, as
    # necessary.
    # TODO: suspect all of add_to_manager can now live in the manager; most of
    # Release's methods should probably go that way
    if manager.unstable_prehistory:
        log("Unstable prehistory -> adding to 0.x unreleased bucket")
        manager[0]['unreleased'].append(focus)
    else:
        log("Adding to release line manager")
        focus.add_to_manager(manager)
def handle_upcoming_major_release(entries, manager):
    """
    Pre-register a major family if a major (X.0.0) release lies just ahead.

    Only acts when called right after the final release in a release block.
    """
    # Short-circuit if the future holds nothing for us
    if not entries:
        return
    # Short-circuit if we're in the middle of a block of releases, only the
    # last release before a bunch of issues, should be taking any action.
    if isinstance(entries[0], Release):
        return
    # Iterate through entries til we find the next Release or set of Releases
    next_releases = []
    for index, obj in enumerate(entries):
        if isinstance(obj, Release):
            next_releases.append(obj)
        # Non-empty next_releases + encountered a non-release = done w/ release
        # block.
        elif next_releases:
            break
    # Examine result: is a major release present? If so, add its major number
    # to the line manager!
    for obj in next_releases:
        # TODO: update when Release gets tied closer w/ Version
        version = Version(obj.number)
        if version.minor == 0 and version.patch == 0:
            manager.add_family(obj.family)
def handle_first_release_line(entries, manager):
    """
    Set up initial line-manager entry for first encountered release line.

    To be called at start of overall process; afterwards, subsequent major
    lines are generated by `handle_upcoming_major_release`.
    """
    # It's remotely possible the changelog is totally empty...
    if not entries:
        return
    # Obtain (short-circuiting) first Release obj.
    first_release = None
    for obj in entries:
        if isinstance(obj, Release):
            first_release = obj
            break
    # It's also possible it's non-empty but has no releases yet.
    if first_release:
        # Use first_release explicitly; the old code read the loop variable
        # 'obj', which only happened to equal first_release because of the
        # break above — fragile if the loop logic ever changes.
        manager.add_family(first_release.family)
    # If God did not exist, man would be forced to invent him.
    else:
        manager.add_family(0)
def construct_releases(entries, app):
log = partial(_log, config=app.config)
# Walk from back to front, consuming entries & copying them into
# per-release buckets as releases are encountered. Store releases in order.
releases = []
# Release lines, to be organized by major releases, then by major+minor,
# alongside per-major-release 'unreleased' bugfix/feature buckets.
# NOTE: With exception of unstable_prehistory=True, which triggers use of a
# separate, undifferentiated 'unreleased' bucket (albeit still within the
# '0' major line family).
manager = LineManager(app)
# Also keep a master hash of issues by number to detect duplicates & assist
# in explicitly defined release lists.
issues = {}
reversed_entries = list(reversed(entries))
# For the lookahead, so we're not doing this stripping O(n) times.
# TODO: probs just merge the two into e.g. a list of 2-tuples of "actual
# entry obj + rest"?
stripped_entries = [x[0][0] for x in reversed_entries]
# Perform an initial lookahead to prime manager with the 1st major release
handle_first_release_line(stripped_entries, manager)
# Start crawling...
for index, obj in enumerate(reversed_entries):
# Issue object is always found in obj (LI) index 0 (first, often only
# P) and is the 1st item within that (index 0 again).
# Preserve all other contents of 'obj'.
focus = obj[0].pop(0)
rest = obj
log(repr(focus))
# Releases 'eat' the entries in their line's list and get added to the
# final data structure. They also inform new release-line 'buffers'.
# Release lines, once the release obj is removed, should be empty or a
# comma-separated list of issue numbers.
if isinstance(focus, Release):
construct_entry_with_release(
focus, issues, manager, log, releases, rest
)
# After each release is handled, look ahead to see if we're
# entering "last stretch before a major release". If so,
# pre-emptively update the line-manager so upcoming features are
# correctly sorted into that major release by default (re: logic in
# Release.add_to_manager)
handle_upcoming_major_release(
stripped_entries[index + 1:], manager
)
# Entries get copied into release line buckets as follows:
# * Features and support go into 'unreleased_feature' for use in new
# feature releases.
# * Bugfixes go into all release lines (so they can be printed in >1
# bugfix release as appropriate) as well as 'unreleased_bugfix' (so
# they can be displayed prior to release'). Caveats include bugs marked
# 'major' (they go into unreleased_feature instead) or with 'N.N+'
# (meaning they only go into release line buckets for that release and
# up.)
# * Support/feature entries marked as 'backported' go into all
# release lines as well, on the assumption that they were released to
# all active branches.
# * The 'rest' variable (which here is the bug description, vitally
# important!) is preserved by stuffing it into the focus (issue)
# object - it will get unpacked by construct_nodes() later.
else:
construct_entry_without_release(focus, issues, manager, log, rest)
if manager.unstable_prehistory:
releases.append(generate_unreleased_entry(
header="Next release",
line="unreleased",
issues=manager[0]['unreleased'],
manager=manager,
app=app,
))
else:
append_unreleased_entries(app, manager, releases)
reorder_release_entries(releases)
return releases, manager
def construct_nodes(releases):
result = []
# Reverse the list again so the final display is newest on top
for d in reversed(releases):
if not d['entries']:
continue
obj = d['obj']
entries = []
for entry in d['entries']:
# Use nodes.Node.deepcopy to deepcopy the description
# node. If this is not done, multiple references to the same
# object (e.g. a reference object in the description of #649, which
# is then copied into 2 different release lists) will end up in the
# doctree, which makes subsequent parse steps very angry (index()
# errors).
desc = entry['description'].deepcopy()
# Additionally, expand any other issue roles found in the
# description - sometimes we refer to related issues inline. (They
# can't be left as issue() objects at render time since that's
# undefined.)
# Use [:] slicing to avoid mutation during the loops.
for index, node in enumerate(desc[:]):
for subindex, subnode in enumerate(node[:]):
if isinstance(subnode, Issue):
lst = subnode['nodelist']
desc[index][subindex:subindex + 1] = lst
# Rework this entry to insert the now-rendered issue nodes in front
# of the 1st paragraph of the 'description' nodes (which should be
# the preserved LI + nested paragraph-or-more from original
# markup.)
# FIXME: why is there no "prepend a list" method?
for node in reversed(entry['nodelist']):
desc[0].insert(0, node)
entries.append(desc)
# Entry list
list_ = nodes.bullet_list('', *entries)
# Insert list into release nodelist (as it's a section)
obj['nodelist'][0].append(list_)
# Release header
header = nodes.paragraph('', '', *obj['nodelist'])
result.extend(header)
return result
class BulletListVisitor(nodes.NodeVisitor):
def __init__(self, document, app):
nodes.NodeVisitor.__init__(self, document)
self.found_changelog = False
self.app = app
def visit_bullet_list(self, node):
# The first found bullet list (which should be the first one at the top
# level of the document) is the changelog.
if not self.found_changelog:
self.found_changelog = True
# Walk + parse into release mapping
releases, _ = construct_releases(node.children, self.app)
# Construct new set of nodes to replace the old, and we're done
node.replace_self(construct_nodes(releases))
def unknown_visit(self, node):
pass
def generate_changelog(app, doctree):
# Don't scan/mutate documents that don't match the configured document name
# (which by default is ['changelog.rst', ]).
if app.env.docname not in app.config.releases_document_name:
return
# Find the first bullet-list node & replace it with our organized/parsed
# elements.
changelog_visitor = BulletListVisitor(doctree, app)
doctree.walk(changelog_visitor)
def setup(app):
for key, default in (
# Issue base URI setting: releases_issue_uri
# E.g. 'https://github.com/fabric/fabric/issues/'
('issue_uri', None),
# Release-tag base URI setting: releases_release_uri
# E.g. 'https://github.com/fabric/fabric/tree/'
('release_uri', None),
# Convenience Github version of above
('github_path', None),
# Which document to use as the changelog
('document_name', ['changelog']),
# Debug output
('debug', False),
# Whether to enable linear history during 0.x release timeline
# TODO: flip this to True by default in our 2.0 release
('unstable_prehistory', False),
):
app.add_config_value(
name='releases_{}'.format(key), default=default, rebuild='html'
)
# if a string is given for `document_name`, convert it to a list
# done to maintain backwards compatibility
# https://stackoverflow.com/questions/1303243/how-to-find-out-if-a-python-object-is-a-string
PY2 = sys.version_info[0] == 2
if PY2:
string_types = (basestring,)
else:
string_types = (str,)
if isinstance(app.config.releases_document_name, string_types):
app.config.releases_document_name = [app.config.releases_document_name]
# Register intermediate roles
for x in list(ISSUE_TYPES) + ['issue']:
add_role(app, x, issues_role)
add_role(app, 'release', release_role)
# Hook in our changelog transmutation at appropriate step
app.connect('doctree-read', generate_changelog)
# identifies the version of our extension
return {'version': __version__}
def add_role(app, name, role_obj):
# This (introspecting docutils.parser.rst.roles._roles) is the same trick
# Sphinx uses to emit warnings about double-registering; it's a PITA to try
# and configure the app early on so it doesn't emit those warnings, so we
# instead just...don't double-register. Meh.
if name not in roles._roles:
app.add_role(name, role_obj)
|
bitprophet/releases
|
releases/__init__.py
|
construct_entry_with_release
|
python
|
def construct_entry_with_release(focus, issues, manager, log, releases, rest):
    """
    Handle a Release object encountered while walking the changelog.

    Either honors an explicitly-listed set of issue numbers, dumps the
    undifferentiated 'unreleased' bucket (unstable prehistory), or performs
    the regular feature/bugfix release bookkeeping against ``manager``.
    Appends a ``{'obj': ..., 'entries': ...}`` dict to ``releases``.
    """
    log("release for line %r" % focus.minor)
    # An explicit comma-separated issue list overrides all other logic.
    explicit = None
    if rest[0].children:
        explicit = [x.strip() for x in rest[0][0].split(',')]
    if explicit:
        log("Explicit issues requested: %r" % (explicit,))
        # Every explicitly named issue must already exist in the global dict.
        missing = [i for i in explicit if i not in issues]
        if missing:
            raise ValueError(
                "Couldn't find issue(s) #{} in the changelog!".format(
                    ', '.join(missing)))
        # Pull the named issues (each number may map to several objects).
        entries = []
        for i in explicit:
            for flattened_issue_item in itertools.chain(issues[i]):
                entries.append(flattened_issue_item)
        log("entries in this release: %r" % (entries,))
        releases.append({
            'obj': focus,
            'entries': entries,
        })
        # Introspect these entries to determine which buckets they should get
        # removed from (it's not "all of them"!)
        for obj in entries:
            if obj.type == 'bug':
                # Major bugfix: lives in unreleased_feature, remove from there
                if obj.major:
                    log("Removing #%s from unreleased" % obj.number)
                    # TODO: consider making a LineManager method somehow
                    manager[focus.family]['unreleased_feature'].remove(obj)
                # Regular bugfix: remove from this line's bucket and from
                # unreleased_bugfix, where applicable.
                else:
                    if obj in manager[focus.family]['unreleased_bugfix']:
                        log("Removing #%s from unreleased" % obj.number)
                        manager[focus.family]['unreleased_bugfix'].remove(obj)
                    if obj in manager[focus.family][focus.minor]:
                        log("Removing #%s from %s" % (obj.number, focus.minor))
                        manager[focus.family][focus.minor].remove(obj)
            # Feature/support (incl. backported): remove from
            # unreleased_feature, plus this line's bucket when present.
            else:
                log("Removing #%s from unreleased" % obj.number)
                manager[focus.family]['unreleased_feature'].remove(obj)
                if obj in manager[focus.family].get(focus.minor, []):
                    manager[focus.family][focus.minor].remove(obj)
    # Implicit behavior otherwise
    else:
        # Unstable prehistory -> just dump 'unreleased' and continue.
        if manager.unstable_prehistory:
            # TODO: need to continue making LineManager actually OO, i.e. do
            # away with the subdicts + keys, move to sub-objects with methods
            # answering questions like "what should I give you for a release"
            log("in unstable prehistory, dumping 'unreleased'")
            releases.append({
                'obj': focus,
                # NOTE: explicitly dumping 0, not focus.family, since this
                # might be the last pre-historical release and thus not 0.x
                'entries': manager[0]['unreleased'][:],
            })
            manager[0]['unreleased'] = []
            # A non-0.x release ends prehistory: open its bucket now, which
            # also acts as the sentinel that prehistory is over.
            if focus.family != 0:
                manager[focus.family][focus.minor] = []
        # Regular behavior from here
        else:
            # Brand-new release line/branch: create it & dump unreleased
            # features belonging to this release's family (e.g. 1.5.0 only
            # nabs the 1.0 family's unreleased feature items).
            if focus.minor not in manager[focus.family]:
                log("not seen prior, making feature release & bugfix bucket")
                manager[focus.family][focus.minor] = []
                # TODO: this used to explicitly say "go over everything in
                # unreleased_feature and dump if it's feature, support or
                # major bug" - but what else would BE in unreleased_feature?
                releases.append({
                    'obj': focus,
                    'entries': manager[focus.family]['unreleased_feature'][:],
                })
                manager[focus.family]['unreleased_feature'] = []
            # Existing line -> empty its bucket into a new bugfix release.
            else:
                log("pre-existing, making bugfix release")
                # TODO: as in other branch, unclear why major bugs would be
                # in the regular bugfix buckets at all.
                entries = manager[focus.family][focus.minor][:]
                releases.append({'obj': focus, 'entries': entries})
                manager[focus.family][focus.minor] = []
                # Scrub the just-released items from 'unreleased_bugfix'.
                # (Can't nuke it wholesale: other release lines may still
                # have unreleased bugs in there.)
                for x in entries:
                    if x in manager[focus.family]['unreleased_bugfix']:
                        manager[focus.family]['unreleased_bugfix'].remove(x)
|
Releases 'eat' the entries in their line's list and get added to the
final data structure. They also inform new release-line 'buffers'.
Release lines, once the release obj is removed, should be empty or a
comma-separated list of issue numbers.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L234-L352
| null |
import itertools
import re
import sys
from functools import partial
from docutils import nodes, utils
from docutils.parsers.rst import roles
import six
from .models import Issue, ISSUE_TYPES, Release, Version, Spec
from .line_manager import LineManager
from ._version import __version__
def _log(txt, config):
"""
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
"""
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush()
def issue_nodelist(name, identifier=None):
    """Return docutils nodes rendering an issue-type label like ``[Bug]:``.

    When ``identifier`` (a reference/raw node) is given it is inserted
    between the colored label and the colon; otherwise a trailing space
    follows the colon instead.
    """
    label = '[<span style="color: #%s;">%s</span>]' % (
        ISSUE_TYPES[name], name.capitalize()
    )
    result = [nodes.raw(text=label, format='html')]
    if identifier:
        result.extend([nodes.inline(text=" "), identifier])
    result.append(nodes.inline(text=":"))
    if not identifier:
        result.append(nodes.inline(text=" "))
    return result
release_line_re = re.compile(r'^(\d+\.\d+)\+$') # e.g. '1.2+'
def scan_for_spec(keyword):
    """
    Attempt to return some sort of Spec from given keyword value.

    Returns None if one could not be derived.
    """
    # Both accepted 'spec' formats may be wrapped in parens; discard those.
    stripped = keyword.lstrip('(').rstrip(')')
    # Intermediate "1.2+" style takes precedence.
    found = release_line_re.findall(stripped)
    if found:
        return Spec(">={}".format(found[0]))
    # Otherwise, let Spec itself try to make sense of it; it has only ever
    # been seen to fail with ValueError.
    try:
        return Spec(stripped)
    except ValueError:
        return None
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Use: :issue|bug|feature|support:`ticket_number`

    When invoked as :issue:, turns into just a "#NN" hyperlink to
    `releases_issue_uri`.

    When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".

    Spaces present in the "ticket number" are used as fields for keywords
    (major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
    used when constructing the object.

    May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.

    :raises ValueError: when a non-spec, non-keyword token follows the
        ticket number.
    """
    parts = utils.unescape(text).split()
    issue_no = parts.pop(0)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    if issue_no not in ('-', '0'):
        ref = None
        if config.releases_issue_uri:
            # TODO: deal with % vs .format()
            ref = config.releases_issue_uri % issue_no
        elif config.releases_github_path:
            ref = "https://github.com/{}/issues/{}".format(
                config.releases_github_path, issue_no)
        # Only generate a reference/link if we were able to make a URI
        if ref:
            identifier = nodes.reference(
                rawtext, '#' + issue_no, refuri=ref, **options
            )
        # Otherwise, just make it regular text
        else:
            identifier = nodes.raw(
                rawtext=rawtext, text='#' + issue_no, format='html',
                **options
            )
    else:
        identifier = None
        issue_no = None  # So it doesn't gum up dupe detection later
    # Additional 'new-style changelog' stuff
    if name in ISSUE_TYPES:
        nodelist = issue_nodelist(name, identifier)
        spec = None
        keyword = None
        # TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
        # instance of specs, etc.
        for part in parts:
            maybe_spec = scan_for_spec(part)
            if maybe_spec:
                spec = maybe_spec
            else:
                if part in ('backported', 'major'):
                    keyword = part
                else:
                    # BUGFIX: report the offending token ('part'), not the
                    # previously-stored 'keyword' (which may still be None
                    # here, yielding a useless "unknown keyword None" error).
                    err = "Gave unknown keyword {!r} for issue no. {}"
                    raise ValueError(err.format(part, issue_no))
        # Create temporary node w/ data & final nodes to publish
        node = Issue(
            number=issue_no,
            type_=name,
            nodelist=nodelist,
            backported=(keyword == 'backported'),
            major=(keyword == 'major'),
            spec=spec,
        )
        return [node], []
    # Return old style info for 'issue' for older changelog entries
    else:
        return [identifier], []
def release_nodes(text, slug, date, config):
    """
    Build a raw-HTML section node serving as a release header.

    ``text`` is the visible label, ``slug`` the tag interpolated into the
    configured release URI, ``date`` an optional date string appended in
    smaller type.
    """
    # Doesn't seem possible to do this "cleanly" (i.e. just say "make me a
    # title and give it these HTML attributes during render time), and fully
    # raw elements were already in use elsewhere. Who cares about a PDF of a
    # changelog anyway? :x
    uri = None
    if config.releases_release_uri:
        # TODO: % vs .format()
        uri = config.releases_release_uri % slug
    elif config.releases_github_path:
        uri = "https://github.com/{}/tree/{}".format(
            config.releases_github_path, slug)
    # Only construct a link tag when some release URI was configured.
    if uri:
        link = '<a class="reference external" href="{}">{}</a>'.format(
            uri, text,
        )
    else:
        link = text
    datespan = ''
    if date:
        datespan = ' <span style="font-size: 75%;">{}</span>'.format(date)
    header = '<h2 style="margin-bottom: 0.3em;">{}{}</h2>'.format(
        link, datespan)
    return nodes.section(
        '',
        nodes.raw(rawtext='', text=header, format='html'),
        ids=[text],
    )
year_arg_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Invoked as :release:`N.N.N <YYYY-MM-DD>`.

    Turns into useful release header + link to GH tree for the tag.
    """
    # The release date is mandatory.
    match = year_arg_re.match(text)
    if match is None:
        msg = inliner.reporter.error("Must specify release date!")
        return [inliner.problematic(rawtext, rawtext, msg)], [msg]
    number, date = match.group(1), match.group(2)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    nodelist = [release_nodes(number, number, date, config)]
    # Return intermediate node for construct_releases to consume.
    release = Release(number=number, date=date, nodelist=nodelist)
    return [release], []
def generate_unreleased_entry(header, line, issues, manager, app):
    """Return a faux-release dict bundling not-yet-released ``issues``."""
    log = partial(_log, config=app.config)
    nodelist = [release_nodes(
        header,
        # TODO: should link to master for newest family and...what
        # exactly, for the others? Expectation isn't necessarily to
        # have a branch per family? Or is there? Maybe there must be..
        'master',
        None,
        app.config,
    )]
    log("Creating {!r} faux-release with {!r}".format(line, issues))
    release = Release(number=line, date=None, nodelist=nodelist)
    return {'obj': release, 'entries': issues}
def append_unreleased_entries(app, manager, releases):
    """
    Generate new abstract 'releases' for unreleased issues.

    There's one for each combination of bug-vs-feature & major release line.
    When only one major release line exists, that dimension is ignored.
    """
    multiple_families = len(manager) > 1
    for family, lines in six.iteritems(manager):
        prefix = "{}.x ".format(family) if multiple_families else ""
        for kind in ('bugfix', 'feature'):
            bucket = 'unreleased_{}'.format(kind)
            # A missing bucket implies unstable prehistory + the 0.x family.
            if bucket not in lines:
                continue
            entry = generate_unreleased_entry(
                header="Next {}{} release".format(prefix, kind),
                line="unreleased_{}.x_{}".format(family, kind),
                issues=lines[bucket],
                manager=manager,
                app=app,
            )
            releases.append(entry)
def reorder_release_entries(releases):
    """
    Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
    """
    precedence = {'feature': 0, 'bug': 1, 'support': 2}
    for release in releases:
        release['entries'] = sorted(
            release['entries'], key=lambda entry: precedence[entry.type]
        )
def construct_entry_without_release(focus, issues, manager, log, rest):
    """
    File a non-Release changelog entry into the issue dict & line manager.

    Handles the rare-but-valid case of line items with no attached issue
    role (always treated as bugs whose description is the item itself).
    """
    if not isinstance(focus, Issue):
        # Sanity check: an Issue node buried inside something else usually
        # means broken ReST markup (e.g. indentation problems producing
        # nested definition lists).
        buried = focus.traverse(Issue)
        if buried:
            msg = """
Found issue node ({!r}) buried inside another node:
{}
Please double-check your ReST syntax! There is probably text in the above
output that will show you which part of your changelog to look at.
For example, indentation problems can accidentally generate nested definition
lists.
"""
            raise ValueError(msg.format(buried[0], str(buried[0].parent)))
        # Looks legit: synthesize a bug for it.
        log("Found line item w/ no real issue object, creating bug")
        nodelist = issue_nodelist('bug')
        # Classification doesn't matter during unstable prehistory, so skip
        # the nodelist entirely there.
        if manager.unstable_prehistory:
            nodelist = []
        # Undo the 'pop' performed in the outer scope so the description
        # stays whole. TODO: rework things so we don't have to do this.
        rest[0].insert(0, focus)
        focus = Issue(
            type_='bug',
            nodelist=nodelist,
            description=rest,
        )
    else:
        focus.attributes['description'] = rest
    # Track globally by number (used by explicitly-listed releases) or die
    # trying.
    issues[focus.number] = issues.get(focus.number, []) + [focus]
    # File into per-release bugfix lines and/or unreleased buckets.
    # TODO: suspect all of add_to_manager can now live in the manager; most
    # of Release's methods should probably go that way
    if manager.unstable_prehistory:
        log("Unstable prehistory -> adding to 0.x unreleased bucket")
        manager[0]['unreleased'].append(focus)
    else:
        log("Adding to release line manager")
        focus.add_to_manager(manager)
def handle_upcoming_major_release(entries, manager):
    """
    Pre-register the major family of an upcoming N.0.0 release, if any.

    ``entries`` is the (oldest-first, issue-stripped) remainder of the
    changelog following the release just handled.
    """
    # Nothing ahead of us -> nothing to do.
    if not entries:
        return
    # Mid-block of consecutive releases: only the last release before a run
    # of issues should take action, so bail when the very next entry is a
    # Release too.
    if isinstance(entries[0], Release):
        return
    # Collect the next contiguous batch of Release objects.
    upcoming = []
    for entry in entries:
        if isinstance(entry, Release):
            upcoming.append(entry)
        elif upcoming:
            # Hit a non-release after at least one release: batch complete.
            break
    # Register the family of any X.0.0 release found in that batch.
    for release in upcoming:
        # TODO: update when Release gets tied closer w/ Version
        version = Version(release.number)
        if version.minor == 0 and version.patch == 0:
            manager.add_family(release.family)
def handle_first_release_line(entries, manager):
    """
    Set up initial line-manager entry for first encountered release line.

    To be called at start of overall process; afterwards, subsequent major
    lines are generated by `handle_upcoming_major_release`.
    """
    # It's remotely possible the changelog is totally empty...
    if not entries:
        return
    # Find the first Release object, short-circuiting; it's also possible
    # the changelog is non-empty but has no releases yet.
    first_release = next(
        (entry for entry in entries if isinstance(entry, Release)), None
    )
    if first_release is not None:
        manager.add_family(first_release.family)
    # If God did not exist, man would be forced to invent him.
    else:
        manager.add_family(0)
def construct_releases(entries, app):
    """
    Walk changelog ``entries`` (newest-first) and bucket them into releases.

    Returns a 2-tuple of (ordered release dicts, populated LineManager).
    """
    log = partial(_log, config=app.config)
    # Releases, stored in encounter order (oldest first at this stage).
    releases = []
    # Release lines, organized by major release family, then major+minor,
    # alongside per-major-family 'unreleased' bugfix/feature buckets.
    # NOTE: unstable_prehistory=True instead uses a single undifferentiated
    # 'unreleased' bucket (still within the '0' major line family).
    manager = LineManager(app)
    # Master hash of issues by number, to detect duplicates & assist in
    # explicitly defined release lists.
    issues = {}
    # Walk from back to front, consuming entries & copying them into
    # per-release buckets as releases are encountered.
    oldest_first = list(reversed(entries))
    # Pre-stripped view for lookahead, so we're not stripping O(n) times.
    # TODO: probs just merge the two into e.g. a list of 2-tuples of
    # "actual entry obj + rest"?
    stripped = [item[0][0] for item in oldest_first]
    # Prime the manager with the 1st major release line.
    handle_first_release_line(stripped, manager)
    for index, item in enumerate(oldest_first):
        # The Issue/Release object is always found at LI index 0 (first,
        # often only P), first child within that. Everything else in 'item'
        # is preserved as the description.
        focus = item[0].pop(0)
        rest = item
        log(repr(focus))
        if isinstance(focus, Release):
            # Releases 'eat' the entries in their line's list and get added
            # to the final data structure; they also inform new
            # release-line 'buffers'. Once the release obj is removed, the
            # line should be empty or a comma-separated issue-number list.
            construct_entry_with_release(
                focus, issues, manager, log, releases, rest
            )
            # Look ahead: if we're entering the "last stretch before a
            # major release", pre-emptively register its family so upcoming
            # features sort into it by default (re: logic in
            # Release.add_to_manager).
            handle_upcoming_major_release(stripped[index + 1:], manager)
        else:
            # Issues are copied into release-line buckets as follows:
            # * Features and support go into 'unreleased_feature' for use
            #   in new feature releases.
            # * Bugfixes go into all release lines (so they can appear in
            #   >1 bugfix release) plus 'unreleased_bugfix'. Caveats: bugs
            #   marked 'major' go into unreleased_feature instead; 'N.N+'
            #   ones only go into buckets for that release and up.
            # * 'backported' support/feature entries go into all release
            #   lines too, assuming release to all active branches.
            # * 'rest' (the description - vitally important!) is stashed on
            #   the issue object for construct_nodes() to unpack later.
            construct_entry_without_release(focus, issues, manager, log, rest)
    if manager.unstable_prehistory:
        releases.append(generate_unreleased_entry(
            header="Next release",
            line="unreleased",
            issues=manager[0]['unreleased'],
            manager=manager,
            app=app,
        ))
    else:
        append_unreleased_entries(app, manager, releases)
    reorder_release_entries(releases)
    return releases, manager
def construct_nodes(releases):
    """Render release dicts into a flat list of docutils nodes."""
    result = []
    # Reverse the list again so the final display is newest on top.
    for release in reversed(releases):
        if not release['entries']:
            continue
        obj = release['obj']
        rendered = []
        for entry in release['entries']:
            # Deepcopy the description node: the same object may be
            # referenced from multiple release lists (e.g. a reference in
            # the description of #649 copied into 2 releases), and
            # duplicate references in the doctree make subsequent parse
            # steps very angry (index() errors).
            desc = entry['description'].deepcopy()
            # Expand any other issue roles found inline in the description
            # - they can't remain Issue objects at render time since that's
            # undefined. [:] slices avoid mutation during the loops.
            for i, child in enumerate(desc[:]):
                for j, grandchild in enumerate(child[:]):
                    if isinstance(grandchild, Issue):
                        desc[i][j:j + 1] = grandchild['nodelist']
            # Insert the now-rendered issue nodes in front of the 1st
            # paragraph of the description (the preserved LI + nested
            # paragraph-or-more from the original markup).
            # FIXME: why is there no "prepend a list" method?
            for node in reversed(entry['nodelist']):
                desc[0].insert(0, node)
            rendered.append(desc)
        # Entry list, hung off the release nodelist's head section.
        bullets = nodes.bullet_list('', *rendered)
        obj['nodelist'][0].append(bullets)
        # Release header.
        header = nodes.paragraph('', '', *obj['nodelist'])
        result.extend(header)
    return result
class BulletListVisitor(nodes.NodeVisitor):
    """Doctree visitor replacing the first bullet list with release nodes.

    The first bullet list found (expected to be the first one at the top
    level of the document) is assumed to be the changelog.
    """

    def __init__(self, document, app):
        nodes.NodeVisitor.__init__(self, document)
        self.found_changelog = False
        self.app = app

    def visit_bullet_list(self, node):
        # Only the first bullet list is treated as the changelog.
        if self.found_changelog:
            return
        self.found_changelog = True
        # Walk + parse into release mapping, then swap in the new nodes.
        releases, _ = construct_releases(node.children, self.app)
        node.replace_self(construct_nodes(releases))

    def unknown_visit(self, node):
        pass
def generate_changelog(app, doctree):
    """'doctree-read' hook: transmute the changelog document's bullet list."""
    # Only scan/mutate documents matching the configured document name
    # (default: ['changelog']).
    if app.env.docname not in app.config.releases_document_name:
        return
    # Replace the first bullet-list node with our organized/parsed elements.
    visitor = BulletListVisitor(doctree, app)
    doctree.walk(visitor)
def setup(app):
    """Sphinx extension entry point: config values, roles, and hooks."""
    config_defaults = (
        # Issue base URI, e.g. 'https://github.com/fabric/fabric/issues/'
        ('issue_uri', None),
        # Release-tag base URI, e.g. 'https://github.com/fabric/fabric/tree/'
        ('release_uri', None),
        # Convenience Github version of above
        ('github_path', None),
        # Which document(s) to use as the changelog
        ('document_name', ['changelog']),
        # Debug output
        ('debug', False),
        # Whether to enable linear history during 0.x release timeline
        # TODO: flip this to True by default in our 2.0 release
        ('unstable_prehistory', False),
    )
    for key, default in config_defaults:
        app.add_config_value(
            name='releases_{}'.format(key), default=default, rebuild='html'
        )
    # Back-compat: a bare string for `document_name` becomes a 1-item list.
    # https://stackoverflow.com/questions/1303243/how-to-find-out-if-a-python-object-is-a-string
    PY2 = sys.version_info[0] == 2
    if PY2:
        string_types = (basestring,)  # noqa: F821 (Python 2 only)
    else:
        string_types = (str,)
    if isinstance(app.config.releases_document_name, string_types):
        app.config.releases_document_name = [app.config.releases_document_name]
    # Register intermediate roles
    for role_name in list(ISSUE_TYPES) + ['issue']:
        add_role(app, role_name, issues_role)
    add_role(app, 'release', release_role)
    # Hook in our changelog transmutation at appropriate step
    app.connect('doctree-read', generate_changelog)
    # Identifies the version of our extension
    return {'version': __version__}
def add_role(app, name, role_obj):
    """Register ``role_obj`` under ``name`` unless docutils already has it.

    Introspecting docutils.parsers.rst.roles._roles is the same trick Sphinx
    uses to emit warnings about double-registering; configuring the app
    early enough to suppress those warnings is a PITA, so we instead
    just...don't double-register. Meh.
    """
    if name not in roles._roles:
        app.add_role(name, role_obj)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.