repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
Feneric/doxypypy | doxypypy/doxypypy.py | AstWalker.visit_Module | def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes) | python | def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes) | [
"def",
"visit_Module",
"(",
"self",
",",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"containingNodes",
"=",
"kwargs",
".",
"get",
"(",
"'containingNodes'",
",",
"[",
"]",
")",
"if",
"self",
".",
"options",
".",
"debug",
":",
"stderr",
".",
"write",
"... | Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set. | [
"Handles",
"the",
"module",
"-",
"level",
"docstring",
"."
] | a8555b15fa2a758ea8392372de31c0f635cc0d93 | https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L594-L614 | train | 31,000 |
Feneric/doxypypy | doxypypy/doxypypy.py | AstWalker.visit_Assign | def visit_Assign(self, node, **kwargs):
"""
Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen.
"""
lineNum = node.lineno - 1
# Assignments have one Doxygen-significant special case:
# interface attributes.
match = AstWalker.__attributeRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
'{0}# @hideinitializer{2}{4}{2}'.format(
match.group(1),
match.group(2),
linesep,
match.group(3),
self.lines[lineNum].rstrip()
)
if self.options.debug:
stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
linesep))
if isinstance(node.targets[0], Name):
match = AstWalker.__indentRE.match(self.lines[lineNum])
indentStr = match and match.group(1) or ''
restrictionLevel = self._checkMemberName(node.targets[0].id)
if restrictionLevel:
self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
indentStr,
node.targets[0].id,
linesep,
restrictionLevel,
self.lines[lineNum].rstrip()
)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes']) | python | def visit_Assign(self, node, **kwargs):
"""
Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen.
"""
lineNum = node.lineno - 1
# Assignments have one Doxygen-significant special case:
# interface attributes.
match = AstWalker.__attributeRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
'{0}# @hideinitializer{2}{4}{2}'.format(
match.group(1),
match.group(2),
linesep,
match.group(3),
self.lines[lineNum].rstrip()
)
if self.options.debug:
stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
linesep))
if isinstance(node.targets[0], Name):
match = AstWalker.__indentRE.match(self.lines[lineNum])
indentStr = match and match.group(1) or ''
restrictionLevel = self._checkMemberName(node.targets[0].id)
if restrictionLevel:
self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
indentStr,
node.targets[0].id,
linesep,
restrictionLevel,
self.lines[lineNum].rstrip()
)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes']) | [
"def",
"visit_Assign",
"(",
"self",
",",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"lineNum",
"=",
"node",
".",
"lineno",
"-",
"1",
"# Assignments have one Doxygen-significant special case:",
"# interface attributes.",
"match",
"=",
"AstWalker",
".",
"__attributeRE... | Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen. | [
"Handles",
"assignments",
"within",
"code",
"."
] | a8555b15fa2a758ea8392372de31c0f635cc0d93 | https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L616-L656 | train | 31,001 |
Feneric/doxypypy | doxypypy/doxypypy.py | AstWalker.visit_Call | def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes']) | python | def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes']) | [
"def",
"visit_Call",
"(",
"self",
",",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"lineNum",
"=",
"node",
".",
"lineno",
"-",
"1",
"# Function calls have one Doxygen-significant special case: interface",
"# implementations.",
"match",
"=",
"AstWalker",
".",
"__impl... | Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen. | [
"Handles",
"function",
"calls",
"within",
"code",
"."
] | a8555b15fa2a758ea8392372de31c0f635cc0d93 | https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L658-L678 | train | 31,002 |
Feneric/doxypypy | doxypypy/doxypypy.py | AstWalker.visit_FunctionDef | def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop() | python | def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop() | [
"def",
"visit_FunctionDef",
"(",
"self",
",",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"options",
".",
"debug",
":",
"stderr",
".",
"write",
"(",
"\"# Function {0.name}{1}\"",
".",
"format",
"(",
"node",
",",
"linesep",
")",
")",
"... | Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition. | [
"Handles",
"function",
"definitions",
"within",
"code",
"."
] | a8555b15fa2a758ea8392372de31c0f635cc0d93 | https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L680-L708 | train | 31,003 |
Feneric/doxypypy | doxypypy/doxypypy.py | AstWalker.visit_ClassDef | def visit_ClassDef(self, node, **kwargs):
"""
Handles class definitions within code.
Process the docstring. Note though that in Python Class definitions
are used to define interfaces in addition to classes.
If a class definition appears to be an interface definition tag it as an
interface definition for Doxygen. Otherwise tag it as a class
definition for Doxygen.
"""
lineNum = node.lineno - 1
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is a method or an interface method definition or if
# a class is fully contained within another class.
containingNodes = kwargs.get('containingNodes') or []
if not self.options.object_respect:
# Remove object class of the inherited class list to avoid that all
# new-style class inherits from object in the hierarchy class
line = self.lines[lineNum]
match = AstWalker.__classRE.match(line)
if match:
if match.group(2) == 'object':
self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
match = AstWalker.__interfaceRE.match(self.lines[lineNum])
if match:
if self.options.debug:
stderr.write("# Interface {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'interface'))
else:
if self.options.debug:
stderr.write("# Class {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'class'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
# Class definitions have one Doxygen-significant special case:
# interface definitions.
if match:
contextTag = '{0}{1}# @interface {2}'.format(tail,
linesep,
match.group(1))
else:
contextTag = tail
contextTag = self._processMembers(node, contextTag)
if get_docstring(node):
self._processDocstring(node, contextTag,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop() | python | def visit_ClassDef(self, node, **kwargs):
"""
Handles class definitions within code.
Process the docstring. Note though that in Python Class definitions
are used to define interfaces in addition to classes.
If a class definition appears to be an interface definition tag it as an
interface definition for Doxygen. Otherwise tag it as a class
definition for Doxygen.
"""
lineNum = node.lineno - 1
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is a method or an interface method definition or if
# a class is fully contained within another class.
containingNodes = kwargs.get('containingNodes') or []
if not self.options.object_respect:
# Remove object class of the inherited class list to avoid that all
# new-style class inherits from object in the hierarchy class
line = self.lines[lineNum]
match = AstWalker.__classRE.match(line)
if match:
if match.group(2) == 'object':
self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
match = AstWalker.__interfaceRE.match(self.lines[lineNum])
if match:
if self.options.debug:
stderr.write("# Interface {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'interface'))
else:
if self.options.debug:
stderr.write("# Class {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'class'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
# Class definitions have one Doxygen-significant special case:
# interface definitions.
if match:
contextTag = '{0}{1}# @interface {2}'.format(tail,
linesep,
match.group(1))
else:
contextTag = tail
contextTag = self._processMembers(node, contextTag)
if get_docstring(node):
self._processDocstring(node, contextTag,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop() | [
"def",
"visit_ClassDef",
"(",
"self",
",",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"lineNum",
"=",
"node",
".",
"lineno",
"-",
"1",
"# Push either 'interface' or 'class' onto our containing nodes",
"# hierarchy so we can keep track of context. This will let us tell",
"#... | Handles class definitions within code.
Process the docstring. Note though that in Python Class definitions
are used to define interfaces in addition to classes.
If a class definition appears to be an interface definition tag it as an
interface definition for Doxygen. Otherwise tag it as a class
definition for Doxygen. | [
"Handles",
"class",
"definitions",
"within",
"code",
"."
] | a8555b15fa2a758ea8392372de31c0f635cc0d93 | https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L710-L766 | train | 31,004 |
Feneric/doxypypy | doxypypy/doxypypy.py | AstWalker.parseLines | def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst) | python | def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst) | [
"def",
"parseLines",
"(",
"self",
")",
":",
"inAst",
"=",
"parse",
"(",
"''",
".",
"join",
"(",
"self",
".",
"lines",
")",
",",
"self",
".",
"inFilename",
")",
"# Visit all the nodes in our tree and apply Doxygen tags to the source.",
"self",
".",
"visit",
"(",
... | Form an AST for the code and produce a new version of the source. | [
"Form",
"an",
"AST",
"for",
"the",
"code",
"and",
"produce",
"a",
"new",
"version",
"of",
"the",
"source",
"."
] | a8555b15fa2a758ea8392372de31c0f635cc0d93 | https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L768-L772 | train | 31,005 |
Miserlou/django-zappa | django_zappa/handler.py | lambda_handler | def lambda_handler(event, context=None, settings_name="zappa_settings"): # NoQA
"""
An AWS Lambda function which parses specific API Gateway input into a WSGI request.
The request get fed it to Django, processes the Django response, and returns that
back to the API Gateway.
"""
time_start = datetime.datetime.now()
# If in DEBUG mode, log all raw incoming events.
if settings.DEBUG:
logger.info('Zappa Event: {}'.format(event))
# This is a normal HTTP request
if event.get('method', None):
# Create the environment for WSGI and handle the request
environ = create_wsgi_request(event, script_name=settings.SCRIPT_NAME)
# We are always on https on Lambda, so tell our wsgi app that.
environ['HTTPS'] = 'on'
environ['wsgi.url_scheme'] = 'https'
wrap_me = get_wsgi_application()
app = ZappaWSGIMiddleware(wrap_me)
# Execute the application
response = Response.from_app(app, environ)
response.content = response.data
# Prepare the special dictionary which will be returned to the API GW.
returnme = {'Content': response.data}
# Pack the WSGI response into our special dictionary.
for (header_name, header_value) in response.headers:
returnme[header_name] = header_value
returnme['Status'] = response.status_code
# To ensure correct status codes, we need to
# pack the response as a deterministic B64 string and raise it
# as an error to match our APIGW regex.
# The DOCTYPE ensures that the page still renders in the browser.
exception = None
if response.status_code in ERROR_CODES:
content = u"<!DOCTYPE html>" + unicode(response.status_code) + unicode('<meta charset="utf-8" />') + response.data.encode('utf-8')
b64_content = base64.b64encode(content)
exception = (b64_content)
# Internal are changed to become relative redirects
# so they still work for apps on raw APIGW and on a domain.
elif 300 <= response.status_code < 400 and response.has_header('Location'):
location = returnme['Location']
location = '/' + location.replace("http://zappa/", "")
exception = location
# Calculate the total response time,
# and log it in the Common Log format.
time_end = datetime.datetime.now()
delta = time_end - time_start
response_time_ms = delta.total_seconds() * 1000
common_log(environ, response, response_time=response_time_ms)
# Finally, return the response to API Gateway.
if exception:
raise Exception(exception)
else:
return returnme
# This is a management command invocation.
elif event.get('command', None):
from django.core import management
# Couldn't figure out how to get the value into stdout with StringIO..
# Read the log for now. :[]
management.call_command(*event['command'].split(' '))
return {}
elif event.get('detail'):
module, function = event['detail'].rsplit('.', 1)
app_module = importlib.import_module(module)
app_function = getattr(app_module, function)
# Execute the function!
app_function()
return
else:
logger.error('Unhandled event: {}'.format(json.dumps(event))) | python | def lambda_handler(event, context=None, settings_name="zappa_settings"): # NoQA
"""
An AWS Lambda function which parses specific API Gateway input into a WSGI request.
The request get fed it to Django, processes the Django response, and returns that
back to the API Gateway.
"""
time_start = datetime.datetime.now()
# If in DEBUG mode, log all raw incoming events.
if settings.DEBUG:
logger.info('Zappa Event: {}'.format(event))
# This is a normal HTTP request
if event.get('method', None):
# Create the environment for WSGI and handle the request
environ = create_wsgi_request(event, script_name=settings.SCRIPT_NAME)
# We are always on https on Lambda, so tell our wsgi app that.
environ['HTTPS'] = 'on'
environ['wsgi.url_scheme'] = 'https'
wrap_me = get_wsgi_application()
app = ZappaWSGIMiddleware(wrap_me)
# Execute the application
response = Response.from_app(app, environ)
response.content = response.data
# Prepare the special dictionary which will be returned to the API GW.
returnme = {'Content': response.data}
# Pack the WSGI response into our special dictionary.
for (header_name, header_value) in response.headers:
returnme[header_name] = header_value
returnme['Status'] = response.status_code
# To ensure correct status codes, we need to
# pack the response as a deterministic B64 string and raise it
# as an error to match our APIGW regex.
# The DOCTYPE ensures that the page still renders in the browser.
exception = None
if response.status_code in ERROR_CODES:
content = u"<!DOCTYPE html>" + unicode(response.status_code) + unicode('<meta charset="utf-8" />') + response.data.encode('utf-8')
b64_content = base64.b64encode(content)
exception = (b64_content)
# Internal are changed to become relative redirects
# so they still work for apps on raw APIGW and on a domain.
elif 300 <= response.status_code < 400 and response.has_header('Location'):
location = returnme['Location']
location = '/' + location.replace("http://zappa/", "")
exception = location
# Calculate the total response time,
# and log it in the Common Log format.
time_end = datetime.datetime.now()
delta = time_end - time_start
response_time_ms = delta.total_seconds() * 1000
common_log(environ, response, response_time=response_time_ms)
# Finally, return the response to API Gateway.
if exception:
raise Exception(exception)
else:
return returnme
# This is a management command invocation.
elif event.get('command', None):
from django.core import management
# Couldn't figure out how to get the value into stdout with StringIO..
# Read the log for now. :[]
management.call_command(*event['command'].split(' '))
return {}
elif event.get('detail'):
module, function = event['detail'].rsplit('.', 1)
app_module = importlib.import_module(module)
app_function = getattr(app_module, function)
# Execute the function!
app_function()
return
else:
logger.error('Unhandled event: {}'.format(json.dumps(event))) | [
"def",
"lambda_handler",
"(",
"event",
",",
"context",
"=",
"None",
",",
"settings_name",
"=",
"\"zappa_settings\"",
")",
":",
"# NoQA",
"time_start",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"# If in DEBUG mode, log all raw incoming events.",
"if",
... | An AWS Lambda function which parses specific API Gateway input into a WSGI request.
The request get fed it to Django, processes the Django response, and returns that
back to the API Gateway. | [
"An",
"AWS",
"Lambda",
"function",
"which",
"parses",
"specific",
"API",
"Gateway",
"input",
"into",
"a",
"WSGI",
"request",
"."
] | 7a8083ab6257a0bf0f5c9ae460afabd4de4e2215 | https://github.com/Miserlou/django-zappa/blob/7a8083ab6257a0bf0f5c9ae460afabd4de4e2215/django_zappa/handler.py#L41-L126 | train | 31,006 |
Miserlou/django-zappa | django_zappa/management/commands/zappa_command.py | ZappaCommand.require_settings | def require_settings(self, args, options):
"""
Load the ZAPPA_SETTINGS as we expect it.
"""
if not options.has_key('environment'):
print(
"You must call deploy with an environment name. \n python manage.py deploy <environment>")
raise ImproperlyConfigured
from django.conf import settings
if not 'ZAPPA_SETTINGS' in dir(settings):
print(
"Please define your ZAPPA_SETTINGS in your settings file before deploying.")
raise ImproperlyConfigured
self.zappa_settings = settings.ZAPPA_SETTINGS
# Set your configuration
if type(options['environment']) == list:
self.api_stage = options['environment'][0]
else:
self.api_stage = options['environment']
if self.zappa_settings[self.api_stage].get('project_name'):
self.project_name = self.zappa_settings[self.api_stage]['project_name']
else:
self.project_name = os.path.abspath(settings.BASE_DIR).split(os.sep)[-1]
self.lambda_name = slugify(self.project_name + '-' + self.api_stage).replace("_","-")
if self.api_stage not in self.zappa_settings.keys():
print("Please make sure that the environment '" + self.api_stage +
"' is defined in your ZAPPA_SETTINGS in your settings file before deploying.")
raise ImproperlyConfigured
# Load environment-specific settings
self.s3_bucket_name = self.zappa_settings[self.api_stage]['s3_bucket']
self.vpc_config = self.zappa_settings[
self.api_stage].get('vpc_config', {})
self.memory_size = self.zappa_settings[
self.api_stage].get('memory_size', 512)
self.timeout = self.zappa_settings[
self.api_stage].get('timeout', 30)
custom_settings = [
'http_methods',
'parameter_depth',
'integration_response_codes',
'method_response_codes',
'role_name',
'aws_region'
]
for setting in custom_settings:
if self.zappa_settings[self.api_stage].has_key(setting):
setattr(self.zappa, setting, self.zappa_settings[
self.api_stage][setting]) | python | def require_settings(self, args, options):
"""
Load the ZAPPA_SETTINGS as we expect it.
"""
if not options.has_key('environment'):
print(
"You must call deploy with an environment name. \n python manage.py deploy <environment>")
raise ImproperlyConfigured
from django.conf import settings
if not 'ZAPPA_SETTINGS' in dir(settings):
print(
"Please define your ZAPPA_SETTINGS in your settings file before deploying.")
raise ImproperlyConfigured
self.zappa_settings = settings.ZAPPA_SETTINGS
# Set your configuration
if type(options['environment']) == list:
self.api_stage = options['environment'][0]
else:
self.api_stage = options['environment']
if self.zappa_settings[self.api_stage].get('project_name'):
self.project_name = self.zappa_settings[self.api_stage]['project_name']
else:
self.project_name = os.path.abspath(settings.BASE_DIR).split(os.sep)[-1]
self.lambda_name = slugify(self.project_name + '-' + self.api_stage).replace("_","-")
if self.api_stage not in self.zappa_settings.keys():
print("Please make sure that the environment '" + self.api_stage +
"' is defined in your ZAPPA_SETTINGS in your settings file before deploying.")
raise ImproperlyConfigured
# Load environment-specific settings
self.s3_bucket_name = self.zappa_settings[self.api_stage]['s3_bucket']
self.vpc_config = self.zappa_settings[
self.api_stage].get('vpc_config', {})
self.memory_size = self.zappa_settings[
self.api_stage].get('memory_size', 512)
self.timeout = self.zappa_settings[
self.api_stage].get('timeout', 30)
custom_settings = [
'http_methods',
'parameter_depth',
'integration_response_codes',
'method_response_codes',
'role_name',
'aws_region'
]
for setting in custom_settings:
if self.zappa_settings[self.api_stage].has_key(setting):
setattr(self.zappa, setting, self.zappa_settings[
self.api_stage][setting]) | [
"def",
"require_settings",
"(",
"self",
",",
"args",
",",
"options",
")",
":",
"if",
"not",
"options",
".",
"has_key",
"(",
"'environment'",
")",
":",
"print",
"(",
"\"You must call deploy with an environment name. \\n python manage.py deploy <environment>\"",
")",
"rai... | Load the ZAPPA_SETTINGS as we expect it. | [
"Load",
"the",
"ZAPPA_SETTINGS",
"as",
"we",
"expect",
"it",
"."
] | 7a8083ab6257a0bf0f5c9ae460afabd4de4e2215 | https://github.com/Miserlou/django-zappa/blob/7a8083ab6257a0bf0f5c9ae460afabd4de4e2215/django_zappa/management/commands/zappa_command.py#L43-L98 | train | 31,007 |
adamcharnock/swiftwind | swiftwind/housemates/forms.py | HousemateCreateForm.clean_account | def clean_account(self):
"""Ensure this is an income account"""
account = self.cleaned_data['account']
if not account:
return
if account.type != Account.TYPES.income:
raise ValidationError('Account must be an income account')
try:
account.housemate
except Housemate.DoesNotExist:
pass
else:
raise ValidationError('Account already has a housemate')
return account | python | def clean_account(self):
"""Ensure this is an income account"""
account = self.cleaned_data['account']
if not account:
return
if account.type != Account.TYPES.income:
raise ValidationError('Account must be an income account')
try:
account.housemate
except Housemate.DoesNotExist:
pass
else:
raise ValidationError('Account already has a housemate')
return account | [
"def",
"clean_account",
"(",
"self",
")",
":",
"account",
"=",
"self",
".",
"cleaned_data",
"[",
"'account'",
"]",
"if",
"not",
"account",
":",
"return",
"if",
"account",
".",
"type",
"!=",
"Account",
".",
"TYPES",
".",
"income",
":",
"raise",
"Validatio... | Ensure this is an income account | [
"Ensure",
"this",
"is",
"an",
"income",
"account"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/housemates/forms.py#L28-L44 | train | 31,008 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.get_amount_normal | def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount | python | def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount | [
"def",
"get_amount_normal",
"(",
"self",
",",
"billing_cycle",
")",
":",
"if",
"self",
".",
"is_one_off",
"(",
")",
":",
"billing_cycle_number",
"=",
"self",
".",
"_get_billing_cycle_number",
"(",
"billing_cycle",
")",
"if",
"billing_cycle_number",
">",
"self",
... | Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle. | [
"Get",
"the",
"amount",
"due",
"on",
"the",
"given",
"billing",
"cycle"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L121-L144 | train | 31,009 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.get_amount_arrears_balance | def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
) | python | def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
) | [
"def",
"get_amount_arrears_balance",
"(",
"self",
",",
"billing_cycle",
")",
":",
"return",
"self",
".",
"to_account",
".",
"balance",
"(",
"transaction__date__lt",
"=",
"billing_cycle",
".",
"date_range",
".",
"lower",
",",
")"
] | Get the balance of to_account at the end of billing_cycle | [
"Get",
"the",
"balance",
"of",
"to_account",
"at",
"the",
"end",
"of",
"billing_cycle"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L146-L150 | train | 31,010 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.get_amount_arrears_transactions | def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
) | python | def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
) | [
"def",
"get_amount_arrears_transactions",
"(",
"self",
",",
"billing_cycle",
")",
":",
"previous_billing_cycle",
"=",
"billing_cycle",
".",
"get_previous",
"(",
")",
"if",
"not",
"previous_billing_cycle",
":",
"return",
"Decimal",
"(",
"0",
")",
"return",
"self",
... | Get the sum of all transaction legs in to_account during given billing cycle | [
"Get",
"the",
"sum",
"of",
"all",
"transaction",
"legs",
"in",
"to_account",
"during",
"given",
"billing",
"cycle"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L152-L160 | train | 31,011 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.enact | def enact(self, billing_cycle, disable_if_done=True):
"""Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle
"""
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle) | python | def enact(self, billing_cycle, disable_if_done=True):
"""Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle
"""
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle) | [
"def",
"enact",
"(",
"self",
",",
"billing_cycle",
",",
"disable_if_done",
"=",
"True",
")",
":",
"as_of",
"=",
"billing_cycle",
".",
"date_range",
".",
"lower",
"if",
"not",
"self",
".",
"is_enactable",
"(",
"as_of",
")",
":",
"raise",
"CannotEnactUnenactab... | Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle | [
"Enact",
"this",
"RecurringCost",
"for",
"the",
"given",
"billing",
"cycle"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L166-L194 | train | 31,012 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.disable_if_done | def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save() | python | def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save() | [
"def",
"disable_if_done",
"(",
"self",
",",
"commit",
"=",
"True",
")",
":",
"if",
"self",
".",
"_is_billing_complete",
"(",
")",
"and",
"not",
"self",
".",
"disabled",
":",
"self",
".",
"disabled",
"=",
"True",
"if",
"commit",
":",
"self",
".",
"save"... | Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs. | [
"Set",
"disabled",
"=",
"True",
"if",
"we",
"have",
"billed",
"all",
"we",
"need",
"to"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L196-L205 | train | 31,013 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.is_enactable | def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete() | python | def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete() | [
"def",
"is_enactable",
"(",
"self",
",",
"as_of",
")",
":",
"return",
"not",
"self",
".",
"disabled",
"and",
"not",
"self",
".",
"archived",
"and",
"not",
"self",
".",
"_is_finished",
"(",
"as_of",
")",
"and",
"self",
".",
"_is_ready",
"(",
"as_of",
")... | Can this RecurringCost be enacted | [
"Can",
"this",
"RecurringCost",
"be",
"enacted"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L217-L224 | train | 31,014 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.has_enacted | def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists() | python | def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists() | [
"def",
"has_enacted",
"(",
"self",
",",
"billing_cycle",
")",
":",
"return",
"RecurredCost",
".",
"objects",
".",
"filter",
"(",
"recurring_cost",
"=",
"self",
",",
"billing_cycle",
"=",
"billing_cycle",
",",
")",
".",
"exists",
"(",
")"
] | Has this recurring cost already enacted transactions for given billing cycle? | [
"Has",
"this",
"recurring",
"cost",
"already",
"enacted",
"transactions",
"for",
"given",
"billing",
"cycle?"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L226-L231 | train | 31,015 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost._is_ready | def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True | python | def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True | [
"def",
"_is_ready",
"(",
"self",
",",
"as_of",
")",
":",
"if",
"self",
".",
"is_one_off",
"(",
")",
":",
"return",
"self",
".",
"initial_billing_cycle",
".",
"date_range",
".",
"lower",
"<=",
"as_of",
"else",
":",
"return",
"True"
] | Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date): | [
"Is",
"the",
"RecurringCost",
"ready",
"to",
"be",
"enacted",
"as",
"of",
"the",
"date",
"as_of"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L236-L248 | train | 31,016 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost._is_finished | def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False | python | def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False | [
"def",
"_is_finished",
"(",
"self",
",",
"as_of",
")",
":",
"if",
"self",
".",
"is_one_off",
"(",
")",
":",
"last_billing_cycle",
"=",
"self",
".",
"get_billing_cycles",
"(",
")",
"[",
"self",
".",
"total_billing_cycles",
"-",
"1",
"]",
"return",
"last_bil... | Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost. | [
"Have",
"the",
"specified",
"number",
"of",
"billing",
"cycles",
"been",
"completed?"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L250-L259 | train | 31,017 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost._is_billing_complete | def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False | python | def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False | [
"def",
"_is_billing_complete",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_one_off",
"(",
")",
":",
"return",
"self",
".",
"get_billed_amount",
"(",
")",
">=",
"Balance",
"(",
"self",
".",
"fixed_amount",
",",
"self",
".",
"currency",
")",
"else",
":",... | Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost. | [
"Has",
"the",
"specified",
"fixed_amount",
"been",
"billed?"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L261-L269 | train | 31,018 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost._get_billing_cycle_number | def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number | python | def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number | [
"def",
"_get_billing_cycle_number",
"(",
"self",
",",
"billing_cycle",
")",
":",
"begins_before_initial_date",
"=",
"billing_cycle",
".",
"date_range",
".",
"lower",
"<",
"self",
".",
"initial_billing_cycle",
".",
"date_range",
".",
"lower",
"if",
"begins_before_initi... | Gets the 1-indexed number of the billing cycle relative to the provided billing cycle | [
"Gets",
"the",
"1",
"-",
"indexed",
"number",
"of",
"the",
"billing",
"cycle",
"relative",
"to",
"the",
"provided",
"billing",
"cycle"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L271-L287 | train | 31,019 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCostSplitQuerySet.split | def split(self, amount):
"""Split the value given by amount according to the RecurringCostSplit's portions
Args:
amount (Decimal):
Returns:
list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal)
"""
split_objs = list(self.all())
if not split_objs:
raise NoSplitsFoundForRecurringCost()
portions = [split_obj.portion for split_obj in split_objs]
split_amounts = ratio_split(amount, portions)
return [
(split_objs[i], split_amount)
for i, split_amount
in enumerate(split_amounts)
] | python | def split(self, amount):
"""Split the value given by amount according to the RecurringCostSplit's portions
Args:
amount (Decimal):
Returns:
list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal)
"""
split_objs = list(self.all())
if not split_objs:
raise NoSplitsFoundForRecurringCost()
portions = [split_obj.portion for split_obj in split_objs]
split_amounts = ratio_split(amount, portions)
return [
(split_objs[i], split_amount)
for i, split_amount
in enumerate(split_amounts)
] | [
"def",
"split",
"(",
"self",
",",
"amount",
")",
":",
"split_objs",
"=",
"list",
"(",
"self",
".",
"all",
"(",
")",
")",
"if",
"not",
"split_objs",
":",
"raise",
"NoSplitsFoundForRecurringCost",
"(",
")",
"portions",
"=",
"[",
"split_obj",
".",
"portion"... | Split the value given by amount according to the RecurringCostSplit's portions
Args:
amount (Decimal):
Returns:
list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal) | [
"Split",
"the",
"value",
"given",
"by",
"amount",
"according",
"to",
"the",
"RecurringCostSplit",
"s",
"portions"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L300-L320 | train | 31,020 |
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurredCost.make_transaction | def make_transaction(self):
"""Create the transaction for this RecurredCost
May only be used to create the RecurredCost's initial transaction.
Returns:
Transaction: The created transaction, also assigned to self.transaction. None if the amount is zero.
"""
if self.pk:
raise CannotRecreateTransactionOnRecurredCost(
'The transaction for this recurred cost has already been created. You cannot create it again.'
)
amount = self.recurring_cost.get_amount(self.billing_cycle)
# It is quite possible that there will be nothing to bill, in which
# case we cannot create a transaction with no legs, nor can we create
# legs with zero values. Therefore we don't create any transaction.
if not amount:
return None
self.transaction = Transaction.objects.create(
description='Created by recurring cost',
date=self.billing_cycle.date_range.lower
)
# Use the SplitManager's custom queryset's split() method to get the
# amount to be billed for each split
splits = self.recurring_cost.splits.all().split(amount)
# Create the transaction leg for the outbound funds
# (normally to an expense account)
self.transaction.legs.add(Leg.objects.create(
transaction=self.transaction,
amount=Money(amount, self.recurring_cost.currency),
account=self.recurring_cost.to_account,
))
for split, split_amount in splits:
# Create the transaction legs for the inbound funds
# (from housemate accounts)
if split_amount:
self.transaction.legs.add(Leg.objects.create(
transaction=self.transaction,
amount=Money(split_amount * -1, self.recurring_cost.currency),
account=split.from_account,
))
return self.transaction | python | def make_transaction(self):
"""Create the transaction for this RecurredCost
May only be used to create the RecurredCost's initial transaction.
Returns:
Transaction: The created transaction, also assigned to self.transaction. None if the amount is zero.
"""
if self.pk:
raise CannotRecreateTransactionOnRecurredCost(
'The transaction for this recurred cost has already been created. You cannot create it again.'
)
amount = self.recurring_cost.get_amount(self.billing_cycle)
# It is quite possible that there will be nothing to bill, in which
# case we cannot create a transaction with no legs, nor can we create
# legs with zero values. Therefore we don't create any transaction.
if not amount:
return None
self.transaction = Transaction.objects.create(
description='Created by recurring cost',
date=self.billing_cycle.date_range.lower
)
# Use the SplitManager's custom queryset's split() method to get the
# amount to be billed for each split
splits = self.recurring_cost.splits.all().split(amount)
# Create the transaction leg for the outbound funds
# (normally to an expense account)
self.transaction.legs.add(Leg.objects.create(
transaction=self.transaction,
amount=Money(amount, self.recurring_cost.currency),
account=self.recurring_cost.to_account,
))
for split, split_amount in splits:
# Create the transaction legs for the inbound funds
# (from housemate accounts)
if split_amount:
self.transaction.legs.add(Leg.objects.create(
transaction=self.transaction,
amount=Money(split_amount * -1, self.recurring_cost.currency),
account=split.from_account,
))
return self.transaction | [
"def",
"make_transaction",
"(",
"self",
")",
":",
"if",
"self",
".",
"pk",
":",
"raise",
"CannotRecreateTransactionOnRecurredCost",
"(",
"'The transaction for this recurred cost has already been created. You cannot create it again.'",
")",
"amount",
"=",
"self",
".",
"recurri... | Create the transaction for this RecurredCost
May only be used to create the RecurredCost's initial transaction.
Returns:
Transaction: The created transaction, also assigned to self.transaction. None if the amount is zero. | [
"Create",
"the",
"transaction",
"for",
"this",
"RecurredCost"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L357-L405 | train | 31,021 |
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle.populate | def populate(cls, as_of=None):
"""Ensure the next X years of billing cycles exist
"""
return cls._populate(as_of=as_of or date.today(), delete=True) | python | def populate(cls, as_of=None):
"""Ensure the next X years of billing cycles exist
"""
return cls._populate(as_of=as_of or date.today(), delete=True) | [
"def",
"populate",
"(",
"cls",
",",
"as_of",
"=",
"None",
")",
":",
"return",
"cls",
".",
"_populate",
"(",
"as_of",
"=",
"as_of",
"or",
"date",
".",
"today",
"(",
")",
",",
"delete",
"=",
"True",
")"
] | Ensure the next X years of billing cycles exist | [
"Ensure",
"the",
"next",
"X",
"years",
"of",
"billing",
"cycles",
"exist"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L78-L81 | train | 31,022 |
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle._populate | def _populate(cls, as_of=None, delete=False):
"""Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted?
"""
billing_cycle_helper = get_billing_cycle()
billing_cycles_exist = BillingCycle.objects.exists()
try:
current_billing_cycle = BillingCycle.objects.as_of(date=as_of)
except BillingCycle.DoesNotExist:
current_billing_cycle = None
# If no cycles exist then disable the deletion logic
if not billing_cycles_exist:
delete = False
# Cycles exist, but a date has been specified outside of them
if billing_cycles_exist and not current_billing_cycle:
raise CannotPopulateForDateOutsideExistingCycles()
# Omit the current billing cycle if we are deleting (as
# deleting the current billing cycle will be a Bad Idea)
omit_current = (current_billing_cycle and delete)
stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS)
date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)
date_ranges = list(date_ranges)
beginning_date = date_ranges[0][0]
with db_transaction.atomic():
if delete:
# Delete all the future unused transactions
cls.objects.filter(start_date__gte=beginning_date).delete()
for start_date, end_date in date_ranges:
exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()
if exists:
if delete:
raise Exception(
'It should not be possible to get here as future billing cycles have just been deleted'
)
else:
# We're updating, so we can just ignore cycles that already exist
pass
else:
BillingCycle.objects.create(
date_range=(start_date, end_date),
) | python | def _populate(cls, as_of=None, delete=False):
"""Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted?
"""
billing_cycle_helper = get_billing_cycle()
billing_cycles_exist = BillingCycle.objects.exists()
try:
current_billing_cycle = BillingCycle.objects.as_of(date=as_of)
except BillingCycle.DoesNotExist:
current_billing_cycle = None
# If no cycles exist then disable the deletion logic
if not billing_cycles_exist:
delete = False
# Cycles exist, but a date has been specified outside of them
if billing_cycles_exist and not current_billing_cycle:
raise CannotPopulateForDateOutsideExistingCycles()
# Omit the current billing cycle if we are deleting (as
# deleting the current billing cycle will be a Bad Idea)
omit_current = (current_billing_cycle and delete)
stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS)
date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)
date_ranges = list(date_ranges)
beginning_date = date_ranges[0][0]
with db_transaction.atomic():
if delete:
# Delete all the future unused transactions
cls.objects.filter(start_date__gte=beginning_date).delete()
for start_date, end_date in date_ranges:
exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()
if exists:
if delete:
raise Exception(
'It should not be possible to get here as future billing cycles have just been deleted'
)
else:
# We're updating, so we can just ignore cycles that already exist
pass
else:
BillingCycle.objects.create(
date_range=(start_date, end_date),
) | [
"def",
"_populate",
"(",
"cls",
",",
"as_of",
"=",
"None",
",",
"delete",
"=",
"False",
")",
":",
"billing_cycle_helper",
"=",
"get_billing_cycle",
"(",
")",
"billing_cycles_exist",
"=",
"BillingCycle",
".",
"objects",
".",
"exists",
"(",
")",
"try",
":",
... | Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted? | [
"Populate",
"the",
"table",
"with",
"billing",
"cycles",
"starting",
"from",
"as_of"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L92-L146 | train | 31,023 |
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle.get_next | def get_next(self):
"""Get the billing cycle after this one. May return None"""
return BillingCycle.objects.filter(date_range__gt=self.date_range).order_by('date_range').first() | python | def get_next(self):
"""Get the billing cycle after this one. May return None"""
return BillingCycle.objects.filter(date_range__gt=self.date_range).order_by('date_range').first() | [
"def",
"get_next",
"(",
"self",
")",
":",
"return",
"BillingCycle",
".",
"objects",
".",
"filter",
"(",
"date_range__gt",
"=",
"self",
".",
"date_range",
")",
".",
"order_by",
"(",
"'date_range'",
")",
".",
"first",
"(",
")"
] | Get the billing cycle after this one. May return None | [
"Get",
"the",
"billing",
"cycle",
"after",
"this",
"one",
".",
"May",
"return",
"None"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L148-L150 | train | 31,024 |
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle.get_previous | def get_previous(self):
"""Get the billing cycle prior to this one. May return None"""
return BillingCycle.objects.filter(date_range__lt=self.date_range).order_by('date_range').last() | python | def get_previous(self):
"""Get the billing cycle prior to this one. May return None"""
return BillingCycle.objects.filter(date_range__lt=self.date_range).order_by('date_range').last() | [
"def",
"get_previous",
"(",
"self",
")",
":",
"return",
"BillingCycle",
".",
"objects",
".",
"filter",
"(",
"date_range__lt",
"=",
"self",
".",
"date_range",
")",
".",
"order_by",
"(",
"'date_range'",
")",
".",
"last",
"(",
")"
] | Get the billing cycle prior to this one. May return None | [
"Get",
"the",
"billing",
"cycle",
"prior",
"to",
"this",
"one",
".",
"May",
"return",
"None"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L152-L154 | train | 31,025 |
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle.is_reconciled | def is_reconciled(self):
"""Have transactions been imported and reconciled for this billing cycle?"""
from hordak.models import StatementImport, StatementLine
since = datetime(
self.date_range.lower.year,
self.date_range.lower.month,
self.date_range.lower.day,
tzinfo=UTC
)
if not StatementImport.objects.filter(timestamp__gte=since).exists():
# No import done since the end of the above billing cycle, and reconciliation
# requires an import. Therefore reconciliation can not have been done
return False
if StatementLine.objects.filter(
transaction__isnull=True,
date__gte=self.date_range.lower,
date__lt=self.date_range.upper
).exists():
# There are statement lines for this period which have not been reconciled
return False
return True | python | def is_reconciled(self):
"""Have transactions been imported and reconciled for this billing cycle?"""
from hordak.models import StatementImport, StatementLine
since = datetime(
self.date_range.lower.year,
self.date_range.lower.month,
self.date_range.lower.day,
tzinfo=UTC
)
if not StatementImport.objects.filter(timestamp__gte=since).exists():
# No import done since the end of the above billing cycle, and reconciliation
# requires an import. Therefore reconciliation can not have been done
return False
if StatementLine.objects.filter(
transaction__isnull=True,
date__gte=self.date_range.lower,
date__lt=self.date_range.upper
).exists():
# There are statement lines for this period which have not been reconciled
return False
return True | [
"def",
"is_reconciled",
"(",
"self",
")",
":",
"from",
"hordak",
".",
"models",
"import",
"StatementImport",
",",
"StatementLine",
"since",
"=",
"datetime",
"(",
"self",
".",
"date_range",
".",
"lower",
".",
"year",
",",
"self",
".",
"date_range",
".",
"lo... | Have transactions been imported and reconciled for this billing cycle? | [
"Have",
"transactions",
"been",
"imported",
"and",
"reconciled",
"for",
"this",
"billing",
"cycle?"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L156-L178 | train | 31,026 |
di/pip-api | pip_api/_hash.py | hash | def hash(filename, algorithm='sha256'):
"""
Hash the given filename. Unavailable in `pip<8.0.0`
"""
if incompatible:
raise Incompatible
if algorithm not in ['sha256', 'sha384', 'sha512']:
raise InvalidArguments('Algorithm {} not supported'.format(algorithm))
result = call('hash', '--algorithm', algorithm, filename)
# result is of the form:
# <filename>:\n--hash=<algorithm>:<hash>\n
return result.strip().split(':')[-1] | python | def hash(filename, algorithm='sha256'):
"""
Hash the given filename. Unavailable in `pip<8.0.0`
"""
if incompatible:
raise Incompatible
if algorithm not in ['sha256', 'sha384', 'sha512']:
raise InvalidArguments('Algorithm {} not supported'.format(algorithm))
result = call('hash', '--algorithm', algorithm, filename)
# result is of the form:
# <filename>:\n--hash=<algorithm>:<hash>\n
return result.strip().split(':')[-1] | [
"def",
"hash",
"(",
"filename",
",",
"algorithm",
"=",
"'sha256'",
")",
":",
"if",
"incompatible",
":",
"raise",
"Incompatible",
"if",
"algorithm",
"not",
"in",
"[",
"'sha256'",
",",
"'sha384'",
",",
"'sha512'",
"]",
":",
"raise",
"InvalidArguments",
"(",
... | Hash the given filename. Unavailable in `pip<8.0.0` | [
"Hash",
"the",
"given",
"filename",
".",
"Unavailable",
"in",
"pip<8",
".",
"0",
".",
"0"
] | 6824afa634484eca258b2f757a35b7df8ebf56d7 | https://github.com/di/pip-api/blob/6824afa634484eca258b2f757a35b7df8ebf56d7/pip_api/_hash.py#L10-L24 | train | 31,027 |
adamcharnock/swiftwind | swiftwind/core/templatetags/swiftwind_utilities.py | partition | def partition(list_, columns=2):
"""
Break a list into ``columns`` number of columns.
"""
iter_ = iter(list_)
columns = int(columns)
rows = []
while True:
row = []
for column_number in range(1, columns + 1):
try:
value = six.next(iter_)
except StopIteration:
pass
else:
row.append(value)
if not row:
return rows
rows.append(row) | python | def partition(list_, columns=2):
"""
Break a list into ``columns`` number of columns.
"""
iter_ = iter(list_)
columns = int(columns)
rows = []
while True:
row = []
for column_number in range(1, columns + 1):
try:
value = six.next(iter_)
except StopIteration:
pass
else:
row.append(value)
if not row:
return rows
rows.append(row) | [
"def",
"partition",
"(",
"list_",
",",
"columns",
"=",
"2",
")",
":",
"iter_",
"=",
"iter",
"(",
"list_",
")",
"columns",
"=",
"int",
"(",
"columns",
")",
"rows",
"=",
"[",
"]",
"while",
"True",
":",
"row",
"=",
"[",
"]",
"for",
"column_number",
... | Break a list into ``columns`` number of columns. | [
"Break",
"a",
"list",
"into",
"columns",
"number",
"of",
"columns",
"."
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/core/templatetags/swiftwind_utilities.py#L8-L29 | train | 31,028 |
adamcharnock/swiftwind | swiftwind/dashboard/views.py | DashboardView.get_balance_context | def get_balance_context(self):
"""Get the high level balances"""
bank_account = Account.objects.get(name='Bank')
return dict(
bank=bank_account,
retained_earnings_accounts=Account.objects.filter(parent__name='Retained Earnings'),
) | python | def get_balance_context(self):
"""Get the high level balances"""
bank_account = Account.objects.get(name='Bank')
return dict(
bank=bank_account,
retained_earnings_accounts=Account.objects.filter(parent__name='Retained Earnings'),
) | [
"def",
"get_balance_context",
"(",
"self",
")",
":",
"bank_account",
"=",
"Account",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"'Bank'",
")",
"return",
"dict",
"(",
"bank",
"=",
"bank_account",
",",
"retained_earnings_accounts",
"=",
"Account",
".",
"obj... | Get the high level balances | [
"Get",
"the",
"high",
"level",
"balances"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/dashboard/views.py#L15-L22 | train | 31,029 |
adamcharnock/swiftwind | swiftwind/dashboard/views.py | DashboardView.get_accounts_context | def get_accounts_context(self):
"""Get the accounts we may want to display"""
income_parent = Account.objects.get(name='Income')
housemate_parent = Account.objects.get(name='Housemate Income')
expense_parent = Account.objects.get(name='Expenses')
current_liabilities_parent = Account.objects.get(name='Current Liabilities')
long_term_liabilities_parent = Account.objects.get(name='Long Term Liabilities')
return dict(
housemate_accounts=Account.objects.filter(parent=housemate_parent),
expense_accounts=expense_parent.get_descendants(),
current_liability_accounts=Account.objects.filter(parent=current_liabilities_parent),
long_term_liability_accounts=Account.objects.filter(parent=long_term_liabilities_parent),
other_income_accounts=Account.objects.filter(~Q(pk=housemate_parent.pk), parent=income_parent)
) | python | def get_accounts_context(self):
"""Get the accounts we may want to display"""
income_parent = Account.objects.get(name='Income')
housemate_parent = Account.objects.get(name='Housemate Income')
expense_parent = Account.objects.get(name='Expenses')
current_liabilities_parent = Account.objects.get(name='Current Liabilities')
long_term_liabilities_parent = Account.objects.get(name='Long Term Liabilities')
return dict(
housemate_accounts=Account.objects.filter(parent=housemate_parent),
expense_accounts=expense_parent.get_descendants(),
current_liability_accounts=Account.objects.filter(parent=current_liabilities_parent),
long_term_liability_accounts=Account.objects.filter(parent=long_term_liabilities_parent),
other_income_accounts=Account.objects.filter(~Q(pk=housemate_parent.pk), parent=income_parent)
) | [
"def",
"get_accounts_context",
"(",
"self",
")",
":",
"income_parent",
"=",
"Account",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"'Income'",
")",
"housemate_parent",
"=",
"Account",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"'Housemate Income'",
")",... | Get the accounts we may want to display | [
"Get",
"the",
"accounts",
"we",
"may",
"want",
"to",
"display"
] | 72c715800841c3b2feabded3f3b65b76388b4cea | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/dashboard/views.py#L24-L38 | train | 31,030 |
tsroten/zhon | zhon/pinyin.py | _build_syl | def _build_syl(vowels, tone_numbers=False):
"""Builds a Pinyin syllable re pattern.
Syllables can be preceded by a middle dot (tone mark). Syllables that end
in a consonant are only valid if they aren't followed directly by a vowel
with no apostrophe in between.
The rough approach used to validate a Pinyin syllable is:
1. Get the longest valid syllable.
2. If it ends in a consonant make sure it's not followed directly by a
vowel (hyphens and apostrophes don't count).
3. If the above didn't match, repeat for the next longest valid match.
Lookahead assertions are used to ensure that hyphens and apostrophes are
only considered valid if used correctly. This helps to weed out non-Pinyin
strings.
"""
# This is the end-of-syllable-consonant lookahead assertion.
consonant_end = '(?![{a}{e}{i}{o}{u}{v}]|u:)'.format(
a=_a, e=_e, i=_i, o=_o, u=_u, v=_v
)
_vowels = vowels.copy()
for v, s in _vowels.items():
if len(s) > 1:
_vowels[v] = '[{}]'.format(s)
return (
'(?:\u00B7|\u2027)?'
'(?:'
'(?:(?:[zcs]h|[gkh])u%(a)sng%(consonant_end)s)|'
'(?:[jqx]i%(o)sng%(consonant_end)s)|'
'(?:[nljqx]i%(a)sng%(consonant_end)s)|'
'(?:(?:[zcs]h?|[dtnlgkhrjqxy])u%(a)sn%(consonant_end)s)|'
'(?:(?:[zcs]h|[gkh])u%(a)si)|'
'(?:(?:[zc]h?|[rdtnlgkhsy])%(o)sng%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rbpmfdtnlgkhw])?%(e)sng%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rbpmfdtnlgkhwy])?%(a)sng%(consonant_end)s)|'
'(?:[bpmdtnljqxy]%(i)sng%(consonant_end)s)|'
'(?:[bpmdtnljqx]i%(a)sn%(consonant_end)s)|'
'(?:[bpmdtnljqx]i%(a)so)|'
'(?:[nl](?:v|u:|\u00FC)%(e)s)|'
'(?:[nl](?:%(v)s|u:))|'
'(?:[jqxy]u%(e)s)|'
'(?:[bpmnljqxy]%(i)sn%(consonant_end)s)|'
'(?:[mdnljqx]i%(u)s)|'
'(?:[bpmdtnljqx]i%(e)s)|'
'(?:[dljqx]i%(a)s)|'
'(?:(?:[zcs]h?|[rdtnlgkhxqjy])%(u)sn%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rdtgkh])u%(i)s)|'
'(?:(?:[zcs]h?|[rdtnlgkh])u%(o)s)|'
'(?:(?:[zcs]h|[rgkh])u%(a)s)|'
'(?:(?:[zcs]h?|[rbpmfdngkhw])?%(e)sn%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rbpmfdtnlgkhwy])?%(a)sn%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rpmfdtnlgkhy])?%(o)su)|'
'(?:(?:[zcs]h?|[rbpmdtnlgkhy])?%(a)so)|'
'(?:(?:[zs]h|[bpmfdtnlgkhwz])?%(e)si)|'
'(?:(?:[zcs]h?|[bpmdtnlgkhw])?%(a)si)|'
'(?:(?:[zcs]h?|[rjqxybpmdtnl])%(i)s)|'
'(?:(?:[zcs]h?|[rwbpmfdtnlgkhjqxwy])%(u)s)|'
'(?:%(e)s(?:r%(consonant_end)s)?)|'
'(?:(?:[zcs]h?|[rmdtnlgkhy])%(e)s)|'
'(?:[bpmfwyl]?%(o)s)|'
'(?:(?:[zcs]h|[bpmfdtnlgkhzcswy])?%(a)s)|'
'(?:r%(consonant_end)s)'
')' + ('[0-5]?' if tone_numbers else '')
) % {
'consonant_end': consonant_end, 'a': _vowels['a'], 'e': _vowels['e'],
'i': _vowels['i'], 'o': _vowels['o'], 'u': _vowels['u'],
'v': _vowels['v']
} | python | def _build_syl(vowels, tone_numbers=False):
"""Builds a Pinyin syllable re pattern.
Syllables can be preceded by a middle dot (tone mark). Syllables that end
in a consonant are only valid if they aren't followed directly by a vowel
with no apostrophe in between.
The rough approach used to validate a Pinyin syllable is:
1. Get the longest valid syllable.
2. If it ends in a consonant make sure it's not followed directly by a
vowel (hyphens and apostrophes don't count).
3. If the above didn't match, repeat for the next longest valid match.
Lookahead assertions are used to ensure that hyphens and apostrophes are
only considered valid if used correctly. This helps to weed out non-Pinyin
strings.
"""
# This is the end-of-syllable-consonant lookahead assertion.
consonant_end = '(?![{a}{e}{i}{o}{u}{v}]|u:)'.format(
a=_a, e=_e, i=_i, o=_o, u=_u, v=_v
)
_vowels = vowels.copy()
for v, s in _vowels.items():
if len(s) > 1:
_vowels[v] = '[{}]'.format(s)
return (
'(?:\u00B7|\u2027)?'
'(?:'
'(?:(?:[zcs]h|[gkh])u%(a)sng%(consonant_end)s)|'
'(?:[jqx]i%(o)sng%(consonant_end)s)|'
'(?:[nljqx]i%(a)sng%(consonant_end)s)|'
'(?:(?:[zcs]h?|[dtnlgkhrjqxy])u%(a)sn%(consonant_end)s)|'
'(?:(?:[zcs]h|[gkh])u%(a)si)|'
'(?:(?:[zc]h?|[rdtnlgkhsy])%(o)sng%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rbpmfdtnlgkhw])?%(e)sng%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rbpmfdtnlgkhwy])?%(a)sng%(consonant_end)s)|'
'(?:[bpmdtnljqxy]%(i)sng%(consonant_end)s)|'
'(?:[bpmdtnljqx]i%(a)sn%(consonant_end)s)|'
'(?:[bpmdtnljqx]i%(a)so)|'
'(?:[nl](?:v|u:|\u00FC)%(e)s)|'
'(?:[nl](?:%(v)s|u:))|'
'(?:[jqxy]u%(e)s)|'
'(?:[bpmnljqxy]%(i)sn%(consonant_end)s)|'
'(?:[mdnljqx]i%(u)s)|'
'(?:[bpmdtnljqx]i%(e)s)|'
'(?:[dljqx]i%(a)s)|'
'(?:(?:[zcs]h?|[rdtnlgkhxqjy])%(u)sn%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rdtgkh])u%(i)s)|'
'(?:(?:[zcs]h?|[rdtnlgkh])u%(o)s)|'
'(?:(?:[zcs]h|[rgkh])u%(a)s)|'
'(?:(?:[zcs]h?|[rbpmfdngkhw])?%(e)sn%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rbpmfdtnlgkhwy])?%(a)sn%(consonant_end)s)|'
'(?:(?:[zcs]h?|[rpmfdtnlgkhy])?%(o)su)|'
'(?:(?:[zcs]h?|[rbpmdtnlgkhy])?%(a)so)|'
'(?:(?:[zs]h|[bpmfdtnlgkhwz])?%(e)si)|'
'(?:(?:[zcs]h?|[bpmdtnlgkhw])?%(a)si)|'
'(?:(?:[zcs]h?|[rjqxybpmdtnl])%(i)s)|'
'(?:(?:[zcs]h?|[rwbpmfdtnlgkhjqxwy])%(u)s)|'
'(?:%(e)s(?:r%(consonant_end)s)?)|'
'(?:(?:[zcs]h?|[rmdtnlgkhy])%(e)s)|'
'(?:[bpmfwyl]?%(o)s)|'
'(?:(?:[zcs]h|[bpmfdtnlgkhzcswy])?%(a)s)|'
'(?:r%(consonant_end)s)'
')' + ('[0-5]?' if tone_numbers else '')
) % {
'consonant_end': consonant_end, 'a': _vowels['a'], 'e': _vowels['e'],
'i': _vowels['i'], 'o': _vowels['o'], 'u': _vowels['u'],
'v': _vowels['v']
} | [
"def",
"_build_syl",
"(",
"vowels",
",",
"tone_numbers",
"=",
"False",
")",
":",
"# This is the end-of-syllable-consonant lookahead assertion.",
"consonant_end",
"=",
"'(?![{a}{e}{i}{o}{u}{v}]|u:)'",
".",
"format",
"(",
"a",
"=",
"_a",
",",
"e",
"=",
"_e",
",",
"i",... | Builds a Pinyin syllable re pattern.
Syllables can be preceded by a middle dot (tone mark). Syllables that end
in a consonant are only valid if they aren't followed directly by a vowel
with no apostrophe in between.
The rough approach used to validate a Pinyin syllable is:
1. Get the longest valid syllable.
2. If it ends in a consonant make sure it's not followed directly by a
vowel (hyphens and apostrophes don't count).
3. If the above didn't match, repeat for the next longest valid match.
Lookahead assertions are used to ensure that hyphens and apostrophes are
only considered valid if used correctly. This helps to weed out non-Pinyin
strings. | [
"Builds",
"a",
"Pinyin",
"syllable",
"re",
"pattern",
"."
] | 09bf543696277f71de502506984661a60d24494c | https://github.com/tsroten/zhon/blob/09bf543696277f71de502506984661a60d24494c/zhon/pinyin.py#L56-L125 | train | 31,031 |
tsroten/zhon | zhon/pinyin.py | _build_word | def _build_word(syl, vowels):
"""Builds a Pinyin word re pattern from a Pinyin syllable re pattern.
A word is defined as a series of consecutive valid Pinyin syllables
with optional hyphens and apostrophes interspersed. Hyphens must be
followed immediately by another valid Pinyin syllable. Apostrophes must be
followed by another valid Pinyin syllable that starts with an 'a', 'e', or
'o'.
"""
return "(?:{syl}(?:-(?={syl})|'(?=[{a}{e}{o}])(?={syl}))?)+".format(
syl=syl, a=vowels['a'], e=vowels['e'], o=vowels['o']) | python | def _build_word(syl, vowels):
"""Builds a Pinyin word re pattern from a Pinyin syllable re pattern.
A word is defined as a series of consecutive valid Pinyin syllables
with optional hyphens and apostrophes interspersed. Hyphens must be
followed immediately by another valid Pinyin syllable. Apostrophes must be
followed by another valid Pinyin syllable that starts with an 'a', 'e', or
'o'.
"""
return "(?:{syl}(?:-(?={syl})|'(?=[{a}{e}{o}])(?={syl}))?)+".format(
syl=syl, a=vowels['a'], e=vowels['e'], o=vowels['o']) | [
"def",
"_build_word",
"(",
"syl",
",",
"vowels",
")",
":",
"return",
"\"(?:{syl}(?:-(?={syl})|'(?=[{a}{e}{o}])(?={syl}))?)+\"",
".",
"format",
"(",
"syl",
"=",
"syl",
",",
"a",
"=",
"vowels",
"[",
"'a'",
"]",
",",
"e",
"=",
"vowels",
"[",
"'e'",
"]",
",",
... | Builds a Pinyin word re pattern from a Pinyin syllable re pattern.
A word is defined as a series of consecutive valid Pinyin syllables
with optional hyphens and apostrophes interspersed. Hyphens must be
followed immediately by another valid Pinyin syllable. Apostrophes must be
followed by another valid Pinyin syllable that starts with an 'a', 'e', or
'o'. | [
"Builds",
"a",
"Pinyin",
"word",
"re",
"pattern",
"from",
"a",
"Pinyin",
"syllable",
"re",
"pattern",
"."
] | 09bf543696277f71de502506984661a60d24494c | https://github.com/tsroten/zhon/blob/09bf543696277f71de502506984661a60d24494c/zhon/pinyin.py#L128-L139 | train | 31,032 |
tsroten/zhon | zhon/pinyin.py | _build_sentence | def _build_sentence(word):
"""Builds a Pinyin sentence re pattern from a Pinyin word re pattern.
A sentence is defined as a series of valid Pinyin words, punctuation
(non-stops), and spaces followed by a single stop and zero or more
container-closing punctuation marks (e.g. apostrophe and brackets).
"""
return (
"(?:{word}|[{non_stops}]|(?<![{stops} ]) )+"
"[{stops}]['\"\]\}}\)]*"
).format(word=word, non_stops=non_stops.replace('-', '\-'),
stops=stops) | python | def _build_sentence(word):
"""Builds a Pinyin sentence re pattern from a Pinyin word re pattern.
A sentence is defined as a series of valid Pinyin words, punctuation
(non-stops), and spaces followed by a single stop and zero or more
container-closing punctuation marks (e.g. apostrophe and brackets).
"""
return (
"(?:{word}|[{non_stops}]|(?<![{stops} ]) )+"
"[{stops}]['\"\]\}}\)]*"
).format(word=word, non_stops=non_stops.replace('-', '\-'),
stops=stops) | [
"def",
"_build_sentence",
"(",
"word",
")",
":",
"return",
"(",
"\"(?:{word}|[{non_stops}]|(?<![{stops} ]) )+\"",
"\"[{stops}]['\\\"\\]\\}}\\)]*\"",
")",
".",
"format",
"(",
"word",
"=",
"word",
",",
"non_stops",
"=",
"non_stops",
".",
"replace",
"(",
"'-'",
",",
... | Builds a Pinyin sentence re pattern from a Pinyin word re pattern.
A sentence is defined as a series of valid Pinyin words, punctuation
(non-stops), and spaces followed by a single stop and zero or more
container-closing punctuation marks (e.g. apostrophe and brackets). | [
"Builds",
"a",
"Pinyin",
"sentence",
"re",
"pattern",
"from",
"a",
"Pinyin",
"word",
"re",
"pattern",
"."
] | 09bf543696277f71de502506984661a60d24494c | https://github.com/tsroten/zhon/blob/09bf543696277f71de502506984661a60d24494c/zhon/pinyin.py#L142-L154 | train | 31,033 |
choderalab/pymbar | examples/parallel-tempering-2dpmf/parallel-tempering-2dpmf.py | read_file | def read_file(filename):
"""Read contents of the specified file.
Parameters:
-----------
filename : str
The name of the file to be read
Returns:
lines : list of str
The contents of the file, split by line
"""
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
return lines | python | def read_file(filename):
"""Read contents of the specified file.
Parameters:
-----------
filename : str
The name of the file to be read
Returns:
lines : list of str
The contents of the file, split by line
"""
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
return lines | [
"def",
"read_file",
"(",
"filename",
")",
":",
"infile",
"=",
"open",
"(",
"filename",
",",
"'r'",
")",
"lines",
"=",
"infile",
".",
"readlines",
"(",
")",
"infile",
".",
"close",
"(",
")",
"return",
"lines"
] | Read contents of the specified file.
Parameters:
-----------
filename : str
The name of the file to be read
Returns:
lines : list of str
The contents of the file, split by line | [
"Read",
"contents",
"of",
"the",
"specified",
"file",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/examples/parallel-tempering-2dpmf/parallel-tempering-2dpmf.py#L58-L76 | train | 31,034 |
choderalab/pymbar | pymbar/old_mbar.py | MBAR._computeWeights | def _computeWeights(self, logform=False, include_nonzero=False, recalc_denom=True, return_f_k=False):
"""Compute the normalized weights corresponding to samples for the given reduced potential.
Compute the normalized weights corresponding to samples for the given reduced potential.
Also stores the all_log_denom array for reuse.
Parameters
----------
logform : bool, optional
Whether the output is in logarithmic form, which is better for stability, though sometimes
the exponential form is requires.
include_nonzero : bool, optional
whether to compute weights for states with nonzero states. Not necessary
when performing self-consistent iteration.
recalc_denom : bool, optional
recalculate the denominator, must be done if the free energies change.
default is to do it, so that errors are not made. But can be turned
off if it is known the free energies have not changed.
return_f_k : bool, optional
return the self-consistent f_k values
Returns
-------
if logform==True:
Log_W_nk (double) - Log_W_nk[n,k] is the normalized log weight of sample n from state k.
else:
W_nk (double) - W_nk[n,k] is the log weight of sample n from state k.
if return_f_k==True:
optionally return the self-consistent free energy from these weights.
"""
if (include_nonzero):
f_k = self.f_k
K = self.K
else:
f_k = self.f_k[self.states_with_samples]
K = len(self.states_with_samples)
# array of either weights or normalized log weights
Warray_nk = np.zeros([self.N, K], dtype=np.float64)
if (return_f_k):
f_k_out = np.zeros([K], dtype=np.float64)
if (recalc_denom):
self.log_weight_denom = self._computeUnnormalizedLogWeights(
np.zeros([self.N], dtype=np.float64))
for k in range(K):
if (include_nonzero):
index = k
else:
index = self.states_with_samples[k]
log_w_n = -self.u_kn[index, :] + self.log_weight_denom + f_k[k]
if (return_f_k):
f_k_out[k] = f_k[k] - _logsum(log_w_n)
if (include_nonzero):
# renormalize the weights, needed for nonzero states.
log_w_n += (f_k_out[k] - f_k[k])
if (logform):
Warray_nk[:, k] = log_w_n
else:
Warray_nk[:, k] = np.exp(log_w_n)
# Return weights (or log weights)
if (return_f_k):
f_k_out[:] = f_k_out[:] - f_k_out[0]
return Warray_nk, f_k_out
else:
return Warray_nk | python | def _computeWeights(self, logform=False, include_nonzero=False, recalc_denom=True, return_f_k=False):
"""Compute the normalized weights corresponding to samples for the given reduced potential.
Compute the normalized weights corresponding to samples for the given reduced potential.
Also stores the all_log_denom array for reuse.
Parameters
----------
logform : bool, optional
Whether the output is in logarithmic form, which is better for stability, though sometimes
the exponential form is requires.
include_nonzero : bool, optional
whether to compute weights for states with nonzero states. Not necessary
when performing self-consistent iteration.
recalc_denom : bool, optional
recalculate the denominator, must be done if the free energies change.
default is to do it, so that errors are not made. But can be turned
off if it is known the free energies have not changed.
return_f_k : bool, optional
return the self-consistent f_k values
Returns
-------
if logform==True:
Log_W_nk (double) - Log_W_nk[n,k] is the normalized log weight of sample n from state k.
else:
W_nk (double) - W_nk[n,k] is the log weight of sample n from state k.
if return_f_k==True:
optionally return the self-consistent free energy from these weights.
"""
if (include_nonzero):
f_k = self.f_k
K = self.K
else:
f_k = self.f_k[self.states_with_samples]
K = len(self.states_with_samples)
# array of either weights or normalized log weights
Warray_nk = np.zeros([self.N, K], dtype=np.float64)
if (return_f_k):
f_k_out = np.zeros([K], dtype=np.float64)
if (recalc_denom):
self.log_weight_denom = self._computeUnnormalizedLogWeights(
np.zeros([self.N], dtype=np.float64))
for k in range(K):
if (include_nonzero):
index = k
else:
index = self.states_with_samples[k]
log_w_n = -self.u_kn[index, :] + self.log_weight_denom + f_k[k]
if (return_f_k):
f_k_out[k] = f_k[k] - _logsum(log_w_n)
if (include_nonzero):
# renormalize the weights, needed for nonzero states.
log_w_n += (f_k_out[k] - f_k[k])
if (logform):
Warray_nk[:, k] = log_w_n
else:
Warray_nk[:, k] = np.exp(log_w_n)
# Return weights (or log weights)
if (return_f_k):
f_k_out[:] = f_k_out[:] - f_k_out[0]
return Warray_nk, f_k_out
else:
return Warray_nk | [
"def",
"_computeWeights",
"(",
"self",
",",
"logform",
"=",
"False",
",",
"include_nonzero",
"=",
"False",
",",
"recalc_denom",
"=",
"True",
",",
"return_f_k",
"=",
"False",
")",
":",
"if",
"(",
"include_nonzero",
")",
":",
"f_k",
"=",
"self",
".",
"f_k"... | Compute the normalized weights corresponding to samples for the given reduced potential.
Compute the normalized weights corresponding to samples for the given reduced potential.
Also stores the all_log_denom array for reuse.
Parameters
----------
logform : bool, optional
Whether the output is in logarithmic form, which is better for stability, though sometimes
the exponential form is requires.
include_nonzero : bool, optional
whether to compute weights for states with nonzero states. Not necessary
when performing self-consistent iteration.
recalc_denom : bool, optional
recalculate the denominator, must be done if the free energies change.
default is to do it, so that errors are not made. But can be turned
off if it is known the free energies have not changed.
return_f_k : bool, optional
return the self-consistent f_k values
Returns
-------
if logform==True:
Log_W_nk (double) - Log_W_nk[n,k] is the normalized log weight of sample n from state k.
else:
W_nk (double) - W_nk[n,k] is the log weight of sample n from state k.
if return_f_k==True:
optionally return the self-consistent free energy from these weights. | [
"Compute",
"the",
"normalized",
"weights",
"corresponding",
"to",
"samples",
"for",
"the",
"given",
"reduced",
"potential",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/old_mbar.py#L1654-L1725 | train | 31,035 |
choderalab/pymbar | pymbar/old_mbar.py | MBAR._pseudoinverse | def _pseudoinverse(self, A, tol=1.0e-10):
"""
Compute the Moore-Penrose pseudoinverse.
REQUIRED ARGUMENTS
A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed
RETURN VALUES
Ainv (np KxK matrix) - the pseudoinverse
OPTIONAL VALUES
tol - the tolerance (relative to largest magnitude singlular value) below which singular values are to not be include in forming pseudoinverse (default: 1.0e-10)
NOTES
This implementation is provided because the 'pinv' function of np is broken in the version we were using.
TODO
Can we get rid of this and use np.linalg.pinv instead?
"""
# DEBUG
# TODO: Should we use pinv, or _pseudoinverse?
# return np.linalg.pinv(A)
# Get size
[M, N] = A.shape
if N != M:
raise DataError("pseudoinverse can only be computed for square matrices: dimensions were %d x %d" % (
M, N))
# Make sure A contains no nan.
if(np.any(np.isnan(A))):
print("attempted to compute pseudoinverse of A =")
print(A)
raise ParameterError("A contains nan.")
# DEBUG
diagonal_loading = False
if diagonal_loading:
# Modify matrix by diagonal loading.
eigs = linalg.eigvalsh(A)
most_negative_eigenvalue = eigs.min()
if (most_negative_eigenvalue < 0.0):
print("most negative eigenvalue = %e" % most_negative_eigenvalue)
# Choose loading value.
gamma = -most_negative_eigenvalue * 1.05
# Modify Theta by diagonal loading
A += gamma * np.eye(A.shape[0])
# Compute SVD of A.
[U, S, Vt] = linalg.svd(A)
# Compute pseudoinverse by taking square root of nonzero singular
# values.
Ainv = np.matrix(np.zeros([M, M], dtype=np.float64))
for k in range(M):
if (abs(S[k]) > tol * abs(S[0])):
Ainv += (1.0/S[k]) * np.outer(U[:, k], Vt[k, :]).T
return Ainv | python | def _pseudoinverse(self, A, tol=1.0e-10):
"""
Compute the Moore-Penrose pseudoinverse.
REQUIRED ARGUMENTS
A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed
RETURN VALUES
Ainv (np KxK matrix) - the pseudoinverse
OPTIONAL VALUES
tol - the tolerance (relative to largest magnitude singlular value) below which singular values are to not be include in forming pseudoinverse (default: 1.0e-10)
NOTES
This implementation is provided because the 'pinv' function of np is broken in the version we were using.
TODO
Can we get rid of this and use np.linalg.pinv instead?
"""
# DEBUG
# TODO: Should we use pinv, or _pseudoinverse?
# return np.linalg.pinv(A)
# Get size
[M, N] = A.shape
if N != M:
raise DataError("pseudoinverse can only be computed for square matrices: dimensions were %d x %d" % (
M, N))
# Make sure A contains no nan.
if(np.any(np.isnan(A))):
print("attempted to compute pseudoinverse of A =")
print(A)
raise ParameterError("A contains nan.")
# DEBUG
diagonal_loading = False
if diagonal_loading:
# Modify matrix by diagonal loading.
eigs = linalg.eigvalsh(A)
most_negative_eigenvalue = eigs.min()
if (most_negative_eigenvalue < 0.0):
print("most negative eigenvalue = %e" % most_negative_eigenvalue)
# Choose loading value.
gamma = -most_negative_eigenvalue * 1.05
# Modify Theta by diagonal loading
A += gamma * np.eye(A.shape[0])
# Compute SVD of A.
[U, S, Vt] = linalg.svd(A)
# Compute pseudoinverse by taking square root of nonzero singular
# values.
Ainv = np.matrix(np.zeros([M, M], dtype=np.float64))
for k in range(M):
if (abs(S[k]) > tol * abs(S[0])):
Ainv += (1.0/S[k]) * np.outer(U[:, k], Vt[k, :]).T
return Ainv | [
"def",
"_pseudoinverse",
"(",
"self",
",",
"A",
",",
"tol",
"=",
"1.0e-10",
")",
":",
"# DEBUG",
"# TODO: Should we use pinv, or _pseudoinverse?",
"# return np.linalg.pinv(A)",
"# Get size",
"[",
"M",
",",
"N",
"]",
"=",
"A",
".",
"shape",
"if",
"N",
"!=",
"M"... | Compute the Moore-Penrose pseudoinverse.
REQUIRED ARGUMENTS
A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed
RETURN VALUES
Ainv (np KxK matrix) - the pseudoinverse
OPTIONAL VALUES
tol - the tolerance (relative to largest magnitude singlular value) below which singular values are to not be include in forming pseudoinverse (default: 1.0e-10)
NOTES
This implementation is provided because the 'pinv' function of np is broken in the version we were using.
TODO
Can we get rid of this and use np.linalg.pinv instead? | [
"Compute",
"the",
"Moore",
"-",
"Penrose",
"pseudoinverse",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/old_mbar.py#L1729-L1789 | train | 31,036 |
choderalab/pymbar | pymbar/old_mbar.py | MBAR._zerosamestates | def _zerosamestates(self, A):
"""
zeros out states that should be identical
REQUIRED ARGUMENTS
A: the matrix whose entries are to be zeroed.
"""
for pair in self.samestates:
A[pair[0], pair[1]] = 0
A[pair[1], pair[0]] = 0 | python | def _zerosamestates(self, A):
"""
zeros out states that should be identical
REQUIRED ARGUMENTS
A: the matrix whose entries are to be zeroed.
"""
for pair in self.samestates:
A[pair[0], pair[1]] = 0
A[pair[1], pair[0]] = 0 | [
"def",
"_zerosamestates",
"(",
"self",
",",
"A",
")",
":",
"for",
"pair",
"in",
"self",
".",
"samestates",
":",
"A",
"[",
"pair",
"[",
"0",
"]",
",",
"pair",
"[",
"1",
"]",
"]",
"=",
"0",
"A",
"[",
"pair",
"[",
"1",
"]",
",",
"pair",
"[",
"... | zeros out states that should be identical
REQUIRED ARGUMENTS
A: the matrix whose entries are to be zeroed. | [
"zeros",
"out",
"states",
"that",
"should",
"be",
"identical"
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/old_mbar.py#L1792-L1804 | train | 31,037 |
choderalab/pymbar | pymbar/old_mbar.py | MBAR._initializeFreeEnergies | def _initializeFreeEnergies(self, verbose=False, method='zeros'):
"""
Compute an initial guess at the relative free energies.
OPTIONAL ARGUMENTS
verbose (boolean) - If True, will print debug information (default: False)
method (string) - Method for initializing guess at free energies.
'zeros' - all free energies are initially set to zero
'mean-reduced-potential' - the mean reduced potential is used
"""
if (method == 'zeros'):
# Use zeros for initial free energies.
if verbose:
print("Initializing free energies to zero.")
self.f_k[:] = 0.0
elif (method == 'mean-reduced-potential'):
# Compute initial guess at free energies from the mean reduced
# potential from each state
if verbose:
print("Initializing free energies with mean reduced potential for each state.")
means = np.zeros([self.K], float)
for k in self.states_with_samples:
means[k] = self.u_kn[k, 0:self.N_k[k]].mean()
if (np.max(np.abs(means)) < 0.000001):
print("Warning: All mean reduced potentials are close to zero. If you are using energy differences in the u_kln matrix, then the mean reduced potentials will be zero, and this is expected behavoir.")
self.f_k = means
elif (method == 'BAR'):
# For now, make a simple list of those states with samples.
initialization_order = np.where(self.N_k > 0)[0]
# Initialize all f_k to zero.
self.f_k[:] = 0.0
# Initialize the rest
for index in range(0, np.size(initialization_order) - 1):
k = initialization_order[index]
l = initialization_order[index + 1]
# forward work
# here, we actually need to distinguish which states are which
w_F = (
self.u_kn[l,self.x_kindices==k] - self.u_kn[k,self.x_kindices==k])
#self.u_kln[k, l, 0:self.N_k[k]] - self.u_kln[k, k, 0:self.N_k[k]])
# reverse work
w_R = (
self.u_kn[k,self.x_kindices==l] - self.u_kn[l,self.x_kindices==l])
#self.u_kln[l, k, 0:self.N_k[l]] - self.u_kln[l, l, 0:self.N_k[l]])
if (len(w_F) > 0 and len(w_R) > 0):
# BAR solution doesn't need to be incredibly accurate to
# kickstart NR.
import pymbar.bar
self.f_k[l] = self.f_k[k] + pymbar.bar.BAR(
w_F, w_R, relative_tolerance=0.000001, verbose=False, compute_uncertainty=False)
else:
# no states observed, so we don't need to initialize this free energy anyway, as
# the solution is noniterative.
self.f_k[l] = 0
else:
# The specified method is not implemented.
raise ParameterError('Method ' + method + ' unrecognized.')
# Shift all free energies such that f_0 = 0.
self.f_k[:] = self.f_k[:] - self.f_k[0]
return | python | def _initializeFreeEnergies(self, verbose=False, method='zeros'):
"""
Compute an initial guess at the relative free energies.
OPTIONAL ARGUMENTS
verbose (boolean) - If True, will print debug information (default: False)
method (string) - Method for initializing guess at free energies.
'zeros' - all free energies are initially set to zero
'mean-reduced-potential' - the mean reduced potential is used
"""
if (method == 'zeros'):
# Use zeros for initial free energies.
if verbose:
print("Initializing free energies to zero.")
self.f_k[:] = 0.0
elif (method == 'mean-reduced-potential'):
# Compute initial guess at free energies from the mean reduced
# potential from each state
if verbose:
print("Initializing free energies with mean reduced potential for each state.")
means = np.zeros([self.K], float)
for k in self.states_with_samples:
means[k] = self.u_kn[k, 0:self.N_k[k]].mean()
if (np.max(np.abs(means)) < 0.000001):
print("Warning: All mean reduced potentials are close to zero. If you are using energy differences in the u_kln matrix, then the mean reduced potentials will be zero, and this is expected behavoir.")
self.f_k = means
elif (method == 'BAR'):
# For now, make a simple list of those states with samples.
initialization_order = np.where(self.N_k > 0)[0]
# Initialize all f_k to zero.
self.f_k[:] = 0.0
# Initialize the rest
for index in range(0, np.size(initialization_order) - 1):
k = initialization_order[index]
l = initialization_order[index + 1]
# forward work
# here, we actually need to distinguish which states are which
w_F = (
self.u_kn[l,self.x_kindices==k] - self.u_kn[k,self.x_kindices==k])
#self.u_kln[k, l, 0:self.N_k[k]] - self.u_kln[k, k, 0:self.N_k[k]])
# reverse work
w_R = (
self.u_kn[k,self.x_kindices==l] - self.u_kn[l,self.x_kindices==l])
#self.u_kln[l, k, 0:self.N_k[l]] - self.u_kln[l, l, 0:self.N_k[l]])
if (len(w_F) > 0 and len(w_R) > 0):
# BAR solution doesn't need to be incredibly accurate to
# kickstart NR.
import pymbar.bar
self.f_k[l] = self.f_k[k] + pymbar.bar.BAR(
w_F, w_R, relative_tolerance=0.000001, verbose=False, compute_uncertainty=False)
else:
# no states observed, so we don't need to initialize this free energy anyway, as
# the solution is noniterative.
self.f_k[l] = 0
else:
# The specified method is not implemented.
raise ParameterError('Method ' + method + ' unrecognized.')
# Shift all free energies such that f_0 = 0.
self.f_k[:] = self.f_k[:] - self.f_k[0]
return | [
"def",
"_initializeFreeEnergies",
"(",
"self",
",",
"verbose",
"=",
"False",
",",
"method",
"=",
"'zeros'",
")",
":",
"if",
"(",
"method",
"==",
"'zeros'",
")",
":",
"# Use zeros for initial free energies.",
"if",
"verbose",
":",
"print",
"(",
"\"Initializing fr... | Compute an initial guess at the relative free energies.
OPTIONAL ARGUMENTS
verbose (boolean) - If True, will print debug information (default: False)
method (string) - Method for initializing guess at free energies.
'zeros' - all free energies are initially set to zero
'mean-reduced-potential' - the mean reduced potential is used | [
"Compute",
"an",
"initial",
"guess",
"at",
"the",
"relative",
"free",
"energies",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/old_mbar.py#L2009-L2074 | train | 31,038 |
choderalab/pymbar | pymbar/old_mbar.py | MBAR._amIdoneIterating | def _amIdoneIterating(self, f_k_new, relative_tolerance, iteration, maximum_iterations, print_warning, verbose):
"""
Convenience function to test whether we are done iterating, same for all iteration types
REQUIRED ARGUMENTS
f_k_new (array): new free energies
f_k (array) : older free energies
relative_tolerance (float): the relative tolerance for terminating
verbose (bool): verbose response
iterations (int): current number of iterations
print_warning (bool): sometimes, we want to surpress the warning.
RETURN VALUES
yesIam (bool): indicates that the iteration has converged.
"""
yesIam = False
# Compute change from old to new estimate.
Delta_f_k = f_k_new - self.f_k[self.states_with_samples]
# Check convergence criteria.
# Terminate when max((f - fold) / f) < relative_tolerance for all
# nonzero f.
max_delta = np.max(
np.abs(Delta_f_k) / np.max(np.abs(f_k_new)))
# Update stored free energies.
f_k = f_k_new.copy()
self.f_k[self.states_with_samples] = f_k
# write out current estimate
if verbose:
print("current f_k for states with samples =")
print(f_k)
print("relative max_delta = %e" % max_delta)
# Check convergence criteria.
# Terminate when max((f - fold) / f) < relative_tolerance for all
# nonzero f.
if np.isnan(max_delta) or (max_delta < relative_tolerance):
yesIam = True
if (yesIam):
# Report convergence, or warn user if convergence was not achieved.
if np.all(self.f_k == 0.0):
# all f_k appear to be zero
print('WARNING: All f_k appear to be zero.')
elif (max_delta < relative_tolerance):
# Convergence achieved.
if verbose:
print('Converged to tolerance of %e in %d iterations.' % (max_delta, iteration + 1))
elif (print_warning):
# Warn that convergence was not achieved.
# many times, self-consistent iteration is used in conjunction with another program. In that case,
# we don't really need to warn about anything, since we are not
# running it to convergence.
print('WARNING: Did not converge to within specified tolerance.')
print('max_delta = %e, TOLERANCE = %e, MAX_ITS = %d, iterations completed = %d' % (max_delta, relative_tolerance, maximum_iterations, iteration))
return yesIam | python | def _amIdoneIterating(self, f_k_new, relative_tolerance, iteration, maximum_iterations, print_warning, verbose):
"""
Convenience function to test whether we are done iterating, same for all iteration types
REQUIRED ARGUMENTS
f_k_new (array): new free energies
f_k (array) : older free energies
relative_tolerance (float): the relative tolerance for terminating
verbose (bool): verbose response
iterations (int): current number of iterations
print_warning (bool): sometimes, we want to surpress the warning.
RETURN VALUES
yesIam (bool): indicates that the iteration has converged.
"""
yesIam = False
# Compute change from old to new estimate.
Delta_f_k = f_k_new - self.f_k[self.states_with_samples]
# Check convergence criteria.
# Terminate when max((f - fold) / f) < relative_tolerance for all
# nonzero f.
max_delta = np.max(
np.abs(Delta_f_k) / np.max(np.abs(f_k_new)))
# Update stored free energies.
f_k = f_k_new.copy()
self.f_k[self.states_with_samples] = f_k
# write out current estimate
if verbose:
print("current f_k for states with samples =")
print(f_k)
print("relative max_delta = %e" % max_delta)
# Check convergence criteria.
# Terminate when max((f - fold) / f) < relative_tolerance for all
# nonzero f.
if np.isnan(max_delta) or (max_delta < relative_tolerance):
yesIam = True
if (yesIam):
# Report convergence, or warn user if convergence was not achieved.
if np.all(self.f_k == 0.0):
# all f_k appear to be zero
print('WARNING: All f_k appear to be zero.')
elif (max_delta < relative_tolerance):
# Convergence achieved.
if verbose:
print('Converged to tolerance of %e in %d iterations.' % (max_delta, iteration + 1))
elif (print_warning):
# Warn that convergence was not achieved.
# many times, self-consistent iteration is used in conjunction with another program. In that case,
# we don't really need to warn about anything, since we are not
# running it to convergence.
print('WARNING: Did not converge to within specified tolerance.')
print('max_delta = %e, TOLERANCE = %e, MAX_ITS = %d, iterations completed = %d' % (max_delta, relative_tolerance, maximum_iterations, iteration))
return yesIam | [
"def",
"_amIdoneIterating",
"(",
"self",
",",
"f_k_new",
",",
"relative_tolerance",
",",
"iteration",
",",
"maximum_iterations",
",",
"print_warning",
",",
"verbose",
")",
":",
"yesIam",
"=",
"False",
"# Compute change from old to new estimate.",
"Delta_f_k",
"=",
"f_... | Convenience function to test whether we are done iterating, same for all iteration types
REQUIRED ARGUMENTS
f_k_new (array): new free energies
f_k (array) : older free energies
relative_tolerance (float): the relative tolerance for terminating
verbose (bool): verbose response
iterations (int): current number of iterations
print_warning (bool): sometimes, we want to surpress the warning.
RETURN VALUES
yesIam (bool): indicates that the iteration has converged. | [
"Convenience",
"function",
"to",
"test",
"whether",
"we",
"are",
"done",
"iterating",
"same",
"for",
"all",
"iteration",
"types"
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/old_mbar.py#L2152-L2212 | train | 31,039 |
choderalab/pymbar | pymbar/old_mbar.py | MBAR._selfConsistentIteration | def _selfConsistentIteration(self, relative_tolerance=1.0e-6, maximum_iterations=1000, verbose=True, print_warning=False):
"""
Determine free energies by self-consistent iteration.
OPTIONAL ARGUMENTS
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-5)
maximum_iterations (int) - maximum number of self-consistent iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
Self-consistent iteration of the MBAR equations is used, as described in Appendix C.1 of [1].
"""
# Iteratively update dimensionless free energies until convergence to
# specified tolerance, or maximum allowed number of iterations has been
# exceeded.
if verbose:
print("MBAR: Computing dimensionless free energies by iteration. This may take from seconds to minutes, depending on the quantity of data...")
for iteration in range(0, maximum_iterations):
if verbose:
print('Self-consistent iteration %d' % iteration)
# compute the free energies by self consistent iteration (which
# also involves calculating the weights)
(W_nk, f_k_new) = self._computeWeights(
logform=True, return_f_k=True)
if self._amIdoneIterating(
f_k_new, relative_tolerance, iteration,
maximum_iterations, print_warning, verbose):
break
return | python | def _selfConsistentIteration(self, relative_tolerance=1.0e-6, maximum_iterations=1000, verbose=True, print_warning=False):
"""
Determine free energies by self-consistent iteration.
OPTIONAL ARGUMENTS
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-5)
maximum_iterations (int) - maximum number of self-consistent iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
Self-consistent iteration of the MBAR equations is used, as described in Appendix C.1 of [1].
"""
# Iteratively update dimensionless free energies until convergence to
# specified tolerance, or maximum allowed number of iterations has been
# exceeded.
if verbose:
print("MBAR: Computing dimensionless free energies by iteration. This may take from seconds to minutes, depending on the quantity of data...")
for iteration in range(0, maximum_iterations):
if verbose:
print('Self-consistent iteration %d' % iteration)
# compute the free energies by self consistent iteration (which
# also involves calculating the weights)
(W_nk, f_k_new) = self._computeWeights(
logform=True, return_f_k=True)
if self._amIdoneIterating(
f_k_new, relative_tolerance, iteration,
maximum_iterations, print_warning, verbose):
break
return | [
"def",
"_selfConsistentIteration",
"(",
"self",
",",
"relative_tolerance",
"=",
"1.0e-6",
",",
"maximum_iterations",
"=",
"1000",
",",
"verbose",
"=",
"True",
",",
"print_warning",
"=",
"False",
")",
":",
"# Iteratively update dimensionless free energies until convergence... | Determine free energies by self-consistent iteration.
OPTIONAL ARGUMENTS
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-5)
maximum_iterations (int) - maximum number of self-consistent iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
Self-consistent iteration of the MBAR equations is used, as described in Appendix C.1 of [1]. | [
"Determine",
"free",
"energies",
"by",
"self",
"-",
"consistent",
"iteration",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/old_mbar.py#L2215-L2251 | train | 31,040 |
choderalab/pymbar | pymbar/mbar.py | MBAR._pseudoinverse | def _pseudoinverse(self, A, tol=1.0e-10):
"""Compute the Moore-Penrose pseudoinverse, wraps np.linalg.pinv
REQUIRED ARGUMENTS
A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed
RETURN VALUES
Ainv (np KxK matrix) - the pseudoinverse
OPTIONAL VALUES
tol - the tolerance (relative to largest magnitude singlular value) below which singular values are to not be include in forming pseudoinverse (default: 1.0e-10)
NOTES
In previous versions of pymbar / Numpy, we wrote our own pseudoinverse
because of a bug in Numpy.
"""
return np.linalg.pinv(A, rcond=tol) | python | def _pseudoinverse(self, A, tol=1.0e-10):
"""Compute the Moore-Penrose pseudoinverse, wraps np.linalg.pinv
REQUIRED ARGUMENTS
A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed
RETURN VALUES
Ainv (np KxK matrix) - the pseudoinverse
OPTIONAL VALUES
tol - the tolerance (relative to largest magnitude singlular value) below which singular values are to not be include in forming pseudoinverse (default: 1.0e-10)
NOTES
In previous versions of pymbar / Numpy, we wrote our own pseudoinverse
because of a bug in Numpy.
"""
return np.linalg.pinv(A, rcond=tol) | [
"def",
"_pseudoinverse",
"(",
"self",
",",
"A",
",",
"tol",
"=",
"1.0e-10",
")",
":",
"return",
"np",
".",
"linalg",
".",
"pinv",
"(",
"A",
",",
"rcond",
"=",
"tol",
")"
] | Compute the Moore-Penrose pseudoinverse, wraps np.linalg.pinv
REQUIRED ARGUMENTS
A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed
RETURN VALUES
Ainv (np KxK matrix) - the pseudoinverse
OPTIONAL VALUES
tol - the tolerance (relative to largest magnitude singlular value) below which singular values are to not be include in forming pseudoinverse (default: 1.0e-10)
NOTES
In previous versions of pymbar / Numpy, we wrote our own pseudoinverse
because of a bug in Numpy. | [
"Compute",
"the",
"Moore",
"-",
"Penrose",
"pseudoinverse",
"wraps",
"np",
".",
"linalg",
".",
"pinv"
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar.py#L1546-L1564 | train | 31,041 |
choderalab/pymbar | pymbar/mbar_solvers.py | validate_inputs | def validate_inputs(u_kn, N_k, f_k):
"""Check types and return inputs for MBAR calculations.
Parameters
----------
u_kn or q_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies or unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
u_kn or q_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies or unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='float'
The number of samples in each state. Converted to float because this cast is required when log is calculated.
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
"""
n_states, n_samples = u_kn.shape
u_kn = ensure_type(u_kn, 'float', 2, "u_kn or Q_kn", shape=(n_states, n_samples))
N_k = ensure_type(N_k, 'float', 1, "N_k", shape=(n_states,), warn_on_cast=False) # Autocast to float because will be eventually used in float calculations.
f_k = ensure_type(f_k, 'float', 1, "f_k", shape=(n_states,))
return u_kn, N_k, f_k | python | def validate_inputs(u_kn, N_k, f_k):
"""Check types and return inputs for MBAR calculations.
Parameters
----------
u_kn or q_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies or unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
u_kn or q_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies or unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='float'
The number of samples in each state. Converted to float because this cast is required when log is calculated.
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
"""
n_states, n_samples = u_kn.shape
u_kn = ensure_type(u_kn, 'float', 2, "u_kn or Q_kn", shape=(n_states, n_samples))
N_k = ensure_type(N_k, 'float', 1, "N_k", shape=(n_states,), warn_on_cast=False) # Autocast to float because will be eventually used in float calculations.
f_k = ensure_type(f_k, 'float', 1, "f_k", shape=(n_states,))
return u_kn, N_k, f_k | [
"def",
"validate_inputs",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
":",
"n_states",
",",
"n_samples",
"=",
"u_kn",
".",
"shape",
"u_kn",
"=",
"ensure_type",
"(",
"u_kn",
",",
"'float'",
",",
"2",
",",
"\"u_kn or Q_kn\"",
",",
"shape",
"=",
"(",
"n_sta... | Check types and return inputs for MBAR calculations.
Parameters
----------
u_kn or q_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies or unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
u_kn or q_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies or unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='float'
The number of samples in each state. Converted to float because this cast is required when log is calculated.
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state | [
"Check",
"types",
"and",
"return",
"inputs",
"for",
"MBAR",
"calculations",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L17-L44 | train | 31,042 |
choderalab/pymbar | pymbar/mbar_solvers.py | self_consistent_update | def self_consistent_update(u_kn, N_k, f_k):
"""Return an improved guess for the dimensionless free energies
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
f_k : np.ndarray, shape=(n_states), dtype='float'
Updated estimate of f_k
Notes
-----
Equation C3 in MBAR JCP paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
states_with_samples = (N_k > 0)
# Only the states with samples can contribute to the denominator term.
log_denominator_n = logsumexp(f_k[states_with_samples] - u_kn[states_with_samples].T, b=N_k[states_with_samples], axis=1)
# All states can contribute to the numerator term.
return -1. * logsumexp(-log_denominator_n - u_kn, axis=1) | python | def self_consistent_update(u_kn, N_k, f_k):
"""Return an improved guess for the dimensionless free energies
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
f_k : np.ndarray, shape=(n_states), dtype='float'
Updated estimate of f_k
Notes
-----
Equation C3 in MBAR JCP paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
states_with_samples = (N_k > 0)
# Only the states with samples can contribute to the denominator term.
log_denominator_n = logsumexp(f_k[states_with_samples] - u_kn[states_with_samples].T, b=N_k[states_with_samples], axis=1)
# All states can contribute to the numerator term.
return -1. * logsumexp(-log_denominator_n - u_kn, axis=1) | [
"def",
"self_consistent_update",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
":",
"u_kn",
",",
"N_k",
",",
"f_k",
"=",
"validate_inputs",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
"states_with_samples",
"=",
"(",
"N_k",
">",
"0",
")",
"# Only the states wi... | Return an improved guess for the dimensionless free energies
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
f_k : np.ndarray, shape=(n_states), dtype='float'
Updated estimate of f_k
Notes
-----
Equation C3 in MBAR JCP paper. | [
"Return",
"an",
"improved",
"guess",
"for",
"the",
"dimensionless",
"free",
"energies"
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L47-L77 | train | 31,043 |
choderalab/pymbar | pymbar/mbar_solvers.py | mbar_gradient | def mbar_gradient(u_kn, N_k, f_k):
"""Gradient of MBAR objective function.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of mbar_objective
Notes
-----
This is equation C6 in the JCP MBAR paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
log_numerator_k = logsumexp(-log_denominator_n - u_kn, axis=1)
return -1 * N_k * (1.0 - np.exp(f_k + log_numerator_k)) | python | def mbar_gradient(u_kn, N_k, f_k):
"""Gradient of MBAR objective function.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of mbar_objective
Notes
-----
This is equation C6 in the JCP MBAR paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
log_numerator_k = logsumexp(-log_denominator_n - u_kn, axis=1)
return -1 * N_k * (1.0 - np.exp(f_k + log_numerator_k)) | [
"def",
"mbar_gradient",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
":",
"u_kn",
",",
"N_k",
",",
"f_k",
"=",
"validate_inputs",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
"log_denominator_n",
"=",
"logsumexp",
"(",
"f_k",
"-",
"u_kn",
".",
"T",
",",
... | Gradient of MBAR objective function.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of mbar_objective
Notes
-----
This is equation C6 in the JCP MBAR paper. | [
"Gradient",
"of",
"MBAR",
"objective",
"function",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L80-L105 | train | 31,044 |
choderalab/pymbar | pymbar/mbar_solvers.py | mbar_objective_and_gradient | def mbar_objective_and_gradient(u_kn, N_k, f_k):
"""Calculates both objective function and gradient for MBAR.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
obj : float
Objective function
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of objective function
Notes
-----
This objective function is essentially a doubly-summed partition function and is
quite sensitive to precision loss from both overflow and underflow. For optimal
results, u_kn can be preconditioned by subtracting out a `n` dependent
vector.
More optimal precision, the objective function uses math.fsum for the
outermost sum and logsumexp for the inner sum.
The gradient is equation C6 in the JCP MBAR paper; the objective
function is its integral.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
log_numerator_k = logsumexp(-log_denominator_n - u_kn, axis=1)
grad = -1 * N_k * (1.0 - np.exp(f_k + log_numerator_k))
obj = math.fsum(log_denominator_n) - N_k.dot(f_k)
return obj, grad | python | def mbar_objective_and_gradient(u_kn, N_k, f_k):
"""Calculates both objective function and gradient for MBAR.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
obj : float
Objective function
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of objective function
Notes
-----
This objective function is essentially a doubly-summed partition function and is
quite sensitive to precision loss from both overflow and underflow. For optimal
results, u_kn can be preconditioned by subtracting out a `n` dependent
vector.
More optimal precision, the objective function uses math.fsum for the
outermost sum and logsumexp for the inner sum.
The gradient is equation C6 in the JCP MBAR paper; the objective
function is its integral.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
log_numerator_k = logsumexp(-log_denominator_n - u_kn, axis=1)
grad = -1 * N_k * (1.0 - np.exp(f_k + log_numerator_k))
obj = math.fsum(log_denominator_n) - N_k.dot(f_k)
return obj, grad | [
"def",
"mbar_objective_and_gradient",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
":",
"u_kn",
",",
"N_k",
",",
"f_k",
"=",
"validate_inputs",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
"log_denominator_n",
"=",
"logsumexp",
"(",
"f_k",
"-",
"u_kn",
".",
... | Calculates both objective function and gradient for MBAR.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
obj : float
Objective function
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of objective function
Notes
-----
This objective function is essentially a doubly-summed partition function and is
quite sensitive to precision loss from both overflow and underflow. For optimal
results, u_kn can be preconditioned by subtracting out a `n` dependent
vector.
More optimal precision, the objective function uses math.fsum for the
outermost sum and logsumexp for the inner sum.
The gradient is equation C6 in the JCP MBAR paper; the objective
function is its integral. | [
"Calculates",
"both",
"objective",
"function",
"and",
"gradient",
"for",
"MBAR",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L108-L149 | train | 31,045 |
choderalab/pymbar | pymbar/mbar_solvers.py | mbar_hessian | def mbar_hessian(u_kn, N_k, f_k):
"""Hessian of MBAR objective function.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
H : np.ndarray, dtype=float, shape=(n_states, n_states)
Hessian of mbar objective function.
Notes
-----
Equation (C9) in JCP MBAR paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
W = mbar_W_nk(u_kn, N_k, f_k)
H = W.T.dot(W)
H *= N_k
H *= N_k[:, np.newaxis]
H -= np.diag(W.sum(0) * N_k)
return -1.0 * H | python | def mbar_hessian(u_kn, N_k, f_k):
"""Hessian of MBAR objective function.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
H : np.ndarray, dtype=float, shape=(n_states, n_states)
Hessian of mbar objective function.
Notes
-----
Equation (C9) in JCP MBAR paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
W = mbar_W_nk(u_kn, N_k, f_k)
H = W.T.dot(W)
H *= N_k
H *= N_k[:, np.newaxis]
H -= np.diag(W.sum(0) * N_k)
return -1.0 * H | [
"def",
"mbar_hessian",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
":",
"u_kn",
",",
"N_k",
",",
"f_k",
"=",
"validate_inputs",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
"W",
"=",
"mbar_W_nk",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
"H",
"=",
"... | Hessian of MBAR objective function.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
H : np.ndarray, dtype=float, shape=(n_states, n_states)
Hessian of mbar objective function.
Notes
-----
Equation (C9) in JCP MBAR paper. | [
"Hessian",
"of",
"MBAR",
"objective",
"function",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L152-L182 | train | 31,046 |
choderalab/pymbar | pymbar/mbar_solvers.py | mbar_log_W_nk | def mbar_log_W_nk(u_kn, N_k, f_k):
"""Calculate the log weight matrix.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
logW_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
The normalized log weights.
Notes
-----
Equation (9) in JCP MBAR paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
logW = f_k - u_kn.T - log_denominator_n[:, np.newaxis]
return logW | python | def mbar_log_W_nk(u_kn, N_k, f_k):
"""Calculate the log weight matrix.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
logW_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
The normalized log weights.
Notes
-----
Equation (9) in JCP MBAR paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
logW = f_k - u_kn.T - log_denominator_n[:, np.newaxis]
return logW | [
"def",
"mbar_log_W_nk",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
":",
"u_kn",
",",
"N_k",
",",
"f_k",
"=",
"validate_inputs",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
"log_denominator_n",
"=",
"logsumexp",
"(",
"f_k",
"-",
"u_kn",
".",
"T",
",",
... | Calculate the log weight matrix.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
logW_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
The normalized log weights.
Notes
-----
Equation (9) in JCP MBAR paper. | [
"Calculate",
"the",
"log",
"weight",
"matrix",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L185-L210 | train | 31,047 |
choderalab/pymbar | pymbar/mbar_solvers.py | mbar_W_nk | def mbar_W_nk(u_kn, N_k, f_k):
"""Calculate the weight matrix.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
W_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
The normalized weights.
Notes
-----
Equation (9) in JCP MBAR paper.
"""
return np.exp(mbar_log_W_nk(u_kn, N_k, f_k)) | python | def mbar_W_nk(u_kn, N_k, f_k):
"""Calculate the weight matrix.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
W_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
The normalized weights.
Notes
-----
Equation (9) in JCP MBAR paper.
"""
return np.exp(mbar_log_W_nk(u_kn, N_k, f_k)) | [
"def",
"mbar_W_nk",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
":",
"return",
"np",
".",
"exp",
"(",
"mbar_log_W_nk",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
")"
] | Calculate the weight matrix.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
W_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
The normalized weights.
Notes
-----
Equation (9) in JCP MBAR paper. | [
"Calculate",
"the",
"weight",
"matrix",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L213-L234 | train | 31,048 |
choderalab/pymbar | pymbar/mbar_solvers.py | precondition_u_kn | def precondition_u_kn(u_kn, N_k, f_k):
"""Subtract a sample-dependent constant from u_kn to improve precision
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
Notes
-----
Returns u_kn - x_n, where x_n is based on the current estimate of f_k.
Upon subtraction of x_n, the MBAR objective function changes by an
additive constant, but its derivatives remain unchanged. We choose
x_n such that the current objective function value is zero, which
should give maximum precision in the objective function.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
u_kn = u_kn - u_kn.min(0)
u_kn += (logsumexp(f_k - u_kn.T, b=N_k, axis=1)) - N_k.dot(f_k) / float(N_k.sum())
return u_kn | python | def precondition_u_kn(u_kn, N_k, f_k):
"""Subtract a sample-dependent constant from u_kn to improve precision
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
Notes
-----
Returns u_kn - x_n, where x_n is based on the current estimate of f_k.
Upon subtraction of x_n, the MBAR objective function changes by an
additive constant, but its derivatives remain unchanged. We choose
x_n such that the current objective function value is zero, which
should give maximum precision in the objective function.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
u_kn = u_kn - u_kn.min(0)
u_kn += (logsumexp(f_k - u_kn.T, b=N_k, axis=1)) - N_k.dot(f_k) / float(N_k.sum())
return u_kn | [
"def",
"precondition_u_kn",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
":",
"u_kn",
",",
"N_k",
",",
"f_k",
"=",
"validate_inputs",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
")",
"u_kn",
"=",
"u_kn",
"-",
"u_kn",
".",
"min",
"(",
"0",
")",
"u_kn",
"+="... | Subtract a sample-dependent constant from u_kn to improve precision
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
Notes
-----
Returns u_kn - x_n, where x_n is based on the current estimate of f_k.
Upon subtraction of x_n, the MBAR objective function changes by an
additive constant, but its derivatives remain unchanged. We choose
x_n such that the current objective function value is zero, which
should give maximum precision in the objective function. | [
"Subtract",
"a",
"sample",
"-",
"dependent",
"constant",
"from",
"u_kn",
"to",
"improve",
"precision"
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L352-L380 | train | 31,049 |
choderalab/pymbar | pymbar/mbar_solvers.py | solve_mbar_once | def solve_mbar_once(u_kn_nonzero, N_k_nonzero, f_k_nonzero, method="hybr", tol=1E-12, options=None):
"""Solve MBAR self-consistent equations using some form of equation solver.
Parameters
----------
u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
for the nonempty states
N_k_nonzero : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state for the nonempty states
f_k_nonzero : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies for the nonempty states
method : str, optional, default="hybr"
The optimization routine to use. This can be any of the methods
available via scipy.optimize.minimize() or scipy.optimize.root().
tol : float, optional, default=1E-14
The convergance tolerance for minimize() or root()
verbose: bool
Whether to print information about the solution method.
options: dict, optional, default=None
Optional dictionary of algorithm-specific parameters. See
scipy.optimize.root or scipy.optimize.minimize for details.
Returns
-------
f_k : np.ndarray
The converged reduced free energies.
results : dict
Dictionary containing entire results of optimization routine, may
be useful when debugging convergence.
Notes
-----
This function requires that N_k_nonzero > 0--that is, you should have
already dropped all the states for which you have no samples.
Internally, this function works in a reduced coordinate system defined
by subtracting off the first component of f_k and fixing that component
to be zero.
For fast but precise convergence, we recommend calling this function
multiple times to polish the result. `solve_mbar()` facilitates this.
"""
u_kn_nonzero, N_k_nonzero, f_k_nonzero = validate_inputs(u_kn_nonzero, N_k_nonzero, f_k_nonzero)
f_k_nonzero = f_k_nonzero - f_k_nonzero[0] # Work with reduced dimensions with f_k[0] := 0
u_kn_nonzero = precondition_u_kn(u_kn_nonzero, N_k_nonzero, f_k_nonzero)
pad = lambda x: np.pad(x, (1, 0), mode='constant') # Helper function inserts zero before first element
unpad_second_arg = lambda obj, grad: (obj, grad[1:]) # Helper function drops first element of gradient
# Create objective functions / nonlinear equations to send to scipy.optimize, fixing f_0 = 0
grad = lambda x: mbar_gradient(u_kn_nonzero, N_k_nonzero, pad(x))[1:] # Objective function gradient
grad_and_obj = lambda x: unpad_second_arg(*mbar_objective_and_gradient(u_kn_nonzero, N_k_nonzero, pad(x))) # Objective function gradient and objective function
hess = lambda x: mbar_hessian(u_kn_nonzero, N_k_nonzero, pad(x))[1:][:, 1:] # Hessian of objective function
with warnings.catch_warnings(record=True) as w:
if method in ["L-BFGS-B", "dogleg", "CG", "BFGS", "Newton-CG", "TNC", "trust-ncg", "SLSQP"]:
if method in ["L-BFGS-B", "CG"]:
hess = None # To suppress warning from passing a hessian function.
results = scipy.optimize.minimize(grad_and_obj, f_k_nonzero[1:], jac=True, hess=hess, method=method, tol=tol, options=options)
f_k_nonzero = pad(results["x"])
elif method == 'adaptive':
results = adaptive(u_kn_nonzero, N_k_nonzero, f_k_nonzero, tol=tol, options=options)
f_k_nonzero = results # they are the same for adaptive, until we decide to return more.
else:
results = scipy.optimize.root(grad, f_k_nonzero[1:], jac=hess, method=method, tol=tol, options=options)
f_k_nonzero = pad(results["x"])
# If there were runtime warnings, show the messages
if len(w) > 0:
can_ignore = True
for warn_msg in w:
if "Unknown solver options" in str(warn_msg.message):
continue
warnings.showwarning(warn_msg.message, warn_msg.category,
warn_msg.filename, warn_msg.lineno, warn_msg.file, "")
can_ignore = False # If any warning is not just unknown options, can ]not skip check
if not can_ignore:
# Ensure MBAR solved correctly
w_nk_check = mbar_W_nk(u_kn_nonzero, N_k_nonzero, f_k_nonzero)
check_w_normalized(w_nk_check, N_k_nonzero)
print("MBAR weights converged within tolerance, despite the SciPy Warnings. Please validate your results.")
return f_k_nonzero, results | python | def solve_mbar_once(u_kn_nonzero, N_k_nonzero, f_k_nonzero, method="hybr", tol=1E-12, options=None):
"""Solve MBAR self-consistent equations using some form of equation solver.
Parameters
----------
u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
for the nonempty states
N_k_nonzero : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state for the nonempty states
f_k_nonzero : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies for the nonempty states
method : str, optional, default="hybr"
The optimization routine to use. This can be any of the methods
available via scipy.optimize.minimize() or scipy.optimize.root().
tol : float, optional, default=1E-14
The convergance tolerance for minimize() or root()
verbose: bool
Whether to print information about the solution method.
options: dict, optional, default=None
Optional dictionary of algorithm-specific parameters. See
scipy.optimize.root or scipy.optimize.minimize for details.
Returns
-------
f_k : np.ndarray
The converged reduced free energies.
results : dict
Dictionary containing entire results of optimization routine, may
be useful when debugging convergence.
Notes
-----
This function requires that N_k_nonzero > 0--that is, you should have
already dropped all the states for which you have no samples.
Internally, this function works in a reduced coordinate system defined
by subtracting off the first component of f_k and fixing that component
to be zero.
For fast but precise convergence, we recommend calling this function
multiple times to polish the result. `solve_mbar()` facilitates this.
"""
u_kn_nonzero, N_k_nonzero, f_k_nonzero = validate_inputs(u_kn_nonzero, N_k_nonzero, f_k_nonzero)
f_k_nonzero = f_k_nonzero - f_k_nonzero[0] # Work with reduced dimensions with f_k[0] := 0
u_kn_nonzero = precondition_u_kn(u_kn_nonzero, N_k_nonzero, f_k_nonzero)
pad = lambda x: np.pad(x, (1, 0), mode='constant') # Helper function inserts zero before first element
unpad_second_arg = lambda obj, grad: (obj, grad[1:]) # Helper function drops first element of gradient
# Create objective functions / nonlinear equations to send to scipy.optimize, fixing f_0 = 0
grad = lambda x: mbar_gradient(u_kn_nonzero, N_k_nonzero, pad(x))[1:] # Objective function gradient
grad_and_obj = lambda x: unpad_second_arg(*mbar_objective_and_gradient(u_kn_nonzero, N_k_nonzero, pad(x))) # Objective function gradient and objective function
hess = lambda x: mbar_hessian(u_kn_nonzero, N_k_nonzero, pad(x))[1:][:, 1:] # Hessian of objective function
with warnings.catch_warnings(record=True) as w:
if method in ["L-BFGS-B", "dogleg", "CG", "BFGS", "Newton-CG", "TNC", "trust-ncg", "SLSQP"]:
if method in ["L-BFGS-B", "CG"]:
hess = None # To suppress warning from passing a hessian function.
results = scipy.optimize.minimize(grad_and_obj, f_k_nonzero[1:], jac=True, hess=hess, method=method, tol=tol, options=options)
f_k_nonzero = pad(results["x"])
elif method == 'adaptive':
results = adaptive(u_kn_nonzero, N_k_nonzero, f_k_nonzero, tol=tol, options=options)
f_k_nonzero = results # they are the same for adaptive, until we decide to return more.
else:
results = scipy.optimize.root(grad, f_k_nonzero[1:], jac=hess, method=method, tol=tol, options=options)
f_k_nonzero = pad(results["x"])
# If there were runtime warnings, show the messages
if len(w) > 0:
can_ignore = True
for warn_msg in w:
if "Unknown solver options" in str(warn_msg.message):
continue
warnings.showwarning(warn_msg.message, warn_msg.category,
warn_msg.filename, warn_msg.lineno, warn_msg.file, "")
can_ignore = False # If any warning is not just unknown options, can ]not skip check
if not can_ignore:
# Ensure MBAR solved correctly
w_nk_check = mbar_W_nk(u_kn_nonzero, N_k_nonzero, f_k_nonzero)
check_w_normalized(w_nk_check, N_k_nonzero)
print("MBAR weights converged within tolerance, despite the SciPy Warnings. Please validate your results.")
return f_k_nonzero, results | [
"def",
"solve_mbar_once",
"(",
"u_kn_nonzero",
",",
"N_k_nonzero",
",",
"f_k_nonzero",
",",
"method",
"=",
"\"hybr\"",
",",
"tol",
"=",
"1E-12",
",",
"options",
"=",
"None",
")",
":",
"u_kn_nonzero",
",",
"N_k_nonzero",
",",
"f_k_nonzero",
"=",
"validate_input... | Solve MBAR self-consistent equations using some form of equation solver.
Parameters
----------
u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
for the nonempty states
N_k_nonzero : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state for the nonempty states
f_k_nonzero : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies for the nonempty states
method : str, optional, default="hybr"
The optimization routine to use. This can be any of the methods
available via scipy.optimize.minimize() or scipy.optimize.root().
tol : float, optional, default=1E-14
The convergance tolerance for minimize() or root()
verbose: bool
Whether to print information about the solution method.
options: dict, optional, default=None
Optional dictionary of algorithm-specific parameters. See
scipy.optimize.root or scipy.optimize.minimize for details.
Returns
-------
f_k : np.ndarray
The converged reduced free energies.
results : dict
Dictionary containing entire results of optimization routine, may
be useful when debugging convergence.
Notes
-----
This function requires that N_k_nonzero > 0--that is, you should have
already dropped all the states for which you have no samples.
Internally, this function works in a reduced coordinate system defined
by subtracting off the first component of f_k and fixing that component
to be zero.
For fast but precise convergence, we recommend calling this function
multiple times to polish the result. `solve_mbar()` facilitates this. | [
"Solve",
"MBAR",
"self",
"-",
"consistent",
"equations",
"using",
"some",
"form",
"of",
"equation",
"solver",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L383-L465 | train | 31,050 |
choderalab/pymbar | pymbar/mbar_solvers.py | solve_mbar | def solve_mbar(u_kn_nonzero, N_k_nonzero, f_k_nonzero, solver_protocol=None):
"""Solve MBAR self-consistent equations using some sequence of equation solvers.
Parameters
----------
u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
for the nonempty states
N_k_nonzero : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state for the nonempty states
f_k_nonzero : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies for the nonempty states
solver_protocol: tuple(dict()), optional, default=None
Optional list of dictionaries of steps in solver protocol.
If None, a default protocol will be used.
Returns
-------
f_k : np.ndarray
The converged reduced free energies.
all_results : list(dict())
List of results from each step of solver_protocol. Each element in
list contains the results dictionary from solve_mbar_once()
for the corresponding step.
Notes
-----
This function requires that N_k_nonzero > 0--that is, you should have
already dropped all the states for which you have no samples.
Internally, this function works in a reduced coordinate system defined
by subtracting off the first component of f_k and fixing that component
to be zero.
This function calls `solve_mbar_once()` multiple times to achieve
converged results. Generally, a single call to solve_mbar_once()
will not give fully converged answers because of limited numerical precision.
Each call to `solve_mbar_once()` re-conditions the nonlinear
equations using the current guess.
"""
if solver_protocol is None:
solver_protocol = DEFAULT_SOLVER_PROTOCOL
for protocol in solver_protocol:
if protocol['method'] is None:
protocol['method'] = DEFAULT_SOLVER_METHOD
all_results = []
for k, options in enumerate(solver_protocol):
f_k_nonzero, results = solve_mbar_once(u_kn_nonzero, N_k_nonzero, f_k_nonzero, **options)
all_results.append(results)
all_results.append(("Final gradient norm: %.3g" % np.linalg.norm(mbar_gradient(u_kn_nonzero, N_k_nonzero, f_k_nonzero))))
return f_k_nonzero, all_results | python | def solve_mbar(u_kn_nonzero, N_k_nonzero, f_k_nonzero, solver_protocol=None):
"""Solve MBAR self-consistent equations using some sequence of equation solvers.
Parameters
----------
u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
for the nonempty states
N_k_nonzero : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state for the nonempty states
f_k_nonzero : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies for the nonempty states
solver_protocol: tuple(dict()), optional, default=None
Optional list of dictionaries of steps in solver protocol.
If None, a default protocol will be used.
Returns
-------
f_k : np.ndarray
The converged reduced free energies.
all_results : list(dict())
List of results from each step of solver_protocol. Each element in
list contains the results dictionary from solve_mbar_once()
for the corresponding step.
Notes
-----
This function requires that N_k_nonzero > 0--that is, you should have
already dropped all the states for which you have no samples.
Internally, this function works in a reduced coordinate system defined
by subtracting off the first component of f_k and fixing that component
to be zero.
This function calls `solve_mbar_once()` multiple times to achieve
converged results. Generally, a single call to solve_mbar_once()
will not give fully converged answers because of limited numerical precision.
Each call to `solve_mbar_once()` re-conditions the nonlinear
equations using the current guess.
"""
if solver_protocol is None:
solver_protocol = DEFAULT_SOLVER_PROTOCOL
for protocol in solver_protocol:
if protocol['method'] is None:
protocol['method'] = DEFAULT_SOLVER_METHOD
all_results = []
for k, options in enumerate(solver_protocol):
f_k_nonzero, results = solve_mbar_once(u_kn_nonzero, N_k_nonzero, f_k_nonzero, **options)
all_results.append(results)
all_results.append(("Final gradient norm: %.3g" % np.linalg.norm(mbar_gradient(u_kn_nonzero, N_k_nonzero, f_k_nonzero))))
return f_k_nonzero, all_results | [
"def",
"solve_mbar",
"(",
"u_kn_nonzero",
",",
"N_k_nonzero",
",",
"f_k_nonzero",
",",
"solver_protocol",
"=",
"None",
")",
":",
"if",
"solver_protocol",
"is",
"None",
":",
"solver_protocol",
"=",
"DEFAULT_SOLVER_PROTOCOL",
"for",
"protocol",
"in",
"solver_protocol"... | Solve MBAR self-consistent equations using some sequence of equation solvers.
Parameters
----------
u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
for the nonempty states
N_k_nonzero : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state for the nonempty states
f_k_nonzero : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies for the nonempty states
solver_protocol: tuple(dict()), optional, default=None
Optional list of dictionaries of steps in solver protocol.
If None, a default protocol will be used.
Returns
-------
f_k : np.ndarray
The converged reduced free energies.
all_results : list(dict())
List of results from each step of solver_protocol. Each element in
list contains the results dictionary from solve_mbar_once()
for the corresponding step.
Notes
-----
This function requires that N_k_nonzero > 0--that is, you should have
already dropped all the states for which you have no samples.
Internally, this function works in a reduced coordinate system defined
by subtracting off the first component of f_k and fixing that component
to be zero.
This function calls `solve_mbar_once()` multiple times to achieve
converged results. Generally, a single call to solve_mbar_once()
will not give fully converged answers because of limited numerical precision.
Each call to `solve_mbar_once()` re-conditions the nonlinear
equations using the current guess. | [
"Solve",
"MBAR",
"self",
"-",
"consistent",
"equations",
"using",
"some",
"sequence",
"of",
"equation",
"solvers",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L468-L518 | train | 31,051 |
choderalab/pymbar | pymbar/mbar_solvers.py | solve_mbar_for_all_states | def solve_mbar_for_all_states(u_kn, N_k, f_k, solver_protocol):
"""Solve for free energies of states with samples, then calculate for
empty states.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
solver_protocol: tuple(dict()), optional, default=None
Sequence of dictionaries of steps in solver protocol for final
stage of refinement.
Returns
-------
f_k : np.ndarray, shape=(n_states), dtype='float'
The free energies of states
"""
states_with_samples = np.where(N_k > 0)[0]
if len(states_with_samples) == 1:
f_k_nonzero = np.array([0.0])
else:
f_k_nonzero, all_results = solve_mbar(u_kn[states_with_samples], N_k[states_with_samples],
f_k[states_with_samples], solver_protocol=solver_protocol)
f_k[states_with_samples] = f_k_nonzero
# Update all free energies because those from states with zero samples are not correctly computed by solvers.
f_k = self_consistent_update(u_kn, N_k, f_k)
# This is necessary because state 0 might have had zero samples,
# but we still want that state to be the reference with free energy 0.
f_k -= f_k[0]
return f_k | python | def solve_mbar_for_all_states(u_kn, N_k, f_k, solver_protocol):
"""Solve for free energies of states with samples, then calculate for
empty states.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
solver_protocol: tuple(dict()), optional, default=None
Sequence of dictionaries of steps in solver protocol for final
stage of refinement.
Returns
-------
f_k : np.ndarray, shape=(n_states), dtype='float'
The free energies of states
"""
states_with_samples = np.where(N_k > 0)[0]
if len(states_with_samples) == 1:
f_k_nonzero = np.array([0.0])
else:
f_k_nonzero, all_results = solve_mbar(u_kn[states_with_samples], N_k[states_with_samples],
f_k[states_with_samples], solver_protocol=solver_protocol)
f_k[states_with_samples] = f_k_nonzero
# Update all free energies because those from states with zero samples are not correctly computed by solvers.
f_k = self_consistent_update(u_kn, N_k, f_k)
# This is necessary because state 0 might have had zero samples,
# but we still want that state to be the reference with free energy 0.
f_k -= f_k[0]
return f_k | [
"def",
"solve_mbar_for_all_states",
"(",
"u_kn",
",",
"N_k",
",",
"f_k",
",",
"solver_protocol",
")",
":",
"states_with_samples",
"=",
"np",
".",
"where",
"(",
"N_k",
">",
"0",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"states_with_samples",
")",
"==",
"1",
... | Solve for free energies of states with samples, then calculate for
empty states.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
solver_protocol: tuple(dict()), optional, default=None
Sequence of dictionaries of steps in solver protocol for final
stage of refinement.
Returns
-------
f_k : np.ndarray, shape=(n_states), dtype='float'
The free energies of states | [
"Solve",
"for",
"free",
"energies",
"of",
"states",
"with",
"samples",
"then",
"calculate",
"for",
"empty",
"states",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar_solvers.py#L521-L558 | train | 31,052 |
choderalab/pymbar | examples/constant-force-optical-trap/force-bias-optical-trap.py | construct_nonuniform_bins | def construct_nonuniform_bins(x_n, nbins):
"""Construct histogram using bins of unequal size to ensure approximately equal population in each bin.
ARGUMENTS
x_n (1D numpy array of floats) - x_n[n] is data point n
RETURN VALUES
bin_left_boundary_i (1D numpy array of floats) - data in bin i will satisfy bin_left_boundary_i[i] <= x < bin_left_boundary_i[i+1]
bin_center_i (1D numpy array of floats) - bin_center_i[i] is the center of bin i
bin_width_i (1D numpy array of floats) - bin_width_i[i] is the width of bin i
bin_n (1D numpy array of int32) - bin_n[n] is the bin index (in range(nbins)) of x_n[n]
"""
# Determine number of samples.
N = x_n.size
# Get indices of elements of x_n sorted in order.
sorted_indices = x_n.argsort()
# Allocate storage for results.
bin_left_boundary_i = zeros([nbins+1], float64)
bin_right_boundary_i = zeros([nbins+1], float64)
bin_center_i = zeros([nbins], float64)
bin_width_i = zeros([nbins], float64)
bin_n = zeros([N], int32)
# Determine sampled range, adding a little bit to the rightmost range to ensure no samples escape the range.
x_min = x_n.min()
x_max = x_n.max()
x_max += (x_max - x_min) * 1.0e-5
# Determine bin boundaries and bin assignments.
for bin_index in range(nbins):
# indices of first and last data points in this span
first_index = int(float(N) / float(nbins) * float(bin_index))
last_index = int(float(N) / float(nbins) * float(bin_index+1))
# store left bin boundary
bin_left_boundary_i[bin_index] = x_n[sorted_indices[first_index]]
# store assignments
bin_n[sorted_indices[first_index:last_index]] = bin_index
# set rightmost boundary
bin_left_boundary_i[nbins] = x_max
# Determine bin centers and widths
for bin_index in range(nbins):
bin_center_i[bin_index] = (bin_left_boundary_i[bin_index] + bin_left_boundary_i[bin_index+1]) / 2.0
bin_width_i[bin_index] = (bin_left_boundary_i[bin_index+1] - bin_left_boundary_i[bin_index])
# DEBUG
# outfile = open('states.out', 'w')
# for n in range(N):
# outfile.write('%8f %8d\n' % (x_n[n], bin_n[n]))
# outfile.close()
return (bin_left_boundary_i, bin_center_i, bin_width_i, bin_n) | python | def construct_nonuniform_bins(x_n, nbins):
"""Construct histogram using bins of unequal size to ensure approximately equal population in each bin.
ARGUMENTS
x_n (1D numpy array of floats) - x_n[n] is data point n
RETURN VALUES
bin_left_boundary_i (1D numpy array of floats) - data in bin i will satisfy bin_left_boundary_i[i] <= x < bin_left_boundary_i[i+1]
bin_center_i (1D numpy array of floats) - bin_center_i[i] is the center of bin i
bin_width_i (1D numpy array of floats) - bin_width_i[i] is the width of bin i
bin_n (1D numpy array of int32) - bin_n[n] is the bin index (in range(nbins)) of x_n[n]
"""
# Determine number of samples.
N = x_n.size
# Get indices of elements of x_n sorted in order.
sorted_indices = x_n.argsort()
# Allocate storage for results.
bin_left_boundary_i = zeros([nbins+1], float64)
bin_right_boundary_i = zeros([nbins+1], float64)
bin_center_i = zeros([nbins], float64)
bin_width_i = zeros([nbins], float64)
bin_n = zeros([N], int32)
# Determine sampled range, adding a little bit to the rightmost range to ensure no samples escape the range.
x_min = x_n.min()
x_max = x_n.max()
x_max += (x_max - x_min) * 1.0e-5
# Determine bin boundaries and bin assignments.
for bin_index in range(nbins):
# indices of first and last data points in this span
first_index = int(float(N) / float(nbins) * float(bin_index))
last_index = int(float(N) / float(nbins) * float(bin_index+1))
# store left bin boundary
bin_left_boundary_i[bin_index] = x_n[sorted_indices[first_index]]
# store assignments
bin_n[sorted_indices[first_index:last_index]] = bin_index
# set rightmost boundary
bin_left_boundary_i[nbins] = x_max
# Determine bin centers and widths
for bin_index in range(nbins):
bin_center_i[bin_index] = (bin_left_boundary_i[bin_index] + bin_left_boundary_i[bin_index+1]) / 2.0
bin_width_i[bin_index] = (bin_left_boundary_i[bin_index+1] - bin_left_boundary_i[bin_index])
# DEBUG
# outfile = open('states.out', 'w')
# for n in range(N):
# outfile.write('%8f %8d\n' % (x_n[n], bin_n[n]))
# outfile.close()
return (bin_left_boundary_i, bin_center_i, bin_width_i, bin_n) | [
"def",
"construct_nonuniform_bins",
"(",
"x_n",
",",
"nbins",
")",
":",
"# Determine number of samples.",
"N",
"=",
"x_n",
".",
"size",
"# Get indices of elements of x_n sorted in order.",
"sorted_indices",
"=",
"x_n",
".",
"argsort",
"(",
")",
"# Allocate storage for res... | Construct histogram using bins of unequal size to ensure approximately equal population in each bin.
ARGUMENTS
x_n (1D numpy array of floats) - x_n[n] is data point n
RETURN VALUES
bin_left_boundary_i (1D numpy array of floats) - data in bin i will satisfy bin_left_boundary_i[i] <= x < bin_left_boundary_i[i+1]
bin_center_i (1D numpy array of floats) - bin_center_i[i] is the center of bin i
bin_width_i (1D numpy array of floats) - bin_width_i[i] is the width of bin i
bin_n (1D numpy array of int32) - bin_n[n] is the bin index (in range(nbins)) of x_n[n] | [
"Construct",
"histogram",
"using",
"bins",
"of",
"unequal",
"size",
"to",
"ensure",
"approximately",
"equal",
"population",
"in",
"each",
"bin",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/examples/constant-force-optical-trap/force-bias-optical-trap.py#L47-L104 | train | 31,053 |
choderalab/pymbar | pymbar/timeseries.py | integratedAutocorrelationTime | def integratedAutocorrelationTime(A_n, B_n=None, fast=False, mintime=3):
"""Estimate the integrated autocorrelation time.
See Also
--------
statisticalInefficiency
"""
g = statisticalInefficiency(A_n, B_n, fast, mintime)
tau = (g - 1.0) / 2.0
return tau | python | def integratedAutocorrelationTime(A_n, B_n=None, fast=False, mintime=3):
"""Estimate the integrated autocorrelation time.
See Also
--------
statisticalInefficiency
"""
g = statisticalInefficiency(A_n, B_n, fast, mintime)
tau = (g - 1.0) / 2.0
return tau | [
"def",
"integratedAutocorrelationTime",
"(",
"A_n",
",",
"B_n",
"=",
"None",
",",
"fast",
"=",
"False",
",",
"mintime",
"=",
"3",
")",
":",
"g",
"=",
"statisticalInefficiency",
"(",
"A_n",
",",
"B_n",
",",
"fast",
",",
"mintime",
")",
"tau",
"=",
"(",
... | Estimate the integrated autocorrelation time.
See Also
--------
statisticalInefficiency | [
"Estimate",
"the",
"integrated",
"autocorrelation",
"time",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/timeseries.py#L357-L368 | train | 31,054 |
choderalab/pymbar | pymbar/timeseries.py | integratedAutocorrelationTimeMultiple | def integratedAutocorrelationTimeMultiple(A_kn, fast=False):
"""Estimate the integrated autocorrelation time from multiple timeseries.
See Also
--------
statisticalInefficiencyMultiple
"""
g = statisticalInefficiencyMultiple(A_kn, fast, False)
tau = (g - 1.0) / 2.0
return tau | python | def integratedAutocorrelationTimeMultiple(A_kn, fast=False):
"""Estimate the integrated autocorrelation time from multiple timeseries.
See Also
--------
statisticalInefficiencyMultiple
"""
g = statisticalInefficiencyMultiple(A_kn, fast, False)
tau = (g - 1.0) / 2.0
return tau | [
"def",
"integratedAutocorrelationTimeMultiple",
"(",
"A_kn",
",",
"fast",
"=",
"False",
")",
":",
"g",
"=",
"statisticalInefficiencyMultiple",
"(",
"A_kn",
",",
"fast",
",",
"False",
")",
"tau",
"=",
"(",
"g",
"-",
"1.0",
")",
"/",
"2.0",
"return",
"tau"
] | Estimate the integrated autocorrelation time from multiple timeseries.
See Also
--------
statisticalInefficiencyMultiple | [
"Estimate",
"the",
"integrated",
"autocorrelation",
"time",
"from",
"multiple",
"timeseries",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/timeseries.py#L372-L383 | train | 31,055 |
choderalab/pymbar | pymbar/timeseries.py | subsampleCorrelatedData | def subsampleCorrelatedData(A_t, g=None, fast=False, conservative=False, verbose=False):
"""Determine the indices of an uncorrelated subsample of the data.
Parameters
----------
A_t : np.ndarray
A_t[t] is the t-th value of timeseries A(t). Length is deduced from vector.
g : float, optional
if provided, the statistical inefficiency g is used to subsample the timeseries -- otherwise it will be computed (default: None)
fast : bool, optional, default=False
fast can be set to True to give a less accurate but very quick estimate (default: False)
conservative : bool, optional, default=False
if set to True, uniformly-spaced indices are chosen with interval ceil(g), where
g is the statistical inefficiency. Otherwise, indices are chosen non-uniformly with interval of
approximately g in order to end up with approximately T/g total indices
verbose : bool, optional, default=False
if True, some output is printed
Returns
-------
indices : list of int
the indices of an uncorrelated subsample of the data
Notes
-----
The statistical inefficiency is computed with the function computeStatisticalInefficiency().
ToDo
----
Instead of using regular stride, use irregular stride so more data can be fit in when g is non-integral.
Examples
--------
Subsample a correlated timeseries to extract an effectively uncorrelated dataset.
>>> from pymbar import testsystems
>>> A_t = testsystems.correlated_timeseries_example(N=10000, tau=5.0) # generate a test correlated timeseries
>>> indices = subsampleCorrelatedData(A_t) # compute indices of uncorrelated timeseries
>>> A_n = A_t[indices] # extract uncorrelated samples
Extract uncorrelated samples from multiple timeseries data from the same process.
>>> # Generate multiple correlated timeseries data of different lengths.
>>> T_k = [1000, 2000, 3000, 4000, 5000]
>>> K = len(T_k) # number of timeseries
>>> tau = 5.0 # exponential relaxation time
>>> A_kt = [ testsystems.correlated_timeseries_example(N=T, tau=tau) for T in T_k ] # A_kt[k] is correlated timeseries k
>>> # Estimate statistical inefficiency from all timeseries data.
>>> g = statisticalInefficiencyMultiple(A_kt)
>>> # Count number of uncorrelated samples in each timeseries.
>>> N_k = np.array([ len(subsampleCorrelatedData(A_t, g=g)) for A_t in A_kt ]) # N_k[k] is the number of uncorrelated samples in timeseries k
>>> N = N_k.sum() # total number of uncorrelated samples
>>> # Subsample all trajectories to produce uncorrelated samples
>>> A_kn = [ A_t[subsampleCorrelatedData(A_t, g=g)] for A_t in A_kt ] # A_kn[k] is uncorrelated subset of trajectory A_kt[t]
>>> # Concatenate data into one timeseries.
>>> A_n = np.zeros([N], np.float32) # A_n[n] is nth sample in concatenated set of uncorrelated samples
>>> A_n[0:N_k[0]] = A_kn[0]
>>> for k in range(1,K): A_n[N_k[0:k].sum():N_k[0:k+1].sum()] = A_kn[k]
"""
# Create np copy of arrays.
A_t = np.array(A_t)
# Get the length of the timeseries.
T = A_t.size
# Compute the statistical inefficiency for the timeseries.
if not g:
if verbose:
print("Computing statistical inefficiency...")
g = statisticalInefficiency(A_t, A_t, fast=fast)
if verbose:
print("g = %f" % g)
if conservative:
# Round g up to determine the stride we can use to pick out regularly-spaced uncorrelated samples.
stride = int(math.ceil(g))
if verbose:
print("conservative subsampling: using stride of %d" % stride)
# Assemble list of indices of uncorrelated snapshots.
indices = range(0, T, stride)
else:
# Choose indices as floor(n*g), with n = 0,1,2,..., until we run out of data.
indices = []
n = 0
while int(round(n * g)) < T:
t = int(round(n * g))
# ensure we don't sample the same point twice
if (n == 0) or (t != indices[n - 1]):
indices.append(t)
n += 1
if verbose:
print("standard subsampling: using average stride of %f" % g)
# Number of samples in subsampled timeseries.
N = len(indices)
if verbose:
print("The resulting subsampled set has %d samples (original timeseries had %d)." % (N, T))
# Return the list of indices of uncorrelated snapshots.
return indices | python | def subsampleCorrelatedData(A_t, g=None, fast=False, conservative=False, verbose=False):
"""Determine the indices of an uncorrelated subsample of the data.
Parameters
----------
A_t : np.ndarray
A_t[t] is the t-th value of timeseries A(t). Length is deduced from vector.
g : float, optional
if provided, the statistical inefficiency g is used to subsample the timeseries -- otherwise it will be computed (default: None)
fast : bool, optional, default=False
fast can be set to True to give a less accurate but very quick estimate (default: False)
conservative : bool, optional, default=False
if set to True, uniformly-spaced indices are chosen with interval ceil(g), where
g is the statistical inefficiency. Otherwise, indices are chosen non-uniformly with interval of
approximately g in order to end up with approximately T/g total indices
verbose : bool, optional, default=False
if True, some output is printed
Returns
-------
indices : list of int
the indices of an uncorrelated subsample of the data
Notes
-----
The statistical inefficiency is computed with the function computeStatisticalInefficiency().
ToDo
----
Instead of using regular stride, use irregular stride so more data can be fit in when g is non-integral.
Examples
--------
Subsample a correlated timeseries to extract an effectively uncorrelated dataset.
>>> from pymbar import testsystems
>>> A_t = testsystems.correlated_timeseries_example(N=10000, tau=5.0) # generate a test correlated timeseries
>>> indices = subsampleCorrelatedData(A_t) # compute indices of uncorrelated timeseries
>>> A_n = A_t[indices] # extract uncorrelated samples
Extract uncorrelated samples from multiple timeseries data from the same process.
>>> # Generate multiple correlated timeseries data of different lengths.
>>> T_k = [1000, 2000, 3000, 4000, 5000]
>>> K = len(T_k) # number of timeseries
>>> tau = 5.0 # exponential relaxation time
>>> A_kt = [ testsystems.correlated_timeseries_example(N=T, tau=tau) for T in T_k ] # A_kt[k] is correlated timeseries k
>>> # Estimate statistical inefficiency from all timeseries data.
>>> g = statisticalInefficiencyMultiple(A_kt)
>>> # Count number of uncorrelated samples in each timeseries.
>>> N_k = np.array([ len(subsampleCorrelatedData(A_t, g=g)) for A_t in A_kt ]) # N_k[k] is the number of uncorrelated samples in timeseries k
>>> N = N_k.sum() # total number of uncorrelated samples
>>> # Subsample all trajectories to produce uncorrelated samples
>>> A_kn = [ A_t[subsampleCorrelatedData(A_t, g=g)] for A_t in A_kt ] # A_kn[k] is uncorrelated subset of trajectory A_kt[t]
>>> # Concatenate data into one timeseries.
>>> A_n = np.zeros([N], np.float32) # A_n[n] is nth sample in concatenated set of uncorrelated samples
>>> A_n[0:N_k[0]] = A_kn[0]
>>> for k in range(1,K): A_n[N_k[0:k].sum():N_k[0:k+1].sum()] = A_kn[k]
"""
# Create np copy of arrays.
A_t = np.array(A_t)
# Get the length of the timeseries.
T = A_t.size
# Compute the statistical inefficiency for the timeseries.
if not g:
if verbose:
print("Computing statistical inefficiency...")
g = statisticalInefficiency(A_t, A_t, fast=fast)
if verbose:
print("g = %f" % g)
if conservative:
# Round g up to determine the stride we can use to pick out regularly-spaced uncorrelated samples.
stride = int(math.ceil(g))
if verbose:
print("conservative subsampling: using stride of %d" % stride)
# Assemble list of indices of uncorrelated snapshots.
indices = range(0, T, stride)
else:
# Choose indices as floor(n*g), with n = 0,1,2,..., until we run out of data.
indices = []
n = 0
while int(round(n * g)) < T:
t = int(round(n * g))
# ensure we don't sample the same point twice
if (n == 0) or (t != indices[n - 1]):
indices.append(t)
n += 1
if verbose:
print("standard subsampling: using average stride of %f" % g)
# Number of samples in subsampled timeseries.
N = len(indices)
if verbose:
print("The resulting subsampled set has %d samples (original timeseries had %d)." % (N, T))
# Return the list of indices of uncorrelated snapshots.
return indices | [
"def",
"subsampleCorrelatedData",
"(",
"A_t",
",",
"g",
"=",
"None",
",",
"fast",
"=",
"False",
",",
"conservative",
"=",
"False",
",",
"verbose",
"=",
"False",
")",
":",
"# Create np copy of arrays.",
"A_t",
"=",
"np",
".",
"array",
"(",
"A_t",
")",
"# ... | Determine the indices of an uncorrelated subsample of the data.
Parameters
----------
A_t : np.ndarray
A_t[t] is the t-th value of timeseries A(t). Length is deduced from vector.
g : float, optional
if provided, the statistical inefficiency g is used to subsample the timeseries -- otherwise it will be computed (default: None)
fast : bool, optional, default=False
fast can be set to True to give a less accurate but very quick estimate (default: False)
conservative : bool, optional, default=False
if set to True, uniformly-spaced indices are chosen with interval ceil(g), where
g is the statistical inefficiency. Otherwise, indices are chosen non-uniformly with interval of
approximately g in order to end up with approximately T/g total indices
verbose : bool, optional, default=False
if True, some output is printed
Returns
-------
indices : list of int
the indices of an uncorrelated subsample of the data
Notes
-----
The statistical inefficiency is computed with the function computeStatisticalInefficiency().
ToDo
----
Instead of using regular stride, use irregular stride so more data can be fit in when g is non-integral.
Examples
--------
Subsample a correlated timeseries to extract an effectively uncorrelated dataset.
>>> from pymbar import testsystems
>>> A_t = testsystems.correlated_timeseries_example(N=10000, tau=5.0) # generate a test correlated timeseries
>>> indices = subsampleCorrelatedData(A_t) # compute indices of uncorrelated timeseries
>>> A_n = A_t[indices] # extract uncorrelated samples
Extract uncorrelated samples from multiple timeseries data from the same process.
>>> # Generate multiple correlated timeseries data of different lengths.
>>> T_k = [1000, 2000, 3000, 4000, 5000]
>>> K = len(T_k) # number of timeseries
>>> tau = 5.0 # exponential relaxation time
>>> A_kt = [ testsystems.correlated_timeseries_example(N=T, tau=tau) for T in T_k ] # A_kt[k] is correlated timeseries k
>>> # Estimate statistical inefficiency from all timeseries data.
>>> g = statisticalInefficiencyMultiple(A_kt)
>>> # Count number of uncorrelated samples in each timeseries.
>>> N_k = np.array([ len(subsampleCorrelatedData(A_t, g=g)) for A_t in A_kt ]) # N_k[k] is the number of uncorrelated samples in timeseries k
>>> N = N_k.sum() # total number of uncorrelated samples
>>> # Subsample all trajectories to produce uncorrelated samples
>>> A_kn = [ A_t[subsampleCorrelatedData(A_t, g=g)] for A_t in A_kt ] # A_kn[k] is uncorrelated subset of trajectory A_kt[t]
>>> # Concatenate data into one timeseries.
>>> A_n = np.zeros([N], np.float32) # A_n[n] is nth sample in concatenated set of uncorrelated samples
>>> A_n[0:N_k[0]] = A_kn[0]
>>> for k in range(1,K): A_n[N_k[0:k].sum():N_k[0:k+1].sum()] = A_kn[k] | [
"Determine",
"the",
"indices",
"of",
"an",
"uncorrelated",
"subsample",
"of",
"the",
"data",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/timeseries.py#L632-L736 | train | 31,056 |
choderalab/pymbar | pymbar/bar.py | BARzero | def BARzero(w_F, w_R, DeltaF):
"""A function that when zeroed is equivalent to the solution of
the Bennett acceptance ratio.
from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601
D_F = M + w_F - Delta F
D_R = M + w_R - Delta F
we want:
\sum_N_F (1+exp(D_F))^-1 = \sum N_R N_R <(1+exp(-D_R))^-1>
ln \sum N_F (1+exp(D_F))^-1>_F = \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R
ln \sum N_F (1+exp(D_F))^-1>_F - \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R = 0
Parameters
----------
w_F : np.ndarray
w_F[t] is the forward work value from snapshot t.
t = 0...(T_F-1) Length T_F is deduced from vector.
w_R : np.ndarray
w_R[t] is the reverse work value from snapshot t.
t = 0...(T_R-1) Length T_R is deduced from vector.
DeltaF : float
Our current guess
Returns
-------
fzero : float
a variable that is zeroed when DeltaF satisfies BAR.
Examples
--------
Compute free energy difference between two specified samples of work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> DeltaF = BARzero(w_F, w_R, 0.0)
"""
np.seterr(over='raise') # raise exceptions to overflows
w_F = np.array(w_F, np.float64)
w_R = np.array(w_R, np.float64)
DeltaF = float(DeltaF)
# Recommended stable implementation of BAR.
# Determine number of forward and reverse work values provided.
T_F = float(w_F.size) # number of forward work values
T_R = float(w_R.size) # number of reverse work values
# Compute log ratio of forward and reverse counts.
M = np.log(T_F / T_R)
# Compute log numerator. We have to watch out for overflows. We
# do this by making sure that 1+exp(x) doesn't overflow, choosing
# to always exponentiate a negative number.
# log f(W) = - log [1 + exp((M + W - DeltaF))]
# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
# = - maxarg - log(exp[-maxarg] + exp[(M + W - DeltaF) - maxarg])
# where maxarg = max((M + W - DeltaF), 0)
exp_arg_F = (M + w_F - DeltaF)
# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
max_arg_F = np.choose(np.less(0.0, exp_arg_F), (0.0, exp_arg_F))
try:
log_f_F = - max_arg_F - np.log(np.exp(-max_arg_F) + np.exp(exp_arg_F - max_arg_F))
except:
# give up; if there's overflow, return zero
print("The input data results in overflow in BAR")
return np.nan
log_numer = logsumexp(log_f_F)
# Compute log_denominator.
# log f(R) = - log [1 + exp(-(M + W - DeltaF))]
# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
# = - maxarg - log[exp[-maxarg] + (T_F/T_R) exp[(M + W - DeltaF) - maxarg]]
# where maxarg = max( -(M + W - DeltaF), 0)
exp_arg_R = -(M - w_R - DeltaF)
# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
max_arg_R = np.choose(np.less(0.0, exp_arg_R), (0.0, exp_arg_R))
try:
log_f_R = - max_arg_R - np.log(np.exp(-max_arg_R) + np.exp(exp_arg_R - max_arg_R))
except:
print("The input data results in overflow in BAR")
return np.nan
log_denom = logsumexp(log_f_R)
# This function must be zeroed to find a root
fzero = log_numer - log_denom
np.seterr(over='warn') # return options to standard settings so we don't disturb other functionality.
return fzero | python | def BARzero(w_F, w_R, DeltaF):
"""A function that when zeroed is equivalent to the solution of
the Bennett acceptance ratio.
from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601
D_F = M + w_F - Delta F
D_R = M + w_R - Delta F
we want:
\sum_N_F (1+exp(D_F))^-1 = \sum N_R N_R <(1+exp(-D_R))^-1>
ln \sum N_F (1+exp(D_F))^-1>_F = \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R
ln \sum N_F (1+exp(D_F))^-1>_F - \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R = 0
Parameters
----------
w_F : np.ndarray
w_F[t] is the forward work value from snapshot t.
t = 0...(T_F-1) Length T_F is deduced from vector.
w_R : np.ndarray
w_R[t] is the reverse work value from snapshot t.
t = 0...(T_R-1) Length T_R is deduced from vector.
DeltaF : float
Our current guess
Returns
-------
fzero : float
a variable that is zeroed when DeltaF satisfies BAR.
Examples
--------
Compute free energy difference between two specified samples of work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> DeltaF = BARzero(w_F, w_R, 0.0)
"""
np.seterr(over='raise') # raise exceptions to overflows
w_F = np.array(w_F, np.float64)
w_R = np.array(w_R, np.float64)
DeltaF = float(DeltaF)
# Recommended stable implementation of BAR.
# Determine number of forward and reverse work values provided.
T_F = float(w_F.size) # number of forward work values
T_R = float(w_R.size) # number of reverse work values
# Compute log ratio of forward and reverse counts.
M = np.log(T_F / T_R)
# Compute log numerator. We have to watch out for overflows. We
# do this by making sure that 1+exp(x) doesn't overflow, choosing
# to always exponentiate a negative number.
# log f(W) = - log [1 + exp((M + W - DeltaF))]
# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
# = - maxarg - log(exp[-maxarg] + exp[(M + W - DeltaF) - maxarg])
# where maxarg = max((M + W - DeltaF), 0)
exp_arg_F = (M + w_F - DeltaF)
# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
max_arg_F = np.choose(np.less(0.0, exp_arg_F), (0.0, exp_arg_F))
try:
log_f_F = - max_arg_F - np.log(np.exp(-max_arg_F) + np.exp(exp_arg_F - max_arg_F))
except:
# give up; if there's overflow, return zero
print("The input data results in overflow in BAR")
return np.nan
log_numer = logsumexp(log_f_F)
# Compute log_denominator.
# log f(R) = - log [1 + exp(-(M + W - DeltaF))]
# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
# = - maxarg - log[exp[-maxarg] + (T_F/T_R) exp[(M + W - DeltaF) - maxarg]]
# where maxarg = max( -(M + W - DeltaF), 0)
exp_arg_R = -(M - w_R - DeltaF)
# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
max_arg_R = np.choose(np.less(0.0, exp_arg_R), (0.0, exp_arg_R))
try:
log_f_R = - max_arg_R - np.log(np.exp(-max_arg_R) + np.exp(exp_arg_R - max_arg_R))
except:
print("The input data results in overflow in BAR")
return np.nan
log_denom = logsumexp(log_f_R)
# This function must be zeroed to find a root
fzero = log_numer - log_denom
np.seterr(over='warn') # return options to standard settings so we don't disturb other functionality.
return fzero | [
"def",
"BARzero",
"(",
"w_F",
",",
"w_R",
",",
"DeltaF",
")",
":",
"np",
".",
"seterr",
"(",
"over",
"=",
"'raise'",
")",
"# raise exceptions to overflows",
"w_F",
"=",
"np",
".",
"array",
"(",
"w_F",
",",
"np",
".",
"float64",
")",
"w_R",
"=",
"np",... | A function that when zeroed is equivalent to the solution of
the Bennett acceptance ratio.
from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601
D_F = M + w_F - Delta F
D_R = M + w_R - Delta F
we want:
\sum_N_F (1+exp(D_F))^-1 = \sum N_R N_R <(1+exp(-D_R))^-1>
ln \sum N_F (1+exp(D_F))^-1>_F = \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R
ln \sum N_F (1+exp(D_F))^-1>_F - \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R = 0
Parameters
----------
w_F : np.ndarray
w_F[t] is the forward work value from snapshot t.
t = 0...(T_F-1) Length T_F is deduced from vector.
w_R : np.ndarray
w_R[t] is the reverse work value from snapshot t.
t = 0...(T_R-1) Length T_R is deduced from vector.
DeltaF : float
Our current guess
Returns
-------
fzero : float
a variable that is zeroed when DeltaF satisfies BAR.
Examples
--------
Compute free energy difference between two specified samples of work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> DeltaF = BARzero(w_F, w_R, 0.0) | [
"A",
"function",
"that",
"when",
"zeroed",
"is",
"equivalent",
"to",
"the",
"solution",
"of",
"the",
"Bennett",
"acceptance",
"ratio",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/bar.py#L54-L147 | train | 31,057 |
choderalab/pymbar | pymbar/utils.py | kln_to_kn | def kln_to_kn(kln, N_k = None, cleanup = False):
""" Convert KxKxN_max array to KxN max array
if self.N is not initialized, it will be here.
Parameters
----------
u_kln : np.ndarray, float, shape=(KxLxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_kn: np.ndarray, float, shape=(LxN)
"""
#print "warning: KxLxN_max arrays deprecated; convering into new preferred KxN shape"
# rewrite into kn shape
[K, L, N_max] = np.shape(kln)
if N_k is None:
# We assume that all N_k are N_max.
# Not really an easier way to do this without being given the answer.
N_k = N_max * np.ones([L], dtype=np.int64)
N = np.sum(N_k)
kn = np.zeros([L, N], dtype=np.float64)
i = 0
for k in range(K): # loop through the old K; some might be zero
for ik in range(N_k[k]):
kn[:, i] = kln[k, :, ik]
i += 1
if cleanup:
del(kln) # very big, let's explicitly delete
return kn | python | def kln_to_kn(kln, N_k = None, cleanup = False):
""" Convert KxKxN_max array to KxN max array
if self.N is not initialized, it will be here.
Parameters
----------
u_kln : np.ndarray, float, shape=(KxLxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_kn: np.ndarray, float, shape=(LxN)
"""
#print "warning: KxLxN_max arrays deprecated; convering into new preferred KxN shape"
# rewrite into kn shape
[K, L, N_max] = np.shape(kln)
if N_k is None:
# We assume that all N_k are N_max.
# Not really an easier way to do this without being given the answer.
N_k = N_max * np.ones([L], dtype=np.int64)
N = np.sum(N_k)
kn = np.zeros([L, N], dtype=np.float64)
i = 0
for k in range(K): # loop through the old K; some might be zero
for ik in range(N_k[k]):
kn[:, i] = kln[k, :, ik]
i += 1
if cleanup:
del(kln) # very big, let's explicitly delete
return kn | [
"def",
"kln_to_kn",
"(",
"kln",
",",
"N_k",
"=",
"None",
",",
"cleanup",
"=",
"False",
")",
":",
"#print \"warning: KxLxN_max arrays deprecated; convering into new preferred KxN shape\"",
"# rewrite into kn shape",
"[",
"K",
",",
"L",
",",
"N_max",
"]",
"=",
"np",
"... | Convert KxKxN_max array to KxN max array
if self.N is not initialized, it will be here.
Parameters
----------
u_kln : np.ndarray, float, shape=(KxLxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_kn: np.ndarray, float, shape=(LxN) | [
"Convert",
"KxKxN_max",
"array",
"to",
"KxN",
"max",
"array"
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/utils.py#L46-L85 | train | 31,058 |
choderalab/pymbar | pymbar/utils.py | kn_to_n | def kn_to_n(kn, N_k = None, cleanup = False):
""" Convert KxN_max array to N array
Parameters
----------
u_kn: np.ndarray, float, shape=(KxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_n: np.ndarray, float, shape=(N)
"""
#print "warning: KxN arrays deprecated; convering into new preferred N shape"
# rewrite into kn shape
# rewrite into kn shape
[K, N_max] = np.shape(kn)
if N_k is None:
# We assume that all N_k are N_max.
# Not really an easier way to do this without being given the answer.
N_k = N_max*np.ones([K], dtype=np.int64)
N = np.sum(N_k)
n = np.zeros([N], dtype=np.float64)
i = 0
for k in range(K): # loop through the old K; some might be zero
for ik in range(N_k[k]):
n[i] = kn[k, ik]
i += 1
if cleanup:
del(kn) # very big, let's explicitly delete
return n | python | def kn_to_n(kn, N_k = None, cleanup = False):
""" Convert KxN_max array to N array
Parameters
----------
u_kn: np.ndarray, float, shape=(KxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_n: np.ndarray, float, shape=(N)
"""
#print "warning: KxN arrays deprecated; convering into new preferred N shape"
# rewrite into kn shape
# rewrite into kn shape
[K, N_max] = np.shape(kn)
if N_k is None:
# We assume that all N_k are N_max.
# Not really an easier way to do this without being given the answer.
N_k = N_max*np.ones([K], dtype=np.int64)
N = np.sum(N_k)
n = np.zeros([N], dtype=np.float64)
i = 0
for k in range(K): # loop through the old K; some might be zero
for ik in range(N_k[k]):
n[i] = kn[k, ik]
i += 1
if cleanup:
del(kn) # very big, let's explicitly delete
return n | [
"def",
"kn_to_n",
"(",
"kn",
",",
"N_k",
"=",
"None",
",",
"cleanup",
"=",
"False",
")",
":",
"#print \"warning: KxN arrays deprecated; convering into new preferred N shape\"",
"# rewrite into kn shape",
"# rewrite into kn shape",
"[",
"K",
",",
"N_max",
"]",
"=",
"np",... | Convert KxN_max array to N array
Parameters
----------
u_kn: np.ndarray, float, shape=(KxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_n: np.ndarray, float, shape=(N) | [
"Convert",
"KxN_max",
"array",
"to",
"N",
"array"
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/utils.py#L88-L125 | train | 31,059 |
choderalab/pymbar | pymbar/utils.py | ensure_type | def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None,
warn_on_cast=True, add_newaxis_on_deficient_ndim=False):
"""Typecheck the size, shape and dtype of a numpy array, with optional
casting.
Parameters
----------
val : {np.ndaraay, None}
The array to check
dtype : {nd.dtype, str}
The dtype you'd like the array to have
ndim : int
The number of dimensions you'd like the array to have
name : str
name of the array. This is used when throwing exceptions, so that
we can describe to the user which array is messed up.
length : int, optional
How long should the array be?
can_be_none : bool
Is ``val == None`` acceptable?
shape : tuple, optional
What should be shape of the array be? If the provided tuple has
Nones in it, those will be semantically interpreted as matching
any length in that dimension. So, for example, using the shape
spec ``(None, None, 3)`` will ensure that the last dimension is of
length three without constraining the first two dimensions
warn_on_cast : bool, default=True
Raise a warning when the dtypes don't match and a cast is done.
add_newaxis_on_deficient_ndim : bool, default=True
Add a new axis to the beginining of the array if the number of
dimensions is deficient by one compared to your specification. For
instance, if you're trying to get out an array of ``ndim == 3``,
but the user provides an array of ``shape == (10, 10)``, a new axis will
be created with length 1 in front, so that the return value is of
shape ``(1, 10, 10)``.
Notes
-----
The returned value will always be C-contiguous.
Returns
-------
typechecked_val : np.ndarray, None
If `val=None` and `can_be_none=True`, then this will return None.
Otherwise, it will return val (or a copy of val). If the dtype wasn't right,
it'll be casted to the right shape. If the array was not C-contiguous, it'll
be copied as well.
"""
if can_be_none and val is None:
return None
if not isinstance(val, np.ndarray):
# special case: if the user is looking for a 1d array, and
# they request newaxis upconversion, and provided a scalar
# then we should reshape the scalar to be a 1d length-1 array
if add_newaxis_on_deficient_ndim and ndim == 1 and np.isscalar(val):
val = np.array([val])
else:
raise TypeError(("%s must be numpy array. "
" You supplied type %s" % (name, type(val))))
if warn_on_cast and val.dtype != dtype:
warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype),
TypeCastPerformanceWarning)
if not val.ndim == ndim:
if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim:
val = val[np.newaxis, ...]
else:
raise ValueError(("%s must be ndim %s. "
"You supplied %s" % (name, ndim, val.ndim)))
val = np.ascontiguousarray(val, dtype=dtype)
if length is not None and len(val) != length:
raise ValueError(("%s must be length %s. "
"You supplied %s" % (name, length, len(val))))
if shape is not None:
# the shape specified given by the user can look like (None, None 3)
# which indicates that ANY length is accepted in dimension 0 or
# dimension 1
sentenel = object()
error = ValueError(("%s must be shape %s. You supplied "
"%s" % (name, str(shape).replace('None', 'Any'), val.shape)))
for a, b in zip_longest(val.shape, shape, fillvalue=sentenel):
if a is sentenel or b is sentenel:
# if the sentenel was reached, it means that the ndim didn't
# match or something. this really shouldn't happen
raise error
if b is None:
# if the user's shape spec has a None in it, it matches anything
continue
if a != b:
# check for equality
raise error
return val | python | def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None,
warn_on_cast=True, add_newaxis_on_deficient_ndim=False):
"""Typecheck the size, shape and dtype of a numpy array, with optional
casting.
Parameters
----------
val : {np.ndaraay, None}
The array to check
dtype : {nd.dtype, str}
The dtype you'd like the array to have
ndim : int
The number of dimensions you'd like the array to have
name : str
name of the array. This is used when throwing exceptions, so that
we can describe to the user which array is messed up.
length : int, optional
How long should the array be?
can_be_none : bool
Is ``val == None`` acceptable?
shape : tuple, optional
What should be shape of the array be? If the provided tuple has
Nones in it, those will be semantically interpreted as matching
any length in that dimension. So, for example, using the shape
spec ``(None, None, 3)`` will ensure that the last dimension is of
length three without constraining the first two dimensions
warn_on_cast : bool, default=True
Raise a warning when the dtypes don't match and a cast is done.
add_newaxis_on_deficient_ndim : bool, default=True
Add a new axis to the beginining of the array if the number of
dimensions is deficient by one compared to your specification. For
instance, if you're trying to get out an array of ``ndim == 3``,
but the user provides an array of ``shape == (10, 10)``, a new axis will
be created with length 1 in front, so that the return value is of
shape ``(1, 10, 10)``.
Notes
-----
The returned value will always be C-contiguous.
Returns
-------
typechecked_val : np.ndarray, None
If `val=None` and `can_be_none=True`, then this will return None.
Otherwise, it will return val (or a copy of val). If the dtype wasn't right,
it'll be casted to the right shape. If the array was not C-contiguous, it'll
be copied as well.
"""
if can_be_none and val is None:
return None
if not isinstance(val, np.ndarray):
# special case: if the user is looking for a 1d array, and
# they request newaxis upconversion, and provided a scalar
# then we should reshape the scalar to be a 1d length-1 array
if add_newaxis_on_deficient_ndim and ndim == 1 and np.isscalar(val):
val = np.array([val])
else:
raise TypeError(("%s must be numpy array. "
" You supplied type %s" % (name, type(val))))
if warn_on_cast and val.dtype != dtype:
warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype),
TypeCastPerformanceWarning)
if not val.ndim == ndim:
if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim:
val = val[np.newaxis, ...]
else:
raise ValueError(("%s must be ndim %s. "
"You supplied %s" % (name, ndim, val.ndim)))
val = np.ascontiguousarray(val, dtype=dtype)
if length is not None and len(val) != length:
raise ValueError(("%s must be length %s. "
"You supplied %s" % (name, length, len(val))))
if shape is not None:
# the shape specified given by the user can look like (None, None 3)
# which indicates that ANY length is accepted in dimension 0 or
# dimension 1
sentenel = object()
error = ValueError(("%s must be shape %s. You supplied "
"%s" % (name, str(shape).replace('None', 'Any'), val.shape)))
for a, b in zip_longest(val.shape, shape, fillvalue=sentenel):
if a is sentenel or b is sentenel:
# if the sentenel was reached, it means that the ndim didn't
# match or something. this really shouldn't happen
raise error
if b is None:
# if the user's shape spec has a None in it, it matches anything
continue
if a != b:
# check for equality
raise error
return val | [
"def",
"ensure_type",
"(",
"val",
",",
"dtype",
",",
"ndim",
",",
"name",
",",
"length",
"=",
"None",
",",
"can_be_none",
"=",
"False",
",",
"shape",
"=",
"None",
",",
"warn_on_cast",
"=",
"True",
",",
"add_newaxis_on_deficient_ndim",
"=",
"False",
")",
... | Typecheck the size, shape and dtype of a numpy array, with optional
casting.
Parameters
----------
val : {np.ndaraay, None}
The array to check
dtype : {nd.dtype, str}
The dtype you'd like the array to have
ndim : int
The number of dimensions you'd like the array to have
name : str
name of the array. This is used when throwing exceptions, so that
we can describe to the user which array is messed up.
length : int, optional
How long should the array be?
can_be_none : bool
Is ``val == None`` acceptable?
shape : tuple, optional
What should be shape of the array be? If the provided tuple has
Nones in it, those will be semantically interpreted as matching
any length in that dimension. So, for example, using the shape
spec ``(None, None, 3)`` will ensure that the last dimension is of
length three without constraining the first two dimensions
warn_on_cast : bool, default=True
Raise a warning when the dtypes don't match and a cast is done.
add_newaxis_on_deficient_ndim : bool, default=True
Add a new axis to the beginining of the array if the number of
dimensions is deficient by one compared to your specification. For
instance, if you're trying to get out an array of ``ndim == 3``,
but the user provides an array of ``shape == (10, 10)``, a new axis will
be created with length 1 in front, so that the return value is of
shape ``(1, 10, 10)``.
Notes
-----
The returned value will always be C-contiguous.
Returns
-------
typechecked_val : np.ndarray, None
If `val=None` and `can_be_none=True`, then this will return None.
Otherwise, it will return val (or a copy of val). If the dtype wasn't right,
it'll be casted to the right shape. If the array was not C-contiguous, it'll
be copied as well. | [
"Typecheck",
"the",
"size",
"shape",
"and",
"dtype",
"of",
"a",
"numpy",
"array",
"with",
"optional",
"casting",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/utils.py#L128-L226 | train | 31,060 |
choderalab/pymbar | pymbar/utils.py | logsumexp | def logsumexp(a, axis=None, b=None, use_numexpr=True):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int, optional, default=None
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed.
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`.
use_numexpr : bool, optional, default=True
If True, use the numexpr library to speed up the calculation, which
can give a 2-4X speedup when working with large arrays.
Returns
-------
res : ndarray
The result, ``log(sum(exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``log(sum(b*exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2, scipy.misc.logsumexp (soon to be replaced with scipy.special.logsumexp)
Notes
-----
This is based on scipy.misc.logsumexp but with optional numexpr
support for improved performance.
"""
a = np.asarray(a)
a_max = np.amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~np.isfinite(a_max)] = 0
elif not np.isfinite(a_max):
a_max = 0
if b is not None:
b = np.asarray(b)
if use_numexpr and HAVE_NUMEXPR:
out = np.log(numexpr.evaluate("b * exp(a - a_max)").sum(axis))
else:
out = np.log(np.sum(b * np.exp(a - a_max), axis=axis))
else:
if use_numexpr and HAVE_NUMEXPR:
out = np.log(numexpr.evaluate("exp(a - a_max)").sum(axis))
else:
out = np.log(np.sum(np.exp(a - a_max), axis=axis))
a_max = np.squeeze(a_max, axis=axis)
out += a_max
return out | python | def logsumexp(a, axis=None, b=None, use_numexpr=True):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int, optional, default=None
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed.
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`.
use_numexpr : bool, optional, default=True
If True, use the numexpr library to speed up the calculation, which
can give a 2-4X speedup when working with large arrays.
Returns
-------
res : ndarray
The result, ``log(sum(exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``log(sum(b*exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2, scipy.misc.logsumexp (soon to be replaced with scipy.special.logsumexp)
Notes
-----
This is based on scipy.misc.logsumexp but with optional numexpr
support for improved performance.
"""
a = np.asarray(a)
a_max = np.amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~np.isfinite(a_max)] = 0
elif not np.isfinite(a_max):
a_max = 0
if b is not None:
b = np.asarray(b)
if use_numexpr and HAVE_NUMEXPR:
out = np.log(numexpr.evaluate("b * exp(a - a_max)").sum(axis))
else:
out = np.log(np.sum(b * np.exp(a - a_max), axis=axis))
else:
if use_numexpr and HAVE_NUMEXPR:
out = np.log(numexpr.evaluate("exp(a - a_max)").sum(axis))
else:
out = np.log(np.sum(np.exp(a - a_max), axis=axis))
a_max = np.squeeze(a_max, axis=axis)
out += a_max
return out | [
"def",
"logsumexp",
"(",
"a",
",",
"axis",
"=",
"None",
",",
"b",
"=",
"None",
",",
"use_numexpr",
"=",
"True",
")",
":",
"a",
"=",
"np",
".",
"asarray",
"(",
"a",
")",
"a_max",
"=",
"np",
".",
"amax",
"(",
"a",
",",
"axis",
"=",
"axis",
",",... | Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int, optional, default=None
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed.
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`.
use_numexpr : bool, optional, default=True
If True, use the numexpr library to speed up the calculation, which
can give a 2-4X speedup when working with large arrays.
Returns
-------
res : ndarray
The result, ``log(sum(exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``log(sum(b*exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2, scipy.misc.logsumexp (soon to be replaced with scipy.special.logsumexp)
Notes
-----
This is based on scipy.misc.logsumexp but with optional numexpr
support for improved performance. | [
"Compute",
"the",
"log",
"of",
"the",
"sum",
"of",
"exponentials",
"of",
"input",
"elements",
"."
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/utils.py#L271-L329 | train | 31,061 |
choderalab/pymbar | pymbar/utils.py | check_w_normalized | def check_w_normalized(W, N_k, tolerance = 1.0e-4):
"""Check the weight matrix W is properly normalized. The sum over N should be 1, and the sum over k by N_k should aslo be 1
Parameters
----------
W : np.ndarray, shape=(N, K), dtype='float'
The normalized weight matrix for snapshots and states.
W[n, k] is the weight of snapshot n in state k.
N_k : np.ndarray, shape=(K), dtype='int'
N_k[k] is the number of samples from state k.
tolerance : float, optional, default=1.0e-4
Tolerance for checking equality of sums
Returns
-------
None : NoneType
Returns a None object if test passes, otherwise raises a ParameterError with appropriate message if W is not normalized within tolerance.
"""
[N, K] = W.shape
column_sums = np.sum(W, axis=0)
badcolumns = (np.abs(column_sums - 1) > tolerance)
if np.any(badcolumns):
which_badcolumns = np.arange(K)[badcolumns]
firstbad = which_badcolumns[0]
raise ParameterError(
'Warning: Should have \sum_n W_nk = 1. Actual column sum for state %d was %f. %d other columns have similar problems' %
(firstbad, column_sums[firstbad], np.sum(badcolumns)))
row_sums = np.sum(W * N_k, axis=1)
badrows = (np.abs(row_sums - 1) > tolerance)
if np.any(badrows):
which_badrows = np.arange(N)[badrows]
firstbad = which_badrows[0]
raise ParameterError(
'Warning: Should have \sum_k N_k W_nk = 1. Actual row sum for sample %d was %f. %d other rows have similar problems' %
(firstbad, row_sums[firstbad], np.sum(badrows)))
return | python | def check_w_normalized(W, N_k, tolerance = 1.0e-4):
"""Check the weight matrix W is properly normalized. The sum over N should be 1, and the sum over k by N_k should aslo be 1
Parameters
----------
W : np.ndarray, shape=(N, K), dtype='float'
The normalized weight matrix for snapshots and states.
W[n, k] is the weight of snapshot n in state k.
N_k : np.ndarray, shape=(K), dtype='int'
N_k[k] is the number of samples from state k.
tolerance : float, optional, default=1.0e-4
Tolerance for checking equality of sums
Returns
-------
None : NoneType
Returns a None object if test passes, otherwise raises a ParameterError with appropriate message if W is not normalized within tolerance.
"""
[N, K] = W.shape
column_sums = np.sum(W, axis=0)
badcolumns = (np.abs(column_sums - 1) > tolerance)
if np.any(badcolumns):
which_badcolumns = np.arange(K)[badcolumns]
firstbad = which_badcolumns[0]
raise ParameterError(
'Warning: Should have \sum_n W_nk = 1. Actual column sum for state %d was %f. %d other columns have similar problems' %
(firstbad, column_sums[firstbad], np.sum(badcolumns)))
row_sums = np.sum(W * N_k, axis=1)
badrows = (np.abs(row_sums - 1) > tolerance)
if np.any(badrows):
which_badrows = np.arange(N)[badrows]
firstbad = which_badrows[0]
raise ParameterError(
'Warning: Should have \sum_k N_k W_nk = 1. Actual row sum for sample %d was %f. %d other rows have similar problems' %
(firstbad, row_sums[firstbad], np.sum(badrows)))
return | [
"def",
"check_w_normalized",
"(",
"W",
",",
"N_k",
",",
"tolerance",
"=",
"1.0e-4",
")",
":",
"[",
"N",
",",
"K",
"]",
"=",
"W",
".",
"shape",
"column_sums",
"=",
"np",
".",
"sum",
"(",
"W",
",",
"axis",
"=",
"0",
")",
"badcolumns",
"=",
"(",
"... | Check the weight matrix W is properly normalized. The sum over N should be 1, and the sum over k by N_k should aslo be 1
Parameters
----------
W : np.ndarray, shape=(N, K), dtype='float'
The normalized weight matrix for snapshots and states.
W[n, k] is the weight of snapshot n in state k.
N_k : np.ndarray, shape=(K), dtype='int'
N_k[k] is the number of samples from state k.
tolerance : float, optional, default=1.0e-4
Tolerance for checking equality of sums
Returns
-------
None : NoneType
Returns a None object if test passes, otherwise raises a ParameterError with appropriate message if W is not normalized within tolerance. | [
"Check",
"the",
"weight",
"matrix",
"W",
"is",
"properly",
"normalized",
".",
"The",
"sum",
"over",
"N",
"should",
"be",
"1",
"and",
"the",
"sum",
"over",
"k",
"by",
"N_k",
"should",
"aslo",
"be",
"1"
] | 69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740 | https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/utils.py#L332-L371 | train | 31,062 |
phha/click_config_file | click_config_file.py | configuration_callback | def configuration_callback(cmd_name, option_name, config_file_name,
saved_callback, provider, implicit, ctx,
param, value):
"""
Callback for reading the config file.
Also takes care of calling user specified custom callback afterwards.
cmd_name : str
The command name. This is used to determine the configuration directory.
option_name : str
The name of the option. This is used for error messages.
config_file_name : str
The name of the configuration file.
saved_callback: callable
User-specified callback to be called later.
provider : callable
A callable that parses the configuration file and returns a dictionary
of the configuration parameters. Will be called as
`provider(file_path, cmd_name)`. Default: `configobj_provider()`
implicit : bool
Whether a implicit value should be applied if no configuration option
value was provided.
Default: `False`
ctx : object
Click context.
"""
ctx.default_map = ctx.default_map or {}
cmd_name = cmd_name or ctx.info_name
if implicit:
default_value = os.path.join(
click.get_app_dir(cmd_name), config_file_name)
param.default = default_value
value = value or default_value
if value:
try:
config = provider(value, cmd_name)
except Exception as e:
raise click.BadOptionUsage(option_name,
"Error reading configuration file: {}".format(e), ctx)
ctx.default_map.update(config)
return saved_callback(ctx, param, value) if saved_callback else value | python | def configuration_callback(cmd_name, option_name, config_file_name,
saved_callback, provider, implicit, ctx,
param, value):
"""
Callback for reading the config file.
Also takes care of calling user specified custom callback afterwards.
cmd_name : str
The command name. This is used to determine the configuration directory.
option_name : str
The name of the option. This is used for error messages.
config_file_name : str
The name of the configuration file.
saved_callback: callable
User-specified callback to be called later.
provider : callable
A callable that parses the configuration file and returns a dictionary
of the configuration parameters. Will be called as
`provider(file_path, cmd_name)`. Default: `configobj_provider()`
implicit : bool
Whether a implicit value should be applied if no configuration option
value was provided.
Default: `False`
ctx : object
Click context.
"""
ctx.default_map = ctx.default_map or {}
cmd_name = cmd_name or ctx.info_name
if implicit:
default_value = os.path.join(
click.get_app_dir(cmd_name), config_file_name)
param.default = default_value
value = value or default_value
if value:
try:
config = provider(value, cmd_name)
except Exception as e:
raise click.BadOptionUsage(option_name,
"Error reading configuration file: {}".format(e), ctx)
ctx.default_map.update(config)
return saved_callback(ctx, param, value) if saved_callback else value | [
"def",
"configuration_callback",
"(",
"cmd_name",
",",
"option_name",
",",
"config_file_name",
",",
"saved_callback",
",",
"provider",
",",
"implicit",
",",
"ctx",
",",
"param",
",",
"value",
")",
":",
"ctx",
".",
"default_map",
"=",
"ctx",
".",
"default_map",... | Callback for reading the config file.
Also takes care of calling user specified custom callback afterwards.
cmd_name : str
The command name. This is used to determine the configuration directory.
option_name : str
The name of the option. This is used for error messages.
config_file_name : str
The name of the configuration file.
saved_callback: callable
User-specified callback to be called later.
provider : callable
A callable that parses the configuration file and returns a dictionary
of the configuration parameters. Will be called as
`provider(file_path, cmd_name)`. Default: `configobj_provider()`
implicit : bool
Whether a implicit value should be applied if no configuration option
value was provided.
Default: `False`
ctx : object
Click context. | [
"Callback",
"for",
"reading",
"the",
"config",
"file",
"."
] | 1c3b3ec13b41d94683bbb9467d184df6ca3b6fe1 | https://github.com/phha/click_config_file/blob/1c3b3ec13b41d94683bbb9467d184df6ca3b6fe1/click_config_file.py#L51-L95 | train | 31,063 |
phha/click_config_file | click_config_file.py | configuration_option | def configuration_option(*param_decls, **attrs):
"""
Adds configuration file support to a click application.
This will create an option of type `click.File` expecting the path to a
configuration file. When specified, it overwrites the default values for
all other click arguments or options with the corresponding value from the
configuration file.
The default name of the option is `--config`.
By default, the configuration will be read from a configuration directory
as determined by `click.get_app_dir`.
This decorator accepts the same arguments as `click.option` and
`click.Path`. In addition, the following keyword arguments are available:
cmd_name : str
The command name. This is used to determine the configuration
directory. Default: `ctx.info_name`
config_file_name : str
The name of the configuration file. Default: `'config'``
implicit: bool
If 'True' then implicitly create a value for the configuration option
using the above parameters. If a configuration file exists in this
path it will be applied even if no configuration option was suppplied
as a CLI argument or environment variable.
If 'False` only apply a configuration file that has been explicitely
specified.
Default: `False`
provider : callable
A callable that parses the configuration file and returns a dictionary
of the configuration parameters. Will be called as
`provider(file_path, cmd_name)`. Default: `configobj_provider()`
"""
param_decls = param_decls or ('--config', )
option_name = param_decls[0]
def decorator(f):
attrs.setdefault('is_eager', True)
attrs.setdefault('help', 'Read configuration from FILE.')
attrs.setdefault('expose_value', False)
implicit = attrs.pop('implicit', True)
cmd_name = attrs.pop('cmd_name', None)
config_file_name = attrs.pop('config_file_name', 'config')
provider = attrs.pop('provider', configobj_provider())
path_default_params = {
'exists': False,
'file_okay': True,
'dir_okay': False,
'writable': False,
'readable': True,
'resolve_path': False
}
path_params = {
k: attrs.pop(k, v)
for k, v in path_default_params.items()
}
attrs['type'] = click.Path(**path_params)
saved_callback = attrs.pop('callback', None)
partial_callback = functools.partial(
configuration_callback, cmd_name, option_name, config_file_name, saved_callback, provider, implicit)
attrs['callback'] = partial_callback
return click.option(*param_decls, **attrs)(f)
return decorator | python | def configuration_option(*param_decls, **attrs):
"""
Adds configuration file support to a click application.
This will create an option of type `click.File` expecting the path to a
configuration file. When specified, it overwrites the default values for
all other click arguments or options with the corresponding value from the
configuration file.
The default name of the option is `--config`.
By default, the configuration will be read from a configuration directory
as determined by `click.get_app_dir`.
This decorator accepts the same arguments as `click.option` and
`click.Path`. In addition, the following keyword arguments are available:
cmd_name : str
The command name. This is used to determine the configuration
directory. Default: `ctx.info_name`
config_file_name : str
The name of the configuration file. Default: `'config'``
implicit: bool
If 'True' then implicitly create a value for the configuration option
using the above parameters. If a configuration file exists in this
path it will be applied even if no configuration option was suppplied
as a CLI argument or environment variable.
If 'False` only apply a configuration file that has been explicitely
specified.
Default: `False`
provider : callable
A callable that parses the configuration file and returns a dictionary
of the configuration parameters. Will be called as
`provider(file_path, cmd_name)`. Default: `configobj_provider()`
"""
param_decls = param_decls or ('--config', )
option_name = param_decls[0]
def decorator(f):
attrs.setdefault('is_eager', True)
attrs.setdefault('help', 'Read configuration from FILE.')
attrs.setdefault('expose_value', False)
implicit = attrs.pop('implicit', True)
cmd_name = attrs.pop('cmd_name', None)
config_file_name = attrs.pop('config_file_name', 'config')
provider = attrs.pop('provider', configobj_provider())
path_default_params = {
'exists': False,
'file_okay': True,
'dir_okay': False,
'writable': False,
'readable': True,
'resolve_path': False
}
path_params = {
k: attrs.pop(k, v)
for k, v in path_default_params.items()
}
attrs['type'] = click.Path(**path_params)
saved_callback = attrs.pop('callback', None)
partial_callback = functools.partial(
configuration_callback, cmd_name, option_name, config_file_name, saved_callback, provider, implicit)
attrs['callback'] = partial_callback
return click.option(*param_decls, **attrs)(f)
return decorator | [
"def",
"configuration_option",
"(",
"*",
"param_decls",
",",
"*",
"*",
"attrs",
")",
":",
"param_decls",
"=",
"param_decls",
"or",
"(",
"'--config'",
",",
")",
"option_name",
"=",
"param_decls",
"[",
"0",
"]",
"def",
"decorator",
"(",
"f",
")",
":",
"att... | Adds configuration file support to a click application.
This will create an option of type `click.File` expecting the path to a
configuration file. When specified, it overwrites the default values for
all other click arguments or options with the corresponding value from the
configuration file.
The default name of the option is `--config`.
By default, the configuration will be read from a configuration directory
as determined by `click.get_app_dir`.
This decorator accepts the same arguments as `click.option` and
`click.Path`. In addition, the following keyword arguments are available:
cmd_name : str
The command name. This is used to determine the configuration
directory. Default: `ctx.info_name`
config_file_name : str
The name of the configuration file. Default: `'config'``
implicit: bool
If 'True' then implicitly create a value for the configuration option
using the above parameters. If a configuration file exists in this
path it will be applied even if no configuration option was suppplied
as a CLI argument or environment variable.
If 'False` only apply a configuration file that has been explicitely
specified.
Default: `False`
provider : callable
A callable that parses the configuration file and returns a dictionary
of the configuration parameters. Will be called as
`provider(file_path, cmd_name)`. Default: `configobj_provider()` | [
"Adds",
"configuration",
"file",
"support",
"to",
"a",
"click",
"application",
"."
] | 1c3b3ec13b41d94683bbb9467d184df6ca3b6fe1 | https://github.com/phha/click_config_file/blob/1c3b3ec13b41d94683bbb9467d184df6ca3b6fe1/click_config_file.py#L98-L164 | train | 31,064 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/formatting/deserialize/__init__.py | decode_value | def decode_value(stream):
"""Decode the contents of a value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes
"""
length = decode_length(stream)
(value,) = unpack_value(">{:d}s".format(length), stream)
return value | python | def decode_value(stream):
"""Decode the contents of a value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes
"""
length = decode_length(stream)
(value,) = unpack_value(">{:d}s".format(length), stream)
return value | [
"def",
"decode_value",
"(",
"stream",
")",
":",
"length",
"=",
"decode_length",
"(",
"stream",
")",
"(",
"value",
",",
")",
"=",
"unpack_value",
"(",
"\">{:d}s\"",
".",
"format",
"(",
"length",
")",
",",
"stream",
")",
"return",
"value"
] | Decode the contents of a value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes | [
"Decode",
"the",
"contents",
"of",
"a",
"value",
"from",
"a",
"serialized",
"stream",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/formatting/deserialize/__init__.py#L51-L61 | train | 31,065 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/formatting/deserialize/__init__.py | decode_tag | def decode_tag(stream):
"""Decode a tag value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded tag
:rtype: bytes
"""
(reserved, tag) = unpack_value(">cc", stream)
if reserved != b"\x00":
raise DeserializationError("Invalid tag: reserved byte is not null")
return tag | python | def decode_tag(stream):
"""Decode a tag value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded tag
:rtype: bytes
"""
(reserved, tag) = unpack_value(">cc", stream)
if reserved != b"\x00":
raise DeserializationError("Invalid tag: reserved byte is not null")
return tag | [
"def",
"decode_tag",
"(",
"stream",
")",
":",
"(",
"reserved",
",",
"tag",
")",
"=",
"unpack_value",
"(",
"\">cc\"",
",",
"stream",
")",
"if",
"reserved",
"!=",
"b\"\\x00\"",
":",
"raise",
"DeserializationError",
"(",
"\"Invalid tag: reserved byte is not null\"",
... | Decode a tag value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded tag
:rtype: bytes | [
"Decode",
"a",
"tag",
"value",
"from",
"a",
"serialized",
"stream",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/formatting/deserialize/__init__.py#L76-L89 | train | 31,066 |
aws/aws-dynamodb-encryption-python | setup.py | read | def read(*args):
"""Reads complete file contents."""
return io.open(os.path.join(HERE, *args), encoding="utf-8").read() | python | def read(*args):
"""Reads complete file contents."""
return io.open(os.path.join(HERE, *args), encoding="utf-8").read() | [
"def",
"read",
"(",
"*",
"args",
")",
":",
"return",
"io",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"HERE",
",",
"*",
"args",
")",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"read",
"(",
")"
] | Reads complete file contents. | [
"Reads",
"complete",
"file",
"contents",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/setup.py#L12-L14 | train | 31,067 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/crypto/authentication.py | sign_item | def sign_item(encrypted_item, signing_key, crypto_config):
# type: (dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> dynamodb_types.BINARY_ATTRIBUTE
"""Generate the signature DynamoDB atttribute.
:param dict encrypted_item: Encrypted DynamoDB item
:param DelegatedKey signing_key: DelegatedKey to use to calculate the signature
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Item signature DynamoDB attribute value
:rtype: dict
"""
signature = signing_key.sign(
algorithm=signing_key.algorithm,
data=_string_to_sign(
item=encrypted_item,
table_name=crypto_config.encryption_context.table_name,
attribute_actions=crypto_config.attribute_actions,
),
)
return {Tag.BINARY.dynamodb_tag: signature} | python | def sign_item(encrypted_item, signing_key, crypto_config):
# type: (dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> dynamodb_types.BINARY_ATTRIBUTE
"""Generate the signature DynamoDB atttribute.
:param dict encrypted_item: Encrypted DynamoDB item
:param DelegatedKey signing_key: DelegatedKey to use to calculate the signature
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Item signature DynamoDB attribute value
:rtype: dict
"""
signature = signing_key.sign(
algorithm=signing_key.algorithm,
data=_string_to_sign(
item=encrypted_item,
table_name=crypto_config.encryption_context.table_name,
attribute_actions=crypto_config.attribute_actions,
),
)
return {Tag.BINARY.dynamodb_tag: signature} | [
"def",
"sign_item",
"(",
"encrypted_item",
",",
"signing_key",
",",
"crypto_config",
")",
":",
"# type: (dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> dynamodb_types.BINARY_ATTRIBUTE",
"signature",
"=",
"signing_key",
".",
"sign",
"(",
"algorithm",
"=",
"signing_key",
"... | Generate the signature DynamoDB atttribute.
:param dict encrypted_item: Encrypted DynamoDB item
:param DelegatedKey signing_key: DelegatedKey to use to calculate the signature
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Item signature DynamoDB attribute value
:rtype: dict | [
"Generate",
"the",
"signature",
"DynamoDB",
"atttribute",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/authentication.py#L40-L58 | train | 31,068 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/crypto/authentication.py | verify_item_signature | def verify_item_signature(signature_attribute, encrypted_item, verification_key, crypto_config):
# type: (dynamodb_types.BINARY_ATTRIBUTE, dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> None
"""Verify the item signature.
:param dict signature_attribute: Item signature DynamoDB attribute value
:param dict encrypted_item: Encrypted DynamoDB item
:param DelegatedKey verification_key: DelegatedKey to use to calculate the signature
:param CryptoConfig crypto_config: Cryptographic configuration
"""
signature = signature_attribute[Tag.BINARY.dynamodb_tag]
verification_key.verify(
algorithm=verification_key.algorithm,
signature=signature,
data=_string_to_sign(
item=encrypted_item,
table_name=crypto_config.encryption_context.table_name,
attribute_actions=crypto_config.attribute_actions,
),
) | python | def verify_item_signature(signature_attribute, encrypted_item, verification_key, crypto_config):
# type: (dynamodb_types.BINARY_ATTRIBUTE, dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> None
"""Verify the item signature.
:param dict signature_attribute: Item signature DynamoDB attribute value
:param dict encrypted_item: Encrypted DynamoDB item
:param DelegatedKey verification_key: DelegatedKey to use to calculate the signature
:param CryptoConfig crypto_config: Cryptographic configuration
"""
signature = signature_attribute[Tag.BINARY.dynamodb_tag]
verification_key.verify(
algorithm=verification_key.algorithm,
signature=signature,
data=_string_to_sign(
item=encrypted_item,
table_name=crypto_config.encryption_context.table_name,
attribute_actions=crypto_config.attribute_actions,
),
) | [
"def",
"verify_item_signature",
"(",
"signature_attribute",
",",
"encrypted_item",
",",
"verification_key",
",",
"crypto_config",
")",
":",
"# type: (dynamodb_types.BINARY_ATTRIBUTE, dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> None",
"signature",
"=",
"signature_attribute",
"... | Verify the item signature.
:param dict signature_attribute: Item signature DynamoDB attribute value
:param dict encrypted_item: Encrypted DynamoDB item
:param DelegatedKey verification_key: DelegatedKey to use to calculate the signature
:param CryptoConfig crypto_config: Cryptographic configuration | [
"Verify",
"the",
"item",
"signature",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/authentication.py#L61-L79 | train | 31,069 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/crypto/authentication.py | _string_to_sign | def _string_to_sign(item, table_name, attribute_actions):
# type: (dynamodb_types.ITEM, Text, AttributeActions) -> bytes
"""Generate the string to sign from an encrypted item and configuration.
:param dict item: Encrypted DynamoDB item
:param str table_name: Table name to use when generating the string to sign
:param AttributeActions attribute_actions: Actions to take for item
"""
hasher = hashes.Hash(hashes.SHA256(), backend=default_backend())
data_to_sign = bytearray()
data_to_sign.extend(_hash_data(hasher=hasher, data="TABLE>{}<TABLE".format(table_name).encode(TEXT_ENCODING)))
for key in sorted(item.keys()):
action = attribute_actions.action(key)
if action is CryptoAction.DO_NOTHING:
continue
data_to_sign.extend(_hash_data(hasher=hasher, data=key.encode(TEXT_ENCODING)))
if action is CryptoAction.SIGN_ONLY:
data_to_sign.extend(SignatureValues.PLAINTEXT.sha256)
else:
data_to_sign.extend(SignatureValues.ENCRYPTED.sha256)
data_to_sign.extend(_hash_data(hasher=hasher, data=serialize_attribute(item[key])))
return bytes(data_to_sign) | python | def _string_to_sign(item, table_name, attribute_actions):
# type: (dynamodb_types.ITEM, Text, AttributeActions) -> bytes
"""Generate the string to sign from an encrypted item and configuration.
:param dict item: Encrypted DynamoDB item
:param str table_name: Table name to use when generating the string to sign
:param AttributeActions attribute_actions: Actions to take for item
"""
hasher = hashes.Hash(hashes.SHA256(), backend=default_backend())
data_to_sign = bytearray()
data_to_sign.extend(_hash_data(hasher=hasher, data="TABLE>{}<TABLE".format(table_name).encode(TEXT_ENCODING)))
for key in sorted(item.keys()):
action = attribute_actions.action(key)
if action is CryptoAction.DO_NOTHING:
continue
data_to_sign.extend(_hash_data(hasher=hasher, data=key.encode(TEXT_ENCODING)))
if action is CryptoAction.SIGN_ONLY:
data_to_sign.extend(SignatureValues.PLAINTEXT.sha256)
else:
data_to_sign.extend(SignatureValues.ENCRYPTED.sha256)
data_to_sign.extend(_hash_data(hasher=hasher, data=serialize_attribute(item[key])))
return bytes(data_to_sign) | [
"def",
"_string_to_sign",
"(",
"item",
",",
"table_name",
",",
"attribute_actions",
")",
":",
"# type: (dynamodb_types.ITEM, Text, AttributeActions) -> bytes",
"hasher",
"=",
"hashes",
".",
"Hash",
"(",
"hashes",
".",
"SHA256",
"(",
")",
",",
"backend",
"=",
"defaul... | Generate the string to sign from an encrypted item and configuration.
:param dict item: Encrypted DynamoDB item
:param str table_name: Table name to use when generating the string to sign
:param AttributeActions attribute_actions: Actions to take for item | [
"Generate",
"the",
"string",
"to",
"sign",
"from",
"an",
"encrypted",
"item",
"and",
"configuration",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/authentication.py#L82-L106 | train | 31,070 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/crypto/authentication.py | _hash_data | def _hash_data(hasher, data):
"""Generate hash of data using provided hash type.
:param hasher: Hasher instance to use as a base for calculating hash
:type hasher: cryptography.hazmat.primitives.hashes.Hash
:param bytes data: Data to sign
:returns: Hash of data
:rtype: bytes
"""
_hasher = hasher.copy()
_hasher.update(data)
return _hasher.finalize() | python | def _hash_data(hasher, data):
"""Generate hash of data using provided hash type.
:param hasher: Hasher instance to use as a base for calculating hash
:type hasher: cryptography.hazmat.primitives.hashes.Hash
:param bytes data: Data to sign
:returns: Hash of data
:rtype: bytes
"""
_hasher = hasher.copy()
_hasher.update(data)
return _hasher.finalize() | [
"def",
"_hash_data",
"(",
"hasher",
",",
"data",
")",
":",
"_hasher",
"=",
"hasher",
".",
"copy",
"(",
")",
"_hasher",
".",
"update",
"(",
"data",
")",
"return",
"_hasher",
".",
"finalize",
"(",
")"
] | Generate hash of data using provided hash type.
:param hasher: Hasher instance to use as a base for calculating hash
:type hasher: cryptography.hazmat.primitives.hashes.Hash
:param bytes data: Data to sign
:returns: Hash of data
:rtype: bytes | [
"Generate",
"hash",
"of",
"data",
"using",
"provided",
"hash",
"type",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/authentication.py#L109-L120 | train | 31,071 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/formatting/material_description.py | serialize | def serialize(material_description):
# type: (Dict[Text, Text]) -> dynamodb_types.BINARY_ATTRIBUTE
"""Serialize a material description dictionary into a DynamodDB attribute.
:param dict material_description: Material description dictionary
:returns: Serialized material description as a DynamoDB binary attribute value
:rtype: dict
:raises InvalidMaterialDescriptionError: if invalid name or value found in material description
"""
material_description_bytes = bytearray(_MATERIAL_DESCRIPTION_VERSION)
# TODO: verify Java sorting order
for name, value in sorted(material_description.items(), key=lambda x: x[0]):
try:
material_description_bytes.extend(encode_value(to_bytes(name)))
material_description_bytes.extend(encode_value(to_bytes(value)))
except (TypeError, struct.error):
raise InvalidMaterialDescriptionError(
'Invalid name or value in material description: "{name}"="{value}"'.format(name=name, value=value)
)
return {Tag.BINARY.dynamodb_tag: bytes(material_description_bytes)} | python | def serialize(material_description):
# type: (Dict[Text, Text]) -> dynamodb_types.BINARY_ATTRIBUTE
"""Serialize a material description dictionary into a DynamodDB attribute.
:param dict material_description: Material description dictionary
:returns: Serialized material description as a DynamoDB binary attribute value
:rtype: dict
:raises InvalidMaterialDescriptionError: if invalid name or value found in material description
"""
material_description_bytes = bytearray(_MATERIAL_DESCRIPTION_VERSION)
# TODO: verify Java sorting order
for name, value in sorted(material_description.items(), key=lambda x: x[0]):
try:
material_description_bytes.extend(encode_value(to_bytes(name)))
material_description_bytes.extend(encode_value(to_bytes(value)))
except (TypeError, struct.error):
raise InvalidMaterialDescriptionError(
'Invalid name or value in material description: "{name}"="{value}"'.format(name=name, value=value)
)
return {Tag.BINARY.dynamodb_tag: bytes(material_description_bytes)} | [
"def",
"serialize",
"(",
"material_description",
")",
":",
"# type: (Dict[Text, Text]) -> dynamodb_types.BINARY_ATTRIBUTE",
"material_description_bytes",
"=",
"bytearray",
"(",
"_MATERIAL_DESCRIPTION_VERSION",
")",
"# TODO: verify Java sorting order",
"for",
"name",
",",
"value",
... | Serialize a material description dictionary into a DynamodDB attribute.
:param dict material_description: Material description dictionary
:returns: Serialized material description as a DynamoDB binary attribute value
:rtype: dict
:raises InvalidMaterialDescriptionError: if invalid name or value found in material description | [
"Serialize",
"a",
"material",
"description",
"dictionary",
"into",
"a",
"DynamodDB",
"attribute",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/formatting/material_description.py#L44-L65 | train | 31,072 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/formatting/material_description.py | deserialize | def deserialize(serialized_material_description):
# type: (dynamodb_types.BINARY_ATTRIBUTE) -> Dict[Text, Text]
"""Deserialize a serialized material description attribute into a material description dictionary.
:param dict serialized_material_description: DynamoDB attribute value containing serialized material description.
:returns: Material description dictionary
:rtype: dict
:raises InvalidMaterialDescriptionError: if malformed version
:raises InvalidMaterialDescriptionVersionError: if unknown version is found
"""
try:
_raw_material_description = serialized_material_description[Tag.BINARY.dynamodb_tag]
material_description_bytes = io.BytesIO(_raw_material_description)
total_bytes = len(_raw_material_description)
except (TypeError, KeyError):
message = "Invalid material description"
_LOGGER.exception(message)
raise InvalidMaterialDescriptionError(message)
# We don't currently do anything with the version, but do check to make sure it is the one we know about.
_read_version(material_description_bytes)
material_description = {}
try:
while material_description_bytes.tell() < total_bytes:
name = to_str(decode_value(material_description_bytes))
value = to_str(decode_value(material_description_bytes))
material_description[name] = value
except struct.error:
message = "Invalid material description"
_LOGGER.exception(message)
raise InvalidMaterialDescriptionError(message)
return material_description | python | def deserialize(serialized_material_description):
# type: (dynamodb_types.BINARY_ATTRIBUTE) -> Dict[Text, Text]
"""Deserialize a serialized material description attribute into a material description dictionary.
:param dict serialized_material_description: DynamoDB attribute value containing serialized material description.
:returns: Material description dictionary
:rtype: dict
:raises InvalidMaterialDescriptionError: if malformed version
:raises InvalidMaterialDescriptionVersionError: if unknown version is found
"""
try:
_raw_material_description = serialized_material_description[Tag.BINARY.dynamodb_tag]
material_description_bytes = io.BytesIO(_raw_material_description)
total_bytes = len(_raw_material_description)
except (TypeError, KeyError):
message = "Invalid material description"
_LOGGER.exception(message)
raise InvalidMaterialDescriptionError(message)
# We don't currently do anything with the version, but do check to make sure it is the one we know about.
_read_version(material_description_bytes)
material_description = {}
try:
while material_description_bytes.tell() < total_bytes:
name = to_str(decode_value(material_description_bytes))
value = to_str(decode_value(material_description_bytes))
material_description[name] = value
except struct.error:
message = "Invalid material description"
_LOGGER.exception(message)
raise InvalidMaterialDescriptionError(message)
return material_description | [
"def",
"deserialize",
"(",
"serialized_material_description",
")",
":",
"# type: (dynamodb_types.BINARY_ATTRIBUTE) -> Dict[Text, Text]",
"try",
":",
"_raw_material_description",
"=",
"serialized_material_description",
"[",
"Tag",
".",
"BINARY",
".",
"dynamodb_tag",
"]",
"materi... | Deserialize a serialized material description attribute into a material description dictionary.
:param dict serialized_material_description: DynamoDB attribute value containing serialized material description.
:returns: Material description dictionary
:rtype: dict
:raises InvalidMaterialDescriptionError: if malformed version
:raises InvalidMaterialDescriptionVersionError: if unknown version is found | [
"Deserialize",
"a",
"serialized",
"material",
"description",
"attribute",
"into",
"a",
"material",
"description",
"dictionary",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/formatting/material_description.py#L68-L100 | train | 31,073 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/formatting/material_description.py | _read_version | def _read_version(material_description_bytes):
# type: (io.BytesIO) -> None
"""Read the version from the serialized material description and raise an error if it is unknown.
:param material_description_bytes: serializezd material description
:type material_description_bytes: io.BytesIO
:raises InvalidMaterialDescriptionError: if malformed version
:raises InvalidMaterialDescriptionVersionError: if unknown version is found
"""
try:
(version,) = unpack_value(">4s", material_description_bytes)
except struct.error:
message = "Malformed material description version"
_LOGGER.exception(message)
raise InvalidMaterialDescriptionError(message)
if version != _MATERIAL_DESCRIPTION_VERSION:
raise InvalidMaterialDescriptionVersionError("Invalid material description version: {}".format(repr(version))) | python | def _read_version(material_description_bytes):
# type: (io.BytesIO) -> None
"""Read the version from the serialized material description and raise an error if it is unknown.
:param material_description_bytes: serializezd material description
:type material_description_bytes: io.BytesIO
:raises InvalidMaterialDescriptionError: if malformed version
:raises InvalidMaterialDescriptionVersionError: if unknown version is found
"""
try:
(version,) = unpack_value(">4s", material_description_bytes)
except struct.error:
message = "Malformed material description version"
_LOGGER.exception(message)
raise InvalidMaterialDescriptionError(message)
if version != _MATERIAL_DESCRIPTION_VERSION:
raise InvalidMaterialDescriptionVersionError("Invalid material description version: {}".format(repr(version))) | [
"def",
"_read_version",
"(",
"material_description_bytes",
")",
":",
"# type: (io.BytesIO) -> None",
"try",
":",
"(",
"version",
",",
")",
"=",
"unpack_value",
"(",
"\">4s\"",
",",
"material_description_bytes",
")",
"except",
"struct",
".",
"error",
":",
"message",
... | Read the version from the serialized material description and raise an error if it is unknown.
:param material_description_bytes: serializezd material description
:type material_description_bytes: io.BytesIO
:raises InvalidMaterialDescriptionError: if malformed version
:raises InvalidMaterialDescriptionVersionError: if unknown version is found | [
"Read",
"the",
"version",
"from",
"the",
"serialized",
"material",
"description",
"and",
"raise",
"an",
"error",
"if",
"it",
"is",
"unknown",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/formatting/material_description.py#L103-L119 | train | 31,074 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/encrypted/item.py | encrypt_dynamodb_item | def encrypt_dynamodb_item(item, crypto_config):
# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM
"""Encrypt a DynamoDB item.
>>> from dynamodb_encryption_sdk.encrypted.item import encrypt_dynamodb_item
>>> plaintext_item = {
... 'some': {'S': 'data'},
... 'more': {'N': '5'}
... }
>>> encrypted_item = encrypt_dynamodb_item(
... item=plaintext_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client.
:param dict item: Plaintext DynamoDB item
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Encrypted and signed DynamoDB item
:rtype: dict
"""
if crypto_config.attribute_actions.take_no_actions:
# If we explicitly have been told not to do anything to this item, just copy it.
return item.copy()
for reserved_name in ReservedAttributes:
if reserved_name.value in item:
raise EncryptionError(
'Reserved attribute name "{}" is not allowed in plaintext item.'.format(reserved_name.value)
)
encryption_materials = crypto_config.encryption_materials()
inner_material_description = encryption_materials.material_description.copy()
try:
encryption_materials.encryption_key
except AttributeError:
if crypto_config.attribute_actions.contains_action(CryptoAction.ENCRYPT_AND_SIGN):
raise EncryptionError(
"Attribute actions ask for some attributes to be encrypted but no encryption key is available"
)
encrypted_item = item.copy()
else:
# Add the attribute encryption mode to the inner material description
encryption_mode = MaterialDescriptionValues.CBC_PKCS5_ATTRIBUTE_ENCRYPTION.value
inner_material_description[MaterialDescriptionKeys.ATTRIBUTE_ENCRYPTION_MODE.value] = encryption_mode
algorithm_descriptor = encryption_materials.encryption_key.algorithm + encryption_mode
encrypted_item = {}
for name, attribute in item.items():
if crypto_config.attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN:
encrypted_item[name] = encrypt_attribute(
attribute_name=name,
attribute=attribute,
encryption_key=encryption_materials.encryption_key,
algorithm=algorithm_descriptor,
)
else:
encrypted_item[name] = attribute.copy()
signature_attribute = sign_item(encrypted_item, encryption_materials.signing_key, crypto_config)
encrypted_item[ReservedAttributes.SIGNATURE.value] = signature_attribute
try:
# Add the signing key algorithm identifier to the inner material description if provided
inner_material_description[
MaterialDescriptionKeys.SIGNING_KEY_ALGORITHM.value
] = encryption_materials.signing_key.signing_algorithm()
except NotImplementedError:
# Not all signing keys will provide this value
pass
material_description_attribute = serialize_material_description(inner_material_description)
encrypted_item[ReservedAttributes.MATERIAL_DESCRIPTION.value] = material_description_attribute
return encrypted_item | python | def encrypt_dynamodb_item(item, crypto_config):
# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM
"""Encrypt a DynamoDB item.
>>> from dynamodb_encryption_sdk.encrypted.item import encrypt_dynamodb_item
>>> plaintext_item = {
... 'some': {'S': 'data'},
... 'more': {'N': '5'}
... }
>>> encrypted_item = encrypt_dynamodb_item(
... item=plaintext_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client.
:param dict item: Plaintext DynamoDB item
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Encrypted and signed DynamoDB item
:rtype: dict
"""
if crypto_config.attribute_actions.take_no_actions:
# If we explicitly have been told not to do anything to this item, just copy it.
return item.copy()
for reserved_name in ReservedAttributes:
if reserved_name.value in item:
raise EncryptionError(
'Reserved attribute name "{}" is not allowed in plaintext item.'.format(reserved_name.value)
)
encryption_materials = crypto_config.encryption_materials()
inner_material_description = encryption_materials.material_description.copy()
try:
encryption_materials.encryption_key
except AttributeError:
if crypto_config.attribute_actions.contains_action(CryptoAction.ENCRYPT_AND_SIGN):
raise EncryptionError(
"Attribute actions ask for some attributes to be encrypted but no encryption key is available"
)
encrypted_item = item.copy()
else:
# Add the attribute encryption mode to the inner material description
encryption_mode = MaterialDescriptionValues.CBC_PKCS5_ATTRIBUTE_ENCRYPTION.value
inner_material_description[MaterialDescriptionKeys.ATTRIBUTE_ENCRYPTION_MODE.value] = encryption_mode
algorithm_descriptor = encryption_materials.encryption_key.algorithm + encryption_mode
encrypted_item = {}
for name, attribute in item.items():
if crypto_config.attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN:
encrypted_item[name] = encrypt_attribute(
attribute_name=name,
attribute=attribute,
encryption_key=encryption_materials.encryption_key,
algorithm=algorithm_descriptor,
)
else:
encrypted_item[name] = attribute.copy()
signature_attribute = sign_item(encrypted_item, encryption_materials.signing_key, crypto_config)
encrypted_item[ReservedAttributes.SIGNATURE.value] = signature_attribute
try:
# Add the signing key algorithm identifier to the inner material description if provided
inner_material_description[
MaterialDescriptionKeys.SIGNING_KEY_ALGORITHM.value
] = encryption_materials.signing_key.signing_algorithm()
except NotImplementedError:
# Not all signing keys will provide this value
pass
material_description_attribute = serialize_material_description(inner_material_description)
encrypted_item[ReservedAttributes.MATERIAL_DESCRIPTION.value] = material_description_attribute
return encrypted_item | [
"def",
"encrypt_dynamodb_item",
"(",
"item",
",",
"crypto_config",
")",
":",
"# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM",
"if",
"crypto_config",
".",
"attribute_actions",
".",
"take_no_actions",
":",
"# If we explicitly have been told not to do anything to th... | Encrypt a DynamoDB item.
>>> from dynamodb_encryption_sdk.encrypted.item import encrypt_dynamodb_item
>>> plaintext_item = {
... 'some': {'S': 'data'},
... 'more': {'N': '5'}
... }
>>> encrypted_item = encrypt_dynamodb_item(
... item=plaintext_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client.
:param dict item: Plaintext DynamoDB item
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Encrypted and signed DynamoDB item
:rtype: dict | [
"Encrypt",
"a",
"DynamoDB",
"item",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/encrypted/item.py#L40-L119 | train | 31,075 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/encrypted/item.py | encrypt_python_item | def encrypt_python_item(item, crypto_config):
# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM
"""Encrypt a dictionary for DynamoDB.
>>> from dynamodb_encryption_sdk.encrypted.item import encrypt_python_item
>>> plaintext_item = {
... 'some': 'data',
... 'more': 5
... }
>>> encrypted_item = encrypt_python_item(
... item=plaintext_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles human-friendly dictionaries and is for use with the boto3 DynamoDB service or table resource.
:param dict item: Plaintext dictionary
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Encrypted and signed dictionary
:rtype: dict
"""
ddb_item = dict_to_ddb(item)
encrypted_ddb_item = encrypt_dynamodb_item(ddb_item, crypto_config)
return ddb_to_dict(encrypted_ddb_item) | python | def encrypt_python_item(item, crypto_config):
# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM
"""Encrypt a dictionary for DynamoDB.
>>> from dynamodb_encryption_sdk.encrypted.item import encrypt_python_item
>>> plaintext_item = {
... 'some': 'data',
... 'more': 5
... }
>>> encrypted_item = encrypt_python_item(
... item=plaintext_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles human-friendly dictionaries and is for use with the boto3 DynamoDB service or table resource.
:param dict item: Plaintext dictionary
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Encrypted and signed dictionary
:rtype: dict
"""
ddb_item = dict_to_ddb(item)
encrypted_ddb_item = encrypt_dynamodb_item(ddb_item, crypto_config)
return ddb_to_dict(encrypted_ddb_item) | [
"def",
"encrypt_python_item",
"(",
"item",
",",
"crypto_config",
")",
":",
"# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM",
"ddb_item",
"=",
"dict_to_ddb",
"(",
"item",
")",
"encrypted_ddb_item",
"=",
"encrypt_dynamodb_item",
"(",
"ddb_item",
",",
"cryp... | Encrypt a dictionary for DynamoDB.
>>> from dynamodb_encryption_sdk.encrypted.item import encrypt_python_item
>>> plaintext_item = {
... 'some': 'data',
... 'more': 5
... }
>>> encrypted_item = encrypt_python_item(
... item=plaintext_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles human-friendly dictionaries and is for use with the boto3 DynamoDB service or table resource.
:param dict item: Plaintext dictionary
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Encrypted and signed dictionary
:rtype: dict | [
"Encrypt",
"a",
"dictionary",
"for",
"DynamoDB",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/encrypted/item.py#L122-L147 | train | 31,076 |
def decrypt_dynamodb_item(item, crypto_config):
    # type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM
    """Decrypt a DynamoDB item.

    >>> from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item
    >>> encrypted_item = {
    ...     'some': {'B': b'ENCRYPTED_DATA'},
    ...     'more': {'B': b'ENCRYPTED_DATA'}
    ... }
    >>> decrypted_item = decrypt_python_item(
    ...     item=encrypted_item,
    ...     crypto_config=my_crypto_config
    ... )

    .. note::
        This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client.

    :param dict item: Encrypted and signed DynamoDB item
    :param CryptoConfig crypto_config: Cryptographic configuration
    :returns: Plaintext DynamoDB item
    :rtype: dict
    """
    # NOTE: the previous implementation built a ``unique_actions`` set from the
    # attribute actions here and never used it; that dead computation is removed.
    if crypto_config.attribute_actions.take_no_actions:
        # If we explicitly have been told not to do anything to this item, just copy it.
        return item.copy()

    try:
        signature_attribute = item.pop(ReservedAttributes.SIGNATURE.value)
    except KeyError:
        # The signature is always written, so if no signature is found then the item was not
        # encrypted or signed.
        raise DecryptionError("No signature attribute found in item")

    inner_crypto_config = crypto_config.copy()

    # Retrieve the material description from the item if found.
    try:
        material_description_attribute = item.pop(ReservedAttributes.MATERIAL_DESCRIPTION.value)
    except KeyError:
        # If no material description is found, we use inner_crypto_config as-is.
        pass
    else:
        # If material description is found, override the material description in inner_crypto_config.
        material_description = deserialize_material_description(material_description_attribute)
        inner_crypto_config.encryption_context.material_description = material_description

    decryption_materials = inner_crypto_config.decryption_materials()

    verify_item_signature(signature_attribute, item, decryption_materials.verification_key, inner_crypto_config)

    try:
        decryption_key = decryption_materials.decryption_key
    except AttributeError:
        # No decryption key: acceptable only if nothing actually needs decrypting.
        if inner_crypto_config.attribute_actions.contains_action(CryptoAction.ENCRYPT_AND_SIGN):
            raise DecryptionError(
                "Attribute actions ask for some attributes to be decrypted but no decryption key is available"
            )
        return item.copy()

    decryption_mode = inner_crypto_config.encryption_context.material_description.get(
        MaterialDescriptionKeys.ATTRIBUTE_ENCRYPTION_MODE.value
    )
    algorithm_descriptor = decryption_key.algorithm + decryption_mode

    # Once the signature has been verified, actually decrypt the item attributes.
    decrypted_item = {}
    for name, attribute in item.items():
        if inner_crypto_config.attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN:
            decrypted_item[name] = decrypt_attribute(
                attribute_name=name, attribute=attribute, decryption_key=decryption_key, algorithm=algorithm_descriptor
            )
        else:
            decrypted_item[name] = attribute.copy()

    return decrypted_item
# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM
"""Decrypt a DynamoDB item.
>>> from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item
>>> encrypted_item = {
... 'some': {'B': b'ENCRYPTED_DATA'},
... 'more': {'B': b'ENCRYPTED_DATA'}
... }
>>> decrypted_item = decrypt_python_item(
... item=encrypted_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client.
:param dict item: Encrypted and signed DynamoDB item
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Plaintext DynamoDB item
:rtype: dict
"""
unique_actions = set([crypto_config.attribute_actions.default_action.name])
unique_actions.update(set([action.name for action in crypto_config.attribute_actions.attribute_actions.values()]))
if crypto_config.attribute_actions.take_no_actions:
# If we explicitly have been told not to do anything to this item, just copy it.
return item.copy()
try:
signature_attribute = item.pop(ReservedAttributes.SIGNATURE.value)
except KeyError:
# The signature is always written, so if no signature is found then the item was not
# encrypted or signed.
raise DecryptionError("No signature attribute found in item")
inner_crypto_config = crypto_config.copy()
# Retrieve the material description from the item if found.
try:
material_description_attribute = item.pop(ReservedAttributes.MATERIAL_DESCRIPTION.value)
except KeyError:
# If no material description is found, we use inner_crypto_config as-is.
pass
else:
# If material description is found, override the material description in inner_crypto_config.
material_description = deserialize_material_description(material_description_attribute)
inner_crypto_config.encryption_context.material_description = material_description
decryption_materials = inner_crypto_config.decryption_materials()
verify_item_signature(signature_attribute, item, decryption_materials.verification_key, inner_crypto_config)
try:
decryption_key = decryption_materials.decryption_key
except AttributeError:
if inner_crypto_config.attribute_actions.contains_action(CryptoAction.ENCRYPT_AND_SIGN):
raise DecryptionError(
"Attribute actions ask for some attributes to be decrypted but no decryption key is available"
)
return item.copy()
decryption_mode = inner_crypto_config.encryption_context.material_description.get(
MaterialDescriptionKeys.ATTRIBUTE_ENCRYPTION_MODE.value
)
algorithm_descriptor = decryption_key.algorithm + decryption_mode
# Once the signature has been verified, actually decrypt the item attributes.
decrypted_item = {}
for name, attribute in item.items():
if inner_crypto_config.attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN:
decrypted_item[name] = decrypt_attribute(
attribute_name=name, attribute=attribute, decryption_key=decryption_key, algorithm=algorithm_descriptor
)
else:
decrypted_item[name] = attribute.copy()
return decrypted_item | [
"def",
"decrypt_dynamodb_item",
"(",
"item",
",",
"crypto_config",
")",
":",
"# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM",
"unique_actions",
"=",
"set",
"(",
"[",
"crypto_config",
".",
"attribute_actions",
".",
"default_action",
".",
"name",
"]",
"... | Decrypt a DynamoDB item.
>>> from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item
>>> encrypted_item = {
... 'some': {'B': b'ENCRYPTED_DATA'},
... 'more': {'B': b'ENCRYPTED_DATA'}
... }
>>> decrypted_item = decrypt_python_item(
... item=encrypted_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client.
:param dict item: Encrypted and signed DynamoDB item
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Plaintext DynamoDB item
:rtype: dict | [
"Decrypt",
"a",
"DynamoDB",
"item",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/encrypted/item.py#L150-L228 | train | 31,077 |
def decrypt_python_item(item, crypto_config):
    # type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM
    """Decrypt a dictionary for DynamoDB.

    >>> from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item
    >>> encrypted_item = {
    ...     'some': Binary(b'ENCRYPTED_DATA'),
    ...     'more': Binary(b'ENCRYPTED_DATA')
    ... }
    >>> decrypted_item = decrypt_python_item(
    ...     item=encrypted_item,
    ...     crypto_config=my_crypto_config
    ... )

    .. note::
        This handles human-friendly dictionaries and is for use with the boto3 DynamoDB service or table resource.

    :param dict item: Encrypted and signed dictionary
    :param CryptoConfig crypto_config: Cryptographic configuration
    :returns: Plaintext dictionary
    :rtype: dict
    """
    # Convert to the raw DynamoDB wire format, decrypt there, convert back.
    raw_item = dict_to_ddb(item)
    return ddb_to_dict(decrypt_dynamodb_item(raw_item, crypto_config))
# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM
"""Decrypt a dictionary for DynamoDB.
>>> from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item
>>> encrypted_item = {
... 'some': Binary(b'ENCRYPTED_DATA'),
... 'more': Binary(b'ENCRYPTED_DATA')
... }
>>> decrypted_item = decrypt_python_item(
... item=encrypted_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles human-friendly dictionaries and is for use with the boto3 DynamoDB service or table resource.
:param dict item: Encrypted and signed dictionary
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Plaintext dictionary
:rtype: dict
"""
ddb_item = dict_to_ddb(item)
decrypted_ddb_item = decrypt_dynamodb_item(ddb_item, crypto_config)
return ddb_to_dict(decrypted_ddb_item) | [
"def",
"decrypt_python_item",
"(",
"item",
",",
"crypto_config",
")",
":",
"# type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM",
"ddb_item",
"=",
"dict_to_ddb",
"(",
"item",
")",
"decrypted_ddb_item",
"=",
"decrypt_dynamodb_item",
"(",
"ddb_item",
",",
"cryp... | Decrypt a dictionary for DynamoDB.
>>> from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item
>>> encrypted_item = {
... 'some': Binary(b'ENCRYPTED_DATA'),
... 'more': Binary(b'ENCRYPTED_DATA')
... }
>>> decrypted_item = decrypt_python_item(
... item=encrypted_item,
... crypto_config=my_crypto_config
... )
.. note::
This handles human-friendly dictionaries and is for use with the boto3 DynamoDB service or table resource.
:param dict item: Encrypted and signed dictionary
:param CryptoConfig crypto_config: Cryptographic configuration
:returns: Plaintext dictionary
:rtype: dict | [
"Decrypt",
"a",
"dictionary",
"for",
"DynamoDB",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/encrypted/item.py#L231-L256 | train | 31,078 |
def dict_to_ddb(item):
    # type: (Dict[str, Any]) -> Dict[str, Any]
    # TODO: narrow these types down
    """Converts a native Python dictionary to a raw DynamoDB item.

    :param dict item: Native item
    :returns: DynamoDB item
    :rtype: dict
    """
    serialize = TypeSerializer().serialize
    ddb_item = {}
    for attribute_name, attribute_value in item.items():
        ddb_item[attribute_name] = serialize(attribute_value)
    return ddb_item
# type: (Dict[str, Any]) -> Dict[str, Any]
# TODO: narrow these types down
"""Converts a native Python dictionary to a raw DynamoDB item.
:param dict item: Native item
:returns: DynamoDB item
:rtype: dict
"""
serializer = TypeSerializer()
return {key: serializer.serialize(value) for key, value in item.items()} | [
"def",
"dict_to_ddb",
"(",
"item",
")",
":",
"# type: (Dict[str, Any]) -> Dict[str, Any]",
"# TODO: narrow these types down",
"serializer",
"=",
"TypeSerializer",
"(",
")",
"return",
"{",
"key",
":",
"serializer",
".",
"serialize",
"(",
"value",
")",
"for",
"key",
"... | Converts a native Python dictionary to a raw DynamoDB item.
:param dict item: Native item
:returns: DynamoDB item
:rtype: dict | [
"Converts",
"a",
"native",
"Python",
"dictionary",
"to",
"a",
"raw",
"DynamoDB",
"item",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/transform.py#L25-L35 | train | 31,079 |
def ddb_to_dict(item):
    # type: (Dict[str, Any]) -> Dict[str, Any]
    # TODO: narrow these types down
    """Converts a raw DynamoDB item to a native Python dictionary.

    :param dict item: DynamoDB item
    :returns: Native item
    :rtype: dict
    """
    deserialize = TypeDeserializer().deserialize
    native_item = {}
    for attribute_name, attribute_value in item.items():
        native_item[attribute_name] = deserialize(attribute_value)
    return native_item
# type: (Dict[str, Any]) -> Dict[str, Any]
# TODO: narrow these types down
"""Converts a raw DynamoDB item to a native Python dictionary.
:param dict item: DynamoDB item
:returns: Native item
:rtype: dict
"""
deserializer = TypeDeserializer()
return {key: deserializer.deserialize(value) for key, value in item.items()} | [
"def",
"ddb_to_dict",
"(",
"item",
")",
":",
"# type: (Dict[str, Any]) -> Dict[str, Any]",
"# TODO: narrow these types down",
"deserializer",
"=",
"TypeDeserializer",
"(",
")",
"return",
"{",
"key",
":",
"deserializer",
".",
"deserialize",
"(",
"value",
")",
"for",
"k... | Converts a raw DynamoDB item to a native Python dictionary.
:param dict item: DynamoDB item
:returns: Native item
:rtype: dict | [
"Converts",
"a",
"raw",
"DynamoDB",
"item",
"to",
"a",
"native",
"Python",
"dictionary",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/transform.py#L38-L48 | train | 31,080 |
def dictionary_validator(key_type, value_type):
    """Validator for ``attrs`` that performs deep type checking of dictionaries."""

    def _validate_dictionary(instance, attribute, value):
        # pylint: disable=unused-argument
        """Validate that a dictionary is structured as expected.

        :raises TypeError: if ``value`` is not a dictionary
        :raises TypeError: if ``value`` keys are not all of ``key_type`` type
        :raises TypeError: if ``value`` values are not all of ``value_type`` type
        """
        if not isinstance(value, dict):
            raise TypeError('"{}" must be a dictionary'.format(attribute.name))

        # Check every entry: key type first, then value type.
        for entry_key, entry_value in value.items():
            if not isinstance(entry_key, key_type):
                raise TypeError(
                    '"{name}" dictionary keys must be of type "{type}"'.format(name=attribute.name, type=key_type)
                )
            if not isinstance(entry_value, value_type):
                raise TypeError(
                    '"{name}" dictionary values must be of type "{type}"'.format(name=attribute.name, type=value_type)
                )

    return _validate_dictionary
"""Validator for ``attrs`` that performs deep type checking of dictionaries."""
def _validate_dictionary(instance, attribute, value):
# pylint: disable=unused-argument
"""Validate that a dictionary is structured as expected.
:raises TypeError: if ``value`` is not a dictionary
:raises TypeError: if ``value`` keys are not all of ``key_type`` type
:raises TypeError: if ``value`` values are not all of ``value_type`` type
"""
if not isinstance(value, dict):
raise TypeError('"{}" must be a dictionary'.format(attribute.name))
for key, data in value.items():
if not isinstance(key, key_type):
raise TypeError(
'"{name}" dictionary keys must be of type "{type}"'.format(name=attribute.name, type=key_type)
)
if not isinstance(data, value_type):
raise TypeError(
'"{name}" dictionary values must be of type "{type}"'.format(name=attribute.name, type=value_type)
)
return _validate_dictionary | [
"def",
"dictionary_validator",
"(",
"key_type",
",",
"value_type",
")",
":",
"def",
"_validate_dictionary",
"(",
"instance",
",",
"attribute",
",",
"value",
")",
":",
"# pylint: disable=unused-argument",
"\"\"\"Validate that a dictionary is structured as expected.\n\n :r... | Validator for ``attrs`` that performs deep type checking of dictionaries. | [
"Validator",
"for",
"attrs",
"that",
"performs",
"deep",
"type",
"checking",
"of",
"dictionaries",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/validators.py#L23-L48 | train | 31,081 |
def iterable_validator(iterable_type, member_type):
    """Validator for ``attrs`` that performs deep type checking of iterables."""

    def _validate_tuple(instance, attribute, value):
        # pylint: disable=unused-argument
        """Validate that a dictionary is structured as expected.

        :raises TypeError: if ``value`` is not of ``iterable_type`` type
        :raises TypeError: if ``value`` members are not all of ``member_type`` type
        """
        if not isinstance(value, iterable_type):
            raise TypeError('"{name}" must be a {type}'.format(name=attribute.name, type=iterable_type))

        # Every member of the container must match the expected member type.
        for entry in value:
            if not isinstance(entry, member_type):
                raise TypeError(
                    '"{name}" members must all be of type "{type}"'.format(name=attribute.name, type=member_type)
                )

    return _validate_tuple
"""Validator for ``attrs`` that performs deep type checking of iterables."""
def _validate_tuple(instance, attribute, value):
# pylint: disable=unused-argument
"""Validate that a dictionary is structured as expected.
:raises TypeError: if ``value`` is not of ``iterable_type`` type
:raises TypeError: if ``value`` members are not all of ``member_type`` type
"""
if not isinstance(value, iterable_type):
raise TypeError('"{name}" must be a {type}'.format(name=attribute.name, type=iterable_type))
for member in value:
if not isinstance(member, member_type):
raise TypeError(
'"{name}" members must all be of type "{type}"'.format(name=attribute.name, type=member_type)
)
return _validate_tuple | [
"def",
"iterable_validator",
"(",
"iterable_type",
",",
"member_type",
")",
":",
"def",
"_validate_tuple",
"(",
"instance",
",",
"attribute",
",",
"value",
")",
":",
"# pylint: disable=unused-argument",
"\"\"\"Validate that a dictionary is structured as expected.\n\n :ra... | Validator for ``attrs`` that performs deep type checking of iterables. | [
"Validator",
"for",
"attrs",
"that",
"performs",
"deep",
"type",
"checking",
"of",
"iterables",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/validators.py#L51-L70 | train | 31,082 |
def callable_validator(instance, attribute, value):
    # pylint: disable=unused-argument
    """Validate that an attribute value is callable.

    :raises TypeError: if ``value`` is not callable
    """
    if callable(value):
        return
    raise TypeError('"{name}" value "{value}" must be callable'.format(name=attribute.name, value=value))
# pylint: disable=unused-argument
"""Validate that an attribute value is callable.
:raises TypeError: if ``value`` is not callable
"""
if not callable(value):
raise TypeError('"{name}" value "{value}" must be callable'.format(name=attribute.name, value=value)) | [
"def",
"callable_validator",
"(",
"instance",
",",
"attribute",
",",
"value",
")",
":",
"# pylint: disable=unused-argument",
"if",
"not",
"callable",
"(",
"value",
")",
":",
"raise",
"TypeError",
"(",
"'\"{name}\" value \"{value}\" must be callable'",
".",
"format",
"... | Validate that an attribute value is callable.
:raises TypeError: if ``value`` is not callable | [
"Validate",
"that",
"an",
"attribute",
"value",
"is",
"callable",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/validators.py#L73-L80 | train | 31,083 |
def encrypt_attribute(attribute_name, attribute, encryption_key, algorithm):
    # type: (Text, dynamodb_types.RAW_ATTRIBUTE, DelegatedKey, Text) -> dynamodb_types.BINARY_ATTRIBUTE
    """Encrypt a single DynamoDB attribute.

    :param str attribute_name: DynamoDB attribute name
    :param dict attribute: Plaintext DynamoDB attribute
    :param DelegatedKey encryption_key: DelegatedKey to use to encrypt the attribute
    :param str algorithm: Encryption algorithm descriptor (passed to encryption_key as algorithm)
    :returns: Encrypted DynamoDB binary attribute
    :rtype: dict
    """
    # Serialize the attribute to bytes, encrypt it, and wrap it as a binary attribute.
    ciphertext = encryption_key.encrypt(
        algorithm=algorithm, name=attribute_name, plaintext=serialize_attribute(attribute)
    )
    return {Tag.BINARY.dynamodb_tag: ciphertext}
# type: (Text, dynamodb_types.RAW_ATTRIBUTE, DelegatedKey, Text) -> dynamodb_types.BINARY_ATTRIBUTE
"""Encrypt a single DynamoDB attribute.
:param str attribute_name: DynamoDB attribute name
:param dict attribute: Plaintext DynamoDB attribute
:param DelegatedKey encryption_key: DelegatedKey to use to encrypt the attribute
:param str algorithm: Encryption algorithm descriptor (passed to encryption_key as algorithm)
:returns: Encrypted DynamoDB binary attribute
:rtype: dict
"""
serialized_attribute = serialize_attribute(attribute)
encrypted_attribute = encryption_key.encrypt(
algorithm=algorithm, name=attribute_name, plaintext=serialized_attribute
)
return {Tag.BINARY.dynamodb_tag: encrypted_attribute} | [
"def",
"encrypt_attribute",
"(",
"attribute_name",
",",
"attribute",
",",
"encryption_key",
",",
"algorithm",
")",
":",
"# type: (Text, dynamodb_types.RAW_ATTRIBUTE, DelegatedKey, Text) -> dynamodb_types.BINARY_ATTRIBUTE",
"serialized_attribute",
"=",
"serialize_attribute",
"(",
"a... | Encrypt a single DynamoDB attribute.
:param str attribute_name: DynamoDB attribute name
:param dict attribute: Plaintext DynamoDB attribute
:param DelegatedKey encryption_key: DelegatedKey to use to encrypt the attribute
:param str algorithm: Encryption algorithm descriptor (passed to encryption_key as algorithm)
:returns: Encrypted DynamoDB binary attribute
:rtype: dict | [
"Encrypt",
"a",
"single",
"DynamoDB",
"attribute",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/encryption.py#L34-L49 | train | 31,084 |
def decrypt_attribute(attribute_name, attribute, decryption_key, algorithm):
    # type: (Text, dynamodb_types.RAW_ATTRIBUTE, DelegatedKey, Text) -> dynamodb_types.RAW_ATTRIBUTE
    """Decrypt a single DynamoDB attribute.

    :param str attribute_name: DynamoDB attribute name
    :param dict attribute: Encrypted DynamoDB attribute
    :param DelegatedKey encryption_key: DelegatedKey to use to encrypt the attribute
    :param str algorithm: Decryption algorithm descriptor (passed to encryption_key as algorithm)
    :returns: Plaintext DynamoDB attribute
    :rtype: dict
    """
    # Unwrap the binary attribute, decrypt it, and deserialize back to a raw attribute.
    plaintext = decryption_key.decrypt(
        algorithm=algorithm, name=attribute_name, ciphertext=attribute[Tag.BINARY.dynamodb_tag]
    )
    return deserialize_attribute(plaintext)
# type: (Text, dynamodb_types.RAW_ATTRIBUTE, DelegatedKey, Text) -> dynamodb_types.RAW_ATTRIBUTE
"""Decrypt a single DynamoDB attribute.
:param str attribute_name: DynamoDB attribute name
:param dict attribute: Encrypted DynamoDB attribute
:param DelegatedKey encryption_key: DelegatedKey to use to encrypt the attribute
:param str algorithm: Decryption algorithm descriptor (passed to encryption_key as algorithm)
:returns: Plaintext DynamoDB attribute
:rtype: dict
"""
encrypted_attribute = attribute[Tag.BINARY.dynamodb_tag]
decrypted_attribute = decryption_key.decrypt(
algorithm=algorithm, name=attribute_name, ciphertext=encrypted_attribute
)
return deserialize_attribute(decrypted_attribute) | [
"def",
"decrypt_attribute",
"(",
"attribute_name",
",",
"attribute",
",",
"decryption_key",
",",
"algorithm",
")",
":",
"# type: (Text, dynamodb_types.RAW_ATTRIBUTE, DelegatedKey, Text) -> dynamodb_types.RAW_ATTRIBUTE",
"encrypted_attribute",
"=",
"attribute",
"[",
"Tag",
".",
... | Decrypt a single DynamoDB attribute.
:param str attribute_name: DynamoDB attribute name
:param dict attribute: Encrypted DynamoDB attribute
:param DelegatedKey encryption_key: DelegatedKey to use to encrypt the attribute
:param str algorithm: Decryption algorithm descriptor (passed to encryption_key as algorithm)
:returns: Plaintext DynamoDB attribute
:rtype: dict | [
"Decrypt",
"a",
"single",
"DynamoDB",
"attribute",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/encryption.py#L52-L67 | train | 31,085 |
def _generate_rsa_key(key_length):
    """Generate a new RSA private key.

    :param int key_length: Required key length in bits
    :returns: DER-encoded private key, private key identifier, and DER encoding identifier
    :rtype: tuple(bytes, :class:`EncryptionKeyType`, :class:`KeyEncodingType`)
    """
    # Standard public exponent 65537; key material is exported as unencrypted PKCS8 DER.
    key = rsa.generate_private_key(public_exponent=65537, key_size=key_length, backend=default_backend())
    der_bytes = key.private_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
    return der_bytes, EncryptionKeyType.PRIVATE, KeyEncodingType.DER
"""Generate a new RSA private key.
:param int key_length: Required key length in bits
:returns: DER-encoded private key, private key identifier, and DER encoding identifier
:rtype: tuple(bytes, :class:`EncryptionKeyType`, :class:`KeyEncodingType`)
"""
private_key = rsa.generate_private_key(public_exponent=65537, key_size=key_length, backend=default_backend())
key_bytes = private_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
return key_bytes, EncryptionKeyType.PRIVATE, KeyEncodingType.DER | [
"def",
"_generate_rsa_key",
"(",
"key_length",
")",
":",
"private_key",
"=",
"rsa",
".",
"generate_private_key",
"(",
"public_exponent",
"=",
"65537",
",",
"key_size",
"=",
"key_length",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
"key_bytes",
"=",
... | Generate a new RSA private key.
:param int key_length: Required key length in bits
:returns: DER-encoded private key, private key identifier, and DER encoding identifier
:rtype: tuple(bytes, :class:`EncryptionKeyType`, :class:`KeyEncodingType`) | [
"Generate",
"a",
"new",
"RSA",
"private",
"key",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/delegated_keys/jce.py#L52-L65 | train | 31,086 |
def encode_value(value):
    # type: (bytes) -> bytes
    """Encodes the value in Length-Value format.

    :param value: Value to encode
    :type value: six.string_types or :class:`boto3.dynamodb_encryption_sdk.types.Binary`
    :returns: Length-Value encoded value
    :rtype: bytes
    """
    # Big-endian unsigned 4-byte length prefix, followed by the raw value bytes.
    length = len(value)
    prefix = struct.pack(">I", length)
    body = struct.pack("{:d}s".format(length), value)
    return prefix + body
# type: (bytes) -> bytes
"""Encodes the value in Length-Value format.
:param value: Value to encode
:type value: six.string_types or :class:`boto3.dynamodb_encryption_sdk.types.Binary`
:returns: Length-Value encoded value
:rtype: bytes
"""
return struct.pack(">I{attr_len:d}s".format(attr_len=len(value)), len(value), value) | [
"def",
"encode_value",
"(",
"value",
")",
":",
"# type: (bytes) -> bytes",
"return",
"struct",
".",
"pack",
"(",
"\">I{attr_len:d}s\"",
".",
"format",
"(",
"attr_len",
"=",
"len",
"(",
"value",
")",
")",
",",
"len",
"(",
"value",
")",
",",
"value",
")"
] | Encodes the value in Length-Value format.
:param value: Value to encode
:type value: six.string_types or :class:`boto3.dynamodb_encryption_sdk.types.Binary`
:returns: Length-Value encoded value
:rtype: bytes | [
"Encodes",
"the",
"value",
"in",
"Length",
"-",
"Value",
"format",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/formatting/serialize/__init__.py#L41-L50 | train | 31,087 |
def encrypt_item(table_name, aws_cmk_id):
    """Demonstrate use of EncryptedClient to transparently encrypt an item."""
    index_key = {"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "55"}}
    plaintext_item = {
        "example": {"S": "data"},
        "some numbers": {"N": "99"},
        "and some binary": {"B": b"\x00\x01\x02"},
        "leave me": {"S": "alone"},  # We want to ignore this attribute
    }
    # Attribute names that should come back encrypted (checked below).
    names_to_encrypt = set(plaintext_item.keys())
    names_to_encrypt.remove("leave me")
    # Attribute names that should pass through in the clear (checked below).
    names_in_the_clear = set(index_key.keys())
    names_in_the_clear.add("leave me")
    # Add the index pairs to the item.
    plaintext_item.update(index_key)

    # Plain DynamoDB client used both directly and under the encrypted client.
    client = boto3.client("dynamodb")
    # Crypto materials provider backed by the specified AWS KMS key.
    aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)
    # Encrypt and sign everything except the "leave me" attribute.
    actions = AttributeActions(
        default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={"leave me": CryptoAction.DO_NOTHING}
    )
    # Wrap the plain client with transparent encryption/decryption.
    encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)

    # Write the item; the encrypted client encrypts it on the way out.
    encrypted_client.put_item(TableName=table_name, Item=plaintext_item)

    # Read the raw (encrypted) item with the standard client.
    encrypted_item = client.get_item(TableName=table_name, Key=index_key)["Item"]
    # Read the item with the encrypted client, which transparently decrypts it.
    decrypted_item = encrypted_client.get_item(TableName=table_name, Key=index_key)["Item"]

    # Every attribute marked for encryption must differ in the stored item
    # and round-trip back to its plaintext value.
    for name in names_to_encrypt:
        assert encrypted_item[name] != plaintext_item[name]
        assert decrypted_item[name] == plaintext_item[name]

    # Attributes excluded from encryption must be identical everywhere.
    for name in names_in_the_clear:
        assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]

    # Clean up the item
    encrypted_client.delete_item(TableName=table_name, Key=index_key)
"""Demonstrate use of EncryptedClient to transparently encrypt an item."""
index_key = {"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "55"}}
plaintext_item = {
"example": {"S": "data"},
"some numbers": {"N": "99"},
"and some binary": {"B": b"\x00\x01\x02"},
"leave me": {"S": "alone"}, # We want to ignore this attribute
}
# Collect all of the attributes that will be encrypted (used later).
encrypted_attributes = set(plaintext_item.keys())
encrypted_attributes.remove("leave me")
# Collect all of the attributes that will not be encrypted (used later).
unencrypted_attributes = set(index_key.keys())
unencrypted_attributes.add("leave me")
# Add the index pairs to the item.
plaintext_item.update(index_key)
# Create a normal client.
client = boto3.client("dynamodb")
# Create a crypto materials provider using the specified AWS KMS key.
aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)
# Create attribute actions that tells the encrypted client to encrypt all attributes except one.
actions = AttributeActions(
default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={"leave me": CryptoAction.DO_NOTHING}
)
# Use these objects to create an encrypted client.
encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)
# Put the item to the table, using the encrypted client to transparently encrypt it.
encrypted_client.put_item(TableName=table_name, Item=plaintext_item)
# Get the encrypted item using the standard client.
encrypted_item = client.get_item(TableName=table_name, Key=index_key)["Item"]
# Get the item using the encrypted client, transparently decyrpting it.
decrypted_item = encrypted_client.get_item(TableName=table_name, Key=index_key)["Item"]
# Verify that all of the attributes are different in the encrypted item
for name in encrypted_attributes:
assert encrypted_item[name] != plaintext_item[name]
assert decrypted_item[name] == plaintext_item[name]
# Verify that all of the attributes that should not be encrypted were not.
for name in unencrypted_attributes:
assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]
# Clean up the item
encrypted_client.delete_item(TableName=table_name, Key=index_key) | [
"def",
"encrypt_item",
"(",
"table_name",
",",
"aws_cmk_id",
")",
":",
"index_key",
"=",
"{",
"\"partition_attribute\"",
":",
"{",
"\"S\"",
":",
"\"is this\"",
"}",
",",
"\"sort_attribute\"",
":",
"{",
"\"N\"",
":",
"\"55\"",
"}",
"}",
"plaintext_item",
"=",
... | Demonstrate use of EncryptedClient to transparently encrypt an item. | [
"Demonstrate",
"use",
"of",
"EncryptedClient",
"to",
"transparently",
"encrypt",
"an",
"item",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/examples/src/aws_kms_encrypted_client.py#L22-L70 | train | 31,088 |
aws/aws-dynamodb-encryption-python | examples/src/aws_kms_encrypted_client.py | encrypt_batch_items | def encrypt_batch_items(table_name, aws_cmk_id):
"""Demonstrate use of EncryptedClient to transparently encrypt multiple items in a batch request."""
index_keys = [
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "55"}},
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "56"}},
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "57"}},
{"partition_attribute": {"S": "another"}, "sort_attribute": {"N": "55"}},
]
plaintext_additional_attributes = {
"example": {"S": "data"},
"some numbers": {"N": "99"},
"and some binary": {"B": b"\x00\x01\x02"},
"leave me": {"S": "alone"}, # We want to ignore this attribute
}
plaintext_items = []
for key in index_keys:
_attributes = key.copy()
_attributes.update(plaintext_additional_attributes)
plaintext_items.append(_attributes)
# Collect all of the attributes that will be encrypted (used later).
encrypted_attributes = set(plaintext_additional_attributes.keys())
encrypted_attributes.remove("leave me")
# Collect all of the attributes that will not be encrypted (used later).
unencrypted_attributes = set(index_keys[0].keys())
unencrypted_attributes.add("leave me")
# Create a normal client.
client = boto3.client("dynamodb")
# Create a crypto materials provider using the specified AWS KMS key.
aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)
# Create attribute actions that tells the encrypted client to encrypt all attributes except one.
actions = AttributeActions(
default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={"leave me": CryptoAction.DO_NOTHING}
)
# Use these objects to create an encrypted client.
encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)
# Put the items to the table, using the encrypted client to transparently encrypt them.
encrypted_client.batch_write_item(
RequestItems={table_name: [{"PutRequest": {"Item": item}} for item in plaintext_items]}
)
# Get the encrypted item using the standard client.
encrypted_items = client.batch_get_item(RequestItems={table_name: {"Keys": index_keys}})["Responses"][table_name]
# Get the item using the encrypted client, transparently decyrpting it.
decrypted_items = encrypted_client.batch_get_item(RequestItems={table_name: {"Keys": index_keys}})["Responses"][
table_name
]
def _select_index_from_item(item):
"""Find the index keys that match this item."""
for index in index_keys:
if all([item[key] == value for key, value in index.items()]):
return index
raise Exception("Index key not found in item.")
def _select_item_from_index(index, all_items):
"""Find the item that matches these index keys."""
for item in all_items:
if all([item[key] == value for key, value in index.items()]):
return item
raise Exception("Index key not found in item.")
for encrypted_item in encrypted_items:
key = _select_index_from_item(encrypted_item)
plaintext_item = _select_item_from_index(key, plaintext_items)
decrypted_item = _select_item_from_index(key, decrypted_items)
# Verify that all of the attributes are different in the encrypted item
for name in encrypted_attributes:
assert encrypted_item[name] != plaintext_item[name]
assert decrypted_item[name] == plaintext_item[name]
# Verify that all of the attributes that should not be encrypted were not.
for name in unencrypted_attributes:
assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]
# Clean up the item
encrypted_client.batch_write_item(
RequestItems={table_name: [{"DeleteRequest": {"Key": key}} for key in index_keys]}
) | python | def encrypt_batch_items(table_name, aws_cmk_id):
"""Demonstrate use of EncryptedClient to transparently encrypt multiple items in a batch request."""
index_keys = [
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "55"}},
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "56"}},
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "57"}},
{"partition_attribute": {"S": "another"}, "sort_attribute": {"N": "55"}},
]
plaintext_additional_attributes = {
"example": {"S": "data"},
"some numbers": {"N": "99"},
"and some binary": {"B": b"\x00\x01\x02"},
"leave me": {"S": "alone"}, # We want to ignore this attribute
}
plaintext_items = []
for key in index_keys:
_attributes = key.copy()
_attributes.update(plaintext_additional_attributes)
plaintext_items.append(_attributes)
# Collect all of the attributes that will be encrypted (used later).
encrypted_attributes = set(plaintext_additional_attributes.keys())
encrypted_attributes.remove("leave me")
# Collect all of the attributes that will not be encrypted (used later).
unencrypted_attributes = set(index_keys[0].keys())
unencrypted_attributes.add("leave me")
# Create a normal client.
client = boto3.client("dynamodb")
# Create a crypto materials provider using the specified AWS KMS key.
aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)
# Create attribute actions that tells the encrypted client to encrypt all attributes except one.
actions = AttributeActions(
default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={"leave me": CryptoAction.DO_NOTHING}
)
# Use these objects to create an encrypted client.
encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)
# Put the items to the table, using the encrypted client to transparently encrypt them.
encrypted_client.batch_write_item(
RequestItems={table_name: [{"PutRequest": {"Item": item}} for item in plaintext_items]}
)
# Get the encrypted item using the standard client.
encrypted_items = client.batch_get_item(RequestItems={table_name: {"Keys": index_keys}})["Responses"][table_name]
# Get the item using the encrypted client, transparently decyrpting it.
decrypted_items = encrypted_client.batch_get_item(RequestItems={table_name: {"Keys": index_keys}})["Responses"][
table_name
]
def _select_index_from_item(item):
"""Find the index keys that match this item."""
for index in index_keys:
if all([item[key] == value for key, value in index.items()]):
return index
raise Exception("Index key not found in item.")
def _select_item_from_index(index, all_items):
"""Find the item that matches these index keys."""
for item in all_items:
if all([item[key] == value for key, value in index.items()]):
return item
raise Exception("Index key not found in item.")
for encrypted_item in encrypted_items:
key = _select_index_from_item(encrypted_item)
plaintext_item = _select_item_from_index(key, plaintext_items)
decrypted_item = _select_item_from_index(key, decrypted_items)
# Verify that all of the attributes are different in the encrypted item
for name in encrypted_attributes:
assert encrypted_item[name] != plaintext_item[name]
assert decrypted_item[name] == plaintext_item[name]
# Verify that all of the attributes that should not be encrypted were not.
for name in unencrypted_attributes:
assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]
# Clean up the item
encrypted_client.batch_write_item(
RequestItems={table_name: [{"DeleteRequest": {"Key": key}} for key in index_keys]}
) | [
"def",
"encrypt_batch_items",
"(",
"table_name",
",",
"aws_cmk_id",
")",
":",
"index_keys",
"=",
"[",
"{",
"\"partition_attribute\"",
":",
"{",
"\"S\"",
":",
"\"is this\"",
"}",
",",
"\"sort_attribute\"",
":",
"{",
"\"N\"",
":",
"\"55\"",
"}",
"}",
",",
"{",... | Demonstrate use of EncryptedClient to transparently encrypt multiple items in a batch request. | [
"Demonstrate",
"use",
"of",
"EncryptedClient",
"to",
"transparently",
"encrypt",
"multiple",
"items",
"in",
"a",
"batch",
"request",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/examples/src/aws_kms_encrypted_client.py#L73-L157 | train | 31,089 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py | load_rsa_key | def load_rsa_key(key, key_type, key_encoding):
# (bytes, EncryptionKeyType, KeyEncodingType) -> Any
# TODO: narrow down the output type
"""Load an RSA key object from the provided raw key bytes.
:param bytes key: Raw key bytes to load
:param EncryptionKeyType key_type: Type of key to load
:param KeyEncodingType key_encoding: Encoding used to serialize ``key``
:returns: Loaded key
:rtype: TODO:
:raises ValueError: if ``key_type`` and ``key_encoding`` are not a valid pairing
"""
try:
loader = _RSA_KEY_LOADING[key_type][key_encoding]
except KeyError:
raise ValueError("Invalid key type and encoding: {} and {}".format(key_type, key_encoding))
kwargs = dict(data=key, backend=default_backend())
if key_type is EncryptionKeyType.PRIVATE:
kwargs["password"] = None
loaded_key = loader(**kwargs)
if loaded_key.key_size < MinimumKeySizes.RSA.value:
_LOGGER.warning("RSA keys smaller than %d bits are unsafe" % MinimumKeySizes.RSA.value)
return loaded_key | python | def load_rsa_key(key, key_type, key_encoding):
# (bytes, EncryptionKeyType, KeyEncodingType) -> Any
# TODO: narrow down the output type
"""Load an RSA key object from the provided raw key bytes.
:param bytes key: Raw key bytes to load
:param EncryptionKeyType key_type: Type of key to load
:param KeyEncodingType key_encoding: Encoding used to serialize ``key``
:returns: Loaded key
:rtype: TODO:
:raises ValueError: if ``key_type`` and ``key_encoding`` are not a valid pairing
"""
try:
loader = _RSA_KEY_LOADING[key_type][key_encoding]
except KeyError:
raise ValueError("Invalid key type and encoding: {} and {}".format(key_type, key_encoding))
kwargs = dict(data=key, backend=default_backend())
if key_type is EncryptionKeyType.PRIVATE:
kwargs["password"] = None
loaded_key = loader(**kwargs)
if loaded_key.key_size < MinimumKeySizes.RSA.value:
_LOGGER.warning("RSA keys smaller than %d bits are unsafe" % MinimumKeySizes.RSA.value)
return loaded_key | [
"def",
"load_rsa_key",
"(",
"key",
",",
"key_type",
",",
"key_encoding",
")",
":",
"# (bytes, EncryptionKeyType, KeyEncodingType) -> Any",
"# TODO: narrow down the output type",
"try",
":",
"loader",
"=",
"_RSA_KEY_LOADING",
"[",
"key_type",
"]",
"[",
"key_encoding",
"]",... | Load an RSA key object from the provided raw key bytes.
:param bytes key: Raw key bytes to load
:param EncryptionKeyType key_type: Type of key to load
:param KeyEncodingType key_encoding: Encoding used to serialize ``key``
:returns: Loaded key
:rtype: TODO:
:raises ValueError: if ``key_type`` and ``key_encoding`` are not a valid pairing | [
"Load",
"an",
"RSA",
"key",
"object",
"from",
"the",
"provided",
"raw",
"key",
"bytes",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py#L434-L460 | train | 31,090 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py | JavaSymmetricEncryptionAlgorithm._disable_encryption | def _disable_encryption(self):
# () -> None
"""Enable encryption methods for ciphers that support them."""
self.encrypt = self._disabled_encrypt
self.decrypt = self._disabled_decrypt | python | def _disable_encryption(self):
# () -> None
"""Enable encryption methods for ciphers that support them."""
self.encrypt = self._disabled_encrypt
self.decrypt = self._disabled_decrypt | [
"def",
"_disable_encryption",
"(",
"self",
")",
":",
"# () -> None",
"self",
".",
"encrypt",
"=",
"self",
".",
"_disabled_encrypt",
"self",
".",
"decrypt",
"=",
"self",
".",
"_disabled_decrypt"
] | Enable encryption methods for ciphers that support them. | [
"Enable",
"encryption",
"methods",
"for",
"ciphers",
"that",
"support",
"them",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py#L296-L300 | train | 31,091 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py | JavaSymmetricEncryptionAlgorithm.wrap | def wrap(self, wrapping_key, key_to_wrap):
# type: (bytes, bytes) -> bytes
"""Wrap key using AES keywrap.
:param bytes wrapping_key: Loaded key with which to wrap
:param bytes key_to_wrap: Raw key to wrap
:returns: Wrapped key
:rtype: bytes
"""
if self.java_name not in ("AES", "AESWrap"):
raise NotImplementedError('"wrap" is not supported by the "{}" cipher'.format(self.java_name))
try:
return keywrap.aes_key_wrap(wrapping_key=wrapping_key, key_to_wrap=key_to_wrap, backend=default_backend())
except Exception:
error_message = "Key wrap failed"
_LOGGER.exception(error_message)
raise WrappingError(error_message) | python | def wrap(self, wrapping_key, key_to_wrap):
# type: (bytes, bytes) -> bytes
"""Wrap key using AES keywrap.
:param bytes wrapping_key: Loaded key with which to wrap
:param bytes key_to_wrap: Raw key to wrap
:returns: Wrapped key
:rtype: bytes
"""
if self.java_name not in ("AES", "AESWrap"):
raise NotImplementedError('"wrap" is not supported by the "{}" cipher'.format(self.java_name))
try:
return keywrap.aes_key_wrap(wrapping_key=wrapping_key, key_to_wrap=key_to_wrap, backend=default_backend())
except Exception:
error_message = "Key wrap failed"
_LOGGER.exception(error_message)
raise WrappingError(error_message) | [
"def",
"wrap",
"(",
"self",
",",
"wrapping_key",
",",
"key_to_wrap",
")",
":",
"# type: (bytes, bytes) -> bytes",
"if",
"self",
".",
"java_name",
"not",
"in",
"(",
"\"AES\"",
",",
"\"AESWrap\"",
")",
":",
"raise",
"NotImplementedError",
"(",
"'\"wrap\" is not supp... | Wrap key using AES keywrap.
:param bytes wrapping_key: Loaded key with which to wrap
:param bytes key_to_wrap: Raw key to wrap
:returns: Wrapped key
:rtype: bytes | [
"Wrap",
"key",
"using",
"AES",
"keywrap",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py#L330-L347 | train | 31,092 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py | JavaSymmetricEncryptionAlgorithm.unwrap | def unwrap(self, wrapping_key, wrapped_key):
# type: (bytes, bytes) -> bytes
"""Unwrap key using AES keywrap.
:param bytes wrapping_key: Loaded key with which to unwrap
:param bytes wrapped_key: Wrapped key to unwrap
:returns: Unwrapped key
:rtype: bytes
"""
if self.java_name not in ("AES", "AESWrap"):
raise NotImplementedError('"unwrap" is not supported by this cipher')
try:
return keywrap.aes_key_unwrap(wrapping_key=wrapping_key, wrapped_key=wrapped_key, backend=default_backend())
except Exception:
error_message = "Key unwrap failed"
_LOGGER.exception(error_message)
raise UnwrappingError(error_message) | python | def unwrap(self, wrapping_key, wrapped_key):
# type: (bytes, bytes) -> bytes
"""Unwrap key using AES keywrap.
:param bytes wrapping_key: Loaded key with which to unwrap
:param bytes wrapped_key: Wrapped key to unwrap
:returns: Unwrapped key
:rtype: bytes
"""
if self.java_name not in ("AES", "AESWrap"):
raise NotImplementedError('"unwrap" is not supported by this cipher')
try:
return keywrap.aes_key_unwrap(wrapping_key=wrapping_key, wrapped_key=wrapped_key, backend=default_backend())
except Exception:
error_message = "Key unwrap failed"
_LOGGER.exception(error_message)
raise UnwrappingError(error_message) | [
"def",
"unwrap",
"(",
"self",
",",
"wrapping_key",
",",
"wrapped_key",
")",
":",
"# type: (bytes, bytes) -> bytes",
"if",
"self",
".",
"java_name",
"not",
"in",
"(",
"\"AES\"",
",",
"\"AESWrap\"",
")",
":",
"raise",
"NotImplementedError",
"(",
"'\"unwrap\" is not ... | Unwrap key using AES keywrap.
:param bytes wrapping_key: Loaded key with which to unwrap
:param bytes wrapped_key: Wrapped key to unwrap
:returns: Unwrapped key
:rtype: bytes | [
"Unwrap",
"key",
"using",
"AES",
"keywrap",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py#L349-L366 | train | 31,093 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/structures.py | _validate_attribute_values_are_ddb_items | def _validate_attribute_values_are_ddb_items(instance, attribute, value): # pylint: disable=unused-argument
"""Validate that dictionary values in ``value`` match the structure of DynamoDB JSON
items.
.. note::
We are not trying to validate the full structure of the item with this validator.
This is just meant to verify that the values roughly match the correct format.
"""
for data in value.values():
if len(list(data.values())) != 1:
raise TypeError('"{}" values do not look like DynamoDB items'.format(attribute.name)) | python | def _validate_attribute_values_are_ddb_items(instance, attribute, value): # pylint: disable=unused-argument
"""Validate that dictionary values in ``value`` match the structure of DynamoDB JSON
items.
.. note::
We are not trying to validate the full structure of the item with this validator.
This is just meant to verify that the values roughly match the correct format.
"""
for data in value.values():
if len(list(data.values())) != 1:
raise TypeError('"{}" values do not look like DynamoDB items'.format(attribute.name)) | [
"def",
"_validate_attribute_values_are_ddb_items",
"(",
"instance",
",",
"attribute",
",",
"value",
")",
":",
"# pylint: disable=unused-argument",
"for",
"data",
"in",
"value",
".",
"values",
"(",
")",
":",
"if",
"len",
"(",
"list",
"(",
"data",
".",
"values",
... | Validate that dictionary values in ``value`` match the structure of DynamoDB JSON
items.
.. note::
We are not trying to validate the full structure of the item with this validator.
This is just meant to verify that the values roughly match the correct format. | [
"Validate",
"that",
"dictionary",
"values",
"in",
"value",
"match",
"the",
"structure",
"of",
"DynamoDB",
"JSON",
"items",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/structures.py#L35-L46 | train | 31,094 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/utils.py | validate_get_arguments | def validate_get_arguments(kwargs):
# type: (Dict[Text, Any]) -> None
"""Verify that attribute filtering parameters are not found in the request.
:raises InvalidArgumentError: if banned parameters are found
"""
for arg in ("AttributesToGet", "ProjectionExpression"):
if arg in kwargs:
raise InvalidArgumentError('"{}" is not supported for this operation'.format(arg))
if kwargs.get("Select", None) in ("SPECIFIC_ATTRIBUTES", "ALL_PROJECTED_ATTRIBUTES"):
raise InvalidArgumentError('Scan "Select" value of "{}" is not supported'.format(kwargs["Select"])) | python | def validate_get_arguments(kwargs):
# type: (Dict[Text, Any]) -> None
"""Verify that attribute filtering parameters are not found in the request.
:raises InvalidArgumentError: if banned parameters are found
"""
for arg in ("AttributesToGet", "ProjectionExpression"):
if arg in kwargs:
raise InvalidArgumentError('"{}" is not supported for this operation'.format(arg))
if kwargs.get("Select", None) in ("SPECIFIC_ATTRIBUTES", "ALL_PROJECTED_ATTRIBUTES"):
raise InvalidArgumentError('Scan "Select" value of "{}" is not supported'.format(kwargs["Select"])) | [
"def",
"validate_get_arguments",
"(",
"kwargs",
")",
":",
"# type: (Dict[Text, Any]) -> None",
"for",
"arg",
"in",
"(",
"\"AttributesToGet\"",
",",
"\"ProjectionExpression\"",
")",
":",
"if",
"arg",
"in",
"kwargs",
":",
"raise",
"InvalidArgumentError",
"(",
"'\"{}\" i... | Verify that attribute filtering parameters are not found in the request.
:raises InvalidArgumentError: if banned parameters are found | [
"Verify",
"that",
"attribute",
"filtering",
"parameters",
"are",
"not",
"found",
"in",
"the",
"request",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/utils.py#L97-L108 | train | 31,095 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/utils.py | crypto_config_from_kwargs | def crypto_config_from_kwargs(fallback, **kwargs):
"""Pull all encryption-specific parameters from the request and use them to build a crypto config.
:returns: crypto config and updated kwargs
:rtype: dynamodb_encryption_sdk.encrypted.CryptoConfig and dict
"""
try:
crypto_config = kwargs.pop("crypto_config")
except KeyError:
try:
fallback_kwargs = {"table_name": kwargs["TableName"]}
except KeyError:
fallback_kwargs = {}
crypto_config = fallback(**fallback_kwargs)
return crypto_config, kwargs | python | def crypto_config_from_kwargs(fallback, **kwargs):
"""Pull all encryption-specific parameters from the request and use them to build a crypto config.
:returns: crypto config and updated kwargs
:rtype: dynamodb_encryption_sdk.encrypted.CryptoConfig and dict
"""
try:
crypto_config = kwargs.pop("crypto_config")
except KeyError:
try:
fallback_kwargs = {"table_name": kwargs["TableName"]}
except KeyError:
fallback_kwargs = {}
crypto_config = fallback(**fallback_kwargs)
return crypto_config, kwargs | [
"def",
"crypto_config_from_kwargs",
"(",
"fallback",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"crypto_config",
"=",
"kwargs",
".",
"pop",
"(",
"\"crypto_config\"",
")",
"except",
"KeyError",
":",
"try",
":",
"fallback_kwargs",
"=",
"{",
"\"table_name\"",... | Pull all encryption-specific parameters from the request and use them to build a crypto config.
:returns: crypto config and updated kwargs
:rtype: dynamodb_encryption_sdk.encrypted.CryptoConfig and dict | [
"Pull",
"all",
"encryption",
"-",
"specific",
"parameters",
"from",
"the",
"request",
"and",
"use",
"them",
"to",
"build",
"a",
"crypto",
"config",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/utils.py#L111-L125 | train | 31,096 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/utils.py | crypto_config_from_table_info | def crypto_config_from_table_info(materials_provider, attribute_actions, table_info):
"""Build a crypto config from the provided values and table info.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict)
"""
ec_kwargs = table_info.encryption_context_values
if table_info.primary_index is not None:
ec_kwargs.update(
{"partition_key_name": table_info.primary_index.partition, "sort_key_name": table_info.primary_index.sort}
)
return CryptoConfig(
materials_provider=materials_provider,
encryption_context=EncryptionContext(**ec_kwargs),
attribute_actions=attribute_actions,
) | python | def crypto_config_from_table_info(materials_provider, attribute_actions, table_info):
"""Build a crypto config from the provided values and table info.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict)
"""
ec_kwargs = table_info.encryption_context_values
if table_info.primary_index is not None:
ec_kwargs.update(
{"partition_key_name": table_info.primary_index.partition, "sort_key_name": table_info.primary_index.sort}
)
return CryptoConfig(
materials_provider=materials_provider,
encryption_context=EncryptionContext(**ec_kwargs),
attribute_actions=attribute_actions,
) | [
"def",
"crypto_config_from_table_info",
"(",
"materials_provider",
",",
"attribute_actions",
",",
"table_info",
")",
":",
"ec_kwargs",
"=",
"table_info",
".",
"encryption_context_values",
"if",
"table_info",
".",
"primary_index",
"is",
"not",
"None",
":",
"ec_kwargs",
... | Build a crypto config from the provided values and table info.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict) | [
"Build",
"a",
"crypto",
"config",
"from",
"the",
"provided",
"values",
"and",
"table",
"info",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/utils.py#L128-L144 | train | 31,097 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/utils.py | crypto_config_from_cache | def crypto_config_from_cache(materials_provider, attribute_actions, table_info_cache, table_name):
"""Build a crypto config from the provided values, loading the table info from the provided cache.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict)
"""
table_info = table_info_cache.table_info(table_name)
attribute_actions = attribute_actions.copy()
attribute_actions.set_index_keys(*table_info.protected_index_keys())
return crypto_config_from_table_info(materials_provider, attribute_actions, table_info) | python | def crypto_config_from_cache(materials_provider, attribute_actions, table_info_cache, table_name):
"""Build a crypto config from the provided values, loading the table info from the provided cache.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict)
"""
table_info = table_info_cache.table_info(table_name)
attribute_actions = attribute_actions.copy()
attribute_actions.set_index_keys(*table_info.protected_index_keys())
return crypto_config_from_table_info(materials_provider, attribute_actions, table_info) | [
"def",
"crypto_config_from_cache",
"(",
"materials_provider",
",",
"attribute_actions",
",",
"table_info_cache",
",",
"table_name",
")",
":",
"table_info",
"=",
"table_info_cache",
".",
"table_info",
"(",
"table_name",
")",
"attribute_actions",
"=",
"attribute_actions",
... | Build a crypto config from the provided values, loading the table info from the provided cache.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict) | [
"Build",
"a",
"crypto",
"config",
"from",
"the",
"provided",
"values",
"loading",
"the",
"table",
"info",
"from",
"the",
"provided",
"cache",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/utils.py#L147-L158 | train | 31,098 |
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/internal/utils.py | decrypt_multi_get | def decrypt_multi_get(decrypt_method, crypto_config_method, read_method, **kwargs):
# type: (Callable, Callable, Callable, **Any) -> Dict
# TODO: narrow this down
"""Transparently decrypt multiple items after getting them from the table with a scan or query method.
:param callable decrypt_method: Method to use to decrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable read_method: Method that reads from the table
:param **kwargs: Keyword arguments to pass to ``read_method``
:return: DynamoDB response
:rtype: dict
"""
validate_get_arguments(kwargs)
crypto_config, ddb_kwargs = crypto_config_method(**kwargs)
response = read_method(**ddb_kwargs)
for pos in range(len(response["Items"])):
response["Items"][pos] = decrypt_method(
item=response["Items"][pos],
crypto_config=crypto_config.with_item(_item_transformer(decrypt_method)(response["Items"][pos])),
)
return response | python | def decrypt_multi_get(decrypt_method, crypto_config_method, read_method, **kwargs):
# type: (Callable, Callable, Callable, **Any) -> Dict
# TODO: narrow this down
"""Transparently decrypt multiple items after getting them from the table with a scan or query method.
:param callable decrypt_method: Method to use to decrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable read_method: Method that reads from the table
:param **kwargs: Keyword arguments to pass to ``read_method``
:return: DynamoDB response
:rtype: dict
"""
validate_get_arguments(kwargs)
crypto_config, ddb_kwargs = crypto_config_method(**kwargs)
response = read_method(**ddb_kwargs)
for pos in range(len(response["Items"])):
response["Items"][pos] = decrypt_method(
item=response["Items"][pos],
crypto_config=crypto_config.with_item(_item_transformer(decrypt_method)(response["Items"][pos])),
)
return response | [
"def",
"decrypt_multi_get",
"(",
"decrypt_method",
",",
"crypto_config_method",
",",
"read_method",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Callable, Callable, Callable, **Any) -> Dict",
"# TODO: narrow this down",
"validate_get_arguments",
"(",
"kwargs",
")",
"crypto_co... | Transparently decrypt multiple items after getting them from the table with a scan or query method.
:param callable decrypt_method: Method to use to decrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable read_method: Method that reads from the table
:param **kwargs: Keyword arguments to pass to ``read_method``
:return: DynamoDB response
:rtype: dict | [
"Transparently",
"decrypt",
"multiple",
"items",
"after",
"getting",
"them",
"from",
"the",
"table",
"with",
"a",
"scan",
"or",
"query",
"method",
"."
] | 8de3bbe13df39c59b21bf431010f7acfcf629a2f | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/utils.py#L174-L194 | train | 31,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.