repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
danielperna84/pyhomematic | pyhomematic/vccu.py | ServerThread.stop | def stop(self):
"""Shut down our XML-RPC server."""
LOG.info("Shutting down server")
self.server.shutdown()
LOG.debug("ServerThread.stop: Stopping ServerThread")
self.server.server_close()
LOG.info("Server stopped") | python | def stop(self):
"""Shut down our XML-RPC server."""
LOG.info("Shutting down server")
self.server.shutdown()
LOG.debug("ServerThread.stop: Stopping ServerThread")
self.server.server_close()
LOG.info("Server stopped") | [
"def",
"stop",
"(",
"self",
")",
":",
"LOG",
".",
"info",
"(",
"\"Shutting down server\"",
")",
"self",
".",
"server",
".",
"shutdown",
"(",
")",
"LOG",
".",
"debug",
"(",
"\"ServerThread.stop: Stopping ServerThread\"",
")",
"self",
".",
"server",
".",
"serv... | Shut down our XML-RPC server. | [
"Shut",
"down",
"our",
"XML",
"-",
"RPC",
"server",
"."
] | 8b91f3e84c83f05d289c740d507293a0d6759d8e | https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/vccu.py#L132-L138 | train | 213,300 |
seomoz/shovel | shovel/help.py | heirarchical_help | def heirarchical_help(shovel, prefix):
'''Given a shovel of tasks, display a heirarchical list of the tasks'''
result = []
tuples = heirarchical_helper(shovel, prefix)
if not tuples:
return ''
# We need to figure out the longest fullname length
longest = max(len(name + ' ' * level) for name, _, level in tuples)
fmt = '%%%is => %%-50s' % longest
for name, docstring, level in tuples:
if docstring == None:
result.append(' ' * level + name + '/')
else:
docstring = re.sub(r'\s+', ' ', docstring).strip()
if len(docstring) > 50:
docstring = docstring[:47] + '...'
result.append(fmt % (name, docstring))
return '\n'.join(result) | python | def heirarchical_help(shovel, prefix):
'''Given a shovel of tasks, display a heirarchical list of the tasks'''
result = []
tuples = heirarchical_helper(shovel, prefix)
if not tuples:
return ''
# We need to figure out the longest fullname length
longest = max(len(name + ' ' * level) for name, _, level in tuples)
fmt = '%%%is => %%-50s' % longest
for name, docstring, level in tuples:
if docstring == None:
result.append(' ' * level + name + '/')
else:
docstring = re.sub(r'\s+', ' ', docstring).strip()
if len(docstring) > 50:
docstring = docstring[:47] + '...'
result.append(fmt % (name, docstring))
return '\n'.join(result) | [
"def",
"heirarchical_help",
"(",
"shovel",
",",
"prefix",
")",
":",
"result",
"=",
"[",
"]",
"tuples",
"=",
"heirarchical_helper",
"(",
"shovel",
",",
"prefix",
")",
"if",
"not",
"tuples",
":",
"return",
"''",
"# We need to figure out the longest fullname length",... | Given a shovel of tasks, display a heirarchical list of the tasks | [
"Given",
"a",
"shovel",
"of",
"tasks",
"display",
"a",
"heirarchical",
"list",
"of",
"the",
"tasks"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/help.py#L44-L62 | train | 213,301 |
seomoz/shovel | shovel/help.py | shovel_help | def shovel_help(shovel, *names):
'''Return a string about help with the tasks, or lists tasks available'''
# If names are provided, and the name refers to a group of tasks, print out
# the tasks and a brief docstring. Otherwise, just enumerate all the tasks
# available
if not len(names):
return heirarchical_help(shovel, '')
else:
for name in names:
task = shovel[name]
if isinstance(task, Shovel):
return heirarchical_help(task, name)
else:
return task.help() | python | def shovel_help(shovel, *names):
'''Return a string about help with the tasks, or lists tasks available'''
# If names are provided, and the name refers to a group of tasks, print out
# the tasks and a brief docstring. Otherwise, just enumerate all the tasks
# available
if not len(names):
return heirarchical_help(shovel, '')
else:
for name in names:
task = shovel[name]
if isinstance(task, Shovel):
return heirarchical_help(task, name)
else:
return task.help() | [
"def",
"shovel_help",
"(",
"shovel",
",",
"*",
"names",
")",
":",
"# If names are provided, and the name refers to a group of tasks, print out",
"# the tasks and a brief docstring. Otherwise, just enumerate all the tasks",
"# available",
"if",
"not",
"len",
"(",
"names",
")",
":"... | Return a string about help with the tasks, or lists tasks available | [
"Return",
"a",
"string",
"about",
"help",
"with",
"the",
"tasks",
"or",
"lists",
"tasks",
"available"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/help.py#L65-L78 | train | 213,302 |
seomoz/shovel | shovel/tasks.py | Shovel.load | def load(cls, path, base=None):
'''Either load a path and return a shovel object or return None'''
obj = cls()
obj.read(path, base)
return obj | python | def load(cls, path, base=None):
'''Either load a path and return a shovel object or return None'''
obj = cls()
obj.read(path, base)
return obj | [
"def",
"load",
"(",
"cls",
",",
"path",
",",
"base",
"=",
"None",
")",
":",
"obj",
"=",
"cls",
"(",
")",
"obj",
".",
"read",
"(",
"path",
",",
"base",
")",
"return",
"obj"
] | Either load a path and return a shovel object or return None | [
"Either",
"load",
"a",
"path",
"and",
"return",
"a",
"shovel",
"object",
"or",
"return",
"None"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L44-L48 | train | 213,303 |
seomoz/shovel | shovel/tasks.py | Shovel.extend | def extend(self, tasks):
'''Add tasks to this particular shovel'''
self._tasks.extend(tasks)
for task in tasks:
# We'll now go through all of our tasks and group them into
# sub-shovels
current = self.map
modules = task.fullname.split('.')
for module in modules[:-1]:
if not isinstance(current[module], Shovel):
logger.warn('Overriding task %s with a module' %
current[module].file)
shovel = Shovel()
shovel.overrides = current[module]
current[module] = shovel
current = current[module].map
# Now we'll put the task in this particular sub-shovel
name = modules[-1]
if name in current:
logger.warn('Overriding %s with %s' % (
'.'.join(modules), task.file))
task.overrides = current[name]
current[name] = task | python | def extend(self, tasks):
'''Add tasks to this particular shovel'''
self._tasks.extend(tasks)
for task in tasks:
# We'll now go through all of our tasks and group them into
# sub-shovels
current = self.map
modules = task.fullname.split('.')
for module in modules[:-1]:
if not isinstance(current[module], Shovel):
logger.warn('Overriding task %s with a module' %
current[module].file)
shovel = Shovel()
shovel.overrides = current[module]
current[module] = shovel
current = current[module].map
# Now we'll put the task in this particular sub-shovel
name = modules[-1]
if name in current:
logger.warn('Overriding %s with %s' % (
'.'.join(modules), task.file))
task.overrides = current[name]
current[name] = task | [
"def",
"extend",
"(",
"self",
",",
"tasks",
")",
":",
"self",
".",
"_tasks",
".",
"extend",
"(",
"tasks",
")",
"for",
"task",
"in",
"tasks",
":",
"# We'll now go through all of our tasks and group them into",
"# sub-shovels",
"current",
"=",
"self",
".",
"map",
... | Add tasks to this particular shovel | [
"Add",
"tasks",
"to",
"this",
"particular",
"shovel"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L56-L79 | train | 213,304 |
seomoz/shovel | shovel/tasks.py | Shovel.read | def read(self, path, base=None):
'''Import some tasks'''
if base == None:
base = os.getcwd()
absolute = os.path.abspath(path)
if os.path.isfile(absolute):
# Load that particular file
logger.info('Loading %s' % absolute)
self.extend(Task.load(path, base))
elif os.path.isdir(absolute):
# Walk this directory looking for tasks
tasks = []
for root, _, files in os.walk(absolute):
files = [f for f in files if f.endswith('.py')]
for child in files:
absolute = os.path.join(root, child)
logger.info('Loading %s' % absolute)
tasks.extend(Task.load(absolute, base))
self.extend(tasks) | python | def read(self, path, base=None):
'''Import some tasks'''
if base == None:
base = os.getcwd()
absolute = os.path.abspath(path)
if os.path.isfile(absolute):
# Load that particular file
logger.info('Loading %s' % absolute)
self.extend(Task.load(path, base))
elif os.path.isdir(absolute):
# Walk this directory looking for tasks
tasks = []
for root, _, files in os.walk(absolute):
files = [f for f in files if f.endswith('.py')]
for child in files:
absolute = os.path.join(root, child)
logger.info('Loading %s' % absolute)
tasks.extend(Task.load(absolute, base))
self.extend(tasks) | [
"def",
"read",
"(",
"self",
",",
"path",
",",
"base",
"=",
"None",
")",
":",
"if",
"base",
"==",
"None",
":",
"base",
"=",
"os",
".",
"getcwd",
"(",
")",
"absolute",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"if",
"os",
".",
"... | Import some tasks | [
"Import",
"some",
"tasks"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L81-L99 | train | 213,305 |
seomoz/shovel | shovel/tasks.py | Shovel.keys | def keys(self):
'''Return all valid keys'''
keys = []
for key, value in self.map.items():
if isinstance(value, Shovel):
keys.extend([key + '.' + k for k in value.keys()])
else:
keys.append(key)
return sorted(keys) | python | def keys(self):
'''Return all valid keys'''
keys = []
for key, value in self.map.items():
if isinstance(value, Shovel):
keys.extend([key + '.' + k for k in value.keys()])
else:
keys.append(key)
return sorted(keys) | [
"def",
"keys",
"(",
"self",
")",
":",
"keys",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"self",
".",
"map",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Shovel",
")",
":",
"keys",
".",
"extend",
"(",
"[",
"key",
... | Return all valid keys | [
"Return",
"all",
"valid",
"keys"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L119-L127 | train | 213,306 |
seomoz/shovel | shovel/tasks.py | Shovel.items | def items(self):
'''Return a list of tuples of all the keys and tasks'''
pairs = []
for key, value in self.map.items():
if isinstance(value, Shovel):
pairs.extend([(key + '.' + k, v) for k, v in value.items()])
else:
pairs.append((key, value))
return sorted(pairs) | python | def items(self):
'''Return a list of tuples of all the keys and tasks'''
pairs = []
for key, value in self.map.items():
if isinstance(value, Shovel):
pairs.extend([(key + '.' + k, v) for k, v in value.items()])
else:
pairs.append((key, value))
return sorted(pairs) | [
"def",
"items",
"(",
"self",
")",
":",
"pairs",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"self",
".",
"map",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Shovel",
")",
":",
"pairs",
".",
"extend",
"(",
"[",
"(",
... | Return a list of tuples of all the keys and tasks | [
"Return",
"a",
"list",
"of",
"tuples",
"of",
"all",
"the",
"keys",
"and",
"tasks"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L129-L137 | train | 213,307 |
seomoz/shovel | shovel/tasks.py | Shovel.tasks | def tasks(self, name):
'''Get all the tasks that match a name'''
found = self[name]
if isinstance(found, Shovel):
return [v for _, v in found.items()]
return [found] | python | def tasks(self, name):
'''Get all the tasks that match a name'''
found = self[name]
if isinstance(found, Shovel):
return [v for _, v in found.items()]
return [found] | [
"def",
"tasks",
"(",
"self",
",",
"name",
")",
":",
"found",
"=",
"self",
"[",
"name",
"]",
"if",
"isinstance",
"(",
"found",
",",
"Shovel",
")",
":",
"return",
"[",
"v",
"for",
"_",
",",
"v",
"in",
"found",
".",
"items",
"(",
")",
"]",
"return... | Get all the tasks that match a name | [
"Get",
"all",
"the",
"tasks",
"that",
"match",
"a",
"name"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L139-L144 | train | 213,308 |
seomoz/shovel | shovel/tasks.py | Task.make | def make(cls, obj):
'''Given a callable object, return a new callable object'''
try:
cls._cache.append(Task(obj))
except Exception:
logger.exception('Unable to make task for %s' % repr(obj)) | python | def make(cls, obj):
'''Given a callable object, return a new callable object'''
try:
cls._cache.append(Task(obj))
except Exception:
logger.exception('Unable to make task for %s' % repr(obj)) | [
"def",
"make",
"(",
"cls",
",",
"obj",
")",
":",
"try",
":",
"cls",
".",
"_cache",
".",
"append",
"(",
"Task",
"(",
"obj",
")",
")",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"'Unable to make task for %s'",
"%",
"repr",
"(",
"obj",
... | Given a callable object, return a new callable object | [
"Given",
"a",
"callable",
"object",
"return",
"a",
"new",
"callable",
"object"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L165-L170 | train | 213,309 |
seomoz/shovel | shovel/tasks.py | Task.load | def load(cls, path, base=None):
'''Return a list of the tasks stored in a file'''
base = base or os.getcwd()
absolute = os.path.abspath(path)
parent = os.path.dirname(absolute)
name, _, _ = os.path.basename(absolute).rpartition('.py')
fobj, path, description = imp.find_module(name, [parent])
try:
imp.load_module(name, fobj, path, description)
finally:
if fobj:
fobj.close()
# Manipulate the full names of the tasks to be relative to the provided
# base
relative, _, _ = os.path.relpath(path, base).rpartition('.py')
for task in cls._cache:
parts = relative.split(os.path.sep)
parts.append(task.name)
# If it's either in shovel.py, or folder/__init__.py, then we
# should consider it as being at one level above that file
parts = [part.strip('.') for part in parts if part not in
('shovel', '.shovel', '__init__', '.', '..', '')]
task.fullname = '.'.join(parts)
logger.debug('Found task %s in %s' % (task.fullname, task.module))
return cls.clear() | python | def load(cls, path, base=None):
'''Return a list of the tasks stored in a file'''
base = base or os.getcwd()
absolute = os.path.abspath(path)
parent = os.path.dirname(absolute)
name, _, _ = os.path.basename(absolute).rpartition('.py')
fobj, path, description = imp.find_module(name, [parent])
try:
imp.load_module(name, fobj, path, description)
finally:
if fobj:
fobj.close()
# Manipulate the full names of the tasks to be relative to the provided
# base
relative, _, _ = os.path.relpath(path, base).rpartition('.py')
for task in cls._cache:
parts = relative.split(os.path.sep)
parts.append(task.name)
# If it's either in shovel.py, or folder/__init__.py, then we
# should consider it as being at one level above that file
parts = [part.strip('.') for part in parts if part not in
('shovel', '.shovel', '__init__', '.', '..', '')]
task.fullname = '.'.join(parts)
logger.debug('Found task %s in %s' % (task.fullname, task.module))
return cls.clear() | [
"def",
"load",
"(",
"cls",
",",
"path",
",",
"base",
"=",
"None",
")",
":",
"base",
"=",
"base",
"or",
"os",
".",
"getcwd",
"(",
")",
"absolute",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"parent",
"=",
"os",
".",
"path",
".",
... | Return a list of the tasks stored in a file | [
"Return",
"a",
"list",
"of",
"the",
"tasks",
"stored",
"in",
"a",
"file"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L173-L197 | train | 213,310 |
seomoz/shovel | shovel/tasks.py | Task.capture | def capture(self, *args, **kwargs):
'''Run a task and return a dictionary with stderr, stdout and the
return value. Also, the traceback from the exception if there was
one'''
import traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = out = StringIO()
sys.stderr = err = StringIO()
result = {
'exception': None,
'stderr': None,
'stdout': None,
'return': None
}
try:
result['return'] = self.__call__(*args, **kwargs)
except Exception:
result['exception'] = traceback.format_exc()
sys.stdout, sys.stderr = stdout, stderr
result['stderr'] = err.getvalue()
result['stdout'] = out.getvalue()
return result | python | def capture(self, *args, **kwargs):
'''Run a task and return a dictionary with stderr, stdout and the
return value. Also, the traceback from the exception if there was
one'''
import traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = out = StringIO()
sys.stderr = err = StringIO()
result = {
'exception': None,
'stderr': None,
'stdout': None,
'return': None
}
try:
result['return'] = self.__call__(*args, **kwargs)
except Exception:
result['exception'] = traceback.format_exc()
sys.stdout, sys.stderr = stdout, stderr
result['stderr'] = err.getvalue()
result['stdout'] = out.getvalue()
return result | [
"def",
"capture",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"traceback",
"try",
":",
"from",
"StringIO",
"import",
"StringIO",
"except",
"ImportError",
":",
"from",
"io",
"import",
"StringIO",
"stdout",
",",
"stderr",
"=... | Run a task and return a dictionary with stderr, stdout and the
return value. Also, the traceback from the exception if there was
one | [
"Run",
"a",
"task",
"and",
"return",
"a",
"dictionary",
"with",
"stderr",
"stdout",
"and",
"the",
"return",
"value",
".",
"Also",
"the",
"traceback",
"from",
"the",
"exception",
"if",
"there",
"was",
"one"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L248-L273 | train | 213,311 |
seomoz/shovel | shovel/tasks.py | Task.dry | def dry(self, *args, **kwargs):
'''Perform a dry-run of the task'''
return 'Would have executed:\n%s%s' % (
self.name, Args(self.spec).explain(*args, **kwargs)) | python | def dry(self, *args, **kwargs):
'''Perform a dry-run of the task'''
return 'Would have executed:\n%s%s' % (
self.name, Args(self.spec).explain(*args, **kwargs)) | [
"def",
"dry",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"'Would have executed:\\n%s%s'",
"%",
"(",
"self",
".",
"name",
",",
"Args",
"(",
"self",
".",
"spec",
")",
".",
"explain",
"(",
"*",
"args",
",",
"*",
"*",
... | Perform a dry-run of the task | [
"Perform",
"a",
"dry",
"-",
"run",
"of",
"the",
"task"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L275-L278 | train | 213,312 |
seomoz/shovel | shovel/tasks.py | Task.help | def help(self):
'''Return the help string of the task'''
# This returns a help string for a given task of the form:
#
# ==================================================
# <name>
# ============================== (If supplied)
# <docstring>
# ============================== (If overrides other tasks)
# Overrides <other task file>
# ==============================
# From <file> on <line>
# ==============================
# <name>(Argspec)
result = [
'=' * 50,
self.name
]
# And the doc, if it exists
if self.doc:
result.extend([
'=' * 30,
self.doc
])
override = self.overrides
while override:
if isinstance(override, Shovel):
result.append('Overrides module')
else:
result.append('Overrides %s' % override.file)
override = override.overrides
# Print where we read this function in from
result.extend([
'=' * 30,
'From %s on line %i' % (self.file, self.line),
'=' * 30,
'%s%s' % (self.name, str(Args(self.spec)))
])
return os.linesep.join(result) | python | def help(self):
'''Return the help string of the task'''
# This returns a help string for a given task of the form:
#
# ==================================================
# <name>
# ============================== (If supplied)
# <docstring>
# ============================== (If overrides other tasks)
# Overrides <other task file>
# ==============================
# From <file> on <line>
# ==============================
# <name>(Argspec)
result = [
'=' * 50,
self.name
]
# And the doc, if it exists
if self.doc:
result.extend([
'=' * 30,
self.doc
])
override = self.overrides
while override:
if isinstance(override, Shovel):
result.append('Overrides module')
else:
result.append('Overrides %s' % override.file)
override = override.overrides
# Print where we read this function in from
result.extend([
'=' * 30,
'From %s on line %i' % (self.file, self.line),
'=' * 30,
'%s%s' % (self.name, str(Args(self.spec)))
])
return os.linesep.join(result) | [
"def",
"help",
"(",
"self",
")",
":",
"# This returns a help string for a given task of the form:",
"#",
"# ==================================================",
"# <name>",
"# ============================== (If supplied)",
"# <docstring>",
"# ============================== (If overrides othe... | Return the help string of the task | [
"Return",
"the",
"help",
"string",
"of",
"the",
"task"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L280-L321 | train | 213,313 |
seomoz/shovel | shovel/args.py | Args.explain | def explain(self, *args, **kwargs):
'''Return a string that describes how these args are interpreted'''
args = self.get(*args, **kwargs)
results = ['%s = %s' % (name, value) for name, value in args.required]
results.extend(['%s = %s (overridden)' % (
name, value) for name, value in args.overridden])
results.extend(['%s = %s (default)' % (
name, value) for name, value in args.defaulted])
if self._varargs:
results.append('%s = %s' % (self._varargs, args.varargs))
if self._kwargs:
results.append('%s = %s' % (self._kwargs, args.kwargs))
return '\n\t'.join(results) | python | def explain(self, *args, **kwargs):
'''Return a string that describes how these args are interpreted'''
args = self.get(*args, **kwargs)
results = ['%s = %s' % (name, value) for name, value in args.required]
results.extend(['%s = %s (overridden)' % (
name, value) for name, value in args.overridden])
results.extend(['%s = %s (default)' % (
name, value) for name, value in args.defaulted])
if self._varargs:
results.append('%s = %s' % (self._varargs, args.varargs))
if self._kwargs:
results.append('%s = %s' % (self._kwargs, args.kwargs))
return '\n\t'.join(results) | [
"def",
"explain",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"self",
".",
"get",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"results",
"=",
"[",
"'%s = %s'",
"%",
"(",
"name",
",",
"value",
")",
"for",
... | Return a string that describes how these args are interpreted | [
"Return",
"a",
"string",
"that",
"describes",
"how",
"these",
"args",
"are",
"interpreted"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/args.py#L69-L81 | train | 213,314 |
seomoz/shovel | shovel/args.py | Args.get | def get(self, *args, **kwargs):
'''Evaluate this argspec with the provided arguments'''
# We'll go through all of our required args and make sure they're
# present
required = [arg for arg in self._args if arg not in kwargs]
if len(args) < len(required):
raise TypeError('Missing arguments %s' % required[len(args):])
required = list(zip(required, args))
args = args[len(required):]
# Now we'll look through our defaults, if there are any
defaulted = [(name, default) for name, default in self._defaults
if name not in kwargs]
overridden = list(zip([d[0] for d in defaulted], args))
args = args[len(overridden):]
defaulted = defaulted[len(overridden):]
# And anything left over is in varargs
if args and not self._varargs:
raise TypeError('Too many arguments provided')
return ArgTuple(required, overridden, defaulted, args, kwargs) | python | def get(self, *args, **kwargs):
'''Evaluate this argspec with the provided arguments'''
# We'll go through all of our required args and make sure they're
# present
required = [arg for arg in self._args if arg not in kwargs]
if len(args) < len(required):
raise TypeError('Missing arguments %s' % required[len(args):])
required = list(zip(required, args))
args = args[len(required):]
# Now we'll look through our defaults, if there are any
defaulted = [(name, default) for name, default in self._defaults
if name not in kwargs]
overridden = list(zip([d[0] for d in defaulted], args))
args = args[len(overridden):]
defaulted = defaulted[len(overridden):]
# And anything left over is in varargs
if args and not self._varargs:
raise TypeError('Too many arguments provided')
return ArgTuple(required, overridden, defaulted, args, kwargs) | [
"def",
"get",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# We'll go through all of our required args and make sure they're",
"# present",
"required",
"=",
"[",
"arg",
"for",
"arg",
"in",
"self",
".",
"_args",
"if",
"arg",
"not",
"in",
... | Evaluate this argspec with the provided arguments | [
"Evaluate",
"this",
"argspec",
"with",
"the",
"provided",
"arguments"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/args.py#L83-L104 | train | 213,315 |
seomoz/shovel | shovel.py | sumnum | def sumnum(*args):
'''Computes the sum of the provided numbers'''
print('%s = %f' % (' + '.join(args), sum(float(arg) for arg in args))) | python | def sumnum(*args):
'''Computes the sum of the provided numbers'''
print('%s = %f' % (' + '.join(args), sum(float(arg) for arg in args))) | [
"def",
"sumnum",
"(",
"*",
"args",
")",
":",
"print",
"(",
"'%s = %f'",
"%",
"(",
"' + '",
".",
"join",
"(",
"args",
")",
",",
"sum",
"(",
"float",
"(",
"arg",
")",
"for",
"arg",
"in",
"args",
")",
")",
")"
] | Computes the sum of the provided numbers | [
"Computes",
"the",
"sum",
"of",
"the",
"provided",
"numbers"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel.py#L9-L11 | train | 213,316 |
seomoz/shovel | shovel.py | attributes | def attributes(name, **kwargs):
'''Prints a name, and all keyword attributes'''
print('%s has attributes:' % name)
for key, value in kwargs.items():
print('\t%s => %s' % (key, value)) | python | def attributes(name, **kwargs):
'''Prints a name, and all keyword attributes'''
print('%s has attributes:' % name)
for key, value in kwargs.items():
print('\t%s => %s' % (key, value)) | [
"def",
"attributes",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"print",
"(",
"'%s has attributes:'",
"%",
"name",
")",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"print",
"(",
"'\\t%s => %s'",
"%",
"(",
"key",
",",... | Prints a name, and all keyword attributes | [
"Prints",
"a",
"name",
"and",
"all",
"keyword",
"attributes"
] | fc29232b2b8be33972f8fb498a91a67e334f057f | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel.py#L14-L18 | train | 213,317 |
lincolnloop/django-dynamic-raw-id | dynamic_raw_id/widgets.py | DynamicRawIDWidget.render | def render(self, name, value, attrs=None, multi=False, renderer=None):
"""
Django <= 1.10 variant.
"""
DJANGO_111_OR_UP = (VERSION[0] == 1 and VERSION[1] >= 11) or (
VERSION[0] >= 2
)
if DJANGO_111_OR_UP:
return super(DynamicRawIDWidget, self).render(
name, value, attrs, renderer=renderer
)
if attrs is None:
attrs = {}
related_url = reverse(
'admin:{0}_{1}_changelist'.format(
self.rel.to._meta.app_label,
self.rel.to._meta.object_name.lower(),
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = u'?' + u'&'.join(
[u'{0}={1}'.format(k, v) for k, v in params.items()]
)
else:
url = u''
if "class" not in attrs:
attrs[
'class'
] = (
'vForeignKeyRawIdAdminField'
) # The JavaScript looks for this hook.
app_name = self.rel.to._meta.app_label.strip()
model_name = self.rel.to._meta.object_name.lower().strip()
hidden_input = super(widgets.ForeignKeyRawIdWidget, self).render(
name, value, attrs
)
extra_context = {
'hidden_input': hidden_input,
'name': name,
'app_name': app_name,
'model_name': model_name,
'related_url': related_url,
'url': url,
}
return render_to_string(
'dynamic_raw_id/admin/widgets/dynamic_raw_id_field.html',
extra_context,
) | python | def render(self, name, value, attrs=None, multi=False, renderer=None):
"""
Django <= 1.10 variant.
"""
DJANGO_111_OR_UP = (VERSION[0] == 1 and VERSION[1] >= 11) or (
VERSION[0] >= 2
)
if DJANGO_111_OR_UP:
return super(DynamicRawIDWidget, self).render(
name, value, attrs, renderer=renderer
)
if attrs is None:
attrs = {}
related_url = reverse(
'admin:{0}_{1}_changelist'.format(
self.rel.to._meta.app_label,
self.rel.to._meta.object_name.lower(),
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = u'?' + u'&'.join(
[u'{0}={1}'.format(k, v) for k, v in params.items()]
)
else:
url = u''
if "class" not in attrs:
attrs[
'class'
] = (
'vForeignKeyRawIdAdminField'
) # The JavaScript looks for this hook.
app_name = self.rel.to._meta.app_label.strip()
model_name = self.rel.to._meta.object_name.lower().strip()
hidden_input = super(widgets.ForeignKeyRawIdWidget, self).render(
name, value, attrs
)
extra_context = {
'hidden_input': hidden_input,
'name': name,
'app_name': app_name,
'model_name': model_name,
'related_url': related_url,
'url': url,
}
return render_to_string(
'dynamic_raw_id/admin/widgets/dynamic_raw_id_field.html',
extra_context,
) | [
"def",
"render",
"(",
"self",
",",
"name",
",",
"value",
",",
"attrs",
"=",
"None",
",",
"multi",
"=",
"False",
",",
"renderer",
"=",
"None",
")",
":",
"DJANGO_111_OR_UP",
"=",
"(",
"VERSION",
"[",
"0",
"]",
"==",
"1",
"and",
"VERSION",
"[",
"1",
... | Django <= 1.10 variant. | [
"Django",
"<",
"=",
"1",
".",
"10",
"variant",
"."
] | 4bf234f4a9d99daf44141205c0948222442f4957 | https://github.com/lincolnloop/django-dynamic-raw-id/blob/4bf234f4a9d99daf44141205c0948222442f4957/dynamic_raw_id/widgets.py#L23-L76 | train | 213,318 |
lincolnloop/django-dynamic-raw-id | dynamic_raw_id/widgets.py | DynamicRawIDWidget.get_context | def get_context(self, name, value, attrs):
"""
Django >= 1.11 variant.
"""
context = super(DynamicRawIDWidget, self).get_context(
name, value, attrs
)
model = self.rel.model if VERSION[0] == 2 else self.rel.to
related_url = reverse(
'admin:{0}_{1}_changelist'.format(
model._meta.app_label, model._meta.object_name.lower()
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = u'?' + u'&'.join(
[u'{0}={1}'.format(k, v) for k, v in params.items()]
)
else:
url = u''
if "class" not in attrs:
attrs[
'class'
] = (
'vForeignKeyRawIdAdminField'
) # The JavaScript looks for this hook.
app_name = model._meta.app_label.strip()
model_name = model._meta.object_name.lower().strip()
context.update(
{
'name': name,
'app_name': app_name,
'model_name': model_name,
'related_url': related_url,
'url': url,
}
)
return context | python | def get_context(self, name, value, attrs):
"""
Django >= 1.11 variant.
"""
context = super(DynamicRawIDWidget, self).get_context(
name, value, attrs
)
model = self.rel.model if VERSION[0] == 2 else self.rel.to
related_url = reverse(
'admin:{0}_{1}_changelist'.format(
model._meta.app_label, model._meta.object_name.lower()
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = u'?' + u'&'.join(
[u'{0}={1}'.format(k, v) for k, v in params.items()]
)
else:
url = u''
if "class" not in attrs:
attrs[
'class'
] = (
'vForeignKeyRawIdAdminField'
) # The JavaScript looks for this hook.
app_name = model._meta.app_label.strip()
model_name = model._meta.object_name.lower().strip()
context.update(
{
'name': name,
'app_name': app_name,
'model_name': model_name,
'related_url': related_url,
'url': url,
}
)
return context | [
"def",
"get_context",
"(",
"self",
",",
"name",
",",
"value",
",",
"attrs",
")",
":",
"context",
"=",
"super",
"(",
"DynamicRawIDWidget",
",",
"self",
")",
".",
"get_context",
"(",
"name",
",",
"value",
",",
"attrs",
")",
"model",
"=",
"self",
".",
"... | Django >= 1.11 variant. | [
"Django",
">",
"=",
"1",
".",
"11",
"variant",
"."
] | 4bf234f4a9d99daf44141205c0948222442f4957 | https://github.com/lincolnloop/django-dynamic-raw-id/blob/4bf234f4a9d99daf44141205c0948222442f4957/dynamic_raw_id/widgets.py#L78-L118 | train | 213,319 |
lincolnloop/django-dynamic-raw-id | dynamic_raw_id/filters.py | DynamicRawIDFilter.get_form | def get_form(self, request, rel, admin_site):
"""Return filter form."""
return DynamicRawIDFilterForm(
admin_site=admin_site,
rel=rel,
field_name=self.field_path,
data=self.used_parameters,
) | python | def get_form(self, request, rel, admin_site):
"""Return filter form."""
return DynamicRawIDFilterForm(
admin_site=admin_site,
rel=rel,
field_name=self.field_path,
data=self.used_parameters,
) | [
"def",
"get_form",
"(",
"self",
",",
"request",
",",
"rel",
",",
"admin_site",
")",
":",
"return",
"DynamicRawIDFilterForm",
"(",
"admin_site",
"=",
"admin_site",
",",
"rel",
"=",
"rel",
",",
"field_name",
"=",
"self",
".",
"field_path",
",",
"data",
"=",
... | Return filter form. | [
"Return",
"filter",
"form",
"."
] | 4bf234f4a9d99daf44141205c0948222442f4957 | https://github.com/lincolnloop/django-dynamic-raw-id/blob/4bf234f4a9d99daf44141205c0948222442f4957/dynamic_raw_id/filters.py#L48-L55 | train | 213,320 |
lincolnloop/django-dynamic-raw-id | dynamic_raw_id/filters.py | DynamicRawIDFilter.queryset | def queryset(self, request, queryset):
"""Filter queryset using params from the form."""
if self.form.is_valid():
# get no null params
filter_params = dict(
filter(lambda x: bool(x[1]), self.form.cleaned_data.items())
)
return queryset.filter(**filter_params)
return queryset | python | def queryset(self, request, queryset):
"""Filter queryset using params from the form."""
if self.form.is_valid():
# get no null params
filter_params = dict(
filter(lambda x: bool(x[1]), self.form.cleaned_data.items())
)
return queryset.filter(**filter_params)
return queryset | [
"def",
"queryset",
"(",
"self",
",",
"request",
",",
"queryset",
")",
":",
"if",
"self",
".",
"form",
".",
"is_valid",
"(",
")",
":",
"# get no null params",
"filter_params",
"=",
"dict",
"(",
"filter",
"(",
"lambda",
"x",
":",
"bool",
"(",
"x",
"[",
... | Filter queryset using params from the form. | [
"Filter",
"queryset",
"using",
"params",
"from",
"the",
"form",
"."
] | 4bf234f4a9d99daf44141205c0948222442f4957 | https://github.com/lincolnloop/django-dynamic-raw-id/blob/4bf234f4a9d99daf44141205c0948222442f4957/dynamic_raw_id/filters.py#L57-L65 | train | 213,321 |
joealcorn/laboratory | laboratory/experiment.py | Experiment.decorator | def decorator(cls, candidate, *exp_args, **exp_kwargs):
'''
Decorate a control function in order to conduct an experiment when called.
:param callable candidate: your candidate function
:param iterable exp_args: positional arguments passed to :class:`Experiment`
:param dict exp_kwargs: keyword arguments passed to :class:`Experiment`
Usage::
candidate_func = lambda: True
@Experiment.decorator(candidate_func)
def control_func():
return True
'''
def wrapper(control):
@wraps(control)
def inner(*args, **kwargs):
experiment = cls(*exp_args, **exp_kwargs)
experiment.control(control, args=args, kwargs=kwargs)
experiment.candidate(candidate, args=args, kwargs=kwargs)
return experiment.conduct()
return inner
return wrapper | python | def decorator(cls, candidate, *exp_args, **exp_kwargs):
'''
Decorate a control function in order to conduct an experiment when called.
:param callable candidate: your candidate function
:param iterable exp_args: positional arguments passed to :class:`Experiment`
:param dict exp_kwargs: keyword arguments passed to :class:`Experiment`
Usage::
candidate_func = lambda: True
@Experiment.decorator(candidate_func)
def control_func():
return True
'''
def wrapper(control):
@wraps(control)
def inner(*args, **kwargs):
experiment = cls(*exp_args, **exp_kwargs)
experiment.control(control, args=args, kwargs=kwargs)
experiment.candidate(candidate, args=args, kwargs=kwargs)
return experiment.conduct()
return inner
return wrapper | [
"def",
"decorator",
"(",
"cls",
",",
"candidate",
",",
"*",
"exp_args",
",",
"*",
"*",
"exp_kwargs",
")",
":",
"def",
"wrapper",
"(",
"control",
")",
":",
"@",
"wraps",
"(",
"control",
")",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",... | Decorate a control function in order to conduct an experiment when called.
:param callable candidate: your candidate function
:param iterable exp_args: positional arguments passed to :class:`Experiment`
:param dict exp_kwargs: keyword arguments passed to :class:`Experiment`
Usage::
candidate_func = lambda: True
@Experiment.decorator(candidate_func)
def control_func():
return True | [
"Decorate",
"a",
"control",
"function",
"in",
"order",
"to",
"conduct",
"an",
"experiment",
"when",
"called",
"."
] | e7af560c69d9dbb8f8cf4ca93c3c03523f8fb83d | https://github.com/joealcorn/laboratory/blob/e7af560c69d9dbb8f8cf4ca93c3c03523f8fb83d/laboratory/experiment.py#L40-L65 | train | 213,322 |
joealcorn/laboratory | laboratory/experiment.py | Experiment.candidate | def candidate(self, cand_func, args=None, kwargs=None, name='Candidate', context=None):
'''
Adds a candidate function to an experiment. Can be used multiple times for
multiple candidates.
:param callable cand_func: your control function
:param iterable args: positional arguments to pass to your function
:param dict kwargs: keyword arguments to pass to your function
:param string name: a name for your observation
:param dict context: observation-specific context
'''
self._candidates.append({
'func': cand_func,
'args': args or [],
'kwargs': kwargs or {},
'name': name,
'context': context or {},
}) | python | def candidate(self, cand_func, args=None, kwargs=None, name='Candidate', context=None):
'''
Adds a candidate function to an experiment. Can be used multiple times for
multiple candidates.
:param callable cand_func: your control function
:param iterable args: positional arguments to pass to your function
:param dict kwargs: keyword arguments to pass to your function
:param string name: a name for your observation
:param dict context: observation-specific context
'''
self._candidates.append({
'func': cand_func,
'args': args or [],
'kwargs': kwargs or {},
'name': name,
'context': context or {},
}) | [
"def",
"candidate",
"(",
"self",
",",
"cand_func",
",",
"args",
"=",
"None",
",",
"kwargs",
"=",
"None",
",",
"name",
"=",
"'Candidate'",
",",
"context",
"=",
"None",
")",
":",
"self",
".",
"_candidates",
".",
"append",
"(",
"{",
"'func'",
":",
"cand... | Adds a candidate function to an experiment. Can be used multiple times for
multiple candidates.
:param callable cand_func: your control function
:param iterable args: positional arguments to pass to your function
:param dict kwargs: keyword arguments to pass to your function
:param string name: a name for your observation
:param dict context: observation-specific context | [
"Adds",
"a",
"candidate",
"function",
"to",
"an",
"experiment",
".",
"Can",
"be",
"used",
"multiple",
"times",
"for",
"multiple",
"candidates",
"."
] | e7af560c69d9dbb8f8cf4ca93c3c03523f8fb83d | https://github.com/joealcorn/laboratory/blob/e7af560c69d9dbb8f8cf4ca93c3c03523f8fb83d/laboratory/experiment.py#L92-L109 | train | 213,323 |
summa-tx/riemann | riemann/encoding/cashaddr.py | encode | def encode(data):
'''
bytes -> str
'''
if riemann.network.CASHADDR_PREFIX is None:
raise ValueError('Network {} does not support cashaddresses.'
.format(riemann.get_current_network_name()))
data = convertbits(data, 8, 5)
checksum = calculate_checksum(riemann.network.CASHADDR_PREFIX, data)
payload = b32encode(data + checksum)
form = '{prefix}:{payload}'
return form.format(
prefix=riemann.network.CASHADDR_PREFIX,
payload=payload) | python | def encode(data):
'''
bytes -> str
'''
if riemann.network.CASHADDR_PREFIX is None:
raise ValueError('Network {} does not support cashaddresses.'
.format(riemann.get_current_network_name()))
data = convertbits(data, 8, 5)
checksum = calculate_checksum(riemann.network.CASHADDR_PREFIX, data)
payload = b32encode(data + checksum)
form = '{prefix}:{payload}'
return form.format(
prefix=riemann.network.CASHADDR_PREFIX,
payload=payload) | [
"def",
"encode",
"(",
"data",
")",
":",
"if",
"riemann",
".",
"network",
".",
"CASHADDR_PREFIX",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Network {} does not support cashaddresses.'",
".",
"format",
"(",
"riemann",
".",
"get_current_network_name",
"(",
")"... | bytes -> str | [
"bytes",
"-",
">",
"str"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/cashaddr.py#L29-L45 | train | 213,324 |
summa-tx/riemann | riemann/encoding/cashaddr.py | decode | def decode(data):
'''
str -> bytes
'''
if riemann.network.CASHADDR_PREFIX is None:
raise ValueError('Network {} does not support cashaddresses.'
.format(riemann.get_current_network_name()))
if data.find(riemann.network.CASHADDR_PREFIX) != 0:
raise ValueError('Malformed cashaddr. Cannot locate prefix: {}'
.format(riemann.netowrk.CASHADDR_PREFIX))
# the data is everything after the colon
prefix, data = data.split(':')
decoded = b32decode(data)
if not verify_checksum(prefix, decoded):
raise ValueError('Bad cash address checksum')
converted = convertbits(decoded, 5, 8)
return bytes(converted[:-6]) | python | def decode(data):
'''
str -> bytes
'''
if riemann.network.CASHADDR_PREFIX is None:
raise ValueError('Network {} does not support cashaddresses.'
.format(riemann.get_current_network_name()))
if data.find(riemann.network.CASHADDR_PREFIX) != 0:
raise ValueError('Malformed cashaddr. Cannot locate prefix: {}'
.format(riemann.netowrk.CASHADDR_PREFIX))
# the data is everything after the colon
prefix, data = data.split(':')
decoded = b32decode(data)
if not verify_checksum(prefix, decoded):
raise ValueError('Bad cash address checksum')
converted = convertbits(decoded, 5, 8)
return bytes(converted[:-6]) | [
"def",
"decode",
"(",
"data",
")",
":",
"if",
"riemann",
".",
"network",
".",
"CASHADDR_PREFIX",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Network {} does not support cashaddresses.'",
".",
"format",
"(",
"riemann",
".",
"get_current_network_name",
"(",
")"... | str -> bytes | [
"str",
"-",
">",
"bytes"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/cashaddr.py#L48-L66 | train | 213,325 |
summa-tx/riemann | riemann/tx/tx.py | TxIn.copy | def copy(self, outpoint=None, stack_script=None,
redeem_script=None, sequence=None):
'''
TxIn -> TxIn
'''
return TxIn(
outpoint=outpoint if outpoint is not None else self.outpoint,
stack_script=(stack_script if stack_script is not None
else self.stack_script),
redeem_script=(redeem_script if redeem_script is not None
else self.redeem_script),
sequence=sequence if sequence is not None else self.sequence) | python | def copy(self, outpoint=None, stack_script=None,
redeem_script=None, sequence=None):
'''
TxIn -> TxIn
'''
return TxIn(
outpoint=outpoint if outpoint is not None else self.outpoint,
stack_script=(stack_script if stack_script is not None
else self.stack_script),
redeem_script=(redeem_script if redeem_script is not None
else self.redeem_script),
sequence=sequence if sequence is not None else self.sequence) | [
"def",
"copy",
"(",
"self",
",",
"outpoint",
"=",
"None",
",",
"stack_script",
"=",
"None",
",",
"redeem_script",
"=",
"None",
",",
"sequence",
"=",
"None",
")",
":",
"return",
"TxIn",
"(",
"outpoint",
"=",
"outpoint",
"if",
"outpoint",
"is",
"not",
"N... | TxIn -> TxIn | [
"TxIn",
"-",
">",
"TxIn"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx.py#L77-L88 | train | 213,326 |
summa-tx/riemann | riemann/tx/tx.py | TxIn.from_bytes | def from_bytes(TxIn, byte_string):
'''
byte_string -> TxIn
parses a TxIn from a byte-like object
'''
outpoint = Outpoint.from_bytes(byte_string[:36])
script_sig_len = VarInt.from_bytes(byte_string[36:45])
script_start = 36 + len(script_sig_len)
script_end = script_start + script_sig_len.number
script_sig = byte_string[script_start:script_end]
sequence = byte_string[script_end:script_end + 4]
if script_sig == b'':
stack_script = b''
redeem_script = b''
else:
stack_script, redeem_script = TxIn._parse_script_sig(script_sig)
return TxIn(
outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=sequence) | python | def from_bytes(TxIn, byte_string):
'''
byte_string -> TxIn
parses a TxIn from a byte-like object
'''
outpoint = Outpoint.from_bytes(byte_string[:36])
script_sig_len = VarInt.from_bytes(byte_string[36:45])
script_start = 36 + len(script_sig_len)
script_end = script_start + script_sig_len.number
script_sig = byte_string[script_start:script_end]
sequence = byte_string[script_end:script_end + 4]
if script_sig == b'':
stack_script = b''
redeem_script = b''
else:
stack_script, redeem_script = TxIn._parse_script_sig(script_sig)
return TxIn(
outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=sequence) | [
"def",
"from_bytes",
"(",
"TxIn",
",",
"byte_string",
")",
":",
"outpoint",
"=",
"Outpoint",
".",
"from_bytes",
"(",
"byte_string",
"[",
":",
"36",
"]",
")",
"script_sig_len",
"=",
"VarInt",
".",
"from_bytes",
"(",
"byte_string",
"[",
"36",
":",
"45",
"]... | byte_string -> TxIn
parses a TxIn from a byte-like object | [
"byte_string",
"-",
">",
"TxIn",
"parses",
"a",
"TxIn",
"from",
"a",
"byte",
"-",
"like",
"object"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx.py#L117-L139 | train | 213,327 |
summa-tx/riemann | riemann/tx/tx.py | Tx.no_witness | def no_witness(self):
'''
Tx -> bytes
'''
tx = bytes()
tx += self.version
tx += VarInt(len(self.tx_ins)).to_bytes()
for tx_in in self.tx_ins:
tx += tx_in.to_bytes()
tx += VarInt(len(self.tx_outs)).to_bytes()
for tx_out in self.tx_outs:
tx += tx_out.to_bytes()
tx += self.lock_time
return bytes(tx) | python | def no_witness(self):
'''
Tx -> bytes
'''
tx = bytes()
tx += self.version
tx += VarInt(len(self.tx_ins)).to_bytes()
for tx_in in self.tx_ins:
tx += tx_in.to_bytes()
tx += VarInt(len(self.tx_outs)).to_bytes()
for tx_out in self.tx_outs:
tx += tx_out.to_bytes()
tx += self.lock_time
return bytes(tx) | [
"def",
"no_witness",
"(",
"self",
")",
":",
"tx",
"=",
"bytes",
"(",
")",
"tx",
"+=",
"self",
".",
"version",
"tx",
"+=",
"VarInt",
"(",
"len",
"(",
"self",
".",
"tx_ins",
")",
")",
".",
"to_bytes",
"(",
")",
"for",
"tx_in",
"in",
"self",
".",
... | Tx -> bytes | [
"Tx",
"-",
">",
"bytes"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx.py#L393-L406 | train | 213,328 |
summa-tx/riemann | riemann/tx/tx.py | Tx._hash_sequence | def _hash_sequence(self, sighash_type, anyone_can_pay):
'''BIP143 hashSequence implementation
Args:
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
anyone_can_pay (bool): true if ANYONECANPAY should be set
Returns:
(bytes): the hashSequence, a 32 byte hash
'''
if anyone_can_pay or sighash_type == shared.SIGHASH_SINGLE:
# If any of ANYONECANPAY, SINGLE sighash type is set,
# hashSequence is a uint256 of 0x0000......0000.
return b'\x00' * 32
else:
# hashSequence is the double SHA256 of nSequence of all inputs;
sequences = ByteData()
for tx_in in self.tx_ins:
sequences += tx_in.sequence
return utils.hash256(sequences.to_bytes()) | python | def _hash_sequence(self, sighash_type, anyone_can_pay):
'''BIP143 hashSequence implementation
Args:
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
anyone_can_pay (bool): true if ANYONECANPAY should be set
Returns:
(bytes): the hashSequence, a 32 byte hash
'''
if anyone_can_pay or sighash_type == shared.SIGHASH_SINGLE:
# If any of ANYONECANPAY, SINGLE sighash type is set,
# hashSequence is a uint256 of 0x0000......0000.
return b'\x00' * 32
else:
# hashSequence is the double SHA256 of nSequence of all inputs;
sequences = ByteData()
for tx_in in self.tx_ins:
sequences += tx_in.sequence
return utils.hash256(sequences.to_bytes()) | [
"def",
"_hash_sequence",
"(",
"self",
",",
"sighash_type",
",",
"anyone_can_pay",
")",
":",
"if",
"anyone_can_pay",
"or",
"sighash_type",
"==",
"shared",
".",
"SIGHASH_SINGLE",
":",
"# If any of ANYONECANPAY, SINGLE sighash type is set,",
"# hashSequence is a uint256 of 0x000... | BIP143 hashSequence implementation
Args:
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
anyone_can_pay (bool): true if ANYONECANPAY should be set
Returns:
(bytes): the hashSequence, a 32 byte hash | [
"BIP143",
"hashSequence",
"implementation"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx.py#L629-L647 | train | 213,329 |
summa-tx/riemann | riemann/tx/tx.py | Tx._adjusted_script_code | def _adjusted_script_code(self, script):
'''
Checks if the script code pased in to the sighash function is already
length-prepended
This will break if there's a redeem script that's just a pushdata
That won't happen in practice
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary)
'''
script_code = ByteData()
if script[0] == len(script) - 1:
return script
script_code += VarInt(len(script))
script_code += script
return script_code | python | def _adjusted_script_code(self, script):
'''
Checks if the script code pased in to the sighash function is already
length-prepended
This will break if there's a redeem script that's just a pushdata
That won't happen in practice
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary)
'''
script_code = ByteData()
if script[0] == len(script) - 1:
return script
script_code += VarInt(len(script))
script_code += script
return script_code | [
"def",
"_adjusted_script_code",
"(",
"self",
",",
"script",
")",
":",
"script_code",
"=",
"ByteData",
"(",
")",
"if",
"script",
"[",
"0",
"]",
"==",
"len",
"(",
"script",
")",
"-",
"1",
":",
"return",
"script",
"script_code",
"+=",
"VarInt",
"(",
"len"... | Checks if the script code pased in to the sighash function is already
length-prepended
This will break if there's a redeem script that's just a pushdata
That won't happen in practice
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary) | [
"Checks",
"if",
"the",
"script",
"code",
"pased",
"in",
"to",
"the",
"sighash",
"function",
"is",
"already",
"length",
"-",
"prepended",
"This",
"will",
"break",
"if",
"there",
"s",
"a",
"redeem",
"script",
"that",
"s",
"just",
"a",
"pushdata",
"That",
"... | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx.py#L649-L666 | train | 213,330 |
summa-tx/riemann | riemann/tx/tx.py | Tx._hash_outputs | def _hash_outputs(self, index, sighash_type):
'''BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash
'''
if sighash_type == shared.SIGHASH_ALL:
# If the sighash type is ALL,
# hashOutputs is the double SHA256 of all output amounts
# paired up with their scriptPubKey;
outputs = ByteData()
for tx_out in self.tx_outs:
outputs += tx_out.to_bytes()
return utils.hash256(outputs.to_bytes())
elif (sighash_type == shared.SIGHASH_SINGLE
and index < len(self.tx_outs)):
# if sighash type is SINGLE
# and the input index is smaller than the number of outputs,
# hashOutputs is the double SHA256 of the output at the same index
return utils.hash256(self.tx_outs[index].to_bytes())
else:
# Otherwise, hashOutputs is a uint256 of 0x0000......0000
raise NotImplementedError(
'I refuse to implement the SIGHASH_SINGLE bug.') | python | def _hash_outputs(self, index, sighash_type):
'''BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash
'''
if sighash_type == shared.SIGHASH_ALL:
# If the sighash type is ALL,
# hashOutputs is the double SHA256 of all output amounts
# paired up with their scriptPubKey;
outputs = ByteData()
for tx_out in self.tx_outs:
outputs += tx_out.to_bytes()
return utils.hash256(outputs.to_bytes())
elif (sighash_type == shared.SIGHASH_SINGLE
and index < len(self.tx_outs)):
# if sighash type is SINGLE
# and the input index is smaller than the number of outputs,
# hashOutputs is the double SHA256 of the output at the same index
return utils.hash256(self.tx_outs[index].to_bytes())
else:
# Otherwise, hashOutputs is a uint256 of 0x0000......0000
raise NotImplementedError(
'I refuse to implement the SIGHASH_SINGLE bug.') | [
"def",
"_hash_outputs",
"(",
"self",
",",
"index",
",",
"sighash_type",
")",
":",
"if",
"sighash_type",
"==",
"shared",
".",
"SIGHASH_ALL",
":",
"# If the sighash type is ALL,",
"# hashOutputs is the double SHA256 of all output amounts",
"# paired up with their scriptPubKey;",
... | BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash | [
"BIP143",
"hashOutputs",
"implementation"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx.py#L668-L694 | train | 213,331 |
summa-tx/riemann | riemann/script/serialization.py | deserialize | def deserialize(serialized_script):
'''
bytearray -> str
'''
deserialized = []
i = 0
while i < len(serialized_script):
current_byte = serialized_script[i]
if current_byte == 0xab:
raise NotImplementedError('OP_CODESEPARATOR is a bad idea.')
if current_byte <= 75 and current_byte != 0:
deserialized.append(
serialized_script[i + 1: i + 1 + current_byte].hex())
i += 1 + current_byte
if i > len(serialized_script):
raise IndexError(
'Push {} caused out of bounds exception.'
.format(current_byte))
elif current_byte == 76:
# next hex blob length
blob_len = serialized_script[i + 1]
deserialized.append(
serialized_script[i + 2: i + 2 + blob_len].hex())
i += 2 + blob_len
elif current_byte == 77:
# next hex blob length
blob_len = utils.le2i(serialized_script[i + 1: i + 3])
deserialized.append(
serialized_script[i + 3: i + 3 + blob_len].hex())
i += 3 + blob_len
elif current_byte == 78:
raise NotImplementedError('OP_PUSHDATA4 is a bad idea.')
else:
if current_byte in riemann.network.INT_TO_CODE_OVERWRITE:
deserialized.append(
riemann.network.INT_TO_CODE_OVERWRITE[current_byte])
elif current_byte in INT_TO_CODE:
deserialized.append(INT_TO_CODE[current_byte])
else:
raise ValueError(
'Unsupported opcode. '
'Got 0x%x' % serialized_script[i])
i += 1
return ' '.join(deserialized) | python | def deserialize(serialized_script):
'''
bytearray -> str
'''
deserialized = []
i = 0
while i < len(serialized_script):
current_byte = serialized_script[i]
if current_byte == 0xab:
raise NotImplementedError('OP_CODESEPARATOR is a bad idea.')
if current_byte <= 75 and current_byte != 0:
deserialized.append(
serialized_script[i + 1: i + 1 + current_byte].hex())
i += 1 + current_byte
if i > len(serialized_script):
raise IndexError(
'Push {} caused out of bounds exception.'
.format(current_byte))
elif current_byte == 76:
# next hex blob length
blob_len = serialized_script[i + 1]
deserialized.append(
serialized_script[i + 2: i + 2 + blob_len].hex())
i += 2 + blob_len
elif current_byte == 77:
# next hex blob length
blob_len = utils.le2i(serialized_script[i + 1: i + 3])
deserialized.append(
serialized_script[i + 3: i + 3 + blob_len].hex())
i += 3 + blob_len
elif current_byte == 78:
raise NotImplementedError('OP_PUSHDATA4 is a bad idea.')
else:
if current_byte in riemann.network.INT_TO_CODE_OVERWRITE:
deserialized.append(
riemann.network.INT_TO_CODE_OVERWRITE[current_byte])
elif current_byte in INT_TO_CODE:
deserialized.append(INT_TO_CODE[current_byte])
else:
raise ValueError(
'Unsupported opcode. '
'Got 0x%x' % serialized_script[i])
i += 1
return ' '.join(deserialized) | [
"def",
"deserialize",
"(",
"serialized_script",
")",
":",
"deserialized",
"=",
"[",
"]",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"serialized_script",
")",
":",
"current_byte",
"=",
"serialized_script",
"[",
"i",
"]",
"if",
"current_byte",
"==",
"0xab... | bytearray -> str | [
"bytearray",
"-",
">",
"str"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/script/serialization.py#L59-L113 | train | 213,332 |
summa-tx/riemann | riemann/tx/sprout.py | SproutTx._hsig_input | def _hsig_input(self, index):
'''
inputs for the hsig hash
'''
hsig_input = z.ZcashByteData()
hsig_input += self.tx_joinsplits[index].random_seed
hsig_input += self.tx_joinsplits[index].nullifiers
hsig_input += self.joinsplit_pubkey
return hsig_input.to_bytes() | python | def _hsig_input(self, index):
'''
inputs for the hsig hash
'''
hsig_input = z.ZcashByteData()
hsig_input += self.tx_joinsplits[index].random_seed
hsig_input += self.tx_joinsplits[index].nullifiers
hsig_input += self.joinsplit_pubkey
return hsig_input.to_bytes() | [
"def",
"_hsig_input",
"(",
"self",
",",
"index",
")",
":",
"hsig_input",
"=",
"z",
".",
"ZcashByteData",
"(",
")",
"hsig_input",
"+=",
"self",
".",
"tx_joinsplits",
"[",
"index",
"]",
".",
"random_seed",
"hsig_input",
"+=",
"self",
".",
"tx_joinsplits",
"[... | inputs for the hsig hash | [
"inputs",
"for",
"the",
"hsig",
"hash"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/sprout.py#L112-L120 | train | 213,333 |
summa-tx/riemann | riemann/tx/sprout.py | SproutTx._primary_input | def _primary_input(self, index):
'''
Primary input for the zkproof
'''
primary_input = z.ZcashByteData()
primary_input += self.tx_joinsplits[index].anchor
primary_input += self.tx_joinsplits[index].nullifiers
primary_input += self.tx_joinsplits[index].commitments
primary_input += self.tx_joinsplits[index].vpub_old
primary_input += self.tx_joinsplits[index].vpub_new
primary_input += self.hsigs[index]
primary_input += self.tx_joinsplits[index].vmacs
return primary_input.to_bytes() | python | def _primary_input(self, index):
'''
Primary input for the zkproof
'''
primary_input = z.ZcashByteData()
primary_input += self.tx_joinsplits[index].anchor
primary_input += self.tx_joinsplits[index].nullifiers
primary_input += self.tx_joinsplits[index].commitments
primary_input += self.tx_joinsplits[index].vpub_old
primary_input += self.tx_joinsplits[index].vpub_new
primary_input += self.hsigs[index]
primary_input += self.tx_joinsplits[index].vmacs
return primary_input.to_bytes() | [
"def",
"_primary_input",
"(",
"self",
",",
"index",
")",
":",
"primary_input",
"=",
"z",
".",
"ZcashByteData",
"(",
")",
"primary_input",
"+=",
"self",
".",
"tx_joinsplits",
"[",
"index",
"]",
".",
"anchor",
"primary_input",
"+=",
"self",
".",
"tx_joinsplits... | Primary input for the zkproof | [
"Primary",
"input",
"for",
"the",
"zkproof"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/sprout.py#L122-L134 | train | 213,334 |
summa-tx/riemann | riemann/tx/sprout.py | SproutTx.from_bytes | def from_bytes(SproutTx, byte_string):
'''
byte-like -> SproutTx
'''
version = byte_string[0:4]
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(byte_string[4:])
current = 4 + len(tx_ins_num)
for _ in range(tx_ins_num.number):
tx_in = TxIn.from_bytes(byte_string[current:])
current += len(tx_in)
tx_ins.append(tx_in)
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_outs_num.number):
tx_out = TxOut.from_bytes(byte_string[current:])
current += len(tx_out)
tx_outs.append(tx_out)
lock_time = byte_string[current:current + 4]
current += 4
tx_joinsplits = None
joinsplit_pubkey = None
joinsplit_sig = None
if utils.le2i(version) == 2: # If we expect joinsplits
tx_joinsplits = []
tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_joinsplits_num)
for _ in range(tx_joinsplits_num.number):
joinsplit = z.SproutJoinsplit.from_bytes(byte_string[current:])
current += len(joinsplit)
tx_joinsplits.append(joinsplit)
joinsplit_pubkey = byte_string[current:current + 32]
current += 32
joinsplit_sig = byte_string[current:current + 64]
return SproutTx(
version=version,
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
tx_joinsplits=tx_joinsplits,
joinsplit_pubkey=joinsplit_pubkey,
joinsplit_sig=joinsplit_sig) | python | def from_bytes(SproutTx, byte_string):
'''
byte-like -> SproutTx
'''
version = byte_string[0:4]
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(byte_string[4:])
current = 4 + len(tx_ins_num)
for _ in range(tx_ins_num.number):
tx_in = TxIn.from_bytes(byte_string[current:])
current += len(tx_in)
tx_ins.append(tx_in)
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_outs_num.number):
tx_out = TxOut.from_bytes(byte_string[current:])
current += len(tx_out)
tx_outs.append(tx_out)
lock_time = byte_string[current:current + 4]
current += 4
tx_joinsplits = None
joinsplit_pubkey = None
joinsplit_sig = None
if utils.le2i(version) == 2: # If we expect joinsplits
tx_joinsplits = []
tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_joinsplits_num)
for _ in range(tx_joinsplits_num.number):
joinsplit = z.SproutJoinsplit.from_bytes(byte_string[current:])
current += len(joinsplit)
tx_joinsplits.append(joinsplit)
joinsplit_pubkey = byte_string[current:current + 32]
current += 32
joinsplit_sig = byte_string[current:current + 64]
return SproutTx(
version=version,
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
tx_joinsplits=tx_joinsplits,
joinsplit_pubkey=joinsplit_pubkey,
joinsplit_sig=joinsplit_sig) | [
"def",
"from_bytes",
"(",
"SproutTx",
",",
"byte_string",
")",
":",
"version",
"=",
"byte_string",
"[",
"0",
":",
"4",
"]",
"tx_ins",
"=",
"[",
"]",
"tx_ins_num",
"=",
"shared",
".",
"VarInt",
".",
"from_bytes",
"(",
"byte_string",
"[",
"4",
":",
"]",
... | byte-like -> SproutTx | [
"byte",
"-",
"like",
"-",
">",
"SproutTx"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/sprout.py#L137-L186 | train | 213,335 |
summa-tx/riemann | riemann/tx/sprout.py | SproutTx.copy | def copy(self, version=None, tx_ins=None, tx_outs=None, lock_time=None,
tx_joinsplits=None, joinsplit_pubkey=None, joinsplit_sig=None):
'''
SproutTx, ... -> Tx
Makes a copy. Allows over-writing specific pieces.
'''
return SproutTx(
version=version if version is not None else self.version,
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
else self.joinsplit_sig)) | python | def copy(self, version=None, tx_ins=None, tx_outs=None, lock_time=None,
tx_joinsplits=None, joinsplit_pubkey=None, joinsplit_sig=None):
'''
SproutTx, ... -> Tx
Makes a copy. Allows over-writing specific pieces.
'''
return SproutTx(
version=version if version is not None else self.version,
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
else self.joinsplit_sig)) | [
"def",
"copy",
"(",
"self",
",",
"version",
"=",
"None",
",",
"tx_ins",
"=",
"None",
",",
"tx_outs",
"=",
"None",
",",
"lock_time",
"=",
"None",
",",
"tx_joinsplits",
"=",
"None",
",",
"joinsplit_pubkey",
"=",
"None",
",",
"joinsplit_sig",
"=",
"None",
... | SproutTx, ... -> Tx
Makes a copy. Allows over-writing specific pieces. | [
"SproutTx",
"...",
"-",
">",
"Tx"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/sprout.py#L199-L217 | train | 213,336 |
summa-tx/riemann | riemann/tx/shared.py | ByteData._make_immutable | def _make_immutable(self):
'''
Prevents any future changes to the object
'''
self._bytes = bytes(self._bytes)
self.__immutable = True | python | def _make_immutable(self):
'''
Prevents any future changes to the object
'''
self._bytes = bytes(self._bytes)
self.__immutable = True | [
"def",
"_make_immutable",
"(",
"self",
")",
":",
"self",
".",
"_bytes",
"=",
"bytes",
"(",
"self",
".",
"_bytes",
")",
"self",
".",
"__immutable",
"=",
"True"
] | Prevents any future changes to the object | [
"Prevents",
"any",
"future",
"changes",
"to",
"the",
"object"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/shared.py#L99-L104 | train | 213,337 |
summa-tx/riemann | riemann/tx/shared.py | ByteData.find | def find(self, substring):
'''
byte-like -> int
Finds the index of substring
'''
if isinstance(substring, ByteData):
substring = substring.to_bytes()
return self._bytes.find(substring) | python | def find(self, substring):
'''
byte-like -> int
Finds the index of substring
'''
if isinstance(substring, ByteData):
substring = substring.to_bytes()
return self._bytes.find(substring) | [
"def",
"find",
"(",
"self",
",",
"substring",
")",
":",
"if",
"isinstance",
"(",
"substring",
",",
"ByteData",
")",
":",
"substring",
"=",
"substring",
".",
"to_bytes",
"(",
")",
"return",
"self",
".",
"_bytes",
".",
"find",
"(",
"substring",
")"
] | byte-like -> int
Finds the index of substring | [
"byte",
"-",
"like",
"-",
">",
"int",
"Finds",
"the",
"index",
"of",
"substring"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/shared.py#L106-L113 | train | 213,338 |
summa-tx/riemann | riemann/tx/shared.py | VarInt.from_bytes | def from_bytes(VarInt, byte_string):
'''
byte-like -> VarInt
accepts arbitrary length input, gets a VarInt off the front
'''
num = byte_string
if num[0] <= 0xfc:
num = num[0:1]
non_compact = False
elif num[0] == 0xfd:
num = num[1:3]
non_compact = (num[-1:] == b'\x00')
elif num[0] == 0xfe:
num = num[1:5]
non_compact = (num[-2:] == b'\x00\x00')
elif num[0] == 0xff:
num = num[1:9]
non_compact = (num[-4:] == b'\x00\x00\x00\x00')
if len(num) not in [1, 2, 4, 8]:
raise ValueError('Malformed VarInt. Got: {}'
.format(byte_string.hex()))
if (non_compact
and ('overwinter' in riemann.get_current_network_name()
or 'sapling' in riemann.get_current_network_name())):
raise ValueError('VarInt must be compact. Got: {}'
.format(byte_string.hex()))
ret = VarInt(
utils.le2i(num),
length=len(num) + 1 if non_compact else 0)
return ret | python | def from_bytes(VarInt, byte_string):
'''
byte-like -> VarInt
accepts arbitrary length input, gets a VarInt off the front
'''
num = byte_string
if num[0] <= 0xfc:
num = num[0:1]
non_compact = False
elif num[0] == 0xfd:
num = num[1:3]
non_compact = (num[-1:] == b'\x00')
elif num[0] == 0xfe:
num = num[1:5]
non_compact = (num[-2:] == b'\x00\x00')
elif num[0] == 0xff:
num = num[1:9]
non_compact = (num[-4:] == b'\x00\x00\x00\x00')
if len(num) not in [1, 2, 4, 8]:
raise ValueError('Malformed VarInt. Got: {}'
.format(byte_string.hex()))
if (non_compact
and ('overwinter' in riemann.get_current_network_name()
or 'sapling' in riemann.get_current_network_name())):
raise ValueError('VarInt must be compact. Got: {}'
.format(byte_string.hex()))
ret = VarInt(
utils.le2i(num),
length=len(num) + 1 if non_compact else 0)
return ret | [
"def",
"from_bytes",
"(",
"VarInt",
",",
"byte_string",
")",
":",
"num",
"=",
"byte_string",
"if",
"num",
"[",
"0",
"]",
"<=",
"0xfc",
":",
"num",
"=",
"num",
"[",
"0",
":",
"1",
"]",
"non_compact",
"=",
"False",
"elif",
"num",
"[",
"0",
"]",
"==... | byte-like -> VarInt
accepts arbitrary length input, gets a VarInt off the front | [
"byte",
"-",
"like",
"-",
">",
"VarInt",
"accepts",
"arbitrary",
"length",
"input",
"gets",
"a",
"VarInt",
"off",
"the",
"front"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/shared.py#L180-L212 | train | 213,339 |
summa-tx/riemann | riemann/tx/sapling.py | SaplingTx.copy | def copy(self, tx_ins=None, tx_outs=None, lock_time=None,
expiry_height=None, value_balance=None, tx_shielded_spends=None,
tx_shielded_outputs=None, tx_joinsplits=None,
joinsplit_pubkey=None, joinsplit_sig=None, binding_sig=None):
'''
SaplingTx, ... -> SaplingTx
Makes a copy. Allows over-writing specific pieces.
'''
return SaplingTx(
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
expiry_height=(expiry_height if expiry_height is not None
else self.expiry_height),
value_balance=(value_balance if value_balance is not None
else self.value_balance),
tx_shielded_spends=(
tx_shielded_spends if tx_shielded_spends is not None
else self.tx_shielded_spends),
tx_shielded_outputs=(
tx_shielded_outputs if tx_shielded_outputs is not None
else self.tx_shielded_outputs),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
else self.joinsplit_sig),
binding_sig=(binding_sig if binding_sig is not None
else self.binding_sig)) | python | def copy(self, tx_ins=None, tx_outs=None, lock_time=None,
expiry_height=None, value_balance=None, tx_shielded_spends=None,
tx_shielded_outputs=None, tx_joinsplits=None,
joinsplit_pubkey=None, joinsplit_sig=None, binding_sig=None):
'''
SaplingTx, ... -> SaplingTx
Makes a copy. Allows over-writing specific pieces.
'''
return SaplingTx(
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
expiry_height=(expiry_height if expiry_height is not None
else self.expiry_height),
value_balance=(value_balance if value_balance is not None
else self.value_balance),
tx_shielded_spends=(
tx_shielded_spends if tx_shielded_spends is not None
else self.tx_shielded_spends),
tx_shielded_outputs=(
tx_shielded_outputs if tx_shielded_outputs is not None
else self.tx_shielded_outputs),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
else self.joinsplit_sig),
binding_sig=(binding_sig if binding_sig is not None
else self.binding_sig)) | [
"def",
"copy",
"(",
"self",
",",
"tx_ins",
"=",
"None",
",",
"tx_outs",
"=",
"None",
",",
"lock_time",
"=",
"None",
",",
"expiry_height",
"=",
"None",
",",
"value_balance",
"=",
"None",
",",
"tx_shielded_spends",
"=",
"None",
",",
"tx_shielded_outputs",
"=... | SaplingTx, ... -> SaplingTx
Makes a copy. Allows over-writing specific pieces. | [
"SaplingTx",
"...",
"-",
">",
"SaplingTx"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/sapling.py#L353-L384 | train | 213,340 |
summa-tx/riemann | riemann/encoding/base58.py | encode | def encode(data, checksum=True):
"""Convert binary to base58 using BASE58_ALPHABET."""
if checksum:
data = data + utils.hash256(data)[:4]
v, prefix = to_long(256, lambda x: x, iter(data))
data = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v])
return data.decode("utf8") | python | def encode(data, checksum=True):
"""Convert binary to base58 using BASE58_ALPHABET."""
if checksum:
data = data + utils.hash256(data)[:4]
v, prefix = to_long(256, lambda x: x, iter(data))
data = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v])
return data.decode("utf8") | [
"def",
"encode",
"(",
"data",
",",
"checksum",
"=",
"True",
")",
":",
"if",
"checksum",
":",
"data",
"=",
"data",
"+",
"utils",
".",
"hash256",
"(",
"data",
")",
"[",
":",
"4",
"]",
"v",
",",
"prefix",
"=",
"to_long",
"(",
"256",
",",
"lambda",
... | Convert binary to base58 using BASE58_ALPHABET. | [
"Convert",
"binary",
"to",
"base58",
"using",
"BASE58_ALPHABET",
"."
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/base58.py#L32-L39 | train | 213,341 |
summa-tx/riemann | riemann/tx/tx_builder.py | make_pkh_output_script | def make_pkh_output_script(pubkey, witness=False):
'''
bytearray -> bytearray
'''
if witness and not riemann.network.SEGWIT:
raise ValueError(
'Network {} does not support witness scripts.'
.format(riemann.get_current_network_name()))
output_script = bytearray()
if type(pubkey) is not bytearray and type(pubkey) is not bytes:
raise ValueError('Unknown pubkey format. '
'Expected bytes. Got: {}'.format(type(pubkey)))
pubkey_hash = utils.hash160(pubkey)
if witness:
output_script.extend(riemann.network.P2WPKH_PREFIX)
output_script.extend(pubkey_hash)
else:
output_script.extend(b'\x76\xa9\x14') # OP_DUP OP_HASH160 PUSH14
output_script.extend(pubkey_hash)
output_script.extend(b'\x88\xac') # OP_EQUALVERIFY OP_CHECKSIG
return output_script | python | def make_pkh_output_script(pubkey, witness=False):
'''
bytearray -> bytearray
'''
if witness and not riemann.network.SEGWIT:
raise ValueError(
'Network {} does not support witness scripts.'
.format(riemann.get_current_network_name()))
output_script = bytearray()
if type(pubkey) is not bytearray and type(pubkey) is not bytes:
raise ValueError('Unknown pubkey format. '
'Expected bytes. Got: {}'.format(type(pubkey)))
pubkey_hash = utils.hash160(pubkey)
if witness:
output_script.extend(riemann.network.P2WPKH_PREFIX)
output_script.extend(pubkey_hash)
else:
output_script.extend(b'\x76\xa9\x14') # OP_DUP OP_HASH160 PUSH14
output_script.extend(pubkey_hash)
output_script.extend(b'\x88\xac') # OP_EQUALVERIFY OP_CHECKSIG
return output_script | [
"def",
"make_pkh_output_script",
"(",
"pubkey",
",",
"witness",
"=",
"False",
")",
":",
"if",
"witness",
"and",
"not",
"riemann",
".",
"network",
".",
"SEGWIT",
":",
"raise",
"ValueError",
"(",
"'Network {} does not support witness scripts.'",
".",
"format",
"(",
... | bytearray -> bytearray | [
"bytearray",
"-",
">",
"bytearray"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L35-L59 | train | 213,342 |
summa-tx/riemann | riemann/tx/tx_builder.py | _make_output | def _make_output(value, output_script, version=None):
'''
byte-like, byte-like -> TxOut
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredTxOut(
value=value,
version=version,
output_script=output_script)
return tx.TxOut(value=value, output_script=output_script) | python | def _make_output(value, output_script, version=None):
'''
byte-like, byte-like -> TxOut
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredTxOut(
value=value,
version=version,
output_script=output_script)
return tx.TxOut(value=value, output_script=output_script) | [
"def",
"_make_output",
"(",
"value",
",",
"output_script",
",",
"version",
"=",
"None",
")",
":",
"if",
"'decred'",
"in",
"riemann",
".",
"get_current_network_name",
"(",
")",
":",
"return",
"tx",
".",
"DecredTxOut",
"(",
"value",
"=",
"value",
",",
"versi... | byte-like, byte-like -> TxOut | [
"byte",
"-",
"like",
"byte",
"-",
"like",
"-",
">",
"TxOut"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L78-L87 | train | 213,343 |
summa-tx/riemann | riemann/tx/tx_builder.py | make_sh_output | def make_sh_output(value, output_script, witness=False):
'''
int, str -> TxOut
'''
return _make_output(
value=utils.i2le_padded(value, 8),
output_script=make_sh_output_script(output_script, witness)) | python | def make_sh_output(value, output_script, witness=False):
'''
int, str -> TxOut
'''
return _make_output(
value=utils.i2le_padded(value, 8),
output_script=make_sh_output_script(output_script, witness)) | [
"def",
"make_sh_output",
"(",
"value",
",",
"output_script",
",",
"witness",
"=",
"False",
")",
":",
"return",
"_make_output",
"(",
"value",
"=",
"utils",
".",
"i2le_padded",
"(",
"value",
",",
"8",
")",
",",
"output_script",
"=",
"make_sh_output_script",
"(... | int, str -> TxOut | [
"int",
"str",
"-",
">",
"TxOut"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L90-L96 | train | 213,344 |
summa-tx/riemann | riemann/tx/tx_builder.py | make_pkh_output | def make_pkh_output(value, pubkey, witness=False):
'''
int, bytearray -> TxOut
'''
return _make_output(
value=utils.i2le_padded(value, 8),
output_script=make_pkh_output_script(pubkey, witness)) | python | def make_pkh_output(value, pubkey, witness=False):
'''
int, bytearray -> TxOut
'''
return _make_output(
value=utils.i2le_padded(value, 8),
output_script=make_pkh_output_script(pubkey, witness)) | [
"def",
"make_pkh_output",
"(",
"value",
",",
"pubkey",
",",
"witness",
"=",
"False",
")",
":",
"return",
"_make_output",
"(",
"value",
"=",
"utils",
".",
"i2le_padded",
"(",
"value",
",",
"8",
")",
",",
"output_script",
"=",
"make_pkh_output_script",
"(",
... | int, bytearray -> TxOut | [
"int",
"bytearray",
"-",
">",
"TxOut"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L107-L113 | train | 213,345 |
summa-tx/riemann | riemann/tx/tx_builder.py | make_outpoint | def make_outpoint(tx_id_le, index, tree=None):
'''
byte-like, int, int -> Outpoint
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredOutpoint(tx_id=tx_id_le,
index=utils.i2le_padded(index, 4),
tree=utils.i2le_padded(tree, 1))
return tx.Outpoint(tx_id=tx_id_le,
index=utils.i2le_padded(index, 4)) | python | def make_outpoint(tx_id_le, index, tree=None):
'''
byte-like, int, int -> Outpoint
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredOutpoint(tx_id=tx_id_le,
index=utils.i2le_padded(index, 4),
tree=utils.i2le_padded(tree, 1))
return tx.Outpoint(tx_id=tx_id_le,
index=utils.i2le_padded(index, 4)) | [
"def",
"make_outpoint",
"(",
"tx_id_le",
",",
"index",
",",
"tree",
"=",
"None",
")",
":",
"if",
"'decred'",
"in",
"riemann",
".",
"get_current_network_name",
"(",
")",
":",
"return",
"tx",
".",
"DecredOutpoint",
"(",
"tx_id",
"=",
"tx_id_le",
",",
"index"... | byte-like, int, int -> Outpoint | [
"byte",
"-",
"like",
"int",
"int",
"-",
">",
"Outpoint"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L180-L189 | train | 213,346 |
summa-tx/riemann | riemann/tx/tx_builder.py | make_script_sig | def make_script_sig(stack_script, redeem_script):
'''
str, str -> bytearray
'''
stack_script += ' {}'.format(
serialization.hex_serialize(redeem_script))
return serialization.serialize(stack_script) | python | def make_script_sig(stack_script, redeem_script):
'''
str, str -> bytearray
'''
stack_script += ' {}'.format(
serialization.hex_serialize(redeem_script))
return serialization.serialize(stack_script) | [
"def",
"make_script_sig",
"(",
"stack_script",
",",
"redeem_script",
")",
":",
"stack_script",
"+=",
"' {}'",
".",
"format",
"(",
"serialization",
".",
"hex_serialize",
"(",
"redeem_script",
")",
")",
"return",
"serialization",
".",
"serialize",
"(",
"stack_script... | str, str -> bytearray | [
"str",
"str",
"-",
">",
"bytearray"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L192-L198 | train | 213,347 |
summa-tx/riemann | riemann/tx/tx_builder.py | make_legacy_input | def make_legacy_input(outpoint, stack_script, redeem_script, sequence):
'''
Outpoint, byte-like, byte-like, int -> TxIn
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredTxIn(
outpoint=outpoint,
sequence=utils.i2le_padded(sequence, 4))
return tx.TxIn(outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=utils.i2le_padded(sequence, 4)) | python | def make_legacy_input(outpoint, stack_script, redeem_script, sequence):
'''
Outpoint, byte-like, byte-like, int -> TxIn
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredTxIn(
outpoint=outpoint,
sequence=utils.i2le_padded(sequence, 4))
return tx.TxIn(outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=utils.i2le_padded(sequence, 4)) | [
"def",
"make_legacy_input",
"(",
"outpoint",
",",
"stack_script",
",",
"redeem_script",
",",
"sequence",
")",
":",
"if",
"'decred'",
"in",
"riemann",
".",
"get_current_network_name",
"(",
")",
":",
"return",
"tx",
".",
"DecredTxIn",
"(",
"outpoint",
"=",
"outp... | Outpoint, byte-like, byte-like, int -> TxIn | [
"Outpoint",
"byte",
"-",
"like",
"byte",
"-",
"like",
"int",
"-",
">",
"TxIn"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L201-L212 | train | 213,348 |
summa-tx/riemann | riemann/tx/tx_builder.py | make_witness_input | def make_witness_input(outpoint, sequence):
'''
Outpoint, int -> TxIn
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredTxIn(
outpoint=outpoint,
sequence=utils.i2le_padded(sequence, 4))
return tx.TxIn(outpoint=outpoint,
stack_script=b'',
redeem_script=b'',
sequence=utils.i2le_padded(sequence, 4)) | python | def make_witness_input(outpoint, sequence):
'''
Outpoint, int -> TxIn
'''
if 'decred' in riemann.get_current_network_name():
return tx.DecredTxIn(
outpoint=outpoint,
sequence=utils.i2le_padded(sequence, 4))
return tx.TxIn(outpoint=outpoint,
stack_script=b'',
redeem_script=b'',
sequence=utils.i2le_padded(sequence, 4)) | [
"def",
"make_witness_input",
"(",
"outpoint",
",",
"sequence",
")",
":",
"if",
"'decred'",
"in",
"riemann",
".",
"get_current_network_name",
"(",
")",
":",
"return",
"tx",
".",
"DecredTxIn",
"(",
"outpoint",
"=",
"outpoint",
",",
"sequence",
"=",
"utils",
".... | Outpoint, int -> TxIn | [
"Outpoint",
"int",
"-",
">",
"TxIn"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L227-L238 | train | 213,349 |
summa-tx/riemann | riemann/tx/tx_builder.py | length_prepend | def length_prepend(byte_string):
'''
bytes -> bytes
'''
length = tx.VarInt(len(byte_string))
return length.to_bytes() + byte_string | python | def length_prepend(byte_string):
'''
bytes -> bytes
'''
length = tx.VarInt(len(byte_string))
return length.to_bytes() + byte_string | [
"def",
"length_prepend",
"(",
"byte_string",
")",
":",
"length",
"=",
"tx",
".",
"VarInt",
"(",
"len",
"(",
"byte_string",
")",
")",
"return",
"length",
".",
"to_bytes",
"(",
")",
"+",
"byte_string"
] | bytes -> bytes | [
"bytes",
"-",
">",
"bytes"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L322-L327 | train | 213,350 |
summa-tx/riemann | riemann/encoding/addresses.py | _hash_to_sh_address | def _hash_to_sh_address(script_hash, witness=False, cashaddr=True):
'''
bytes, bool, bool -> str
cashaddrs are preferred where possible
but cashaddr is ignored in most cases
is there a better way to structure this?
'''
addr_bytes = bytearray()
if riemann.network.CASHADDR_P2SH is not None and cashaddr:
addr_bytes.extend(riemann.network.CASHADDR_P2SH)
addr_bytes.extend(script_hash)
return riemann.network.CASHADDR_ENCODER.encode(addr_bytes)
if witness:
addr_bytes.extend(riemann.network.P2WSH_PREFIX)
addr_bytes.extend(script_hash)
return riemann.network.SEGWIT_ENCODER.encode(addr_bytes)
else:
addr_bytes.extend(riemann.network.P2SH_PREFIX)
addr_bytes.extend(script_hash)
return riemann.network.LEGACY_ENCODER.encode(addr_bytes) | python | def _hash_to_sh_address(script_hash, witness=False, cashaddr=True):
'''
bytes, bool, bool -> str
cashaddrs are preferred where possible
but cashaddr is ignored in most cases
is there a better way to structure this?
'''
addr_bytes = bytearray()
if riemann.network.CASHADDR_P2SH is not None and cashaddr:
addr_bytes.extend(riemann.network.CASHADDR_P2SH)
addr_bytes.extend(script_hash)
return riemann.network.CASHADDR_ENCODER.encode(addr_bytes)
if witness:
addr_bytes.extend(riemann.network.P2WSH_PREFIX)
addr_bytes.extend(script_hash)
return riemann.network.SEGWIT_ENCODER.encode(addr_bytes)
else:
addr_bytes.extend(riemann.network.P2SH_PREFIX)
addr_bytes.extend(script_hash)
return riemann.network.LEGACY_ENCODER.encode(addr_bytes) | [
"def",
"_hash_to_sh_address",
"(",
"script_hash",
",",
"witness",
"=",
"False",
",",
"cashaddr",
"=",
"True",
")",
":",
"addr_bytes",
"=",
"bytearray",
"(",
")",
"if",
"riemann",
".",
"network",
".",
"CASHADDR_P2SH",
"is",
"not",
"None",
"and",
"cashaddr",
... | bytes, bool, bool -> str
cashaddrs are preferred where possible
but cashaddr is ignored in most cases
is there a better way to structure this? | [
"bytes",
"bool",
"bool",
"-",
">",
"str",
"cashaddrs",
"are",
"preferred",
"where",
"possible",
"but",
"cashaddr",
"is",
"ignored",
"in",
"most",
"cases",
"is",
"there",
"a",
"better",
"way",
"to",
"structure",
"this?"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/addresses.py#L6-L25 | train | 213,351 |
summa-tx/riemann | riemann/encoding/addresses.py | _ser_script_to_sh_address | def _ser_script_to_sh_address(script_bytes, witness=False, cashaddr=True):
'''
makes an p2sh address from a serialized script
'''
if witness:
script_hash = utils.sha256(script_bytes)
else:
script_hash = utils.hash160(script_bytes)
return _hash_to_sh_address(
script_hash=script_hash,
witness=witness,
cashaddr=cashaddr) | python | def _ser_script_to_sh_address(script_bytes, witness=False, cashaddr=True):
'''
makes an p2sh address from a serialized script
'''
if witness:
script_hash = utils.sha256(script_bytes)
else:
script_hash = utils.hash160(script_bytes)
return _hash_to_sh_address(
script_hash=script_hash,
witness=witness,
cashaddr=cashaddr) | [
"def",
"_ser_script_to_sh_address",
"(",
"script_bytes",
",",
"witness",
"=",
"False",
",",
"cashaddr",
"=",
"True",
")",
":",
"if",
"witness",
":",
"script_hash",
"=",
"utils",
".",
"sha256",
"(",
"script_bytes",
")",
"else",
":",
"script_hash",
"=",
"utils... | makes an p2sh address from a serialized script | [
"makes",
"an",
"p2sh",
"address",
"from",
"a",
"serialized",
"script"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/addresses.py#L28-L39 | train | 213,352 |
summa-tx/riemann | riemann/encoding/addresses.py | make_sh_address | def make_sh_address(script_string, witness=False, cashaddr=True):
'''
str, bool, bool -> str
'''
script_bytes = script_ser.serialize(script_string)
return _ser_script_to_sh_address(
script_bytes=script_bytes,
witness=witness,
cashaddr=cashaddr) | python | def make_sh_address(script_string, witness=False, cashaddr=True):
'''
str, bool, bool -> str
'''
script_bytes = script_ser.serialize(script_string)
return _ser_script_to_sh_address(
script_bytes=script_bytes,
witness=witness,
cashaddr=cashaddr) | [
"def",
"make_sh_address",
"(",
"script_string",
",",
"witness",
"=",
"False",
",",
"cashaddr",
"=",
"True",
")",
":",
"script_bytes",
"=",
"script_ser",
".",
"serialize",
"(",
"script_string",
")",
"return",
"_ser_script_to_sh_address",
"(",
"script_bytes",
"=",
... | str, bool, bool -> str | [
"str",
"bool",
"bool",
"-",
">",
"str"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/addresses.py#L42-L51 | train | 213,353 |
summa-tx/riemann | riemann/encoding/addresses.py | to_output_script | def to_output_script(address):
'''
str -> bytes
There's probably a better way to do this
'''
parsed = parse(address)
parsed_hash = b''
try:
if (parsed.find(riemann.network.P2WPKH_PREFIX) == 0
and len(parsed) == 22):
return parsed
except TypeError:
pass
try:
if (parsed.find(riemann.network.P2WSH_PREFIX) == 0
and len(parsed) == 34):
return parsed
except TypeError:
pass
try:
if (parsed.find(riemann.network.CASHADDR_P2SH) == 0
and len(parsed) == len(riemann.network.CASHADDR_P2SH) + 20):
prefix = b'\xa9\x14' # OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2SH_PREFIX):]
suffix = b'\x87' # OP_EQUAL
except TypeError:
pass
try:
if (parsed.find(riemann.network.CASHADDR_P2PKH) == 0
and len(parsed) == len(riemann.network.CASHADDR_P2PKH) + 20):
prefix = b'\x76\xa9\x14' # OP_DUP OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2PKH_PREFIX):]
suffix = b'\x88\xac' # OP_EQUALVERIFY OP_CHECKSIG
except TypeError:
pass
if (parsed.find(riemann.network.P2PKH_PREFIX) == 0
and len(parsed) == len(riemann.network.P2PKH_PREFIX) + 20):
prefix = b'\x76\xa9\x14' # OP_DUP OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2PKH_PREFIX):]
suffix = b'\x88\xac' # OP_EQUALVERIFY OP_CHECKSIG
if (parsed.find(riemann.network.P2SH_PREFIX) == 0
and len(parsed) == len(riemann.network.P2SH_PREFIX) + 20):
prefix = b'\xa9\x14' # OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2SH_PREFIX):]
suffix = b'\x87' # OP_EQUAL
if parsed_hash == b'':
raise ValueError('Cannot parse output script from address.')
output_script = prefix + parsed_hash + suffix
return output_script | python | def to_output_script(address):
'''
str -> bytes
There's probably a better way to do this
'''
parsed = parse(address)
parsed_hash = b''
try:
if (parsed.find(riemann.network.P2WPKH_PREFIX) == 0
and len(parsed) == 22):
return parsed
except TypeError:
pass
try:
if (parsed.find(riemann.network.P2WSH_PREFIX) == 0
and len(parsed) == 34):
return parsed
except TypeError:
pass
try:
if (parsed.find(riemann.network.CASHADDR_P2SH) == 0
and len(parsed) == len(riemann.network.CASHADDR_P2SH) + 20):
prefix = b'\xa9\x14' # OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2SH_PREFIX):]
suffix = b'\x87' # OP_EQUAL
except TypeError:
pass
try:
if (parsed.find(riemann.network.CASHADDR_P2PKH) == 0
and len(parsed) == len(riemann.network.CASHADDR_P2PKH) + 20):
prefix = b'\x76\xa9\x14' # OP_DUP OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2PKH_PREFIX):]
suffix = b'\x88\xac' # OP_EQUALVERIFY OP_CHECKSIG
except TypeError:
pass
if (parsed.find(riemann.network.P2PKH_PREFIX) == 0
and len(parsed) == len(riemann.network.P2PKH_PREFIX) + 20):
prefix = b'\x76\xa9\x14' # OP_DUP OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2PKH_PREFIX):]
suffix = b'\x88\xac' # OP_EQUALVERIFY OP_CHECKSIG
if (parsed.find(riemann.network.P2SH_PREFIX) == 0
and len(parsed) == len(riemann.network.P2SH_PREFIX) + 20):
prefix = b'\xa9\x14' # OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2SH_PREFIX):]
suffix = b'\x87' # OP_EQUAL
if parsed_hash == b'':
raise ValueError('Cannot parse output script from address.')
output_script = prefix + parsed_hash + suffix
return output_script | [
"def",
"to_output_script",
"(",
"address",
")",
":",
"parsed",
"=",
"parse",
"(",
"address",
")",
"parsed_hash",
"=",
"b''",
"try",
":",
"if",
"(",
"parsed",
".",
"find",
"(",
"riemann",
".",
"network",
".",
"P2WPKH_PREFIX",
")",
"==",
"0",
"and",
"len... | str -> bytes
There's probably a better way to do this | [
"str",
"-",
">",
"bytes",
"There",
"s",
"probably",
"a",
"better",
"way",
"to",
"do",
"this"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/addresses.py#L131-L187 | train | 213,354 |
summa-tx/riemann | riemann/encoding/addresses.py | parse_hash | def parse_hash(address):
'''
str -> bytes
There's probably a better way to do this.
'''
raw = parse(address)
# Cash addresses
try:
if address.find(riemann.network.CASHADDR_PREFIX) == 0:
if raw.find(riemann.network.CASHADDR_P2SH) == 0:
return raw[len(riemann.network.CASHADDR_P2SH):]
if raw.find(riemann.network.CASHADDR_P2PKH) == 0:
return raw[len(riemann.network.CASHADDR_P2PKH):]
except TypeError:
pass
# Segwit addresses
try:
if address.find(riemann.network.BECH32_HRP) == 0:
if raw.find(riemann.network.P2WSH_PREFIX) == 0:
return raw[len(riemann.network.P2WSH_PREFIX):]
if raw.find(riemann.network.P2WPKH_PREFIX) == 0:
return raw[len(riemann.network.P2WPKH_PREFIX):]
except TypeError:
pass
# Legacy Addresses
if raw.find(riemann.network.P2SH_PREFIX) == 0:
return raw[len(riemann.network.P2SH_PREFIX):]
if raw.find(riemann.network.P2PKH_PREFIX) == 0:
return raw[len(riemann.network.P2PKH_PREFIX):] | python | def parse_hash(address):
'''
str -> bytes
There's probably a better way to do this.
'''
raw = parse(address)
# Cash addresses
try:
if address.find(riemann.network.CASHADDR_PREFIX) == 0:
if raw.find(riemann.network.CASHADDR_P2SH) == 0:
return raw[len(riemann.network.CASHADDR_P2SH):]
if raw.find(riemann.network.CASHADDR_P2PKH) == 0:
return raw[len(riemann.network.CASHADDR_P2PKH):]
except TypeError:
pass
# Segwit addresses
try:
if address.find(riemann.network.BECH32_HRP) == 0:
if raw.find(riemann.network.P2WSH_PREFIX) == 0:
return raw[len(riemann.network.P2WSH_PREFIX):]
if raw.find(riemann.network.P2WPKH_PREFIX) == 0:
return raw[len(riemann.network.P2WPKH_PREFIX):]
except TypeError:
pass
# Legacy Addresses
if raw.find(riemann.network.P2SH_PREFIX) == 0:
return raw[len(riemann.network.P2SH_PREFIX):]
if raw.find(riemann.network.P2PKH_PREFIX) == 0:
return raw[len(riemann.network.P2PKH_PREFIX):] | [
"def",
"parse_hash",
"(",
"address",
")",
":",
"raw",
"=",
"parse",
"(",
"address",
")",
"# Cash addresses",
"try",
":",
"if",
"address",
".",
"find",
"(",
"riemann",
".",
"network",
".",
"CASHADDR_PREFIX",
")",
"==",
"0",
":",
"if",
"raw",
".",
"find"... | str -> bytes
There's probably a better way to do this. | [
"str",
"-",
">",
"bytes",
"There",
"s",
"probably",
"a",
"better",
"way",
"to",
"do",
"this",
"."
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/addresses.py#L224-L256 | train | 213,355 |
summa-tx/riemann | riemann/simple.py | guess_version | def guess_version(redeem_script):
'''
str -> int
Bitcoin uses tx version 2 for nSequence signaling.
Zcash uses tx version 2 for joinsplits.
We want to signal nSequence if we're using OP_CSV.
Unless we're in zcash.
'''
n = riemann.get_current_network_name()
if 'sprout' in n:
return 1
if 'overwinter' in n:
return 3
if 'sapling' in n:
return 4
try:
script_array = redeem_script.split()
script_array.index('OP_CHECKSEQUENCEVERIFY')
return 2
except ValueError:
return 1 | python | def guess_version(redeem_script):
'''
str -> int
Bitcoin uses tx version 2 for nSequence signaling.
Zcash uses tx version 2 for joinsplits.
We want to signal nSequence if we're using OP_CSV.
Unless we're in zcash.
'''
n = riemann.get_current_network_name()
if 'sprout' in n:
return 1
if 'overwinter' in n:
return 3
if 'sapling' in n:
return 4
try:
script_array = redeem_script.split()
script_array.index('OP_CHECKSEQUENCEVERIFY')
return 2
except ValueError:
return 1 | [
"def",
"guess_version",
"(",
"redeem_script",
")",
":",
"n",
"=",
"riemann",
".",
"get_current_network_name",
"(",
")",
"if",
"'sprout'",
"in",
"n",
":",
"return",
"1",
"if",
"'overwinter'",
"in",
"n",
":",
"return",
"3",
"if",
"'sapling'",
"in",
"n",
":... | str -> int
Bitcoin uses tx version 2 for nSequence signaling.
Zcash uses tx version 2 for joinsplits.
We want to signal nSequence if we're using OP_CSV.
Unless we're in zcash. | [
"str",
"-",
">",
"int",
"Bitcoin",
"uses",
"tx",
"version",
"2",
"for",
"nSequence",
"signaling",
".",
"Zcash",
"uses",
"tx",
"version",
"2",
"for",
"joinsplits",
"."
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L8-L29 | train | 213,356 |
summa-tx/riemann | riemann/simple.py | guess_sequence | def guess_sequence(redeem_script):
'''
str -> int
If OP_CSV is used, guess an appropriate sequence
Otherwise, disable RBF, but leave lock_time on.
Fails if there's not a constant before OP_CSV
'''
try:
script_array = redeem_script.split()
loc = script_array.index('OP_CHECKSEQUENCEVERIFY')
return int(script_array[loc - 1], 16)
except ValueError:
return 0xFFFFFFFE | python | def guess_sequence(redeem_script):
'''
str -> int
If OP_CSV is used, guess an appropriate sequence
Otherwise, disable RBF, but leave lock_time on.
Fails if there's not a constant before OP_CSV
'''
try:
script_array = redeem_script.split()
loc = script_array.index('OP_CHECKSEQUENCEVERIFY')
return int(script_array[loc - 1], 16)
except ValueError:
return 0xFFFFFFFE | [
"def",
"guess_sequence",
"(",
"redeem_script",
")",
":",
"try",
":",
"script_array",
"=",
"redeem_script",
".",
"split",
"(",
")",
"loc",
"=",
"script_array",
".",
"index",
"(",
"'OP_CHECKSEQUENCEVERIFY'",
")",
"return",
"int",
"(",
"script_array",
"[",
"loc",... | str -> int
If OP_CSV is used, guess an appropriate sequence
Otherwise, disable RBF, but leave lock_time on.
Fails if there's not a constant before OP_CSV | [
"str",
"-",
">",
"int",
"If",
"OP_CSV",
"is",
"used",
"guess",
"an",
"appropriate",
"sequence",
"Otherwise",
"disable",
"RBF",
"but",
"leave",
"lock_time",
"on",
".",
"Fails",
"if",
"there",
"s",
"not",
"a",
"constant",
"before",
"OP_CSV"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L32-L44 | train | 213,357 |
summa-tx/riemann | riemann/simple.py | output | def output(value, address):
'''
int, str -> TxOut
accepts base58 or bech32 addresses
'''
script = addr.to_output_script(address)
value = utils.i2le_padded(value, 8)
return tb._make_output(value, script) | python | def output(value, address):
'''
int, str -> TxOut
accepts base58 or bech32 addresses
'''
script = addr.to_output_script(address)
value = utils.i2le_padded(value, 8)
return tb._make_output(value, script) | [
"def",
"output",
"(",
"value",
",",
"address",
")",
":",
"script",
"=",
"addr",
".",
"to_output_script",
"(",
"address",
")",
"value",
"=",
"utils",
".",
"i2le_padded",
"(",
"value",
",",
"8",
")",
"return",
"tb",
".",
"_make_output",
"(",
"value",
","... | int, str -> TxOut
accepts base58 or bech32 addresses | [
"int",
"str",
"-",
">",
"TxOut",
"accepts",
"base58",
"or",
"bech32",
"addresses"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L62-L69 | train | 213,358 |
summa-tx/riemann | riemann/simple.py | outpoint | def outpoint(tx_id, index, tree=None):
'''
hex_str, int, int -> Outpoint
accepts block explorer txid string
'''
tx_id_le = bytes.fromhex(tx_id)[::-1]
return tb.make_outpoint(tx_id_le, index, tree) | python | def outpoint(tx_id, index, tree=None):
'''
hex_str, int, int -> Outpoint
accepts block explorer txid string
'''
tx_id_le = bytes.fromhex(tx_id)[::-1]
return tb.make_outpoint(tx_id_le, index, tree) | [
"def",
"outpoint",
"(",
"tx_id",
",",
"index",
",",
"tree",
"=",
"None",
")",
":",
"tx_id_le",
"=",
"bytes",
".",
"fromhex",
"(",
"tx_id",
")",
"[",
":",
":",
"-",
"1",
"]",
"return",
"tb",
".",
"make_outpoint",
"(",
"tx_id_le",
",",
"index",
",",
... | hex_str, int, int -> Outpoint
accepts block explorer txid string | [
"hex_str",
"int",
"int",
"-",
">",
"Outpoint",
"accepts",
"block",
"explorer",
"txid",
"string"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L84-L90 | train | 213,359 |
summa-tx/riemann | riemann/simple.py | unsigned_input | def unsigned_input(outpoint, redeem_script=None, sequence=None):
'''
Outpoint, byte-like, int -> TxIn
'''
if redeem_script is not None and sequence is None:
sequence = guess_sequence(redeem_script)
if sequence is None:
sequence = 0xFFFFFFFE
return tb.make_legacy_input(
outpoint=outpoint,
stack_script=b'',
redeem_script=b'',
sequence=sequence) | python | def unsigned_input(outpoint, redeem_script=None, sequence=None):
'''
Outpoint, byte-like, int -> TxIn
'''
if redeem_script is not None and sequence is None:
sequence = guess_sequence(redeem_script)
if sequence is None:
sequence = 0xFFFFFFFE
return tb.make_legacy_input(
outpoint=outpoint,
stack_script=b'',
redeem_script=b'',
sequence=sequence) | [
"def",
"unsigned_input",
"(",
"outpoint",
",",
"redeem_script",
"=",
"None",
",",
"sequence",
"=",
"None",
")",
":",
"if",
"redeem_script",
"is",
"not",
"None",
"and",
"sequence",
"is",
"None",
":",
"sequence",
"=",
"guess_sequence",
"(",
"redeem_script",
")... | Outpoint, byte-like, int -> TxIn | [
"Outpoint",
"byte",
"-",
"like",
"int",
"-",
">",
"TxIn"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L104-L116 | train | 213,360 |
summa-tx/riemann | riemann/simple.py | p2pkh_input | def p2pkh_input(outpoint, sig, pubkey, sequence=0xFFFFFFFE):
'''
OutPoint, hex_string, hex_string, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout
'''
stack_script = '{sig} {pk}'.format(sig=sig, pk=pubkey)
stack_script = script_ser.serialize(stack_script)
return tb.make_legacy_input(outpoint, stack_script, b'', sequence) | python | def p2pkh_input(outpoint, sig, pubkey, sequence=0xFFFFFFFE):
'''
OutPoint, hex_string, hex_string, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout
'''
stack_script = '{sig} {pk}'.format(sig=sig, pk=pubkey)
stack_script = script_ser.serialize(stack_script)
return tb.make_legacy_input(outpoint, stack_script, b'', sequence) | [
"def",
"p2pkh_input",
"(",
"outpoint",
",",
"sig",
",",
"pubkey",
",",
"sequence",
"=",
"0xFFFFFFFE",
")",
":",
"stack_script",
"=",
"'{sig} {pk}'",
".",
"format",
"(",
"sig",
"=",
"sig",
",",
"pk",
"=",
"pubkey",
")",
"stack_script",
"=",
"script_ser",
... | OutPoint, hex_string, hex_string, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout | [
"OutPoint",
"hex_string",
"hex_string",
"int",
"-",
">",
"TxIn",
"Create",
"a",
"signed",
"legacy",
"TxIn",
"from",
"a",
"p2pkh",
"prevout"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L129-L136 | train | 213,361 |
summa-tx/riemann | riemann/simple.py | p2sh_input | def p2sh_input(outpoint, stack_script, redeem_script, sequence=None):
'''
OutPoint, str, str, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout
'''
if sequence is None:
sequence = guess_sequence(redeem_script)
stack_script = script_ser.serialize(stack_script)
redeem_script = script_ser.hex_serialize(redeem_script)
redeem_script = script_ser.serialize(redeem_script)
return tb.make_legacy_input(
outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=sequence) | python | def p2sh_input(outpoint, stack_script, redeem_script, sequence=None):
'''
OutPoint, str, str, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout
'''
if sequence is None:
sequence = guess_sequence(redeem_script)
stack_script = script_ser.serialize(stack_script)
redeem_script = script_ser.hex_serialize(redeem_script)
redeem_script = script_ser.serialize(redeem_script)
return tb.make_legacy_input(
outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=sequence) | [
"def",
"p2sh_input",
"(",
"outpoint",
",",
"stack_script",
",",
"redeem_script",
",",
"sequence",
"=",
"None",
")",
":",
"if",
"sequence",
"is",
"None",
":",
"sequence",
"=",
"guess_sequence",
"(",
"redeem_script",
")",
"stack_script",
"=",
"script_ser",
".",
... | OutPoint, str, str, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout | [
"OutPoint",
"str",
"str",
"int",
"-",
">",
"TxIn",
"Create",
"a",
"signed",
"legacy",
"TxIn",
"from",
"a",
"p2pkh",
"prevout"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L154-L170 | train | 213,362 |
summa-tx/riemann | riemann/tx/overwinter.py | OverwinterTx.copy | def copy(self, tx_ins=None, tx_outs=None, lock_time=None,
expiry_height=None, tx_joinsplits=None, joinsplit_pubkey=None,
joinsplit_sig=None):
'''
OverwinterTx, ... -> OverwinterTx
Makes a copy. Allows over-writing specific pieces.
'''
return OverwinterTx(
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
expiry_height=(expiry_height if expiry_height is not None
else self.expiry_height),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
else self.joinsplit_sig)) | python | def copy(self, tx_ins=None, tx_outs=None, lock_time=None,
expiry_height=None, tx_joinsplits=None, joinsplit_pubkey=None,
joinsplit_sig=None):
'''
OverwinterTx, ... -> OverwinterTx
Makes a copy. Allows over-writing specific pieces.
'''
return OverwinterTx(
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
expiry_height=(expiry_height if expiry_height is not None
else self.expiry_height),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
else self.joinsplit_sig)) | [
"def",
"copy",
"(",
"self",
",",
"tx_ins",
"=",
"None",
",",
"tx_outs",
"=",
"None",
",",
"lock_time",
"=",
"None",
",",
"expiry_height",
"=",
"None",
",",
"tx_joinsplits",
"=",
"None",
",",
"joinsplit_pubkey",
"=",
"None",
",",
"joinsplit_sig",
"=",
"No... | OverwinterTx, ... -> OverwinterTx
Makes a copy. Allows over-writing specific pieces. | [
"OverwinterTx",
"...",
"-",
">",
"OverwinterTx"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/overwinter.py#L120-L140 | train | 213,363 |
summa-tx/riemann | riemann/tx/overwinter.py | OverwinterTx.from_bytes | def from_bytes(OverwinterTx, byte_string):
'''
byte-like -> OverwinterTx
'''
header = byte_string[0:4]
group_id = byte_string[4:8]
if header != b'\x03\x00\x00\x80' or group_id != b'\x70\x82\xc4\x03':
raise ValueError(
'Bad header or group ID. Expected {} and {}. Got: {} and {}'
.format(b'\x03\x00\x00\x80'.hex(),
b'\x70\x82\xc4\x03'.hex(),
header.hex(),
group_id.hex()))
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(byte_string[8:])
current = 8 + len(tx_ins_num)
for _ in range(tx_ins_num.number):
tx_in = TxIn.from_bytes(byte_string[current:])
current += len(tx_in)
tx_ins.append(tx_in)
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_outs_num.number):
tx_out = TxOut.from_bytes(byte_string[current:])
current += len(tx_out)
tx_outs.append(tx_out)
lock_time = byte_string[current:current + 4]
current += 4
expiry_height = byte_string[current:current + 4]
current += 4
if current == len(byte_string):
# No joinsplits
tx_joinsplits = tuple()
joinsplit_pubkey = None
joinsplit_sig = None
else:
tx_joinsplits = []
tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_joinsplits_num.number):
tx_joinsplit = z.SproutJoinsplit.from_bytes(
byte_string[current:])
current += len(tx_joinsplit)
tx_joinsplits.append(tx_joinsplit)
joinsplit_pubkey = byte_string[current:current + 32]
current += 32
joinsplit_sig = byte_string[current:current + 64]
return OverwinterTx(
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
expiry_height=expiry_height,
tx_joinsplits=tx_joinsplits,
joinsplit_pubkey=joinsplit_pubkey,
joinsplit_sig=joinsplit_sig) | python | def from_bytes(OverwinterTx, byte_string):
'''
byte-like -> OverwinterTx
'''
header = byte_string[0:4]
group_id = byte_string[4:8]
if header != b'\x03\x00\x00\x80' or group_id != b'\x70\x82\xc4\x03':
raise ValueError(
'Bad header or group ID. Expected {} and {}. Got: {} and {}'
.format(b'\x03\x00\x00\x80'.hex(),
b'\x70\x82\xc4\x03'.hex(),
header.hex(),
group_id.hex()))
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(byte_string[8:])
current = 8 + len(tx_ins_num)
for _ in range(tx_ins_num.number):
tx_in = TxIn.from_bytes(byte_string[current:])
current += len(tx_in)
tx_ins.append(tx_in)
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_outs_num.number):
tx_out = TxOut.from_bytes(byte_string[current:])
current += len(tx_out)
tx_outs.append(tx_out)
lock_time = byte_string[current:current + 4]
current += 4
expiry_height = byte_string[current:current + 4]
current += 4
if current == len(byte_string):
# No joinsplits
tx_joinsplits = tuple()
joinsplit_pubkey = None
joinsplit_sig = None
else:
tx_joinsplits = []
tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_joinsplits_num.number):
tx_joinsplit = z.SproutJoinsplit.from_bytes(
byte_string[current:])
current += len(tx_joinsplit)
tx_joinsplits.append(tx_joinsplit)
joinsplit_pubkey = byte_string[current:current + 32]
current += 32
joinsplit_sig = byte_string[current:current + 64]
return OverwinterTx(
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
expiry_height=expiry_height,
tx_joinsplits=tx_joinsplits,
joinsplit_pubkey=joinsplit_pubkey,
joinsplit_sig=joinsplit_sig) | [
"def",
"from_bytes",
"(",
"OverwinterTx",
",",
"byte_string",
")",
":",
"header",
"=",
"byte_string",
"[",
"0",
":",
"4",
"]",
"group_id",
"=",
"byte_string",
"[",
"4",
":",
"8",
"]",
"if",
"header",
"!=",
"b'\\x03\\x00\\x00\\x80'",
"or",
"group_id",
"!=",... | byte-like -> OverwinterTx | [
"byte",
"-",
"like",
"-",
">",
"OverwinterTx"
] | 04ae336dfd4007ceaed748daadc91cc32fa278ec | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/overwinter.py#L173-L237 | train | 213,364 |
OrkoHunter/keep | keep/commands/cmd_init.py | cli | def cli(ctx):
"""Initializes the CLI environment."""
dir_path = os.path.join(os.path.expanduser('~'), '.keep')
if os.path.exists(dir_path):
if click.confirm('[CRITICAL] Remove everything inside ~/.keep ?', abort=True):
shutil.rmtree(dir_path)
utils.first_time_use(ctx) | python | def cli(ctx):
"""Initializes the CLI environment."""
dir_path = os.path.join(os.path.expanduser('~'), '.keep')
if os.path.exists(dir_path):
if click.confirm('[CRITICAL] Remove everything inside ~/.keep ?', abort=True):
shutil.rmtree(dir_path)
utils.first_time_use(ctx) | [
"def",
"cli",
"(",
"ctx",
")",
":",
"dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.keep'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dir_path",
")",
":",
"if",
"clic... | Initializes the CLI environment. | [
"Initializes",
"the",
"CLI",
"environment",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/commands/cmd_init.py#L8-L14 | train | 213,365 |
OrkoHunter/keep | keep/commands/cmd_run.py | cli | def cli(ctx, pattern, arguments, safe):
"""Executes a saved command."""
matches = utils.grep_commands(pattern)
if matches:
selected = utils.select_command(matches)
if selected >= 0:
cmd, desc = matches[selected]
pcmd = utils.create_pcmd(cmd)
raw_params, params, defaults = utils.get_params_in_pcmd(pcmd)
arguments = list(arguments)
kargs = {}
for r, p, d in zip(raw_params, params, defaults):
if arguments:
val = arguments.pop(0)
click.echo("{}: {}".format(p, val))
kargs[r] = val
elif safe:
if d:
kargs[r] = d
else:
p_default = d if d else None
val = click.prompt("Enter value for '{}'".format(p), default=p_default)
kargs[r] = val
click.echo("\n")
final_cmd = utils.substitute_pcmd(pcmd, kargs, safe)
command = "$ {} :: {}".format(final_cmd, desc)
if click.confirm("Execute\n\t{}\n\n?".format(command), default=True):
os.system(final_cmd)
elif matches == []:
click.echo('No saved commands matches the pattern {}'.format(pattern))
else:
click.echo("No commands to run, Add one by 'keep new'. ") | python | def cli(ctx, pattern, arguments, safe):
"""Executes a saved command."""
matches = utils.grep_commands(pattern)
if matches:
selected = utils.select_command(matches)
if selected >= 0:
cmd, desc = matches[selected]
pcmd = utils.create_pcmd(cmd)
raw_params, params, defaults = utils.get_params_in_pcmd(pcmd)
arguments = list(arguments)
kargs = {}
for r, p, d in zip(raw_params, params, defaults):
if arguments:
val = arguments.pop(0)
click.echo("{}: {}".format(p, val))
kargs[r] = val
elif safe:
if d:
kargs[r] = d
else:
p_default = d if d else None
val = click.prompt("Enter value for '{}'".format(p), default=p_default)
kargs[r] = val
click.echo("\n")
final_cmd = utils.substitute_pcmd(pcmd, kargs, safe)
command = "$ {} :: {}".format(final_cmd, desc)
if click.confirm("Execute\n\t{}\n\n?".format(command), default=True):
os.system(final_cmd)
elif matches == []:
click.echo('No saved commands matches the pattern {}'.format(pattern))
else:
click.echo("No commands to run, Add one by 'keep new'. ") | [
"def",
"cli",
"(",
"ctx",
",",
"pattern",
",",
"arguments",
",",
"safe",
")",
":",
"matches",
"=",
"utils",
".",
"grep_commands",
"(",
"pattern",
")",
"if",
"matches",
":",
"selected",
"=",
"utils",
".",
"select_command",
"(",
"matches",
")",
"if",
"se... | Executes a saved command. | [
"Executes",
"a",
"saved",
"command",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/commands/cmd_run.py#L12-L47 | train | 213,366 |
OrkoHunter/keep | keep/commands/cmd_new.py | cli | def cli(ctx):
"""Saves a new command"""
cmd = click.prompt('Command')
desc = click.prompt('Description ')
alias = click.prompt('Alias (optional)', default='')
utils.save_command(cmd, desc, alias)
utils.log(ctx, 'Saved the new command - {} - with the description - {}.'.format(cmd, desc)) | python | def cli(ctx):
"""Saves a new command"""
cmd = click.prompt('Command')
desc = click.prompt('Description ')
alias = click.prompt('Alias (optional)', default='')
utils.save_command(cmd, desc, alias)
utils.log(ctx, 'Saved the new command - {} - with the description - {}.'.format(cmd, desc)) | [
"def",
"cli",
"(",
"ctx",
")",
":",
"cmd",
"=",
"click",
".",
"prompt",
"(",
"'Command'",
")",
"desc",
"=",
"click",
".",
"prompt",
"(",
"'Description '",
")",
"alias",
"=",
"click",
".",
"prompt",
"(",
"'Alias (optional)'",
",",
"default",
"=",
"''",
... | Saves a new command | [
"Saves",
"a",
"new",
"command"
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/commands/cmd_new.py#L6-L13 | train | 213,367 |
OrkoHunter/keep | keep/cli.py | Context.log | def log(self, msg, *args):
"""Logs a message to stderr."""
if args:
msg %= args
click.echo(msg, file=sys.stderr) | python | def log(self, msg, *args):
"""Logs a message to stderr."""
if args:
msg %= args
click.echo(msg, file=sys.stderr) | [
"def",
"log",
"(",
"self",
",",
"msg",
",",
"*",
"args",
")",
":",
"if",
"args",
":",
"msg",
"%=",
"args",
"click",
".",
"echo",
"(",
"msg",
",",
"file",
"=",
"sys",
".",
"stderr",
")"
] | Logs a message to stderr. | [
"Logs",
"a",
"message",
"to",
"stderr",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/cli.py#L16-L20 | train | 213,368 |
OrkoHunter/keep | keep/cli.py | Context.vlog | def vlog(self, msg, *args):
"""Logs a message to stderr only if verbose is enabled."""
if self.verbose:
self.log(msg, *args) | python | def vlog(self, msg, *args):
"""Logs a message to stderr only if verbose is enabled."""
if self.verbose:
self.log(msg, *args) | [
"def",
"vlog",
"(",
"self",
",",
"msg",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"log",
"(",
"msg",
",",
"*",
"args",
")"
] | Logs a message to stderr only if verbose is enabled. | [
"Logs",
"a",
"message",
"to",
"stderr",
"only",
"if",
"verbose",
"is",
"enabled",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/cli.py#L22-L25 | train | 213,369 |
OrkoHunter/keep | keep/commands/cmd_update.py | cli | def cli(ctx):
"""Check for an update of Keep."""
utils.check_update(ctx, forced=True)
click.secho("Keep is at its latest version v{}".format(about.__version__), fg='green') | python | def cli(ctx):
"""Check for an update of Keep."""
utils.check_update(ctx, forced=True)
click.secho("Keep is at its latest version v{}".format(about.__version__), fg='green') | [
"def",
"cli",
"(",
"ctx",
")",
":",
"utils",
".",
"check_update",
"(",
"ctx",
",",
"forced",
"=",
"True",
")",
"click",
".",
"secho",
"(",
"\"Keep is at its latest version v{}\"",
".",
"format",
"(",
"about",
".",
"__version__",
")",
",",
"fg",
"=",
"'gr... | Check for an update of Keep. | [
"Check",
"for",
"an",
"update",
"of",
"Keep",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/commands/cmd_update.py#L6-L9 | train | 213,370 |
OrkoHunter/keep | keep/commands/cmd_grep.py | cli | def cli(ctx, pattern):
"""Searches for a saved command."""
matches = utils.grep_commands(pattern)
if matches:
for cmd, desc in matches:
click.secho("$ {} :: {}".format(cmd, desc), fg='green')
elif matches == []:
click.echo('No saved commands matches the pattern {}'.format(pattern))
else:
click.echo('No commands to show. Add one by `keep new`.') | python | def cli(ctx, pattern):
"""Searches for a saved command."""
matches = utils.grep_commands(pattern)
if matches:
for cmd, desc in matches:
click.secho("$ {} :: {}".format(cmd, desc), fg='green')
elif matches == []:
click.echo('No saved commands matches the pattern {}'.format(pattern))
else:
click.echo('No commands to show. Add one by `keep new`.') | [
"def",
"cli",
"(",
"ctx",
",",
"pattern",
")",
":",
"matches",
"=",
"utils",
".",
"grep_commands",
"(",
"pattern",
")",
"if",
"matches",
":",
"for",
"cmd",
",",
"desc",
"in",
"matches",
":",
"click",
".",
"secho",
"(",
"\"$ {} :: {}\"",
".",
"format",
... | Searches for a saved command. | [
"Searches",
"for",
"a",
"saved",
"command",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/commands/cmd_grep.py#L8-L17 | train | 213,371 |
OrkoHunter/keep | keep/commands/cmd_list.py | cli | def cli(ctx):
"""Shows the saved commands."""
json_path = os.path.join(os.path.expanduser('~'), '.keep', 'commands.json')
if not os.path.exists(json_path):
click.echo('No commands to show. Add one by `keep new`.')
else:
utils.list_commands(ctx) | python | def cli(ctx):
"""Shows the saved commands."""
json_path = os.path.join(os.path.expanduser('~'), '.keep', 'commands.json')
if not os.path.exists(json_path):
click.echo('No commands to show. Add one by `keep new`.')
else:
utils.list_commands(ctx) | [
"def",
"cli",
"(",
"ctx",
")",
":",
"json_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.keep'",
",",
"'commands.json'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"js... | Shows the saved commands. | [
"Shows",
"the",
"saved",
"commands",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/commands/cmd_list.py#L8-L14 | train | 213,372 |
OrkoHunter/keep | keep/legacy_commands/removed_cmd_pull.py | cli | def cli(ctx, overwrite):
"""Updates the local database with remote."""
credentials_path = os.path.join(os.path.expanduser('~'), '.keep', '.credentials')
if not os.path.exists(credentials_path):
click.echo('You are not registered.')
utils.register()
else:
utils.pull(ctx, overwrite) | python | def cli(ctx, overwrite):
"""Updates the local database with remote."""
credentials_path = os.path.join(os.path.expanduser('~'), '.keep', '.credentials')
if not os.path.exists(credentials_path):
click.echo('You are not registered.')
utils.register()
else:
utils.pull(ctx, overwrite) | [
"def",
"cli",
"(",
"ctx",
",",
"overwrite",
")",
":",
"credentials_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.keep'",
",",
"'.credentials'",
")",
"if",
"not",
"os",
".",
"path",
"... | Updates the local database with remote. | [
"Updates",
"the",
"local",
"database",
"with",
"remote",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/legacy_commands/removed_cmd_pull.py#L9-L16 | train | 213,373 |
OrkoHunter/keep | keep/legacy_commands/removed_cmd_register.py | cli | def cli(ctx):
"""Register user over server."""
dir_path = os.path.join(os.path.expanduser('~'), '.keep', '.credentials')
if os.path.exists(dir_path):
if click.confirm('[CRITICAL] Reset credentials saved in ~/.keep/.credentials ?', abort=True):
os.remove(dir_path)
utils.register() | python | def cli(ctx):
"""Register user over server."""
dir_path = os.path.join(os.path.expanduser('~'), '.keep', '.credentials')
if os.path.exists(dir_path):
if click.confirm('[CRITICAL] Reset credentials saved in ~/.keep/.credentials ?', abort=True):
os.remove(dir_path)
utils.register() | [
"def",
"cli",
"(",
"ctx",
")",
":",
"dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.keep'",
",",
"'.credentials'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dir_path",
... | Register user over server. | [
"Register",
"user",
"over",
"server",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/legacy_commands/removed_cmd_register.py#L7-L13 | train | 213,374 |
OrkoHunter/keep | keep/utils.py | check_update | def check_update(ctx, forced=False):
"""
Check for update on pypi. Limit to 1 check per day if not forced
"""
try:
if ctx.update_checked and not forced:
return
except AttributeError:
update_check_file = os.path.join(dir_path, 'update_check.txt')
today = datetime.date.today().strftime("%m/%d/%Y")
if os.path.exists(update_check_file):
date = open(update_check_file, 'r').read()
else:
date = []
if forced or today != date:
ctx.update_checked = True
date = today
with open(update_check_file, 'w') as f:
f.write(date)
r = requests.get("https://pypi.org/pypi/keep/json").json()
version = r['info']['version']
curr_version = about.__version__
if version > curr_version:
click.secho("Keep seems to be outdated. Current version = "
"{}, Latest version = {}".format(curr_version, version) +
"\n\nPlease update with ", bold=True, fg='red')
click.secho("\tpip3 --no-cache-dir install -U keep==" + str(version), fg='green')
click.secho("\n\n") | python | def check_update(ctx, forced=False):
"""
Check for update on pypi. Limit to 1 check per day if not forced
"""
try:
if ctx.update_checked and not forced:
return
except AttributeError:
update_check_file = os.path.join(dir_path, 'update_check.txt')
today = datetime.date.today().strftime("%m/%d/%Y")
if os.path.exists(update_check_file):
date = open(update_check_file, 'r').read()
else:
date = []
if forced or today != date:
ctx.update_checked = True
date = today
with open(update_check_file, 'w') as f:
f.write(date)
r = requests.get("https://pypi.org/pypi/keep/json").json()
version = r['info']['version']
curr_version = about.__version__
if version > curr_version:
click.secho("Keep seems to be outdated. Current version = "
"{}, Latest version = {}".format(curr_version, version) +
"\n\nPlease update with ", bold=True, fg='red')
click.secho("\tpip3 --no-cache-dir install -U keep==" + str(version), fg='green')
click.secho("\n\n") | [
"def",
"check_update",
"(",
"ctx",
",",
"forced",
"=",
"False",
")",
":",
"try",
":",
"if",
"ctx",
".",
"update_checked",
"and",
"not",
"forced",
":",
"return",
"except",
"AttributeError",
":",
"update_check_file",
"=",
"os",
".",
"path",
".",
"join",
"(... | Check for update on pypi. Limit to 1 check per day if not forced | [
"Check",
"for",
"update",
"on",
"pypi",
".",
"Limit",
"to",
"1",
"check",
"per",
"day",
"if",
"not",
"forced"
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/utils.py#L23-L50 | train | 213,375 |
OrkoHunter/keep | keep/commands/cmd_edit.py | cli | def cli(ctx, editor):
"""Edit saved commands."""
commands = utils.read_commands()
if commands is []:
click.echo("No commands to edit, Add one by 'keep new'. ")
else:
edit_header = "# Unchanged file will abort the operation\n"
new_commands = utils.edit_commands(commands, editor, edit_header)
if new_commands and new_commands != commands:
click.echo("Replace:\n")
click.secho("\t{}".format('\n\t'.join(utils.format_commands(commands))),
fg="green")
click.echo("With:\n\t")
click.secho("\t{}".format('\n\t'.join(utils.format_commands(new_commands))),
fg="green")
if click.confirm("", default=False):
utils.write_commands(new_commands) | python | def cli(ctx, editor):
"""Edit saved commands."""
commands = utils.read_commands()
if commands is []:
click.echo("No commands to edit, Add one by 'keep new'. ")
else:
edit_header = "# Unchanged file will abort the operation\n"
new_commands = utils.edit_commands(commands, editor, edit_header)
if new_commands and new_commands != commands:
click.echo("Replace:\n")
click.secho("\t{}".format('\n\t'.join(utils.format_commands(commands))),
fg="green")
click.echo("With:\n\t")
click.secho("\t{}".format('\n\t'.join(utils.format_commands(new_commands))),
fg="green")
if click.confirm("", default=False):
utils.write_commands(new_commands) | [
"def",
"cli",
"(",
"ctx",
",",
"editor",
")",
":",
"commands",
"=",
"utils",
".",
"read_commands",
"(",
")",
"if",
"commands",
"is",
"[",
"]",
":",
"click",
".",
"echo",
"(",
"\"No commands to edit, Add one by 'keep new'. \"",
")",
"else",
":",
"edit_header"... | Edit saved commands. | [
"Edit",
"saved",
"commands",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/commands/cmd_edit.py#L8-L25 | train | 213,376 |
OrkoHunter/keep | keep/commands/cmd_rm.py | cli | def cli(ctx, pattern):
"""Deletes a saved command."""
matches = utils.grep_commands(pattern)
if matches:
selected = utils.select_command(matches)
if selected >= 0:
cmd, desc = matches[selected]
command = "$ {} :: {}".format(cmd, desc)
if click.confirm("Remove\n\t{}\n\n?".format(command), default=True):
utils.remove_command(cmd)
click.echo('Command successfully removed!')
elif matches == []:
click.echo('No saved commands matches the pattern {}'.format(pattern))
else:
click.echo("No commands to remove, Add one by 'keep new'. ") | python | def cli(ctx, pattern):
"""Deletes a saved command."""
matches = utils.grep_commands(pattern)
if matches:
selected = utils.select_command(matches)
if selected >= 0:
cmd, desc = matches[selected]
command = "$ {} :: {}".format(cmd, desc)
if click.confirm("Remove\n\t{}\n\n?".format(command), default=True):
utils.remove_command(cmd)
click.echo('Command successfully removed!')
elif matches == []:
click.echo('No saved commands matches the pattern {}'.format(pattern))
else:
click.echo("No commands to remove, Add one by 'keep new'. ") | [
"def",
"cli",
"(",
"ctx",
",",
"pattern",
")",
":",
"matches",
"=",
"utils",
".",
"grep_commands",
"(",
"pattern",
")",
"if",
"matches",
":",
"selected",
"=",
"utils",
".",
"select_command",
"(",
"matches",
")",
"if",
"selected",
">=",
"0",
":",
"cmd",... | Deletes a saved command. | [
"Deletes",
"a",
"saved",
"command",
"."
] | 2253c60b4024c902115ae0472227059caee4a5eb | https://github.com/OrkoHunter/keep/blob/2253c60b4024c902115ae0472227059caee4a5eb/keep/commands/cmd_rm.py#L8-L22 | train | 213,377 |
PMEAL/porespy | porespy/visualization/__plots__.py | show_mesh | def show_mesh(mesh):
r"""
Visualizes the mesh of a region as obtained by ``get_mesh`` function in
the ``metrics`` submodule.
Parameters
----------
mesh : tuple
A mesh returned by ``skimage.measure.marching_cubes``
Returns
-------
fig : Matplotlib figure
A handle to a matplotlib 3D axis
"""
lim_max = sp.amax(mesh.verts, axis=0)
lim_min = sp.amin(mesh.verts, axis=0)
# Display resulting triangular mesh using Matplotlib.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(mesh.verts[mesh.faces])
mesh.set_edgecolor('k')
ax.add_collection3d(mesh)
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
ax.set_zlabel("z-axis")
ax.set_xlim(lim_min[0], lim_max[0])
ax.set_ylim(lim_min[1], lim_max[1])
ax.set_zlim(lim_min[2], lim_max[2])
return fig | python | def show_mesh(mesh):
r"""
Visualizes the mesh of a region as obtained by ``get_mesh`` function in
the ``metrics`` submodule.
Parameters
----------
mesh : tuple
A mesh returned by ``skimage.measure.marching_cubes``
Returns
-------
fig : Matplotlib figure
A handle to a matplotlib 3D axis
"""
lim_max = sp.amax(mesh.verts, axis=0)
lim_min = sp.amin(mesh.verts, axis=0)
# Display resulting triangular mesh using Matplotlib.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(mesh.verts[mesh.faces])
mesh.set_edgecolor('k')
ax.add_collection3d(mesh)
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
ax.set_zlabel("z-axis")
ax.set_xlim(lim_min[0], lim_max[0])
ax.set_ylim(lim_min[1], lim_max[1])
ax.set_zlim(lim_min[2], lim_max[2])
return fig | [
"def",
"show_mesh",
"(",
"mesh",
")",
":",
"lim_max",
"=",
"sp",
".",
"amax",
"(",
"mesh",
".",
"verts",
",",
"axis",
"=",
"0",
")",
"lim_min",
"=",
"sp",
".",
"amin",
"(",
"mesh",
".",
"verts",
",",
"axis",
"=",
"0",
")",
"# Display resulting tria... | r"""
Visualizes the mesh of a region as obtained by ``get_mesh`` function in
the ``metrics`` submodule.
Parameters
----------
mesh : tuple
A mesh returned by ``skimage.measure.marching_cubes``
Returns
-------
fig : Matplotlib figure
A handle to a matplotlib 3D axis | [
"r",
"Visualizes",
"the",
"mesh",
"of",
"a",
"region",
"as",
"obtained",
"by",
"get_mesh",
"function",
"in",
"the",
"metrics",
"submodule",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/visualization/__plots__.py#L6-L40 | train | 213,378 |
PMEAL/porespy | porespy/metrics/__funcs__.py | representative_elementary_volume | def representative_elementary_volume(im, npoints=1000):
r"""
Calculates the porosity of the image as a function subdomain size. This
function extracts a specified number of subdomains of random size, then
finds their porosity.
Parameters
----------
im : ND-array
The image of the porous material
npoints : int
The number of randomly located and sized boxes to sample. The default
is 1000.
Returns
-------
result : named_tuple
A tuple containing the *volume* and *porosity* of each subdomain
tested in arrays ``npoints`` long. They can be accessed as
attributes of the tuple. They can be conveniently plotted
by passing the tuple to matplotlib's ``plot`` function using the
\* notation: ``plt.plot(*result, 'b.')``. The resulting plot is
similar to the sketch given by Bachmat and Bear [1]
Notes
-----
This function is frustratingly slow. Profiling indicates that all the time
is spent on scipy's ``sum`` function which is needed to sum the number of
void voxels (1's) in each subdomain.
Also, this function is a prime target for parallelization since the
``npoints`` are calculated independenlty.
References
----------
[1] Bachmat and Bear. On the Concept and Size of a Representative
Elementary Volume (Rev), Advances in Transport Phenomena in Porous Media
(1987)
"""
im_temp = sp.zeros_like(im)
crds = sp.array(sp.rand(npoints, im.ndim)*im.shape, dtype=int)
pads = sp.array(sp.rand(npoints)*sp.amin(im.shape)/2+10, dtype=int)
im_temp[tuple(crds.T)] = True
labels, N = spim.label(input=im_temp)
slices = spim.find_objects(input=labels)
porosity = sp.zeros(shape=(N,), dtype=float)
volume = sp.zeros(shape=(N,), dtype=int)
for i in tqdm(sp.arange(0, N)):
s = slices[i]
p = pads[i]
new_s = extend_slice(s, shape=im.shape, pad=p)
temp = im[new_s]
Vp = sp.sum(temp)
Vt = sp.size(temp)
porosity[i] = Vp/Vt
volume[i] = Vt
profile = namedtuple('profile', ('volume', 'porosity'))
profile.volume = volume
profile.porosity = porosity
def representative_elementary_volume(im, npoints=1000):
    r"""
    Calculates the porosity of the image as a function of subdomain size.

    This function extracts a specified number of subdomains of random size,
    then finds their porosity.

    Parameters
    ----------
    im : ND-array
        The image of the porous material
    npoints : int
        The number of randomly located and sized boxes to sample.  The
        default is 1000.

    Returns
    -------
    result : named_tuple
        A tuple containing the *volume* and *porosity* of each subdomain
        tested in arrays ``npoints`` long.  They can be accessed as
        attributes of the tuple.  They can be conveniently plotted by
        passing the tuple to matplotlib's ``plot`` function using the
        \* notation: ``plt.plot(*result, 'b.')``.  The resulting plot is
        similar to the sketch given by Bachmat and Bear [1]

    Notes
    -----
    This function is frustratingly slow.  Profiling indicates that all the
    time is spent summing the number of void voxels (1's) in each
    subdomain.

    Also, this function is a prime target for parallelization since the
    ``npoints`` are calculated independently.

    References
    ----------
    [1] Bachmat and Bear. On the Concept and Size of a Representative
    Elementary Volume (Rev), Advances in Transport Phenomena in Porous Media
    (1987)
    """
    im_temp = np.zeros_like(im)
    # Random subdomain centers, plus a random half-size ("pad") of at least
    # 10 voxels up to half the smallest image dimension.  numpy is used
    # directly since the scipy top-level aliases (sp.rand, sp.sum, ...) were
    # deprecated and removed from modern SciPy.
    crds = np.array(np.random.rand(npoints, im.ndim)*im.shape, dtype=int)
    pads = np.array(np.random.rand(npoints)*np.amin(im.shape)/2 + 10,
                    dtype=int)
    im_temp[tuple(crds.T)] = True
    labels, N = spim.label(input=im_temp)
    slices = spim.find_objects(input=labels)
    porosity = np.zeros(shape=(N,), dtype=float)
    volume = np.zeros(shape=(N,), dtype=int)
    for i in tqdm(np.arange(0, N)):
        s = slices[i]
        p = pads[i]
        # Grow the 1-voxel slice by the random pad to get the subdomain
        new_s = extend_slice(s, shape=im.shape, pad=p)
        temp = im[new_s]
        Vp = np.sum(temp)
        Vt = np.size(temp)
        porosity[i] = Vp/Vt
        volume[i] = Vt
    profile = namedtuple('profile', ('volume', 'porosity'))
    # Return a namedtuple *instance*: the previous code returned the class
    # itself with attributes patched on, which cannot be unpacked with
    # ``*result`` as the docstring promises
    return profile(volume, porosity)
"def",
"representative_elementary_volume",
"(",
"im",
",",
"npoints",
"=",
"1000",
")",
":",
"im_temp",
"=",
"sp",
".",
"zeros_like",
"(",
"im",
")",
"crds",
"=",
"sp",
".",
"array",
"(",
"sp",
".",
"rand",
"(",
"npoints",
",",
"im",
".",
"ndim",
")"... | r"""
Calculates the porosity of the image as a function subdomain size. This
function extracts a specified number of subdomains of random size, then
finds their porosity.
Parameters
----------
im : ND-array
The image of the porous material
npoints : int
The number of randomly located and sized boxes to sample. The default
is 1000.
Returns
-------
result : named_tuple
A tuple containing the *volume* and *porosity* of each subdomain
tested in arrays ``npoints`` long. They can be accessed as
attributes of the tuple. They can be conveniently plotted
by passing the tuple to matplotlib's ``plot`` function using the
\* notation: ``plt.plot(*result, 'b.')``. The resulting plot is
similar to the sketch given by Bachmat and Bear [1]
Notes
-----
This function is frustratingly slow. Profiling indicates that all the time
is spent on scipy's ``sum`` function which is needed to sum the number of
void voxels (1's) in each subdomain.
Also, this function is a prime target for parallelization since the
``npoints`` are calculated independenlty.
References
----------
[1] Bachmat and Bear. On the Concept and Size of a Representative
Elementary Volume (Rev), Advances in Transport Phenomena in Porous Media
(1987) | [
"r",
"Calculates",
"the",
"porosity",
"of",
"the",
"image",
"as",
"a",
"function",
"subdomain",
"size",
".",
"This",
"function",
"extracts",
"a",
"specified",
"number",
"of",
"subdomains",
"of",
"random",
"size",
"then",
"finds",
"their",
"porosity",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L15-L75 | train | 213,379 |
PMEAL/porespy | porespy/metrics/__funcs__.py | porosity_profile | def porosity_profile(im, axis):
r"""
Returns a porosity profile along the specified axis
Parameters
----------
im : ND-array
The volumetric image for which to calculate the porosity profile
axis : int
The axis (0, 1, or 2) along which to calculate the profile. For
instance, if `axis` is 0, then the porosity in each YZ plane is
calculated and returned as 1D array with 1 value for each X position.
Returns
-------
result : 1D-array
A 1D-array of porosity along the specified axis
"""
if axis >= im.ndim:
raise Exception('axis out of range')
im = np.atleast_3d(im)
a = set(range(im.ndim)).difference(set([axis]))
a1, a2 = a
prof = np.sum(np.sum(im, axis=a2), axis=a1)/(im.shape[a2]*im.shape[a1])
def porosity_profile(im, axis):
    r"""
    Returns a porosity profile along the specified axis

    Parameters
    ----------
    im : ND-array
        The volumetric image for which to calculate the porosity profile
    axis : int
        The axis (0, 1, or 2) along which to calculate the profile.  For
        instance, if `axis` is 0, then the porosity in each YZ plane is
        calculated and returned as 1D array with 1 value for each X position.

    Returns
    -------
    result : 1D-array
        A 1D-array of porosity along the specified axis
    """
    if axis >= im.ndim:
        raise Exception('axis out of range')
    im = np.atleast_3d(im)
    # Average over the two axes perpendicular to ``axis``; the mean of the
    # (0/1) image over a plane equals its void fraction
    others = tuple(ax for ax in range(im.ndim) if ax != axis)
    prof = np.mean(im, axis=others)
    return prof*100
"def",
"porosity_profile",
"(",
"im",
",",
"axis",
")",
":",
"if",
"axis",
">=",
"im",
".",
"ndim",
":",
"raise",
"Exception",
"(",
"'axis out of range'",
")",
"im",
"=",
"np",
".",
"atleast_3d",
"(",
"im",
")",
"a",
"=",
"set",
"(",
"range",
"(",
... | r"""
Returns a porosity profile along the specified axis
Parameters
----------
im : ND-array
The volumetric image for which to calculate the porosity profile
axis : int
The axis (0, 1, or 2) along which to calculate the profile. For
instance, if `axis` is 0, then the porosity in each YZ plane is
calculated and returned as 1D array with 1 value for each X position.
Returns
-------
result : 1D-array
A 1D-array of porosity along the specified axis | [
"r",
"Returns",
"a",
"porosity",
"profile",
"along",
"the",
"specified",
"axis"
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L78-L102 | train | 213,380 |
PMEAL/porespy | porespy/metrics/__funcs__.py | porosity | def porosity(im):
r"""
Calculates the porosity of an image assuming 1's are void space and 0's are
solid phase.
All other values are ignored, so this can also return the relative
fraction of a phase of interest.
Parameters
----------
im : ND-array
Image of the void space with 1's indicating void space (or True) and
0's indicating the solid phase (or False).
Returns
-------
porosity : float
Calculated as the sum of all 1's divided by the sum of all 1's and 0's.
See Also
--------
phase_fraction
Notes
-----
This function assumes void is represented by 1 and solid by 0, and all
other values are ignored. This is useful, for example, for images of
cylindrical cores, where all voxels outside the core are labelled with 2.
Alternatively, images can be processed with ``find_disconnected_voxels``
to get an image of only blind pores. This can then be added to the orignal
image such that blind pores have a value of 2, thus allowing the
calculation of accessible porosity, rather than overall porosity.
"""
im = sp.array(im, dtype=int)
Vp = sp.sum(im == 1)
Vs = sp.sum(im == 0)
e = Vp/(Vs + Vp)
def porosity(im):
    r"""
    Calculates the porosity of an image assuming 1's are void space and 0's
    are solid phase.

    All other values are ignored, so this can also return the relative
    fraction of a phase of interest.

    Parameters
    ----------
    im : ND-array
        Image of the void space with 1's indicating void space (or True) and
        0's indicating the solid phase (or False).

    Returns
    -------
    porosity : float
        Calculated as the sum of all 1's divided by the sum of all 1's and
        0's.

    See Also
    --------
    phase_fraction

    Notes
    -----
    This function assumes void is represented by 1 and solid by 0, and all
    other values are ignored.  This is useful, for example, for images of
    cylindrical cores, where all voxels outside the core are labelled with 2.

    Alternatively, images can be processed with ``find_disconnected_voxels``
    to get an image of only blind pores.  This can then be added to the
    original image such that blind pores have a value of 2, thus allowing the
    calculation of accessible porosity, rather than overall porosity.
    """
    # numpy is used directly: the scipy top-level aliases (sp.array, sp.sum)
    # were deprecated and removed from modern SciPy
    im = np.asarray(im, dtype=int)
    Vp = np.sum(im == 1)  # void voxels
    Vs = np.sum(im == 0)  # solid voxels; any other label is excluded
    e = Vp/(Vs + Vp)
    return e
"def",
"porosity",
"(",
"im",
")",
":",
"im",
"=",
"sp",
".",
"array",
"(",
"im",
",",
"dtype",
"=",
"int",
")",
"Vp",
"=",
"sp",
".",
"sum",
"(",
"im",
"==",
"1",
")",
"Vs",
"=",
"sp",
".",
"sum",
"(",
"im",
"==",
"0",
")",
"e",
"=",
"... | r"""
Calculates the porosity of an image assuming 1's are void space and 0's are
solid phase.
All other values are ignored, so this can also return the relative
fraction of a phase of interest.
Parameters
----------
im : ND-array
Image of the void space with 1's indicating void space (or True) and
0's indicating the solid phase (or False).
Returns
-------
porosity : float
Calculated as the sum of all 1's divided by the sum of all 1's and 0's.
See Also
--------
phase_fraction
Notes
-----
This function assumes void is represented by 1 and solid by 0, and all
other values are ignored. This is useful, for example, for images of
cylindrical cores, where all voxels outside the core are labelled with 2.
Alternatively, images can be processed with ``find_disconnected_voxels``
to get an image of only blind pores. This can then be added to the orignal
image such that blind pores have a value of 2, thus allowing the
calculation of accessible porosity, rather than overall porosity. | [
"r",
"Calculates",
"the",
"porosity",
"of",
"an",
"image",
"assuming",
"1",
"s",
"are",
"void",
"space",
"and",
"0",
"s",
"are",
"solid",
"phase",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L194-L233 | train | 213,381 |
PMEAL/porespy | porespy/metrics/__funcs__.py | _radial_profile | def _radial_profile(autocorr, r_max, nbins=100):
r"""
Helper functions to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
Returns
-------
result : named_tuple
A named tupling containing an array of ``bins`` of radial position
and an array of ``counts`` in each bin.
"""
if len(autocorr.shape) == 2:
adj = sp.reshape(autocorr.shape, [2, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2)
elif len(autocorr.shape) == 3:
adj = sp.reshape(autocorr.shape, [3, 1, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2)
else:
raise Exception('Image dimensions must be 2 or 3')
bin_size = np.int(np.ceil(r_max/nbins))
bins = np.arange(bin_size, r_max, step=bin_size)
radial_sum = np.zeros_like(bins)
for i, r in enumerate(bins):
# Generate Radial Mask from dt using bins
mask = (dt <= r) * (dt > (r-bin_size))
radial_sum[i] = np.sum(autocorr[mask])/np.sum(mask)
# Return normalized bin and radially summed autoc
norm_autoc_radial = radial_sum/np.max(autocorr)
tpcf = namedtuple('two_point_correlation_function',
('distance', 'probability'))
return tpcf(bins, norm_autoc_radial) | python | def _radial_profile(autocorr, r_max, nbins=100):
r"""
Helper functions to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
Returns
-------
result : named_tuple
A named tupling containing an array of ``bins`` of radial position
and an array of ``counts`` in each bin.
"""
if len(autocorr.shape) == 2:
adj = sp.reshape(autocorr.shape, [2, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2)
elif len(autocorr.shape) == 3:
adj = sp.reshape(autocorr.shape, [3, 1, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2)
else:
raise Exception('Image dimensions must be 2 or 3')
bin_size = np.int(np.ceil(r_max/nbins))
bins = np.arange(bin_size, r_max, step=bin_size)
radial_sum = np.zeros_like(bins)
for i, r in enumerate(bins):
# Generate Radial Mask from dt using bins
mask = (dt <= r) * (dt > (r-bin_size))
radial_sum[i] = np.sum(autocorr[mask])/np.sum(mask)
# Return normalized bin and radially summed autoc
norm_autoc_radial = radial_sum/np.max(autocorr)
tpcf = namedtuple('two_point_correlation_function',
('distance', 'probability'))
return tpcf(bins, norm_autoc_radial) | [
"def",
"_radial_profile",
"(",
"autocorr",
",",
"r_max",
",",
"nbins",
"=",
"100",
")",
":",
"if",
"len",
"(",
"autocorr",
".",
"shape",
")",
"==",
"2",
":",
"adj",
"=",
"sp",
".",
"reshape",
"(",
"autocorr",
".",
"shape",
",",
"[",
"2",
",",
"1"... | r"""
Helper functions to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
Returns
-------
result : named_tuple
A named tupling containing an array of ``bins`` of radial position
and an array of ``counts`` in each bin. | [
"r",
"Helper",
"functions",
"to",
"calculate",
"the",
"radial",
"profile",
"of",
"the",
"autocorrelation",
"Masks",
"the",
"image",
"in",
"radial",
"segments",
"from",
"the",
"center",
"and",
"averages",
"the",
"values",
"The",
"distance",
"values",
"are",
"no... | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L294-L334 | train | 213,382 |
PMEAL/porespy | porespy/metrics/__funcs__.py | two_point_correlation_fft | def two_point_correlation_fft(im):
r"""
Calculates the two-point correlation function using fourier transforms
Parameters
----------
im : ND-array
The image of the void space on which the 2-point correlation is desired
Returns
-------
result : named_tuple
A tuple containing the x and y data for plotting the two-point
correlation function, using the *args feature of matplotlib's plot
function. The x array is the distances between points and the y array
is corresponding probabilities that points of a given distance both
lie in the void space.
Notes
-----
The fourier transform approach utilizes the fact that the autocorrelation
function is the inverse FT of the power spectrum density.
For background read the Scipy fftpack docs and for a good explanation see:
http://www.ucl.ac.uk/~ucapikr/projects/KamilaSuankulova_BSc_Project.pdf
"""
# Calculate half lengths of the image
hls = (np.ceil(np.shape(im))/2).astype(int)
# Fourier Transform and shift image
F = sp_ft.ifftshift(sp_ft.fftn(sp_ft.fftshift(im)))
# Compute Power Spectrum
P = sp.absolute(F**2)
# Auto-correlation is inverse of Power Spectrum
autoc = sp.absolute(sp_ft.ifftshift(sp_ft.ifftn(sp_ft.fftshift(P))))
tpcf = _radial_profile(autoc, r_max=np.min(hls))
def two_point_correlation_fft(im):
    r"""
    Calculates the two-point correlation function using fourier transforms

    Parameters
    ----------
    im : ND-array
        The image of the void space on which the 2-point correlation is
        desired

    Returns
    -------
    result : named_tuple
        A tuple containing the x and y data for plotting the two-point
        correlation function, using the *args feature of matplotlib's plot
        function.  The x array is the distances between points and the y
        array is corresponding probabilities that points of a given distance
        both lie in the void space.

    Notes
    -----
    The fourier transform approach utilizes the fact that the
    autocorrelation function is the inverse FT of the power spectrum density
    (Wiener-Khinchin theorem).

    For background read the Scipy fftpack docs and for a good explanation
    see: http://www.ucl.ac.uk/~ucapikr/projects/KamilaSuankulova_BSc_Project.pdf
    """
    # Half lengths of the image bound the radius of the radial average
    hls = (np.ceil(np.shape(im))/2).astype(int)
    # Fourier transform and shift image
    F = sp_ft.ifftshift(sp_ft.fftn(sp_ft.fftshift(im)))
    # Compute power spectrum (np.absolute: the scipy alias sp.absolute was
    # deprecated and removed from modern SciPy)
    P = np.absolute(F**2)
    # Auto-correlation is the inverse FT of the power spectrum
    autoc = np.absolute(sp_ft.ifftshift(sp_ft.ifftn(sp_ft.fftshift(P))))
    tpcf = _radial_profile(autoc, r_max=np.min(hls))
    return tpcf
"def",
"two_point_correlation_fft",
"(",
"im",
")",
":",
"# Calculate half lengths of the image",
"hls",
"=",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"shape",
"(",
"im",
")",
")",
"/",
"2",
")",
".",
"astype",
"(",
"int",
")",
"# Fourier Transform and shift ... | r"""
Calculates the two-point correlation function using fourier transforms
Parameters
----------
im : ND-array
The image of the void space on which the 2-point correlation is desired
Returns
-------
result : named_tuple
A tuple containing the x and y data for plotting the two-point
correlation function, using the *args feature of matplotlib's plot
function. The x array is the distances between points and the y array
is corresponding probabilities that points of a given distance both
lie in the void space.
Notes
-----
The fourier transform approach utilizes the fact that the autocorrelation
function is the inverse FT of the power spectrum density.
For background read the Scipy fftpack docs and for a good explanation see:
http://www.ucl.ac.uk/~ucapikr/projects/KamilaSuankulova_BSc_Project.pdf | [
"r",
"Calculates",
"the",
"two",
"-",
"point",
"correlation",
"function",
"using",
"fourier",
"transforms"
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L337-L371 | train | 213,383 |
PMEAL/porespy | porespy/metrics/__funcs__.py | pore_size_distribution | def pore_size_distribution(im, bins=10, log=True, voxel_size=1):
r"""
Calculate a pore-size distribution based on the image produced by the
``porosimetry`` or ``local_thickness`` functions.
Parameters
----------
im : ND-array
The array of containing the sizes of the largest sphere that overlaps
each voxel. Obtained from either ``porosimetry`` or
``local_thickness``.
bins : scalar or array_like
Either an array of bin sizes to use, or the number of bins that should
be automatically generated that span the data range.
log : boolean
If ``True`` (default) the size data is converted to log (base-10)
values before processing. This can help
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A named-tuple containing several values:
*R* or *logR* - radius, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*satn* - phase saturation in differential form. For the cumulative
saturation, just use *cfd* which is already normalized to 1.
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
Notes
-----
(1) To ensure the returned values represent actual sizes be sure to scale
the distance transform by the voxel size first (``dt *= voxel_size``)
plt.bar(psd.R, psd.satn, width=psd.bin_widths, edgecolor='k')
"""
im = im.flatten()
vals = im[im > 0]*voxel_size
if log:
vals = sp.log10(vals)
h = _parse_histogram(sp.histogram(vals, bins=bins, density=True))
psd = namedtuple('pore_size_distribution',
(log*'log' + 'R', 'pdf', 'cdf', 'satn',
'bin_centers', 'bin_edges', 'bin_widths'))
return psd(h.bin_centers, h.pdf, h.cdf, h.relfreq,
def pore_size_distribution(im, bins=10, log=True, voxel_size=1):
    r"""
    Calculate a pore-size distribution based on the image produced by the
    ``porosimetry`` or ``local_thickness`` functions.

    Parameters
    ----------
    im : ND-array
        The array containing the sizes of the largest sphere that overlaps
        each voxel.  Obtained from either ``porosimetry`` or
        ``local_thickness``.
    bins : scalar or array_like
        Either an array of bin sizes to use, or the number of bins that
        should be automatically generated that span the data range.
    log : boolean
        If ``True`` (default) the size data is converted to log (base-10)
        values before processing.  This can help when the sizes span several
        orders of magnitude.
    voxel_size : scalar
        The size of a voxel side in preferred units.  The default is 1, so
        the user can apply the scaling to the returned results after the
        fact.

    Returns
    -------
    result : named_tuple
        A named-tuple containing several values:

        *R* or *logR* - radius, equivalent to ``bin_centers``

        *pdf* - probability density function

        *cdf* - cumulative density function

        *satn* - phase saturation in differential form.  For the cumulative
        saturation, just use *cdf* which is already normalized to 1.

        *bin_centers* - the center point of each bin

        *bin_edges* - locations of bin divisions, including 1 more value
        than the number of bins

        *bin_widths* - useful for passing to the ``width`` argument of
        ``matplotlib.pyplot.bar``

    Notes
    -----
    (1) To ensure the returned values represent actual sizes be sure to
    scale the distance transform by the voxel size first
    (``dt *= voxel_size``)

    plt.bar(psd.R, psd.satn, width=psd.bin_widths, edgecolor='k')
    """
    im = im.flatten()
    # Only labelled (non-zero) voxels carry a size; scale to physical units
    vals = im[im > 0]*voxel_size
    if log:
        vals = np.log10(vals)
    # numpy is used directly: the scipy aliases sp.log10/sp.histogram were
    # deprecated and removed from modern SciPy
    h = _parse_histogram(np.histogram(vals, bins=bins, density=True))
    # The first field is named ``logR`` when log-scaling was applied
    psd = namedtuple('pore_size_distribution',
                     (log*'log' + 'R', 'pdf', 'cdf', 'satn',
                      'bin_centers', 'bin_edges', 'bin_widths'))
    return psd(h.bin_centers, h.pdf, h.cdf, h.relfreq,
               h.bin_centers, h.bin_edges, h.bin_widths)
"def",
"pore_size_distribution",
"(",
"im",
",",
"bins",
"=",
"10",
",",
"log",
"=",
"True",
",",
"voxel_size",
"=",
"1",
")",
":",
"im",
"=",
"im",
".",
"flatten",
"(",
")",
"vals",
"=",
"im",
"[",
"im",
">",
"0",
"]",
"*",
"voxel_size",
"if",
... | r"""
Calculate a pore-size distribution based on the image produced by the
``porosimetry`` or ``local_thickness`` functions.
Parameters
----------
im : ND-array
The array of containing the sizes of the largest sphere that overlaps
each voxel. Obtained from either ``porosimetry`` or
``local_thickness``.
bins : scalar or array_like
Either an array of bin sizes to use, or the number of bins that should
be automatically generated that span the data range.
log : boolean
If ``True`` (default) the size data is converted to log (base-10)
values before processing. This can help
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A named-tuple containing several values:
*R* or *logR* - radius, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*satn* - phase saturation in differential form. For the cumulative
saturation, just use *cfd* which is already normalized to 1.
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
Notes
-----
(1) To ensure the returned values represent actual sizes be sure to scale
the distance transform by the voxel size first (``dt *= voxel_size``)
plt.bar(psd.R, psd.satn, width=psd.bin_widths, edgecolor='k') | [
"r",
"Calculate",
"a",
"pore",
"-",
"size",
"distribution",
"based",
"on",
"the",
"image",
"produced",
"by",
"the",
"porosimetry",
"or",
"local_thickness",
"functions",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L374-L434 | train | 213,384 |
PMEAL/porespy | porespy/metrics/__funcs__.py | chord_counts | def chord_counts(im):
r"""
Finds the length of each chord in the supplied image and returns a list
of their individual sizes
Parameters
----------
im : ND-array
An image containing chords drawn in the void space.
Returns
-------
result : 1D-array
A 1D array with one element for each chord, containing its length.
Notes
----
The returned array can be passed to ``plt.hist`` to plot the histogram,
or to ``sp.histogram`` to get the histogram data directly. Another useful
function is ``sp.bincount`` which gives the number of chords of each
length in a format suitable for ``plt.plot``.
"""
labels, N = spim.label(im > 0)
props = regionprops(labels, coordinates='xy')
chord_lens = sp.array([i.filled_area for i in props])
def chord_counts(im):
    r"""
    Finds the length of each chord in the supplied image and returns a list
    of their individual sizes

    Parameters
    ----------
    im : ND-array
        An image containing chords drawn in the void space.

    Returns
    -------
    result : 1D-array
        A 1D array with one element for each chord, containing its length.

    Notes
    ----
    The returned array can be passed to ``plt.hist`` to plot the histogram,
    or to ``sp.histogram`` to get the histogram data directly. Another
    useful function is ``sp.bincount`` which gives the number of chords of
    each length in a format suitable for ``plt.plot``.
    """
    labels, N = spim.label(im > 0)
    # Chords are solid runs of voxels with no internal holes, so the voxel
    # count of each labelled region equals its (filled) length.  Counting
    # with bincount avoids skimage's regionprops, whose ``coordinates``
    # keyword was removed in modern scikit-image (the old call raised
    # TypeError there).
    chord_lens = np.bincount(labels.ravel())[1:]
    return chord_lens
"def",
"chord_counts",
"(",
"im",
")",
":",
"labels",
",",
"N",
"=",
"spim",
".",
"label",
"(",
"im",
">",
"0",
")",
"props",
"=",
"regionprops",
"(",
"labels",
",",
"coordinates",
"=",
"'xy'",
")",
"chord_lens",
"=",
"sp",
".",
"array",
"(",
"[",
... | r"""
Finds the length of each chord in the supplied image and returns a list
of their individual sizes
Parameters
----------
im : ND-array
An image containing chords drawn in the void space.
Returns
-------
result : 1D-array
A 1D array with one element for each chord, containing its length.
Notes
----
The returned array can be passed to ``plt.hist`` to plot the histogram,
or to ``sp.histogram`` to get the histogram data directly. Another useful
function is ``sp.bincount`` which gives the number of chords of each
length in a format suitable for ``plt.plot``. | [
"r",
"Finds",
"the",
"length",
"of",
"each",
"chord",
"in",
"the",
"supplied",
"image",
"and",
"returns",
"a",
"list",
"of",
"their",
"individual",
"sizes"
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L451-L476 | train | 213,385 |
PMEAL/porespy | porespy/metrics/__funcs__.py | chord_length_distribution | def chord_length_distribution(im, bins=None, log=False, voxel_size=1,
normalization='count'):
r"""
Determines the distribution of chord lengths in an image containing chords.
Parameters
----------
im : ND-image
An image with chords drawn in the pore space, as produced by
``apply_chords`` or ``apply_chords_3d``.
``im`` can be either boolean, in which case each chord will be
identified using ``scipy.ndimage.label``, or numerical values in which
case it is assumed that chords have already been identifed and labeled.
In both cases, the size of each chord will be computed as the number
of voxels belonging to each labelled region.
bins : scalar or array_like
If a scalar is given it is interpreted as the number of bins to use,
and if an array is given they are used as the bins directly.
log : Boolean
If true, the logarithm of the chord lengths will be used, which can
make the data more clear.
normalization : string
Indicates how to normalize the bin heights. Options are:
*'count' or 'number'* - (default) This simply counts the number of
chords in each bin in the normal sense of a histogram. This is the
rigorous definition according to Torquato [1].
*'length'* - This multiplies the number of chords in each bin by the
chord length (i.e. bin size). The normalization scheme accounts for
the fact that long chords are less frequent than shorert chords,
thus giving a more balanced distribution.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A tuple containing the following elements, which can be retrieved by
attribute name:
*L* or *logL* - chord length, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*relfreq* - relative frequency chords in each bin. The sum of all bin
heights is 1.0. For the cumulative relativce, use *cdf* which is
already normalized to 1.
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
References
----------
[1] Torquato, S. Random Heterogeneous Materials: Mircostructure and
Macroscopic Properties. Springer, New York (2002) - See page 45 & 292
"""
x = chord_counts(im)
if bins is None:
bins = sp.array(range(0, x.max()+2))*voxel_size
x = x*voxel_size
if log:
x = sp.log10(x)
if normalization == 'length':
h = list(sp.histogram(x, bins=bins, density=False))
h[0] = h[0]*(h[1][1:]+h[1][:-1])/2 # Scale bin heigths by length
h[0] = h[0]/h[0].sum()/(h[1][1:]-h[1][:-1]) # Normalize h[0] manually
elif normalization in ['number', 'count']:
h = sp.histogram(x, bins=bins, density=True)
else:
raise Exception('Unsupported normalization:', normalization)
h = _parse_histogram(h)
cld = namedtuple('chord_length_distribution',
(log*'log' + 'L', 'pdf', 'cdf', 'relfreq',
'bin_centers', 'bin_edges', 'bin_widths'))
return cld(h.bin_centers, h.pdf, h.cdf, h.relfreq,
h.bin_centers, h.bin_edges, h.bin_widths) | python | def chord_length_distribution(im, bins=None, log=False, voxel_size=1,
normalization='count'):
r"""
Determines the distribution of chord lengths in an image containing chords.
Parameters
----------
im : ND-image
An image with chords drawn in the pore space, as produced by
``apply_chords`` or ``apply_chords_3d``.
``im`` can be either boolean, in which case each chord will be
identified using ``scipy.ndimage.label``, or numerical values in which
case it is assumed that chords have already been identifed and labeled.
In both cases, the size of each chord will be computed as the number
of voxels belonging to each labelled region.
bins : scalar or array_like
If a scalar is given it is interpreted as the number of bins to use,
and if an array is given they are used as the bins directly.
log : Boolean
If true, the logarithm of the chord lengths will be used, which can
make the data more clear.
normalization : string
Indicates how to normalize the bin heights. Options are:
*'count' or 'number'* - (default) This simply counts the number of
chords in each bin in the normal sense of a histogram. This is the
rigorous definition according to Torquato [1].
*'length'* - This multiplies the number of chords in each bin by the
chord length (i.e. bin size). The normalization scheme accounts for
the fact that long chords are less frequent than shorert chords,
thus giving a more balanced distribution.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A tuple containing the following elements, which can be retrieved by
attribute name:
*L* or *logL* - chord length, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*relfreq* - relative frequency chords in each bin. The sum of all bin
heights is 1.0. For the cumulative relativce, use *cdf* which is
already normalized to 1.
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
References
----------
[1] Torquato, S. Random Heterogeneous Materials: Mircostructure and
Macroscopic Properties. Springer, New York (2002) - See page 45 & 292
"""
x = chord_counts(im)
if bins is None:
bins = sp.array(range(0, x.max()+2))*voxel_size
x = x*voxel_size
if log:
x = sp.log10(x)
if normalization == 'length':
h = list(sp.histogram(x, bins=bins, density=False))
h[0] = h[0]*(h[1][1:]+h[1][:-1])/2 # Scale bin heigths by length
h[0] = h[0]/h[0].sum()/(h[1][1:]-h[1][:-1]) # Normalize h[0] manually
elif normalization in ['number', 'count']:
h = sp.histogram(x, bins=bins, density=True)
else:
raise Exception('Unsupported normalization:', normalization)
h = _parse_histogram(h)
cld = namedtuple('chord_length_distribution',
(log*'log' + 'L', 'pdf', 'cdf', 'relfreq',
'bin_centers', 'bin_edges', 'bin_widths'))
return cld(h.bin_centers, h.pdf, h.cdf, h.relfreq,
h.bin_centers, h.bin_edges, h.bin_widths) | [
"def",
"chord_length_distribution",
"(",
"im",
",",
"bins",
"=",
"None",
",",
"log",
"=",
"False",
",",
"voxel_size",
"=",
"1",
",",
"normalization",
"=",
"'count'",
")",
":",
"x",
"=",
"chord_counts",
"(",
"im",
")",
"if",
"bins",
"is",
"None",
":",
... | r"""
Determines the distribution of chord lengths in an image containing chords.
Parameters
----------
im : ND-image
An image with chords drawn in the pore space, as produced by
``apply_chords`` or ``apply_chords_3d``.
``im`` can be either boolean, in which case each chord will be
identified using ``scipy.ndimage.label``, or numerical values in which
case it is assumed that chords have already been identifed and labeled.
In both cases, the size of each chord will be computed as the number
of voxels belonging to each labelled region.
bins : scalar or array_like
If a scalar is given it is interpreted as the number of bins to use,
and if an array is given they are used as the bins directly.
log : Boolean
If true, the logarithm of the chord lengths will be used, which can
make the data more clear.
normalization : string
Indicates how to normalize the bin heights. Options are:
*'count' or 'number'* - (default) This simply counts the number of
chords in each bin in the normal sense of a histogram. This is the
rigorous definition according to Torquato [1].
*'length'* - This multiplies the number of chords in each bin by the
chord length (i.e. bin size). The normalization scheme accounts for
the fact that long chords are less frequent than shorert chords,
thus giving a more balanced distribution.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A tuple containing the following elements, which can be retrieved by
attribute name:
*L* or *logL* - chord length, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*relfreq* - relative frequency chords in each bin. The sum of all bin
heights is 1.0. For the cumulative relativce, use *cdf* which is
already normalized to 1.
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
References
----------
[1] Torquato, S. Random Heterogeneous Materials: Mircostructure and
Macroscopic Properties. Springer, New York (2002) - See page 45 & 292 | [
"r",
"Determines",
"the",
"distribution",
"of",
"chord",
"lengths",
"in",
"an",
"image",
"containing",
"chords",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L523-L612 | train | 213,386 |
PMEAL/porespy | porespy/metrics/__funcs__.py | region_interface_areas | def region_interface_areas(regions, areas, voxel_size=1, strel=None):
r"""
Calculates the interfacial area between all pairs of adjecent regions
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
areas : array_like
A list containing the areas of each regions, as determined by
``region_surface_area``. Note that the region number and list index
are offset by 1, such that the area for region 1 is stored in
``areas[0]``.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : named_tuple
A named-tuple containing 2 arrays. ``conns`` holds the connectivity
information and ``area`` holds the result for each pair. ``conns`` is
a N-regions by 2 array with each row containing the region number of an
adjacent pair of regions. For instance, if ``conns[0, 0]`` is 0 and
``conns[0, 1]`` is 5, then row 0 of ``area`` contains the interfacial
area shared by regions 0 and 5.
"""
print('_'*60)
print('Finding interfacial areas between each region')
from skimage.morphology import disk, square, ball, cube
im = regions.copy()
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if im.ndim == 2:
cube = square
ball = disk
# Get 'slices' into im for each region
slices = spim.find_objects(im)
# Initialize arrays
Ps = sp.arange(1, sp.amax(im)+1)
sa = sp.zeros_like(Ps, dtype=float)
sa_combined = [] # Difficult to preallocate since number of conns unknown
cn = []
# Start extracting area from im
for i in tqdm(Ps):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
sa[reg] = areas[reg]
im_w_throats = spim.binary_dilation(input=mask_im,
structure=ball(1))
im_w_throats = im_w_throats*sub_im
Pn = sp.unique(im_w_throats)[1:] - 1
for j in Pn:
if j > reg:
cn.append([reg, j])
merged_region = im[(min(slices[reg][0].start,
slices[j][0].start)):
max(slices[reg][0].stop,
slices[j][0].stop),
(min(slices[reg][1].start,
slices[j][1].start)):
max(slices[reg][1].stop,
slices[j][1].stop)]
merged_region = ((merged_region == reg + 1) +
(merged_region == j + 1))
mesh = mesh_region(region=merged_region, strel=strel)
sa_combined.append(mesh_surface_area(mesh))
# Interfacial area calculation
cn = sp.array(cn)
ia = 0.5 * (sa[cn[:, 0]] + sa[cn[:, 1]] - sa_combined)
ia[ia <= 0] = 1
result = namedtuple('interfacial_areas', ('conns', 'area'))
result.conns = cn
result.area = ia * voxel_size**2
return result | python | def region_interface_areas(regions, areas, voxel_size=1, strel=None):
r"""
Calculates the interfacial area between all pairs of adjecent regions
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
areas : array_like
A list containing the areas of each regions, as determined by
``region_surface_area``. Note that the region number and list index
are offset by 1, such that the area for region 1 is stored in
``areas[0]``.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : named_tuple
A named-tuple containing 2 arrays. ``conns`` holds the connectivity
information and ``area`` holds the result for each pair. ``conns`` is
a N-regions by 2 array with each row containing the region number of an
adjacent pair of regions. For instance, if ``conns[0, 0]`` is 0 and
``conns[0, 1]`` is 5, then row 0 of ``area`` contains the interfacial
area shared by regions 0 and 5.
"""
print('_'*60)
print('Finding interfacial areas between each region')
from skimage.morphology import disk, square, ball, cube
im = regions.copy()
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if im.ndim == 2:
cube = square
ball = disk
# Get 'slices' into im for each region
slices = spim.find_objects(im)
# Initialize arrays
Ps = sp.arange(1, sp.amax(im)+1)
sa = sp.zeros_like(Ps, dtype=float)
sa_combined = [] # Difficult to preallocate since number of conns unknown
cn = []
# Start extracting area from im
for i in tqdm(Ps):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
sa[reg] = areas[reg]
im_w_throats = spim.binary_dilation(input=mask_im,
structure=ball(1))
im_w_throats = im_w_throats*sub_im
Pn = sp.unique(im_w_throats)[1:] - 1
for j in Pn:
if j > reg:
cn.append([reg, j])
merged_region = im[(min(slices[reg][0].start,
slices[j][0].start)):
max(slices[reg][0].stop,
slices[j][0].stop),
(min(slices[reg][1].start,
slices[j][1].start)):
max(slices[reg][1].stop,
slices[j][1].stop)]
merged_region = ((merged_region == reg + 1) +
(merged_region == j + 1))
mesh = mesh_region(region=merged_region, strel=strel)
sa_combined.append(mesh_surface_area(mesh))
# Interfacial area calculation
cn = sp.array(cn)
ia = 0.5 * (sa[cn[:, 0]] + sa[cn[:, 1]] - sa_combined)
ia[ia <= 0] = 1
result = namedtuple('interfacial_areas', ('conns', 'area'))
result.conns = cn
result.area = ia * voxel_size**2
return result | [
"def",
"region_interface_areas",
"(",
"regions",
",",
"areas",
",",
"voxel_size",
"=",
"1",
",",
"strel",
"=",
"None",
")",
":",
"print",
"(",
"'_'",
"*",
"60",
")",
"print",
"(",
"'Finding interfacial areas between each region'",
")",
"from",
"skimage",
".",
... | r"""
Calculates the interfacial area between all pairs of adjecent regions
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
areas : array_like
A list containing the areas of each regions, as determined by
``region_surface_area``. Note that the region number and list index
are offset by 1, such that the area for region 1 is stored in
``areas[0]``.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : named_tuple
A named-tuple containing 2 arrays. ``conns`` holds the connectivity
information and ``area`` holds the result for each pair. ``conns`` is
a N-regions by 2 array with each row containing the region number of an
adjacent pair of regions. For instance, if ``conns[0, 0]`` is 0 and
``conns[0, 1]`` is 5, then row 0 of ``area`` contains the interfacial
area shared by regions 0 and 5. | [
"r",
"Calculates",
"the",
"interfacial",
"area",
"between",
"all",
"pairs",
"of",
"adjecent",
"regions"
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L615-L703 | train | 213,387 |
PMEAL/porespy | porespy/metrics/__funcs__.py | region_surface_areas | def region_surface_areas(regions, voxel_size=1, strel=None):
r"""
Extracts the surface area of each region in a labeled image.
Optionally, it can also find the the interfacial area between all
adjoining regions.
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : list
A list containing the surface area of each region, offset by 1, such
that the surface area of region 1 is stored in element 0 of the list.
"""
print('_'*60)
print('Finding surface area of each region')
im = regions.copy()
# Get 'slices' into im for each pore region
slices = spim.find_objects(im)
# Initialize arrays
Ps = sp.arange(1, sp.amax(im)+1)
sa = sp.zeros_like(Ps, dtype=float)
# Start extracting marching cube area from im
for i in tqdm(Ps):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
mesh = mesh_region(region=mask_im, strel=strel)
sa[reg] = mesh_surface_area(mesh)
result = sa * voxel_size**2
return result | python | def region_surface_areas(regions, voxel_size=1, strel=None):
r"""
Extracts the surface area of each region in a labeled image.
Optionally, it can also find the the interfacial area between all
adjoining regions.
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : list
A list containing the surface area of each region, offset by 1, such
that the surface area of region 1 is stored in element 0 of the list.
"""
print('_'*60)
print('Finding surface area of each region')
im = regions.copy()
# Get 'slices' into im for each pore region
slices = spim.find_objects(im)
# Initialize arrays
Ps = sp.arange(1, sp.amax(im)+1)
sa = sp.zeros_like(Ps, dtype=float)
# Start extracting marching cube area from im
for i in tqdm(Ps):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
mesh = mesh_region(region=mask_im, strel=strel)
sa[reg] = mesh_surface_area(mesh)
result = sa * voxel_size**2
return result | [
"def",
"region_surface_areas",
"(",
"regions",
",",
"voxel_size",
"=",
"1",
",",
"strel",
"=",
"None",
")",
":",
"print",
"(",
"'_'",
"*",
"60",
")",
"print",
"(",
"'Finding surface area of each region'",
")",
"im",
"=",
"regions",
".",
"copy",
"(",
")",
... | r"""
Extracts the surface area of each region in a labeled image.
Optionally, it can also find the the interfacial area between all
adjoining regions.
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : list
A list containing the surface area of each region, offset by 1, such
that the surface area of region 1 is stored in element 0 of the list. | [
"r",
"Extracts",
"the",
"surface",
"area",
"of",
"each",
"region",
"in",
"a",
"labeled",
"image",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L706-L754 | train | 213,388 |
PMEAL/porespy | porespy/metrics/__funcs__.py | mesh_surface_area | def mesh_surface_area(mesh=None, verts=None, faces=None):
r"""
Calculates the surface area of a meshed region
Parameters
----------
mesh : tuple
The tuple returned from the ``mesh_region`` function
verts : array
An N-by-ND array containing the coordinates of each mesh vertex
faces : array
An N-by-ND array indicating which elements in ``verts`` form a mesh
element.
Returns
-------
surface_area : float
The surface area of the mesh, calculated by
``skimage.measure.mesh_surface_area``
Notes
-----
This function simply calls ``scikit-image.measure.mesh_surface_area``, but
it allows for the passing of the ``mesh`` tuple returned by the
``mesh_region`` function, entirely for convenience.
"""
if mesh:
verts = mesh.verts
faces = mesh.faces
else:
if (verts is None) or (faces is None):
raise Exception('Either mesh or verts and faces must be given')
surface_area = measure.mesh_surface_area(verts, faces)
return surface_area | python | def mesh_surface_area(mesh=None, verts=None, faces=None):
r"""
Calculates the surface area of a meshed region
Parameters
----------
mesh : tuple
The tuple returned from the ``mesh_region`` function
verts : array
An N-by-ND array containing the coordinates of each mesh vertex
faces : array
An N-by-ND array indicating which elements in ``verts`` form a mesh
element.
Returns
-------
surface_area : float
The surface area of the mesh, calculated by
``skimage.measure.mesh_surface_area``
Notes
-----
This function simply calls ``scikit-image.measure.mesh_surface_area``, but
it allows for the passing of the ``mesh`` tuple returned by the
``mesh_region`` function, entirely for convenience.
"""
if mesh:
verts = mesh.verts
faces = mesh.faces
else:
if (verts is None) or (faces is None):
raise Exception('Either mesh or verts and faces must be given')
surface_area = measure.mesh_surface_area(verts, faces)
return surface_area | [
"def",
"mesh_surface_area",
"(",
"mesh",
"=",
"None",
",",
"verts",
"=",
"None",
",",
"faces",
"=",
"None",
")",
":",
"if",
"mesh",
":",
"verts",
"=",
"mesh",
".",
"verts",
"faces",
"=",
"mesh",
".",
"faces",
"else",
":",
"if",
"(",
"verts",
"is",
... | r"""
Calculates the surface area of a meshed region
Parameters
----------
mesh : tuple
The tuple returned from the ``mesh_region`` function
verts : array
An N-by-ND array containing the coordinates of each mesh vertex
faces : array
An N-by-ND array indicating which elements in ``verts`` form a mesh
element.
Returns
-------
surface_area : float
The surface area of the mesh, calculated by
``skimage.measure.mesh_surface_area``
Notes
-----
This function simply calls ``scikit-image.measure.mesh_surface_area``, but
it allows for the passing of the ``mesh`` tuple returned by the
``mesh_region`` function, entirely for convenience. | [
"r",
"Calculates",
"the",
"surface",
"area",
"of",
"a",
"meshed",
"region"
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L757-L790 | train | 213,389 |
PMEAL/porespy | porespy/visualization/__views__.py | show_planes | def show_planes(im):
r"""
Create a quick montage showing a 3D image in all three directions
Parameters
----------
im : ND-array
A 3D image of the porous material
Returns
-------
image : ND-array
A 2D array containing the views. This single image can be viewed using
``matplotlib.pyplot.imshow``.
"""
if sp.squeeze(im.ndim) < 3:
raise Exception('This view is only necessary for 3D images')
x, y, z = (sp.array(im.shape)/2).astype(int)
im_xy = im[:, :, z]
im_xz = im[:, y, :]
im_yz = sp.rot90(im[x, :, :])
new_x = im_xy.shape[0] + im_yz.shape[0] + 10
new_y = im_xy.shape[1] + im_xz.shape[1] + 10
new_im = sp.zeros([new_x + 20, new_y + 20], dtype=im.dtype)
# Add xy image to upper left corner
new_im[10:im_xy.shape[0]+10,
10:im_xy.shape[1]+10] = im_xy
# Add xz image to lower left coner
x_off = im_xy.shape[0]+20
y_off = im_xy.shape[1]+20
new_im[10:10 + im_xz.shape[0],
y_off:y_off + im_xz.shape[1]] = im_xz
new_im[x_off:x_off + im_yz.shape[0],
10:10 + im_yz.shape[1]] = im_yz
return new_im | python | def show_planes(im):
r"""
Create a quick montage showing a 3D image in all three directions
Parameters
----------
im : ND-array
A 3D image of the porous material
Returns
-------
image : ND-array
A 2D array containing the views. This single image can be viewed using
``matplotlib.pyplot.imshow``.
"""
if sp.squeeze(im.ndim) < 3:
raise Exception('This view is only necessary for 3D images')
x, y, z = (sp.array(im.shape)/2).astype(int)
im_xy = im[:, :, z]
im_xz = im[:, y, :]
im_yz = sp.rot90(im[x, :, :])
new_x = im_xy.shape[0] + im_yz.shape[0] + 10
new_y = im_xy.shape[1] + im_xz.shape[1] + 10
new_im = sp.zeros([new_x + 20, new_y + 20], dtype=im.dtype)
# Add xy image to upper left corner
new_im[10:im_xy.shape[0]+10,
10:im_xy.shape[1]+10] = im_xy
# Add xz image to lower left coner
x_off = im_xy.shape[0]+20
y_off = im_xy.shape[1]+20
new_im[10:10 + im_xz.shape[0],
y_off:y_off + im_xz.shape[1]] = im_xz
new_im[x_off:x_off + im_yz.shape[0],
10:10 + im_yz.shape[1]] = im_yz
return new_im | [
"def",
"show_planes",
"(",
"im",
")",
":",
"if",
"sp",
".",
"squeeze",
"(",
"im",
".",
"ndim",
")",
"<",
"3",
":",
"raise",
"Exception",
"(",
"'This view is only necessary for 3D images'",
")",
"x",
",",
"y",
",",
"z",
"=",
"(",
"sp",
".",
"array",
"... | r"""
Create a quick montage showing a 3D image in all three directions
Parameters
----------
im : ND-array
A 3D image of the porous material
Returns
-------
image : ND-array
A 2D array containing the views. This single image can be viewed using
``matplotlib.pyplot.imshow``. | [
"r",
"Create",
"a",
"quick",
"montage",
"showing",
"a",
"3D",
"image",
"in",
"all",
"three",
"directions"
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/visualization/__views__.py#L6-L46 | train | 213,390 |
PMEAL/porespy | porespy/visualization/__views__.py | sem | def sem(im, direction='X'):
r"""
Simulates an SEM photograph looking into the porous material in the
specified direction. Features are colored according to their depth into
the image, so darker features are further away.
Parameters
----------
im : array_like
ND-image of the porous material with the solid phase marked as 1 or
True
direction : string
Specify the axis along which the camera will point. Options are
'X', 'Y', and 'Z'.
Returns
-------
image : 2D-array
A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
function.
"""
im = sp.array(~im, dtype=int)
if direction in ['Y', 'y']:
im = sp.transpose(im, axes=[1, 0, 2])
if direction in ['Z', 'z']:
im = sp.transpose(im, axes=[2, 1, 0])
t = im.shape[0]
depth = sp.reshape(sp.arange(0, t), [t, 1, 1])
im = im*depth
im = sp.amax(im, axis=0)
return im | python | def sem(im, direction='X'):
r"""
Simulates an SEM photograph looking into the porous material in the
specified direction. Features are colored according to their depth into
the image, so darker features are further away.
Parameters
----------
im : array_like
ND-image of the porous material with the solid phase marked as 1 or
True
direction : string
Specify the axis along which the camera will point. Options are
'X', 'Y', and 'Z'.
Returns
-------
image : 2D-array
A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
function.
"""
im = sp.array(~im, dtype=int)
if direction in ['Y', 'y']:
im = sp.transpose(im, axes=[1, 0, 2])
if direction in ['Z', 'z']:
im = sp.transpose(im, axes=[2, 1, 0])
t = im.shape[0]
depth = sp.reshape(sp.arange(0, t), [t, 1, 1])
im = im*depth
im = sp.amax(im, axis=0)
return im | [
"def",
"sem",
"(",
"im",
",",
"direction",
"=",
"'X'",
")",
":",
"im",
"=",
"sp",
".",
"array",
"(",
"~",
"im",
",",
"dtype",
"=",
"int",
")",
"if",
"direction",
"in",
"[",
"'Y'",
",",
"'y'",
"]",
":",
"im",
"=",
"sp",
".",
"transpose",
"(",
... | r"""
Simulates an SEM photograph looking into the porous material in the
specified direction. Features are colored according to their depth into
the image, so darker features are further away.
Parameters
----------
im : array_like
ND-image of the porous material with the solid phase marked as 1 or
True
direction : string
Specify the axis along which the camera will point. Options are
'X', 'Y', and 'Z'.
Returns
-------
image : 2D-array
A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
function. | [
"r",
"Simulates",
"an",
"SEM",
"photograph",
"looking",
"into",
"the",
"porous",
"material",
"in",
"the",
"specified",
"direction",
".",
"Features",
"are",
"colored",
"according",
"to",
"their",
"depth",
"into",
"the",
"image",
"so",
"darker",
"features",
"are... | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/visualization/__views__.py#L49-L80 | train | 213,391 |
PMEAL/porespy | porespy/visualization/__views__.py | xray | def xray(im, direction='X'):
r"""
Simulates an X-ray radiograph looking through the porouls material in the
specfied direction. The resulting image is colored according to the amount
of attenuation an X-ray would experience, so regions with more solid will
appear darker.
Parameters
----------
im : array_like
ND-image of the porous material with the solid phase marked as 1 or
True
direction : string
Specify the axis along which the camera will point. Options are
'X', 'Y', and 'Z'.
Returns
-------
image : 2D-array
A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
function.
"""
im = sp.array(~im, dtype=int)
if direction in ['Y', 'y']:
im = sp.transpose(im, axes=[1, 0, 2])
if direction in ['Z', 'z']:
im = sp.transpose(im, axes=[2, 1, 0])
im = sp.sum(im, axis=0)
return im | python | def xray(im, direction='X'):
r"""
Simulates an X-ray radiograph looking through the porouls material in the
specfied direction. The resulting image is colored according to the amount
of attenuation an X-ray would experience, so regions with more solid will
appear darker.
Parameters
----------
im : array_like
ND-image of the porous material with the solid phase marked as 1 or
True
direction : string
Specify the axis along which the camera will point. Options are
'X', 'Y', and 'Z'.
Returns
-------
image : 2D-array
A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
function.
"""
im = sp.array(~im, dtype=int)
if direction in ['Y', 'y']:
im = sp.transpose(im, axes=[1, 0, 2])
if direction in ['Z', 'z']:
im = sp.transpose(im, axes=[2, 1, 0])
im = sp.sum(im, axis=0)
return im | [
"def",
"xray",
"(",
"im",
",",
"direction",
"=",
"'X'",
")",
":",
"im",
"=",
"sp",
".",
"array",
"(",
"~",
"im",
",",
"dtype",
"=",
"int",
")",
"if",
"direction",
"in",
"[",
"'Y'",
",",
"'y'",
"]",
":",
"im",
"=",
"sp",
".",
"transpose",
"(",... | r"""
Simulates an X-ray radiograph looking through the porouls material in the
specfied direction. The resulting image is colored according to the amount
of attenuation an X-ray would experience, so regions with more solid will
appear darker.
Parameters
----------
im : array_like
ND-image of the porous material with the solid phase marked as 1 or
True
direction : string
Specify the axis along which the camera will point. Options are
'X', 'Y', and 'Z'.
Returns
-------
image : 2D-array
A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
function. | [
"r",
"Simulates",
"an",
"X",
"-",
"ray",
"radiograph",
"looking",
"through",
"the",
"porouls",
"material",
"in",
"the",
"specfied",
"direction",
".",
"The",
"resulting",
"image",
"is",
"colored",
"according",
"to",
"the",
"amount",
"of",
"attenuation",
"an",
... | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/visualization/__views__.py#L83-L112 | train | 213,392 |
PMEAL/porespy | porespy/metrics/__regionprops__.py | props_to_DataFrame | def props_to_DataFrame(regionprops):
r"""
Returns a Pandas DataFrame containing all the scalar metrics for each
region, such as volume, sphericity, and so on, calculated by
``regionprops_3D``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by ``regionprops_3D``. Because ``regionprops_3D`` returns data in
the same ``list`` format as the ``regionprops`` function in **Skimage**
you can pass in either.
Returns
-------
DataFrame : Pandas DataFrame
A Pandas DataFrame with each region corresponding to a row and each
column corresponding to a key metric. All the values for a given
property (e.g. 'sphericity') can be obtained as
``val = df['sphericity']``. Conversely, all the key metrics for a
given region can be found with ``df.iloc[1]``.
See Also
--------
props_to_image
regionprops_3d
"""
# Parse the regionprops list and pull out all props with scalar values
metrics = []
reg = regionprops[0]
for item in reg.__dir__():
if not item.startswith('_'):
try:
if sp.shape(getattr(reg, item)) == ():
metrics.append(item)
except (TypeError, NotImplementedError, AttributeError):
pass
# Create a dictionary of all metrics that are simple scalar propertie
d = {}
for k in metrics:
try:
d[k] = sp.array([r[k] for r in regionprops])
except ValueError:
print('Error encountered evaluating ' + k + ' so skipping it')
# Create pandas data frame an return
df = DataFrame(d)
return df | python | def props_to_DataFrame(regionprops):
r"""
Returns a Pandas DataFrame containing all the scalar metrics for each
region, such as volume, sphericity, and so on, calculated by
``regionprops_3D``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by ``regionprops_3D``. Because ``regionprops_3D`` returns data in
the same ``list`` format as the ``regionprops`` function in **Skimage**
you can pass in either.
Returns
-------
DataFrame : Pandas DataFrame
A Pandas DataFrame with each region corresponding to a row and each
column corresponding to a key metric. All the values for a given
property (e.g. 'sphericity') can be obtained as
``val = df['sphericity']``. Conversely, all the key metrics for a
given region can be found with ``df.iloc[1]``.
See Also
--------
props_to_image
regionprops_3d
"""
# Parse the regionprops list and pull out all props with scalar values
metrics = []
reg = regionprops[0]
for item in reg.__dir__():
if not item.startswith('_'):
try:
if sp.shape(getattr(reg, item)) == ():
metrics.append(item)
except (TypeError, NotImplementedError, AttributeError):
pass
# Create a dictionary of all metrics that are simple scalar propertie
d = {}
for k in metrics:
try:
d[k] = sp.array([r[k] for r in regionprops])
except ValueError:
print('Error encountered evaluating ' + k + ' so skipping it')
# Create pandas data frame an return
df = DataFrame(d)
return df | [
"def",
"props_to_DataFrame",
"(",
"regionprops",
")",
":",
"# Parse the regionprops list and pull out all props with scalar values",
"metrics",
"=",
"[",
"]",
"reg",
"=",
"regionprops",
"[",
"0",
"]",
"for",
"item",
"in",
"reg",
".",
"__dir__",
"(",
")",
":",
"if"... | r"""
Returns a Pandas DataFrame containing all the scalar metrics for each
region, such as volume, sphericity, and so on, calculated by
``regionprops_3D``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by ``regionprops_3D``. Because ``regionprops_3D`` returns data in
the same ``list`` format as the ``regionprops`` function in **Skimage**
you can pass in either.
Returns
-------
DataFrame : Pandas DataFrame
A Pandas DataFrame with each region corresponding to a row and each
column corresponding to a key metric. All the values for a given
property (e.g. 'sphericity') can be obtained as
``val = df['sphericity']``. Conversely, all the key metrics for a
given region can be found with ``df.iloc[1]``.
See Also
--------
props_to_image
regionprops_3d | [
"r",
"Returns",
"a",
"Pandas",
"DataFrame",
"containing",
"all",
"the",
"scalar",
"metrics",
"for",
"each",
"region",
"such",
"as",
"volume",
"sphericity",
"and",
"so",
"on",
"calculated",
"by",
"regionprops_3D",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__regionprops__.py#L11-L58 | train | 213,393 |
PMEAL/porespy | porespy/metrics/__regionprops__.py | props_to_image | def props_to_image(regionprops, shape, prop):
r"""
Creates an image with each region colored according the specified ``prop``,
as obtained by ``regionprops_3d``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by PoreSpy's ``regionprops_3D`` or Skimage's ``regionsprops``.
shape : array_like
The shape of the original image for which ``regionprops`` was obtained.
prop : string
The region property of interest. Can be a scalar item such as 'volume'
in which case the the regions will be colored by their respective
volumes, or can be an image-type property such as 'border' or
'convex_image', which will return an image composed of the sub-images.
Returns
-------
image : ND-array
An ND-image the same size as the original image, with each region
represented by the values specified in ``prop``.
See Also
--------
props_to_DataFrame
regionprops_3d
"""
im = sp.zeros(shape=shape)
for r in regionprops:
if prop == 'convex':
mask = r.convex_image
else:
mask = r.image
temp = mask * r[prop]
s = bbox_to_slices(r.bbox)
im[s] += temp
return im | python | def props_to_image(regionprops, shape, prop):
r"""
Creates an image with each region colored according the specified ``prop``,
as obtained by ``regionprops_3d``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by PoreSpy's ``regionprops_3D`` or Skimage's ``regionsprops``.
shape : array_like
The shape of the original image for which ``regionprops`` was obtained.
prop : string
The region property of interest. Can be a scalar item such as 'volume'
in which case the the regions will be colored by their respective
volumes, or can be an image-type property such as 'border' or
'convex_image', which will return an image composed of the sub-images.
Returns
-------
image : ND-array
An ND-image the same size as the original image, with each region
represented by the values specified in ``prop``.
See Also
--------
props_to_DataFrame
regionprops_3d
"""
im = sp.zeros(shape=shape)
for r in regionprops:
if prop == 'convex':
mask = r.convex_image
else:
mask = r.image
temp = mask * r[prop]
s = bbox_to_slices(r.bbox)
im[s] += temp
return im | [
"def",
"props_to_image",
"(",
"regionprops",
",",
"shape",
",",
"prop",
")",
":",
"im",
"=",
"sp",
".",
"zeros",
"(",
"shape",
"=",
"shape",
")",
"for",
"r",
"in",
"regionprops",
":",
"if",
"prop",
"==",
"'convex'",
":",
"mask",
"=",
"r",
".",
"con... | r"""
Creates an image with each region colored according the specified ``prop``,
as obtained by ``regionprops_3d``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by PoreSpy's ``regionprops_3D`` or Skimage's ``regionsprops``.
shape : array_like
The shape of the original image for which ``regionprops`` was obtained.
prop : string
The region property of interest. Can be a scalar item such as 'volume'
in which case the the regions will be colored by their respective
volumes, or can be an image-type property such as 'border' or
'convex_image', which will return an image composed of the sub-images.
Returns
-------
image : ND-array
An ND-image the same size as the original image, with each region
represented by the values specified in ``prop``.
See Also
--------
props_to_DataFrame
regionprops_3d | [
"r",
"Creates",
"an",
"image",
"with",
"each",
"region",
"colored",
"according",
"the",
"specified",
"prop",
"as",
"obtained",
"by",
"regionprops_3d",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__regionprops__.py#L61-L102 | train | 213,394 |
PMEAL/porespy | porespy/metrics/__regionprops__.py | regionprops_3D | def regionprops_3D(im):
r"""
Calculates various metrics for each labeled region in a 3D image.
The ``regionsprops`` method in **skimage** is very thorough for 2D images,
but is a bit limited when it comes to 3D images, so this function aims
to fill this gap.
Parameters
----------
im : array_like
An imaging containing at least one labeled region. If a boolean image
is received than the ``True`` voxels are treated as a single region
labeled ``1``. Regions labeled 0 are ignored in all cases.
Returns
-------
props : list
An augmented version of the list returned by skimage's ``regionprops``.
Information, such as ``volume``, can be found for region A using the
following syntax: ``result[A-1].volume``.
The returned list contains all the metrics normally returned by
**skimage.measure.regionprops** plus the following:
'slice': Slice indices into the image that can be used to extract the
region
'volume': Volume of the region in number of voxels.
'bbox_volume': Volume of the bounding box that contains the region.
'border': The edges of the region, found as the locations where
the distance transform is 1.
'inscribed_sphere': An image containing the largest sphere can can
fit entirely inside the region.
'surface_mesh_vertices': Obtained by applying the marching cubes
algorithm on the region, AFTER first blurring the voxel image. This
allows marching cubes more freedom to fit the surface contours. See
also ``surface_mesh_simplices``
'surface_mesh_simplices': This accompanies ``surface_mesh_vertices``
and together they can be used to define the region as a mesh.
'surface_area': Calculated using the mesh obtained as described above,
using the ``porespy.metrics.mesh_surface_area`` method.
'sphericity': Defined as the ratio of the area of a sphere with the
same volume as the region to the actual surface area of the region.
'skeleton': The medial axis of the region obtained using the
``skeletonize_3D`` method from **skimage**.
'convex_volume': Same as convex_area, but translated to a more
meaningful name.
See Also
--------
snow_partitioning
Notes
-----
This function may seem slow compared to the skimage version, but that is
because they defer calculation of certain properties until they are
accessed, while this one evalulates everything (inlcuding the deferred
properties from skimage's ``regionprops``)
Regions can be identified using a watershed algorithm, which can be a bit
tricky to obtain desired results. *PoreSpy* includes the SNOW algorithm,
which may be helpful.
"""
print('_'*60)
print('Calculating regionprops')
results = regionprops(im, coordinates='xy')
for i in tqdm(range(len(results))):
mask = results[i].image
mask_padded = sp.pad(mask, pad_width=1, mode='constant')
temp = spim.distance_transform_edt(mask_padded)
dt = extract_subsection(temp, shape=mask.shape)
# ---------------------------------------------------------------------
# Slice indices
results[i].slice = results[i]._slice
# ---------------------------------------------------------------------
# Volume of regions in voxels
results[i].volume = results[i].area
# ---------------------------------------------------------------------
# Volume of bounding box, in voxels
results[i].bbox_volume = sp.prod(mask.shape)
# ---------------------------------------------------------------------
# Create an image of the border
results[i].border = dt == 1
# ---------------------------------------------------------------------
# Create an image of the maximal inscribed sphere
r = dt.max()
inv_dt = spim.distance_transform_edt(dt < r)
results[i].inscribed_sphere = inv_dt < r
# ---------------------------------------------------------------------
# Find surface area using marching cubes and analyze the mesh
tmp = sp.pad(sp.atleast_3d(mask), pad_width=1, mode='constant')
tmp = spim.convolve(tmp, weights=ball(1))/5
verts, faces, norms, vals = marching_cubes_lewiner(volume=tmp, level=0)
results[i].surface_mesh_vertices = verts
results[i].surface_mesh_simplices = faces
area = mesh_surface_area(verts, faces)
results[i].surface_area = area
# ---------------------------------------------------------------------
# Find sphericity
vol = results[i].volume
r = (3/4/sp.pi*vol)**(1/3)
a_equiv = 4*sp.pi*(r)**2
a_region = results[i].surface_area
results[i].sphericity = a_equiv/a_region
# ---------------------------------------------------------------------
# Find skeleton of region
results[i].skeleton = skeletonize_3d(mask)
# ---------------------------------------------------------------------
# Volume of convex image, equal to area in 2D, so just translating
results[i].convex_volume = results[i].convex_area
return results | python | def regionprops_3D(im):
r"""
Calculates various metrics for each labeled region in a 3D image.
The ``regionsprops`` method in **skimage** is very thorough for 2D images,
but is a bit limited when it comes to 3D images, so this function aims
to fill this gap.
Parameters
----------
im : array_like
An imaging containing at least one labeled region. If a boolean image
is received than the ``True`` voxels are treated as a single region
labeled ``1``. Regions labeled 0 are ignored in all cases.
Returns
-------
props : list
An augmented version of the list returned by skimage's ``regionprops``.
Information, such as ``volume``, can be found for region A using the
following syntax: ``result[A-1].volume``.
The returned list contains all the metrics normally returned by
**skimage.measure.regionprops** plus the following:
'slice': Slice indices into the image that can be used to extract the
region
'volume': Volume of the region in number of voxels.
'bbox_volume': Volume of the bounding box that contains the region.
'border': The edges of the region, found as the locations where
the distance transform is 1.
'inscribed_sphere': An image containing the largest sphere can can
fit entirely inside the region.
'surface_mesh_vertices': Obtained by applying the marching cubes
algorithm on the region, AFTER first blurring the voxel image. This
allows marching cubes more freedom to fit the surface contours. See
also ``surface_mesh_simplices``
'surface_mesh_simplices': This accompanies ``surface_mesh_vertices``
and together they can be used to define the region as a mesh.
'surface_area': Calculated using the mesh obtained as described above,
using the ``porespy.metrics.mesh_surface_area`` method.
'sphericity': Defined as the ratio of the area of a sphere with the
same volume as the region to the actual surface area of the region.
'skeleton': The medial axis of the region obtained using the
``skeletonize_3D`` method from **skimage**.
'convex_volume': Same as convex_area, but translated to a more
meaningful name.
See Also
--------
snow_partitioning
Notes
-----
This function may seem slow compared to the skimage version, but that is
because they defer calculation of certain properties until they are
accessed, while this one evalulates everything (inlcuding the deferred
properties from skimage's ``regionprops``)
Regions can be identified using a watershed algorithm, which can be a bit
tricky to obtain desired results. *PoreSpy* includes the SNOW algorithm,
which may be helpful.
"""
print('_'*60)
print('Calculating regionprops')
results = regionprops(im, coordinates='xy')
for i in tqdm(range(len(results))):
mask = results[i].image
mask_padded = sp.pad(mask, pad_width=1, mode='constant')
temp = spim.distance_transform_edt(mask_padded)
dt = extract_subsection(temp, shape=mask.shape)
# ---------------------------------------------------------------------
# Slice indices
results[i].slice = results[i]._slice
# ---------------------------------------------------------------------
# Volume of regions in voxels
results[i].volume = results[i].area
# ---------------------------------------------------------------------
# Volume of bounding box, in voxels
results[i].bbox_volume = sp.prod(mask.shape)
# ---------------------------------------------------------------------
# Create an image of the border
results[i].border = dt == 1
# ---------------------------------------------------------------------
# Create an image of the maximal inscribed sphere
r = dt.max()
inv_dt = spim.distance_transform_edt(dt < r)
results[i].inscribed_sphere = inv_dt < r
# ---------------------------------------------------------------------
# Find surface area using marching cubes and analyze the mesh
tmp = sp.pad(sp.atleast_3d(mask), pad_width=1, mode='constant')
tmp = spim.convolve(tmp, weights=ball(1))/5
verts, faces, norms, vals = marching_cubes_lewiner(volume=tmp, level=0)
results[i].surface_mesh_vertices = verts
results[i].surface_mesh_simplices = faces
area = mesh_surface_area(verts, faces)
results[i].surface_area = area
# ---------------------------------------------------------------------
# Find sphericity
vol = results[i].volume
r = (3/4/sp.pi*vol)**(1/3)
a_equiv = 4*sp.pi*(r)**2
a_region = results[i].surface_area
results[i].sphericity = a_equiv/a_region
# ---------------------------------------------------------------------
# Find skeleton of region
results[i].skeleton = skeletonize_3d(mask)
# ---------------------------------------------------------------------
# Volume of convex image, equal to area in 2D, so just translating
results[i].convex_volume = results[i].convex_area
return results | [
"def",
"regionprops_3D",
"(",
"im",
")",
":",
"print",
"(",
"'_'",
"*",
"60",
")",
"print",
"(",
"'Calculating regionprops'",
")",
"results",
"=",
"regionprops",
"(",
"im",
",",
"coordinates",
"=",
"'xy'",
")",
"for",
"i",
"in",
"tqdm",
"(",
"range",
"... | r"""
Calculates various metrics for each labeled region in a 3D image.
The ``regionsprops`` method in **skimage** is very thorough for 2D images,
but is a bit limited when it comes to 3D images, so this function aims
to fill this gap.
Parameters
----------
im : array_like
An imaging containing at least one labeled region. If a boolean image
is received than the ``True`` voxels are treated as a single region
labeled ``1``. Regions labeled 0 are ignored in all cases.
Returns
-------
props : list
An augmented version of the list returned by skimage's ``regionprops``.
Information, such as ``volume``, can be found for region A using the
following syntax: ``result[A-1].volume``.
The returned list contains all the metrics normally returned by
**skimage.measure.regionprops** plus the following:
'slice': Slice indices into the image that can be used to extract the
region
'volume': Volume of the region in number of voxels.
'bbox_volume': Volume of the bounding box that contains the region.
'border': The edges of the region, found as the locations where
the distance transform is 1.
'inscribed_sphere': An image containing the largest sphere can can
fit entirely inside the region.
'surface_mesh_vertices': Obtained by applying the marching cubes
algorithm on the region, AFTER first blurring the voxel image. This
allows marching cubes more freedom to fit the surface contours. See
also ``surface_mesh_simplices``
'surface_mesh_simplices': This accompanies ``surface_mesh_vertices``
and together they can be used to define the region as a mesh.
'surface_area': Calculated using the mesh obtained as described above,
using the ``porespy.metrics.mesh_surface_area`` method.
'sphericity': Defined as the ratio of the area of a sphere with the
same volume as the region to the actual surface area of the region.
'skeleton': The medial axis of the region obtained using the
``skeletonize_3D`` method from **skimage**.
'convex_volume': Same as convex_area, but translated to a more
meaningful name.
See Also
--------
snow_partitioning
Notes
-----
This function may seem slow compared to the skimage version, but that is
because they defer calculation of certain properties until they are
accessed, while this one evalulates everything (inlcuding the deferred
properties from skimage's ``regionprops``)
Regions can be identified using a watershed algorithm, which can be a bit
tricky to obtain desired results. *PoreSpy* includes the SNOW algorithm,
which may be helpful. | [
"r",
"Calculates",
"various",
"metrics",
"for",
"each",
"labeled",
"region",
"in",
"a",
"3D",
"image",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__regionprops__.py#L105-L228 | train | 213,395 |
PMEAL/porespy | porespy/networks/__snow_n__.py | snow_n | def snow_n(im,
voxel_size=1,
boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
marching_cubes_area=False,
alias=None):
r"""
Analyzes an image that has been segemented into N phases and extracts all
a network for each of the N phases, including geometerical information as
well as network connectivity between each phase.
Parameters
----------
im : ND-array
Image of porous material where each phase is represented by unique
integer. Phase integer should start from 1 (0 is ignored)
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is always 1 unit lenth per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be calculated using the marching cube algorithm. This is a more
accurate representation of area in extracted network, but is quite
slow, so it is ``False`` by default. The default method simply counts
voxels so does not correctly account for the voxelated nature of the
images.
alias : dict (Optional)
A dictionary that assigns unique image label to specific phases. For
example {1: 'Solid'} will show all structural properties associated
with label 1 as Solid phase properties. If ``None`` then default
labelling will be used i.e {1: 'Phase1',..}.
Returns
-------
A dictionary containing all N phases size data, as well as the
network topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
"""
# -------------------------------------------------------------------------
# Get alias if provided by user
al = _create_alias_map(im, alias=alias)
# -------------------------------------------------------------------------
# Perform snow on each phase and merge all segmentation and dt together
snow = snow_partitioning_n(im, r_max=4, sigma=0.4, return_all=True,
mask=True, randomize=False, alias=al)
# -------------------------------------------------------------------------
# Add boundary regions
f = boundary_faces
regions = add_boundary_regions(regions=snow.regions, faces=f)
# -------------------------------------------------------------------------
# Padding distance transform to extract geometrical properties
dt = pad_faces(im=snow.dt, faces=f)
# -------------------------------------------------------------------------
# For only one phase extraction with boundary regions
phases_num = sp.unique(im).astype(int)
phases_num = sp.trim_zeros(phases_num)
if len(phases_num) == 1:
if f is not None:
snow.im = pad_faces(im=snow.im, faces=f)
regions = regions * (snow.im.astype(bool))
regions = make_contiguous(regions)
# -------------------------------------------------------------------------
# Extract N phases sites and bond information from image
net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)
# -------------------------------------------------------------------------
# Extract marching cube surface area and interfacial area of regions
if marching_cubes_area:
areas = region_surface_areas(regions=regions)
interface_area = region_interface_areas(regions=regions, areas=areas,
voxel_size=voxel_size)
net['pore.surface_area'] = areas * voxel_size ** 2
net['throat.area'] = interface_area.area
# -------------------------------------------------------------------------
# Find interconnection and interfacial area between ith and jth phases
net = add_phase_interconnections(net=net, snow_partitioning_n=snow,
marching_cubes_area=marching_cubes_area,
alias=al)
# -------------------------------------------------------------------------
# label boundary cells
net = label_boundary_cells(network=net, boundary_faces=f)
# -------------------------------------------------------------------------
temp = _net_dict(net)
temp.im = im.copy()
temp.dt = dt
temp.regions = regions
return temp | python | def snow_n(im,
voxel_size=1,
boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
marching_cubes_area=False,
alias=None):
r"""
Analyzes an image that has been segemented into N phases and extracts all
a network for each of the N phases, including geometerical information as
well as network connectivity between each phase.
Parameters
----------
im : ND-array
Image of porous material where each phase is represented by unique
integer. Phase integer should start from 1 (0 is ignored)
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is always 1 unit lenth per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be calculated using the marching cube algorithm. This is a more
accurate representation of area in extracted network, but is quite
slow, so it is ``False`` by default. The default method simply counts
voxels so does not correctly account for the voxelated nature of the
images.
alias : dict (Optional)
A dictionary that assigns unique image label to specific phases. For
example {1: 'Solid'} will show all structural properties associated
with label 1 as Solid phase properties. If ``None`` then default
labelling will be used i.e {1: 'Phase1',..}.
Returns
-------
A dictionary containing all N phases size data, as well as the
network topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
"""
# -------------------------------------------------------------------------
# Get alias if provided by user
al = _create_alias_map(im, alias=alias)
# -------------------------------------------------------------------------
# Perform snow on each phase and merge all segmentation and dt together
snow = snow_partitioning_n(im, r_max=4, sigma=0.4, return_all=True,
mask=True, randomize=False, alias=al)
# -------------------------------------------------------------------------
# Add boundary regions
f = boundary_faces
regions = add_boundary_regions(regions=snow.regions, faces=f)
# -------------------------------------------------------------------------
# Padding distance transform to extract geometrical properties
dt = pad_faces(im=snow.dt, faces=f)
# -------------------------------------------------------------------------
# For only one phase extraction with boundary regions
phases_num = sp.unique(im).astype(int)
phases_num = sp.trim_zeros(phases_num)
if len(phases_num) == 1:
if f is not None:
snow.im = pad_faces(im=snow.im, faces=f)
regions = regions * (snow.im.astype(bool))
regions = make_contiguous(regions)
# -------------------------------------------------------------------------
# Extract N phases sites and bond information from image
net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)
# -------------------------------------------------------------------------
# Extract marching cube surface area and interfacial area of regions
if marching_cubes_area:
areas = region_surface_areas(regions=regions)
interface_area = region_interface_areas(regions=regions, areas=areas,
voxel_size=voxel_size)
net['pore.surface_area'] = areas * voxel_size ** 2
net['throat.area'] = interface_area.area
# -------------------------------------------------------------------------
# Find interconnection and interfacial area between ith and jth phases
net = add_phase_interconnections(net=net, snow_partitioning_n=snow,
marching_cubes_area=marching_cubes_area,
alias=al)
# -------------------------------------------------------------------------
# label boundary cells
net = label_boundary_cells(network=net, boundary_faces=f)
# -------------------------------------------------------------------------
temp = _net_dict(net)
temp.im = im.copy()
temp.dt = dt
temp.regions = regions
return temp | [
"def",
"snow_n",
"(",
"im",
",",
"voxel_size",
"=",
"1",
",",
"boundary_faces",
"=",
"[",
"'top'",
",",
"'bottom'",
",",
"'left'",
",",
"'right'",
",",
"'front'",
",",
"'back'",
"]",
",",
"marching_cubes_area",
"=",
"False",
",",
"alias",
"=",
"None",
... | r"""
Analyzes an image that has been segemented into N phases and extracts all
a network for each of the N phases, including geometerical information as
well as network connectivity between each phase.
Parameters
----------
im : ND-array
Image of porous material where each phase is represented by unique
integer. Phase integer should start from 1 (0 is ignored)
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is always 1 unit lenth per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be calculated using the marching cube algorithm. This is a more
accurate representation of area in extracted network, but is quite
slow, so it is ``False`` by default. The default method simply counts
voxels so does not correctly account for the voxelated nature of the
images.
alias : dict (Optional)
A dictionary that assigns unique image label to specific phases. For
example {1: 'Solid'} will show all structural properties associated
with label 1 as Solid phase properties. If ``None`` then default
labelling will be used i.e {1: 'Phase1',..}.
Returns
-------
A dictionary containing all N phases size data, as well as the
network topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command. | [
"r",
"Analyzes",
"an",
"image",
"that",
"has",
"been",
"segemented",
"into",
"N",
"phases",
"and",
"extracts",
"all",
"a",
"network",
"for",
"each",
"of",
"the",
"N",
"phases",
"including",
"geometerical",
"information",
"as",
"well",
"as",
"network",
"conne... | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/networks/__snow_n__.py#L13-L112 | train | 213,396 |
PMEAL/porespy | porespy/io/__funcs__.py | dict_to_vtk | def dict_to_vtk(data, path='./dictvtk', voxel_size=1, origin=(0, 0, 0)):
r"""
Accepts multiple images as a dictionary and compiles them into a vtk file
Parameters
----------
data : dict
A dictionary of *key: value* pairs, where the *key* is the name of the
scalar property stored in each voxel of the array stored in the
corresponding *value*.
path : string
Path to output file
voxel_size : int
The side length of the voxels (voxels are cubic)
origin : float
data origin (according to selected voxel size)
Notes
-----
Outputs a vtk, vtp or vti file that can opened in ParaView
"""
vs = voxel_size
for entry in data:
if data[entry].dtype == bool:
data[entry] = data[entry].astype(np.int8)
if data[entry].flags['C_CONTIGUOUS']:
data[entry] = np.ascontiguousarray(data[entry])
imageToVTK(path, cellData=data, spacing=(vs, vs, vs), origin=origin) | python | def dict_to_vtk(data, path='./dictvtk', voxel_size=1, origin=(0, 0, 0)):
r"""
Accepts multiple images as a dictionary and compiles them into a vtk file
Parameters
----------
data : dict
A dictionary of *key: value* pairs, where the *key* is the name of the
scalar property stored in each voxel of the array stored in the
corresponding *value*.
path : string
Path to output file
voxel_size : int
The side length of the voxels (voxels are cubic)
origin : float
data origin (according to selected voxel size)
Notes
-----
Outputs a vtk, vtp or vti file that can opened in ParaView
"""
vs = voxel_size
for entry in data:
if data[entry].dtype == bool:
data[entry] = data[entry].astype(np.int8)
if data[entry].flags['C_CONTIGUOUS']:
data[entry] = np.ascontiguousarray(data[entry])
imageToVTK(path, cellData=data, spacing=(vs, vs, vs), origin=origin) | [
"def",
"dict_to_vtk",
"(",
"data",
",",
"path",
"=",
"'./dictvtk'",
",",
"voxel_size",
"=",
"1",
",",
"origin",
"=",
"(",
"0",
",",
"0",
",",
"0",
")",
")",
":",
"vs",
"=",
"voxel_size",
"for",
"entry",
"in",
"data",
":",
"if",
"data",
"[",
"entr... | r"""
Accepts multiple images as a dictionary and compiles them into a vtk file
Parameters
----------
data : dict
A dictionary of *key: value* pairs, where the *key* is the name of the
scalar property stored in each voxel of the array stored in the
corresponding *value*.
path : string
Path to output file
voxel_size : int
The side length of the voxels (voxels are cubic)
origin : float
data origin (according to selected voxel size)
Notes
-----
Outputs a vtk, vtp or vti file that can opened in ParaView | [
"r",
"Accepts",
"multiple",
"images",
"as",
"a",
"dictionary",
"and",
"compiles",
"them",
"into",
"a",
"vtk",
"file"
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/io/__funcs__.py#L8-L35 | train | 213,397 |
PMEAL/porespy | porespy/io/__funcs__.py | to_openpnm | def to_openpnm(net, filename):
r"""
Save the result of the `snow` network extraction function in a format
suitable for opening in OpenPNM.
Parameters
----------
net : dict
The dictionary object produced by the network extraction functions
filename : string or path object
The name and location to save the file, which will have `.net` file
extension.
"""
from openpnm.network import GenericNetwork
# Convert net dict to an openpnm Network
pn = GenericNetwork()
pn.update(net)
pn.project.save_project(filename)
ws = pn.project.workspace
ws.close_project(pn.project) | python | def to_openpnm(net, filename):
r"""
Save the result of the `snow` network extraction function in a format
suitable for opening in OpenPNM.
Parameters
----------
net : dict
The dictionary object produced by the network extraction functions
filename : string or path object
The name and location to save the file, which will have `.net` file
extension.
"""
from openpnm.network import GenericNetwork
# Convert net dict to an openpnm Network
pn = GenericNetwork()
pn.update(net)
pn.project.save_project(filename)
ws = pn.project.workspace
ws.close_project(pn.project) | [
"def",
"to_openpnm",
"(",
"net",
",",
"filename",
")",
":",
"from",
"openpnm",
".",
"network",
"import",
"GenericNetwork",
"# Convert net dict to an openpnm Network",
"pn",
"=",
"GenericNetwork",
"(",
")",
"pn",
".",
"update",
"(",
"net",
")",
"pn",
".",
"proj... | r"""
Save the result of the `snow` network extraction function in a format
suitable for opening in OpenPNM.
Parameters
----------
net : dict
The dictionary object produced by the network extraction functions
filename : string or path object
The name and location to save the file, which will have `.net` file
extension. | [
"r",
"Save",
"the",
"result",
"of",
"the",
"snow",
"network",
"extraction",
"function",
"in",
"a",
"format",
"suitable",
"for",
"opening",
"in",
"OpenPNM",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/io/__funcs__.py#L38-L59 | train | 213,398 |
PMEAL/porespy | porespy/io/__funcs__.py | to_vtk | def to_vtk(im, path='./voxvtk', divide=False, downsample=False, voxel_size=1,
vox=False):
r"""
Converts an array to a vtk file.
Parameters
----------
im : 3D image
The image of the porous material
path : string
Path to output file
divide : bool
vtk files can get very large, this option allows you for two output
files, divided at z = half. This allows for large data sets to be
imaged without loss of information
downsample : bool
very large images acan be downsampled to half the size in each
dimension, this doubles the effective voxel size
voxel_size : int
The side length of the voxels (voxels are cubic)
vox : bool
For an image that is binary (1's and 0's) this reduces the file size by
using int8 format (can also be used to reduce file size when accuracy
is not necessary ie: just visulization)
Notes
-----
Outputs a vtk, vtp or vti file that can opened in paraview
"""
if len(im.shape) == 2:
im = im[:, :, np.newaxis]
if im.dtype == bool:
vox = True
if vox:
im = im.astype(np.int8)
vs = voxel_size
if divide:
split = np.round(im.shape[2]/2).astype(np.int)
im1 = im[:, :, 0:split]
im2 = im[:, :, split:]
imageToVTK(path+'1', cellData={'im': np.ascontiguousarray(im1)},
spacing=(vs, vs, vs))
imageToVTK(path+'2', origin=(0.0, 0.0, split*vs),
cellData={'im': np.ascontiguousarray(im2)},
spacing=(vs, vs, vs))
elif downsample:
im = spim.interpolation.zoom(im, zoom=0.5, order=0, mode='reflect')
imageToVTK(path, cellData={'im': np.ascontiguousarray(im)},
spacing=(2*vs, 2*vs, 2*vs))
else:
imageToVTK(path, cellData={'im': np.ascontiguousarray(im)},
spacing=(vs, vs, vs)) | python | def to_vtk(im, path='./voxvtk', divide=False, downsample=False, voxel_size=1,
vox=False):
r"""
Converts an array to a vtk file.
Parameters
----------
im : 3D image
The image of the porous material
path : string
Path to output file
divide : bool
vtk files can get very large, this option allows you for two output
files, divided at z = half. This allows for large data sets to be
imaged without loss of information
downsample : bool
very large images acan be downsampled to half the size in each
dimension, this doubles the effective voxel size
voxel_size : int
The side length of the voxels (voxels are cubic)
vox : bool
For an image that is binary (1's and 0's) this reduces the file size by
using int8 format (can also be used to reduce file size when accuracy
is not necessary ie: just visulization)
Notes
-----
Outputs a vtk, vtp or vti file that can opened in paraview
"""
if len(im.shape) == 2:
im = im[:, :, np.newaxis]
if im.dtype == bool:
vox = True
if vox:
im = im.astype(np.int8)
vs = voxel_size
if divide:
split = np.round(im.shape[2]/2).astype(np.int)
im1 = im[:, :, 0:split]
im2 = im[:, :, split:]
imageToVTK(path+'1', cellData={'im': np.ascontiguousarray(im1)},
spacing=(vs, vs, vs))
imageToVTK(path+'2', origin=(0.0, 0.0, split*vs),
cellData={'im': np.ascontiguousarray(im2)},
spacing=(vs, vs, vs))
elif downsample:
im = spim.interpolation.zoom(im, zoom=0.5, order=0, mode='reflect')
imageToVTK(path, cellData={'im': np.ascontiguousarray(im)},
spacing=(2*vs, 2*vs, 2*vs))
else:
imageToVTK(path, cellData={'im': np.ascontiguousarray(im)},
spacing=(vs, vs, vs)) | [
"def",
"to_vtk",
"(",
"im",
",",
"path",
"=",
"'./voxvtk'",
",",
"divide",
"=",
"False",
",",
"downsample",
"=",
"False",
",",
"voxel_size",
"=",
"1",
",",
"vox",
"=",
"False",
")",
":",
"if",
"len",
"(",
"im",
".",
"shape",
")",
"==",
"2",
":",
... | r"""
Converts an array to a vtk file.
Parameters
----------
im : 3D image
The image of the porous material
path : string
Path to output file
divide : bool
vtk files can get very large, this option allows you for two output
files, divided at z = half. This allows for large data sets to be
imaged without loss of information
downsample : bool
very large images acan be downsampled to half the size in each
dimension, this doubles the effective voxel size
voxel_size : int
The side length of the voxels (voxels are cubic)
vox : bool
For an image that is binary (1's and 0's) this reduces the file size by
using int8 format (can also be used to reduce file size when accuracy
is not necessary ie: just visulization)
Notes
-----
Outputs a vtk, vtp or vti file that can opened in paraview | [
"r",
"Converts",
"an",
"array",
"to",
"a",
"vtk",
"file",
"."
] | 1e13875b56787d8f5b7ffdabce8c4342c33ba9f8 | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/io/__funcs__.py#L62-L113 | train | 213,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.