docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Calculate the stats from the given numpy functions
Parameters:
data: array of data points to be used for the stats
Options:
functions: tuple of numpy stat functions to apply on data
Returns:
Dictionary with the name of the function as key and the result
as the respective value
|
def scalar_stats(data, functions=('min', 'max', 'mean', 'std')):
    """Calculate summary statistics over data with the given numpy functions.

    Parameters:
        data: array of data points to be used for the stats

    Options:
        functions: tuple of numpy stat function names to apply on data

    Returns:
        Dictionary with the name of the function as key and the result
        as the respective value
    """
    return {func_name: getattr(np, func_name)(data) for func_name in functions}
| 427,187
|
Extract a summary statistic from an array of list of values
Parameters:
values: numpy array of values
mode: summary stat to extract. One of ['min', 'max', 'median', 'mean', 'std', 'raw']
Note: fails silently if values is empty, and None is returned
|
def eval_stats(values, mode):
    """Extract a summary statistic from an array of values.

    Parameters:
        values: numpy array of values
        mode: summary stat to extract. One of
            ['min', 'max', 'median', 'mean', 'std', 'total', 'raw']

    Note: fails silently if values is empty, and None is returned
    """
    if mode == 'raw':
        return values.tolist()
    # 'total' is an alias for numpy's 'sum'
    func_name = 'sum' if mode == 'total' else mode
    try:
        return getattr(np, func_name)(values, axis=0)
    except ValueError:
        # e.g. reduction over an empty array: fail silently
        return None
| 427,202
|
Construct a neuron population
Arguments:
neurons: iterable of neuron objects.
name: Optional name for this Population.
|
def __init__(self, neurons, name='Population'):
    """Construct a neuron population.

    Arguments:
        neurons: iterable of neuron objects (each exposing `soma` and
            `neurites` attributes).
        name: Optional name for this Population.
    """
    # BUG FIX: materialize once and iterate the stored tuple below.  The
    # original iterated the `neurons` argument again after tuple() had
    # consumed it, so a generator input silently produced empty somata
    # and neurites.
    self.neurons = tuple(neurons)
    self.somata = tuple(neu.soma for neu in self.neurons)
    self.neurites = tuple(chain.from_iterable(neu.neurites for neu in self.neurons))
    self.name = name
| 427,223
|
add a section
Args:
id_(int): identifying number of the section
parent_id(int): identifying number of the parent of this section
section_type(int): the section type as defined by POINT_TYPE
points is an array of [X, Y, Z, R]
|
def add_section(self, id_, parent_id, section_type, points):
    """Add a section to the builder.

    Args:
        id_(int): identifying number of the section
        parent_id(int): identifying number of the parent of this section
        section_type(int): the section type as defined by POINT_TYPE
        points: an array of [X, Y, Z, R]
    """
    assert id_ not in self.sections, 'id %s already exists in sections' % id_
    section = BlockNeuronBuilder.BlockSection(parent_id, section_type, points)
    self.sections[id_] = section
| 427,234
|
Flatten a subsection from its nested version
Args:
subsection: Nested subsection as produced by _parse_section, except one level in
_type: type of section, ie: AXON, etc
parent: first element has this as its parent
offset: position in the final array of the first element
Returns:
Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
|
def _flatten_subsection(subsection, _type, offset, parent):
    """Flatten a subsection from its nested version.

    Args:
        subsection: Nested subsection as produced by _parse_section, except
            one level in
        _type: type of section, ie: AXON, etc
        parent: first element has this as its parent
        offset: position in the final array of the first element

    Yields:
        Values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
    """
    for row in subsection:
        # TODO: Figure out what these correspond to in neurolucida
        if row in ('Low', 'Generated', 'High', ):
            continue
        elif isinstance(row[0], StringType):
            if len(row) in (4, 5, ):
                if len(row) == 5:
                    assert row[4][0] == 'S', \
                        'Only known usage of a fifth member is Sn, found: %s' % row[4][0]
                # Emit one point: X, Y, Z and radius (stored value is a
                # diameter, hence the division by 2).
                yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2.,
                       _type, offset, parent)
                parent = offset
                offset += 1
        elif isinstance(row[0], list):
            # A nested list marks a bifurcation: all sibling branches attach
            # to the last emitted point, and '|' separates the siblings.
            split_parent = offset - 1
            start_offset = 0  # NOTE(review): never updated, so always 0 — confirm intent
            slices = []
            start = 0
            for i, value in enumerate(row):
                if value == '|':
                    slices.append(slice(start + start_offset, i))
                    start = i + 1
            slices.append(slice(start + start_offset, len(row)))
            for split_slice in slices:
                for _row in _flatten_subsection(row[split_slice], _type, offset,
                                                split_parent):
                    # Keep `offset` in sync with the ids handed out by the
                    # recursive call.
                    offset += 1
                    yield _row
| 427,263
|
calculate crossings of neurites
Args:
nrn(morph): morphology on which to perform Sholl analysis
radii(iterable of floats): radii for which crossings will be counted
Returns:
Array of same length as radii, with a count of the number of crossings
for the respective radius
|
def sholl_crossings(neurites, center, radii):
    """Calculate crossings of neurites over concentric spheres (Sholl analysis).

    Args:
        neurites: morphology or neurites on which to perform the analysis
        center: point from which distances are measured
        radii(iterable of floats): radii for which crossings will be counted

    Returns:
        Array of same length as radii, with a count of the number of
        crossings for the respective radius
    """
    def _crossings_at(radius):
        # A segment crosses the sphere when the squared radius lies between
        # the squared distances of its two endpoints (either direction).
        r2 = radius ** 2
        total = 0
        for neurite in iter_neurites(neurites):
            for seg_start, seg_end in iter_segments(neurite):
                d_start = morphmath.point_dist2(center, seg_start)
                d_end = morphmath.point_dist2(center, seg_end)
                crosses = (d_start <= r2 <= d_end) or (d_end <= r2 <= d_start)
                total += int(crosses)
        return total

    return np.array([_crossings_at(radius) for radius in radii])
| 427,278
|
Check if neurite tree is monotonic
i.e. each child has a smaller or equal diameter than its parent
Args:
neurite(Neurite): neurite to operate on
tol(float): tolerance
Returns:
True if neurite monotonic
|
def is_monotonic(neurite, tol):
    """Check if neurite tree is monotonic.

    Monotonic means each point has a smaller or equal radius than its
    parent point, within tolerance.

    Args:
        neurite(Neurite): neurite to operate on
        tol(float): tolerance

    Returns:
        True if neurite is monotonic
    """
    for section in neurite.iter_sections():
        points = section.points
        # Radii along the section must not grow by more than `tol`.
        for prev_pt, next_pt in zip(points[:-1], points[1:]):
            if next_pt[COLS.R] > prev_pt[COLS.R] + tol:
                return False
        # Section boundary: the first point must not be wider than the
        # parent section's last point.
        parent = section.parent
        if parent is not None and points[0][COLS.R] > parent.points[-1][COLS.R] + tol:
            return False
    return True
| 427,289
|
Check if neurite is flat using the given method
Args:
neurite(Neurite): neurite to operate on
tol(float): tolerance
method(string): the method of flatness estimation:
'tolerance' returns true if any extent of the tree is smaller
than the given tolerance
'ratio' returns true if the ratio of the smallest directions
is smaller than tol. e.g. [1,2,3] -> 1/2 < tol
Returns:
True if neurite is flat
|
def is_flat(neurite, tol, method='tolerance'):
    """Check if neurite is flat using the given method.

    Args:
        neurite(Neurite): neurite to operate on
        tol(float): tolerance
        method(string): the method of flatness estimation:
            'tolerance' returns true if any extent of the tree is smaller
            than the given tolerance
            'ratio' returns true if the ratio of the two smallest directions
            is smaller than tol, e.g. [1,2,3] -> 1/2 < tol

    Returns:
        True if neurite is flat
    """
    extent = principal_direction_extent(neurite.points[:, COLS.XYZ])
    assert method in ('tolerance', 'ratio'), "Method must be one of 'tolerance', 'ratio'"
    threshold = float(tol)
    if method == 'ratio':
        smallest, second_smallest = np.sort(extent)[:2]
        return smallest / second_smallest < threshold
    return any(extent < threshold)
| 427,290
|
Check if a neuron has neurites that are flat within a tolerance
Args:
neurite(Neurite): neurite to operate on
tol(float): the tolerance or the ratio
method(string): 'tolerance' or 'ratio' described in :meth:`is_flat`
Returns:
Bool list corresponding to the flatness check for each neurite
in neuron neurites with respect to the given criteria
|
def get_flat_neurites(neuron, tol=0.1, method='ratio'):
    """Collect the neurites of `neuron` that are flat.

    Args:
        neuron: neuron to operate on
        tol(float): the tolerance or the ratio
        method(string): 'tolerance' or 'ratio' described in :meth:`is_flat`

    Returns:
        List of neurites that satisfy the flatness check for the given
        criteria
    """
    flat = []
    for neurite in neuron.neurites:
        if is_flat(neurite, tol, method):
            flat.append(neurite)
    return flat
| 427,292
|
Get neurites that are not monotonic
Args:
neurite(Neurite): neurite to operate on
tol(float): the tolerance or the ratio
Returns:
list of neurites that do not satisfy monotonicity test
|
def get_nonmonotonic_neurites(neuron, tol=1e-6):
    """Collect the neurites of `neuron` that are not monotonic.

    Args:
        neuron: neuron to operate on
        tol(float): the tolerance

    Returns:
        List of neurites that do not satisfy the monotonicity test
    """
    nonmonotonic = []
    for neurite in neuron.neurites:
        if not is_monotonic(neurite, tol):
            nonmonotonic.append(neurite)
    return nonmonotonic
| 427,293
|
Handles the saving/updating of a Publishable instance.
Arguments:
revision - if True, a new version of this Publishable will be created.
|
def save(self, revision=True, *args, **kwargs):
    """Handle the saving/updating of a Publishable instance.

    Arguments:
        revision: if True, a new version of this Publishable will be created.

    Returns:
        self, after persisting one (or two) rows via the parent save().
    """
    if revision:
        # If this is a revision, set it to be the head of the list and increment the revision id
        self.head = True
        self.revision_id += 1
        previous_revision = self.get_previous_revision()
        # NOTE(review): previous_revision is never used below — confirm
        # whether get_previous_revision() is needed here at all.
        if not self.is_parent():
            # If this is a revision, delete the old head of the list.
            type(self).objects \
                .filter(parent=self.parent, head=True) \
                .update(head=None)
        # Clear the instance id to force Django to save a new instance.
        # Both fields (pk, id) required for this to work -- something to do with model inheritance
        self.pk = None
        self.id = None
        # New version is unpublished by default
        self.is_published = None
    # Set created_at to current time, but only for first version
    if not self.created_at:
        self.created_at = timezone.now()
    self.updated_at = timezone.now()
    if revision:
        # NOTE(review): redundant — updated_at was already set just above;
        # confirm before removing.
        self.updated_at = timezone.now()
    super(Publishable, self).save(*args, **kwargs)
    # Update the parent foreign key: the first-ever version becomes its
    # own parent, requiring a second save of just that field.
    if not self.parent:
        self.parent = self
        super(Publishable, self).save(update_fields=['parent'])
    if revision:
        # Set latest version for all versions sharing this parent
        type(self).objects \
            .filter(parent=self.parent) \
            .update(latest_version=self.revision_id)
        self.latest_version = self.revision_id
    return self
| 427,513
|
Add a comment to the database.
Args:
comment (hotdoc.core.Comment): comment to add
|
def add_comment(self, comment):
    """Add a comment to the database.

    Args:
        comment (hotdoc.core.Comment): comment to add; falsy values are
            silently ignored.
    """
    if comment:
        self.__comments[comment.name] = comment
        # Notify subscribers that a new comment is available.
        self.comment_added_signal(self, comment)
| 427,692
|
Constructor for `Extension`.
Args:
dependency_name: str, see `ExtDependency.dependency_name`
is_upstream: bool, see `ExtDependency.is_upstream`
|
def __init__(self, dependency_name, is_upstream=False, optional=False):
    """Constructor for `ExtDependency`.

    Args:
        dependency_name: str, see `ExtDependency.dependency_name`
        is_upstream: bool, see `ExtDependency.is_upstream`
        optional: bool, whether the dependency is optional
    """
    self.dependency_name = dependency_name
    self.is_upstream = is_upstream
    self.optional = optional
| 427,715
|
Constructor for `Extension`.
This should never get called directly.
Args:
project: The `project.Project` instance which documentation
is being generated.
|
def __init__(self, app, project):
    """Constructor for `Extension`.

    This should never get called directly.

    Args:
        app: the application instance.
        project: The `project.Project` instance which documentation
            is being generated.
    """
    self.project = project
    self.app = app
    # Source files this extension will parse.
    self.sources = set()
    self.smart_sources = []
    self.index = None
    self.source_roots = OrderedSet()
    # Maps filenames to the unique names of the symbols created from them.
    self._created_symbols = DefaultOrderedDict(OrderedSet)
    self.__package_root = None
    self.__toplevel_comments = OrderedSet()
    # Created last — _make_formatter() may rely on the attributes above;
    # keep this line at the end.
    self.formatter = self._make_formatter()
| 427,716
|
Shortcut function for `utils.loggable.debug`
Args:
message: see `utils.loggable.debug`
domain: see `utils.loggable.debug`
|
def debug(self, message, domain=None):
    """Shortcut function for `utils.loggable.debug`.

    Args:
        message: see `utils.loggable.debug`
        domain: see `utils.loggable.debug`; defaults to this extension's name.
    """
    effective_domain = self.extension_name if domain is None else domain
    debug(message, effective_domain)
| 427,717
|
Shortcut function for `utils.loggable.info`
Args:
message: see `utils.loggable.info`
domain: see `utils.loggable.info`
|
def info(self, message, domain=None):
    """Shortcut function for `utils.loggable.info`.

    Args:
        message: see `utils.loggable.info`
        domain: see `utils.loggable.info`; defaults to this extension's name.
    """
    effective_domain = self.extension_name if domain is None else domain
    info(message, effective_domain)
| 427,718
|
Subclasses may call this to add an index argument.
Args:
group: argparse.ArgumentGroup, the extension argument group
prefix: str, arguments have to be namespaced
|
def add_index_argument(cls, group):
    """Subclasses may call this to add an index argument.

    Args:
        group: argparse.ArgumentGroup, the extension argument group
    """
    arg_prefix = cls.argument_prefix
    help_text = ("Name of the %s root markdown file, can be None" % (
        cls.extension_name))
    group.add_argument(
        '--%s-index' % arg_prefix, action="store",
        dest="%s_index" % arg_prefix,
        help=help_text)
| 427,725
|
Subclasses may call this to add sources and source_filters arguments.
Args:
group: argparse.ArgumentGroup, the extension argument group
allow_filters: bool, Whether the extension wishes to expose a
source_filters argument.
prefix: str, arguments have to be namespaced.
|
def add_sources_argument(cls, group, allow_filters=True, prefix=None, add_root_paths=False):
    """Subclasses may call this to add sources and source_filters arguments.

    Args:
        group: argparse.ArgumentGroup, the extension argument group
        allow_filters: bool, whether the extension wishes to expose a
            source_filters argument.
        prefix: str, arguments have to be namespaced; defaults to the
            class argument prefix.
        add_root_paths: bool, whether to also expose a source-roots argument.
    """
    prefix = prefix or cls.argument_prefix
    # argparse destinations cannot contain dashes.
    dest_prefix = prefix.replace('-', '_')
    group.add_argument("--%s-sources" % prefix,
                       action="store", nargs="+",
                       dest="%s_sources" % dest_prefix,
                       help="%s source files to parse" % prefix)
    if allow_filters:
        group.add_argument("--%s-source-filters" % prefix,
                           action="store", nargs="+",
                           dest="%s_source_filters" % dest_prefix,
                           help="%s source files to ignore" % prefix)
    if add_root_paths:
        group.add_argument("--%s-source-roots" % prefix,
                           action="store", nargs="+",
                           dest="%s_source_roots" % dest_prefix,
                           help="%s source root directories allowing files "
                                "to be referenced relatively to those" % prefix)
| 427,726
|
Subclasses may call this to expose a path argument.
Args:
group: argparse.ArgumentGroup, the extension argument group
argname: str, the name of the argument, will be namespaced.
dest: str, similar to the `dest` argument of
`argparse.ArgumentParser.add_argument`, will be namespaced.
help_: str, similar to the `help` argument of
`argparse.ArgumentParser.add_argument`.
|
def add_path_argument(cls, group, argname, dest=None, help_=None):
    """Subclasses may call this to expose a path argument.

    Args:
        group: argparse.ArgumentGroup, the extension argument group
        argname: str, the name of the argument, will be namespaced.
        dest: str, similar to the `dest` argument of
            `argparse.ArgumentParser.add_argument`, will be namespaced.
        help_: str, similar to the `help` argument of
            `argparse.ArgumentParser.add_argument`.
    """
    prefixed = '%s-%s' % (cls.argument_prefix, argname)
    if dest is not None:
        final_dest = dest
        dest = '%s_%s' % (cls.argument_prefix, dest)
    else:
        dest = prefixed.replace('-', '_')
        # Strip the "<prefix>_" part to get the unprefixed destination.
        final_dest = dest[len(cls.argument_prefix) + 1:]
    group.add_argument('--%s' % prefixed, action='store',
                       dest=dest, help=help_)
    cls.path_arguments[dest] = final_dest
| 427,727
|
Subclasses may call this to expose a paths argument.
Args:
group: argparse.ArgumentGroup, the extension argument group
argname: str, the name of the argument, will be namespaced.
dest: str, similar to the `dest` argument of
`argparse.ArgumentParser.add_argument`, will be namespaced.
help_: str, similar to the `help` argument of
`argparse.ArgumentParser.add_argument`.
|
def add_paths_argument(cls, group, argname, dest=None, help_=None):
    """Subclasses may call this to expose a paths (list) argument.

    Args:
        group: argparse.ArgumentGroup, the extension argument group
        argname: str, the name of the argument, will be namespaced.
        dest: str, similar to the `dest` argument of
            `argparse.ArgumentParser.add_argument`, will be namespaced.
        help_: str, similar to the `help` argument of
            `argparse.ArgumentParser.add_argument`.
    """
    prefixed = '%s-%s' % (cls.argument_prefix, argname)
    if dest is not None:
        final_dest = dest
        dest = '%s_%s' % (cls.argument_prefix, dest)
    else:
        dest = prefixed.replace('-', '_')
        # Strip the "<prefix>_" part to get the unprefixed destination.
        final_dest = dest[len(cls.argument_prefix) + 1:]
    group.add_argument('--%s' % prefixed, action='store', nargs='+',
                       dest=dest, help=help_)
    cls.paths_arguments[dest] = final_dest
| 427,728
|
Extensions that discover and create instances of `symbols.Symbol`
should do this through this method, as it will keep an index
of these which can be used when generating a "naive index".
See `database.Database.create_symbol` for more
information.
Args:
args: see `database.Database.create_symbol`
kwargs: see `database.Database.create_symbol`
Returns:
symbols.Symbol: the created symbol, or `None`.
|
def create_symbol(self, *args, **kwargs):
    """Create a symbol through the database, tracking it for naive indexing.

    Extensions that discover and create instances of `symbols.Symbol`
    should do this through this method, as it keeps an index of created
    symbols usable when generating a "naive index".  See
    `database.Database.create_symbol` for more information.

    Args:
        args: see `database.Database.create_symbol`
        kwargs: see `database.Database.create_symbol`

    Returns:
        symbols.Symbol: the created symbol, or `None`.
    """
    # Default the project name when absent or empty.
    if not kwargs.get('project_name'):
        kwargs['project_name'] = self.project.project_name
    sym = self.app.database.create_symbol(*args, **kwargs)
    # Only track concrete subclasses, not bare Symbol instances.
    # pylint: disable=unidiomatic-typecheck
    if sym and type(sym) != Symbol:
        self._created_symbols[sym.filename].add(sym.unique_name)
    return sym
| 427,730
|
Called by `project.Project.format_page`, to leave full control
to extensions over the formatting of the pages they are
responsible of.
Args:
page: tree.Page, the page to format.
link_resolver: links.LinkResolver, object responsible
for resolving links potentially mentioned in `page`
output: str, path to the output directory.
|
def format_page(self, page, link_resolver, output):
    """Format a page; called by `project.Project.format_page`.

    Leaves full control to extensions over the formatting of the pages
    they are responsible for.

    Args:
        page: tree.Page, the page to format.
        link_resolver: links.LinkResolver, object responsible
            for resolving links potentially mentioned in `page`
        output: str, path to the output directory.
    """
    debug('Formatting page %s' % page.link.ref, 'formatting')
    actual_output = None
    if output:
        # HTML goes into an 'html' subdirectory of the output directory.
        actual_output = os.path.join(output, 'html')
        if not os.path.exists(actual_output):
            os.makedirs(actual_output)
    page.format(self.formatter, link_resolver, actual_output)
| 427,733
|
Generator that yields pages in pre-order (each page before its subpages)
Args:
parent: hotdoc.core.tree.Page, optional, the page to start
traversal from. If None, defaults to the root of the tree.
Yields:
hotdoc.core.tree.Page: the next page
|
def walk(self, parent=None):
    """Generator that yields pages in pre-order (a page before its subpages).

    Args:
        parent: hotdoc.core.tree.Page, optional, the page to start
            traversal from. If None, defaults to the root of the tree
            (which is then yielded first).

    Yields:
        hotdoc.core.tree.Page: the next page
    """
    if parent is None:
        parent = self.root
        yield parent
    for child_name in parent.subpages:
        child = self.__all_pages[child_name]
        yield child
        # Recurse depth-first into the child's own subpages.
        for descendant in self.walk(parent=child):
            yield descendant
| 427,775
|
Will call resolve_symbols on all the stale subpages of the tree.
Args:
page: hotdoc.core.tree.Page, the page to resolve symbols in,
will recurse on potential subpages.
|
def resolve_symbols(self, database, link_resolver, page=None):
    """Call resolve_symbols on all the stale subpages of the tree.

    Args:
        database: the symbol database.
        link_resolver: the link resolver.
        page: hotdoc.core.tree.Page, the page to resolve symbols in,
            will recurse on potential subpages; defaults to the root.
    """
    page = page or self.root
    # Lazily parse the page's markdown into an AST; generated pages have
    # no source file to read.
    if page.ast is None and not page.generated:
        with io.open(page.source_file, 'r', encoding='utf-8') as _:
            page.ast = cmark.hotdoc_to_ast(_.read(), self)
    page.resolve_symbols(self, database, link_resolver)
    self.__update_dep_map(page, page.symbols)
    # Depth-first recursion over subpages.
    for pagename in page.subpages:
        cpage = self.__all_pages[pagename]
        self.resolve_symbols(database, link_resolver, page=cpage)
| 427,778
|
Constructor for `ConfigParser`.
Args:
command_line_args: list, a list of command line arguments
that will override the keys defined in `conf_file`,
or `None`
conf_file: str, the path to the configuration file. If
`None`, `ConfigParser` will look for a file named
`hotdoc.json` in the current directory.
|
def __init__(self, command_line_args=None, conf_file=None, defaults=None,
             json_conf=None):
    """Constructor for `ConfigParser`.

    Args:
        command_line_args: dict-like, command-line arguments that override
            the keys defined in `conf_file`, or `None`.
        conf_file: str, the path to the configuration file, or `None`.
        defaults: dict, fallback values, or `None`.
        json_conf: dict, explicit configuration taking precedence over the
            contents of `conf_file`, or `None`.
    """
    self.conf_file = None
    self.__conf_dir = None
    self.__config = {}
    if conf_file:
        self.conf_file = os.path.abspath(conf_file)
        self.__conf_dir = os.path.dirname(self.conf_file)
        # An explicit json_conf takes precedence over the file contents.
        if json_conf:
            self.__config = json_conf
        else:
            self.__config = load_config_json(self.conf_file)
    self.__invoke_dir = os.getcwd()
    self.__cli = command_line_args or {}
    self.__defaults = defaults or {}
| 428,011
|
Get all the markdown files in a folder, recursively
Args:
dir_: str, a toplevel folder to walk.
|
def get_markdown_files(self, dir_):
    """Get all the markdown files in a folder, recursively.

    Args:
        dir_: str, a toplevel folder to walk.

    Returns:
        OrderedSet: the paths of all matching files.
    """
    md_files = OrderedSet()
    for root, _, files in os.walk(dir_):
        for name in files:
            # BUG FIX: os.path.splitext always returns a 2-tuple, so the
            # original `len(split) == 1` guard was dead code; just take
            # the extension directly.
            ext = os.path.splitext(name)[1]
            if ext in ('.markdown', '.md', '.yaml'):
                md_files.add(os.path.join(root, name))
    return md_files
| 428,015
|
Get the value for `key`.
Gives priority to command-line overrides.
Args:
key: str, the key to get the value for.
Returns:
object: The value for `key`
|
def get(self, key, default=None):
    """Get the value for `key`.

    Gives priority to command-line overrides, then the configuration
    file, then the defaults.

    Args:
        key: str, the key to get the value for.
        default: the value returned when `key` is found nowhere.

    Returns:
        object: The value for `key`
    """
    for mapping in (self.__cli, self.__config, self.__defaults):
        if key in mapping:
            return mapping[key]
    return default
| 428,016
|
Retrieve the absolute path to an index, according to
`prefix`.
Args:
prefix: str, the desired prefix or `None`.
Returns:
str: An absolute path, or `None`
|
def get_index(self, prefix=''):
    """Retrieve the absolute path to an index, according to `prefix`.

    Args:
        prefix: str, the desired prefix or `None`.

    Returns:
        str: An absolute path, or `None`
    """
    prefixed = '%s_index' % prefix if prefix else 'index'
    # A non-empty command-line value wins over the configuration file.
    if self.__cli.get(prefixed):
        return self.__abspath(self.__cli.get(prefixed), False)
    return self.__abspath(self.__config.get(prefixed), True)
| 428,017
|
Retrieve a path from the config, resolving it against
the invocation directory or the configuration file directory,
depending on whether it was passed through the command-line
or the configuration file.
Args:
key: str, the key to lookup the path with
Returns:
str: The path, or `None`
|
def get_path(self, key, rel_to_cwd=False, rel_to_conf=False):
    """Retrieve a path from the config.

    The path is resolved against the invocation directory or the
    configuration file directory, depending on whether it was passed
    through the command-line or the configuration file.

    Args:
        key: str, the key to lookup the path with.
        rel_to_cwd: bool, return the path relative to the invocation
            directory.
        rel_to_conf: bool, return the path relative to the configuration
            file directory.

    Returns:
        str: The path, or `None`
    """
    if key in self.__cli:
        path = self.__cli[key]
        from_conf = False
    else:
        path = self.__config.get(key)
        from_conf = True
    # Only string values can denote paths.
    if not isinstance(path, str):
        return None
    res = self.__abspath(path, from_conf)
    if rel_to_cwd:
        return os.path.relpath(res, self.__invoke_dir)
    if rel_to_conf:
        return os.path.relpath(res, self.__conf_dir)
    # BUG FIX: the original recomputed self.__abspath(path, from_conf)
    # here; reuse the already-computed result instead.
    return res
| 428,018
|
Same as `ConfigParser.get_path` for a list of paths.
Args:
key: str, the key to lookup the paths with
Returns:
list: The paths.
|
def get_paths(self, key):
    """Same as `ConfigParser.get_path` for a list of paths.

    Args:
        key: str, the key to lookup the paths with

    Returns:
        list: The resolved paths.
    """
    if key in self.__cli:
        raw_paths = self.__cli[key] or []
        from_conf = False
    else:
        raw_paths = self.__config.get(key) or []
        from_conf = True
    resolved = []
    for path in flatten_list(raw_paths):
        abs_path = self.__abspath(path, from_conf)
        if abs_path:
            resolved.append(abs_path)
    return resolved
| 428,019
|
Retrieve a set of absolute paths to sources, according to `prefix`
`ConfigParser` will perform wildcard expansion and
filtering.
Args:
prefix: str, the desired prefix.
Returns:
utils.utils.OrderedSet: The set of sources for the given
`prefix`.
|
def get_sources(self, prefix=''):
    """Retrieve a set of absolute paths to sources, according to `prefix`.

    `ConfigParser` will perform wildcard expansion and filtering.

    Args:
        prefix: str, the desired prefix.

    Returns:
        utils.utils.OrderedSet: The set of sources for the given `prefix`.
    """
    prefix = prefix.replace('-', '_')
    sources_key = '%s_sources' % prefix
    # Command-line values take precedence over the configuration file.
    if sources_key in self.__cli:
        sources, from_conf = self.__cli.get(sources_key), False
    else:
        sources, from_conf = self.__config.get(sources_key), True
    if sources is None:
        return OrderedSet()
    sources = self.__resolve_patterns(sources, from_conf)
    filters_key = '%s_source_filters' % prefix
    if filters_key in self.__cli:
        filters, from_conf = self.__cli.get(filters_key), False
    else:
        filters, from_conf = self.__config.get(filters_key), True
    if filters is not None:
        # Remove every filtered-out source.
        sources -= self.__resolve_patterns(filters, from_conf)
    return sources
| 428,020
|
Dump the possibly updated config to a file.
Args:
conf_file: str, the destination, or None to overwrite the
existing configuration.
|
def dump(self, conf_file=None):
    """Dump the possibly updated config to a file.

    Args:
        conf_file: str, the destination, or None to overwrite the
            existing configuration.
    """
    if conf_file:
        conf_dir = os.path.dirname(conf_file)
        if not conf_dir:
            conf_dir = self.__invoke_dir
        elif not os.path.exists(conf_dir):
            os.makedirs(conf_dir)
    else:
        conf_dir = self.__conf_dir
    final_conf = {}
    # Keep config-file values that were not overridden on the command line.
    for key, value in list(self.__config.items()):
        if key in self.__cli:
            continue
        final_conf[key] = value
    for key, value in list(self.__cli.items()):
        if key.endswith('index') or key in ['sitemap', 'output']:
            # Single-path values: store them relative to the dump location.
            path = self.__abspath(value, from_conf=False)
            if path:
                relpath = os.path.relpath(path, conf_dir)
                final_conf[key] = relpath
        elif key.endswith('sources') or key.endswith('source_filters'):
            # Path-list values: relativize element by element.
            new_list = []
            for path in value:
                path = self.__abspath(path, from_conf=False)
                if path:
                    relpath = os.path.relpath(path, conf_dir)
                    new_list.append(relpath)
            final_conf[key] = new_list
        elif key not in ['command', 'output_conf_file']:
            # Everything else is stored verbatim, except transient keys.
            final_conf[key] = value
    with open(conf_file or self.conf_file or 'hotdoc.json', 'w') as _:
        _.write(json.dumps(final_conf, sort_keys=True, indent=4))
| 428,022
|
Walk the hierarchy, applying action to each filename.
Args:
action: callable, the callable to invoke for each filename,
will be invoked with the filename, the subfiles, and
the level in the sitemap.
|
def walk(self, action, user_data=None):
    """Walk the hierarchy, applying `action` to each filename.

    Args:
        action: callable, invoked for each filename with the filename,
            the subfiles, the level in the sitemap, and `user_data`.
        user_data: opaque data forwarded to `action`.
    """
    root = self.__root
    # The index file sits at level 0; its children start at level 1.
    action(self.index_file, root, 0, user_data)
    self.__do_walk(root, 1, action, user_data)
| 428,080
|
Parse a sitemap file.
Args:
filename: str, the path to the sitemap file.
Returns:
Sitemap: the generated sitemap.
|
def parse(self, filename):
    """Parse a sitemap file.

    Args:
        filename: str, the path to the sitemap file.

    Returns:
        Sitemap: the generated sitemap.
    """
    with io.open(filename, 'r', encoding='utf-8') as _:
        lines = _.readlines()
    all_source_files = set()
    source_map = {}
    lineno = 0
    root = None
    index = None
    cur_level = -1
    # Stack of the ancestors of the line currently being parsed.
    parent_queue = []
    for line in lines:
        try:
            level, line = dedent(line)
            if line.startswith('#'):
                # Comment line: skip.
                lineno += 1
                continue
            elif line.startswith('\\#'):
                # Escaped '#': strip the backslash and treat as a filename.
                line = line[1:]
        except IndentError as exc:
            error('bad-indent', 'Invalid indentation', filename=filename,
                  lineno=lineno, column=exc.column)
        if not line:
            lineno += 1
            continue
        source_file = dequote(line)
        if not source_file:
            lineno += 1
            continue
        if source_file in all_source_files:
            error('sitemap-duplicate', 'Filename listed twice',
                  filename=filename, lineno=lineno, column=level * 8 + 1)
        all_source_files.add(source_file)
        # Remember where each file appeared (line, column) for diagnostics.
        source_map[source_file] = (lineno, level * 8 + 1)
        page = OrderedDict()
        if root is not None and level == 0:
            error('sitemap-error', 'Sitemaps only support one root',
                  filename=filename, lineno=lineno, column=0)
        if root is None:
            # First entry becomes the root / index of the sitemap.
            root = page
            index = source_file
        else:
            # Pop the stack back up to this line's parent level, then
            # attach the page to its parent.
            lvl_diff = cur_level - level
            while lvl_diff >= 0:
                parent_queue.pop()
                lvl_diff -= 1
            parent_queue[-1][source_file] = page
        parent_queue.append(page)
        cur_level = level
        lineno += 1
    return Sitemap(root, filename, index, source_map)
| 428,086
|
See the documentation of `to_ast` for
more information.
Args:
ast: PyCapsule, a capsule as returned by `to_ast`
link_resolver: hotdoc.core.links.LinkResolver, a link
resolver instance.
|
def ast_to_html(self, ast, link_resolver):
    """Render an AST to HTML.

    See the documentation of `to_ast` for more information.

    Args:
        ast: PyCapsule, a capsule as returned by `to_ast`
        link_resolver: hotdoc.core.links.LinkResolver, a link
            resolver instance.

    Returns:
        str: the rendered HTML.
    """
    # cmark returns (html, diagnostics); only the HTML is needed here.
    return cmark.ast_to_html(ast, link_resolver)[0]
| 428,107
|
Python 2 and 3 compatible string checker.
Args:
string (str | basestring): the string to check
Returns:
bool: True or False
|
def is_str(string):
    """Python 2 and 3 compatible string checker.

    Args:
        string (str | basestring): the string to check

    Returns:
        bool: True or False
    """
    if sys.version_info[:2] < (3, 0):
        # Python 2: unicode and str both derive from basestring.
        return isinstance(string, basestring)
    return isinstance(string, str)
| 428,771
|
Try to find a c++ parser (xml generator)
Args:
name (str): name of the c++ parser (e.g. castxml)
Returns:
path (str), name (str): path to the xml generator and its name
If no c++ parser is found the function raises an exception.
pygccxml does currently only support castxml as c++ parser.
|
def find_xml_generator(name="castxml"):
    """Try to find a c++ parser (xml generator).

    Args:
        name (str): name of the c++ parser (e.g. castxml)

    Returns:
        path (str), name (str): path to the xml generator and its name

    Raises:
        Exception: if no c++ parser is found.

    pygccxml currently only supports castxml as c++ parser.
    """
    if sys.version_info[:2] < (3, 3):
        path = _find_xml_generator_for_legacy_python(name)
    else:
        path = _find_xml_generator_for_python_greater_equals_33(name)
    if path == "" or path is None:
        raise Exception("No c++ parser found. Please install castxml.")
    return path.rstrip(), name
| 428,772
|
Class constructor that parses the XML generator's command line
Args:
cflags (str): cflags command line arguments passed to the XML
generator
|
def __init__(self, cflags):
    """Class constructor that parses the XML generator's command line.

    Args:
        cflags (str): cflags command line arguments passed to the XML
            generator

    Raises:
        RuntimeError: if an unknown -std=c++xx flag is used.
    """
    super(cxx_standard, self).__init__()
    self._stdcxx = None
    self._is_implicit = False
    # If several known -std flags appear, the last one iterated wins.
    for flag in cxx_standard.__STD_CXX:
        if flag in cflags:
            self._stdcxx = flag
            self._cplusplus = cxx_standard.__STD_CXX[flag]
    if self._stdcxx is None:
        if '-std=' in cflags:
            raise RuntimeError('Unknown -std=c++xx flag used')
        # Assume c++03 by default
        self._stdcxx = '-std=c++03'
        self._cplusplus = cxx_standard.__STD_CXX['-std=c++03']
        self._is_implicit = True
| 428,780
|
Create a new xml_generators object.
Args:
logger (logging.Logger) : a logger for debugging output
gccxml_cvs_revision (str|None): the xml output version
castxml_format (str|None): the xml output version
|
def __init__(self, logger, gccxml_cvs_revision=None, castxml_format=None):
    """Create a new xml_generators object.

    Args:
        logger (logging.Logger): a logger for debugging output
        gccxml_cvs_revision (str|None): the xml output version (GCC-XML)
        castxml_format (str|None): the xml output version (CastXML)

    Raises:
        RuntimeError: if both, or neither, of `gccxml_cvs_revision` and
            `castxml_format` are given.
    """
    if castxml_format is not None and gccxml_cvs_revision is not None:
        # BUG FIX: the two literal halves were previously joined without a
        # space ("...andcastxml_format...").
        raise RuntimeError("Setting both gccxml_cvs_revision and "
                           "castxml_format is not allowed!")
    self._is_castxml1 = False
    self._is_castxml = False
    self._is_gccxml = False
    if castxml_format is not None:
        self._xml_generator_version = self.__castxml
        self._xml_output_version = castxml_format
        self._is_castxml = True
        self._is_castxml1 = True
    elif gccxml_cvs_revision is not None:
        self._xml_generator_version, self._xml_output_version = \
            self.__extract_versions(logger, gccxml_cvs_revision)
        self._is_gccxml = "GCC-XML" in self._xml_generator_version
        self._is_castxml = "CastXML" in self._xml_generator_version
    else:
        # BUG FIX: missing space ("...gccxml_cvs_revisionneed...").
        raise RuntimeError("Either castxml_format or gccxml_cvs_revision "
                           "need to be defined!")
| 428,812
|
Implementation detail.
Args:
type_ (type_t): type
Returns:
type_t: the type associated to the inputted type
|
def __remove_alias(type_):
    """Implementation detail: recursively strip typedef aliases.

    Args:
        type_ (type_t): type

    Returns:
        type_t: the type associated to the inputted type, with typedefs
            resolved.
    """
    is_typedef_decl = (isinstance(type_, cpptypes.declarated_t) and
                       isinstance(type_.declaration, typedef.typedef_t))
    if is_typedef_decl:
        # Follow the typedef chain down to the underlying type.
        return __remove_alias(type_.declaration.decl_type)
    if isinstance(type_, cpptypes.compound_t):
        # Compound types (pointer, reference, const...) keep their shape;
        # only their base is de-aliased, in place.
        type_.base = __remove_alias(type_.base)
    return type_
| 428,817
|
Returns `type_t` without typedef
Args:
type_ (type_t | declaration_t): type or declaration
Returns:
type_t: the type associated to the inputted declaration
|
def remove_alias(type_):
    """Return `type_t` without typedef.

    Args:
        type_ (type_t | declaration_t): type or declaration

    Returns:
        type_t: the type associated to the inputted declaration
    """
    if isinstance(type_, cpptypes.type_t):
        type_ref = type_
    elif isinstance(type_, typedef.typedef_t):
        type_ref = type_.decl_type
    else:
        # Not a valid input: hand it back untouched.
        return type_
    # Serve the memoized result when available.
    cached = type_ref.cache.remove_alias
    if cached:
        return cached
    # Work on a clone so the original type is left intact, then memoize.
    no_alias = __remove_alias(type_ref.clone())
    type_ref.cache.remove_alias = no_alias
    return no_alias
| 428,818
|
Extracts a list of arguments from the provided declaration string.
Implementation detail. Example usages:
Input: myClass<std::vector<int>, std::vector<double>>
Output: [std::vector<int>, std::vector<double>]
Args:
decl_string (str): the full declaration string
Returns:
list: list of arguments as strings
|
def args(self, decl_string):
    """Extract a list of arguments from the provided declaration string.

    Implementation detail. Example usage:
        Input: myClass<std::vector<int>, std::vector<double>>
        Output: [std::vector<int>, std::vector<double>]

    Args:
        decl_string (str): the full declaration string

    Returns:
        list: list of arguments as strings

    Raises:
        RuntimeError: if `decl_string` is not a valid template
            instantiation string.
    """
    args_begin = decl_string.find(self.__begin)
    args_end = decl_string.rfind(self.__end)
    if -1 in (args_begin, args_end) or args_begin == args_end:
        raise RuntimeError(
            "%s doesn't validate template instantiation string" %
            decl_string)
    args_only = decl_string[args_begin + 1: args_end].strip()
    # The list of arguments to be returned
    args = []
    parentheses_blocks = []
    prev_span = 0
    if self.__begin == "<":
        # In case where we are splitting template names, there
        # can be parentheses blocks (for arguments) that need to be taken
        # care of.
        # Build a regex matching a space (\s)
        # + something inside parentheses
        regex = re.compile("\\s\\(.*?\\)")
        for m in regex.finditer(args_only):
            # Store the position and the content.
            # NOTE(review): prev_span is set to the match's own width, not a
            # running offset — confirm the intended position arithmetic.
            parentheses_blocks.append([m.start() - prev_span, m.group()])
            prev_span = m.end() - m.start()
            # Cleanup the args_only string by removing the parentheses and
            # their content.
            args_only = args_only.replace(m.group(), "")
    # Now we are trying to split the args_only string in multiple arguments
    previous_found, found = 0, 0
    while True:
        found = self.__find_args_separator(args_only, previous_found)
        if found == -1:
            args.append(args_only[previous_found:].strip())
            # This is the last argument. Break out of the loop.
            break
        else:
            args.append(args_only[previous_found: found].strip())
            previous_found = found + 1  # skip found separator
    # Get the size and position for each argument
    absolute_pos_list = []
    absolute_pos = 0
    for arg in args:
        absolute_pos += len(arg)
        absolute_pos_list.append(absolute_pos)
    for item in parentheses_blocks:
        # In case where there are parentheses blocks we add them back
        # to the right argument
        parentheses_block_absolute_pos = item[0]
        parentheses_block_string = item[1]
        current_arg_absolute_pos = 0
        for arg_index, arg_absolute_pos in enumerate(absolute_pos_list):
            current_arg_absolute_pos += arg_absolute_pos
            if current_arg_absolute_pos >= parentheses_block_absolute_pos:
                # Add the parentheses block back and break out of the loop.
                args[arg_index] += parentheses_block_string
                break
    return args
| 428,846
|
Returns True if declaration represents a C++ union
Args:
declaration (declaration_t): the declaration to be checked.
Returns:
bool: True if declaration represents a C++ union
|
def is_union(declaration):
    """Return True if declaration represents a C++ union.

    Args:
        declaration (declaration_t): the declaration to be checked.

    Returns:
        bool: True if declaration represents a C++ union
    """
    if not is_class(declaration):
        return False
    class_decl = class_traits.get_declaration(declaration)
    return class_decl.class_type == class_declaration.CLASS_TYPES.UNION
| 428,885
|
Returns True if declaration represents a C++ struct
Args:
declaration (declaration_t): the declaration to be checked.
Returns:
bool: True if declaration represents a C++ struct
|
def is_struct(declaration):
    """Return True if declaration represents a C++ struct.

    Args:
        declaration (declaration_t): the declaration to be checked.

    Returns:
        bool: True if declaration represents a C++ struct
    """
    if not is_class(declaration):
        return False
    class_decl = class_traits.get_declaration(declaration)
    return class_decl.class_type == class_declaration.CLASS_TYPES.STRUCT
| 428,886
|
Returns reference to trivial constructor.
Args:
type_ (declarations.class_t): the class to be searched.
Returns:
declarations.constructor_t: the trivial constructor
|
def find_trivial_constructor(type_):
    """Return a reference to the trivial constructor.

    Args:
        type_ (declarations.class_t): the class to be searched.

    Returns:
        declarations.constructor_t: the trivial constructor, or None.
    """
    assert isinstance(type_, class_declaration.class_t)
    # IDIOM: pass the predicate directly instead of wrapping it in a
    # redundant `lambda x: is_trivial_constructor(x)`.
    trivial = type_.constructors(
        is_trivial_constructor,
        recursive=False,
        allow_empty=True)
    return trivial[0] if trivial else None
| 428,887
|
Returns reference to copy constructor.
Args:
type_ (declarations.class_t): the class to be searched.
Returns:
declarations.constructor_t: the copy constructor
|
def find_copy_constructor(type_):
    """Return a reference to the copy constructor.

    Args:
        type_ (declarations.class_t): the class to be searched.

    Returns:
        declarations.constructor_t: the copy constructor, or None.
    """
    # IDIOM: pass the predicate directly instead of wrapping it in a
    # redundant `lambda x: is_copy_constructor(x)`.
    copy_ = type_.constructors(
        is_copy_constructor,
        recursive=False,
        allow_empty=True)
    return copy_[0] if copy_ else None
| 428,888
|
Implementation detail.
Checks if the class is non copyable, without considering the base classes.
Args:
class_ (declarations.class_t): the class to be checked
already_visited_cls_vars (list): optional list of vars that should not
be checked a second time, to prevent infinite recursions.
Returns:
bool: if the class is non copyable
|
def __is_noncopyable_single(class_, already_visited_cls_vars=None):
    """Implementation detail.

    Check if the class is non copyable, without considering the base
    classes: only its own copy API and its member variables are inspected.

    Args:
        class_ (declarations.class_t): the class to be checked
        already_visited_cls_vars (list): optional list of vars that should
            not be checked a second time, to prevent infinite recursions.

    Returns:
        bool: if the class is non copyable
    """
    logger = utils.loggers.cxx_parser
    # A class exposing the full public copy API is copyable outright.
    has_full_copy_api = (
        has_copy_constructor(class_) and
        has_public_constructor(class_) and
        has_public_assign(class_) and
        has_public_destructor(class_))
    if has_full_copy_api:
        msg = os.linesep.join([
            "__is_noncopyable_single - %s - COPYABLE:" % class_.decl_string,
            " trivial copy constructor: yes",
            " public constructor: yes",
            " public assign: yes",
            " public destructor: yes"])
        logger.debug(msg)
        return False
    if already_visited_cls_vars is None:
        already_visited_cls_vars = []
    # Otherwise, a single noncopyable member variable makes it noncopyable.
    if find_noncopyable_vars(class_, already_visited_cls_vars):
        logger.debug(
            ("__is_noncopyable_single(TRUE) - %s - contains noncopyable " +
             "members"), class_.decl_string)
        return True
    logger.debug((
        "__is_noncopyable_single(FALSE) - %s - COPYABLE, because is " +
        "doesn't contains noncopyable members"), class_.decl_string)
    return False
| 428,897
|
Checks if class is non copyable
Args:
class_ (declarations.class_t): the class to be checked
already_visited_cls_vars (list): optional list of vars that should not
be checked a second time, to prevent infinite recursions.
In general you can ignore this argument, it is mainly used during
recursive calls of is_noncopyable() done by pygccxml.
Returns:
bool: if the class is non copyable
|
def is_noncopyable(class_, already_visited_cls_vars=None):
    """Check if class is non copyable.

    Args:
        class_ (declarations.class_t): the class to be checked
        already_visited_cls_vars (list): optional list of vars that should
            not be checked a second time, to prevent infinite recursions.
            In general you can ignore this argument, it is mainly used
            during recursive calls of is_noncopyable() done by pygccxml.

    Returns:
        bool: if the class is non copyable
    """
    logger = utils.loggers.cxx_parser
    class_decl = class_traits.get_declaration(class_)
    true_header = "is_noncopyable(TRUE) - %s - " % class_.decl_string
    # Unions are always treated as copyable.
    if is_union(class_):
        return False
    if class_decl.is_abstract:
        logger.debug(true_header + "abstract client")
        return True
    # if class has public, user defined copy constructor, then this class
    # is copyable
    copy_ = find_copy_constructor(class_decl)
    if copy_ and copy_.access_type == 'public' and not copy_.is_artificial:
        return False
    if already_visited_cls_vars is None:
        already_visited_cls_vars = []
    # Walk the whole inheritance hierarchy looking for noncopyable bases.
    for base_desc in class_decl.recursive_bases:
        assert isinstance(base_desc, class_declaration.hierarchy_info_t)
        if base_desc.related_class.decl_string in \
                ('::boost::noncopyable', '::boost::noncopyable_::noncopyable'):
            logger.debug(true_header + "derives from boost::noncopyable")
            return True
        if not has_copy_constructor(base_desc.related_class):
            base_copy_ = find_copy_constructor(base_desc.related_class)
            if base_copy_ and base_copy_.access_type == 'private':
                logger.debug(
                    true_header +
                    "there is private copy constructor")
                return True
            elif __is_noncopyable_single(
                    base_desc.related_class, already_visited_cls_vars):
                logger.debug(
                    true_header +
                    "__is_noncopyable_single returned True")
                return True
        # NOTE(review): this repeats the __is_noncopyable_single check above
        # for the no-copy-constructor path; it also mutates
        # already_visited_cls_vars, so the duplication may be intentional —
        # confirm before simplifying.
        if __is_noncopyable_single(
                base_desc.related_class, already_visited_cls_vars):
            logger.debug(
                true_header +
                "__is_noncopyable_single returned True")
            return True
    if not has_copy_constructor(class_decl):
        logger.debug(true_header + "does not have trivial copy constructor")
        return True
    elif not has_public_constructor(class_decl):
        logger.debug(true_header + "does not have a public constructor")
        return True
    elif has_destructor(class_decl) and not has_public_destructor(class_decl):
        logger.debug(true_header + "has private destructor")
        return True
    return __is_noncopyable_single(class_decl, already_visited_cls_vars)
| 428,898
|
Check if the declaration is a copy constructor,
Args:
constructor (declarations.constructor_t): the constructor
to be checked.
Returns:
bool: True if this is a copy constructor, False instead.
|
def is_copy_constructor(constructor):
    """Return True if *constructor* is a copy constructor, False otherwise.

    Args:
        constructor (declarations.constructor_t): the constructor to check.

    Returns:
        bool: True for a copy constructor, False otherwise.
    """
    assert isinstance(constructor, calldef_members.constructor_t)
    args = constructor.arguments

    # A copy constructor takes exactly one argument.
    if len(args) != 1:
        return False

    arg_type = args[0].decl_type
    if not isinstance(arg_type, cpptypes.compound_t):
        # A typedef (declarated_t) can be passed to a constructor, but
        # "typedef class MyClass { MyClass(const MyClass & arg) {} }" is
        # illegal C++, so a typedef argument rules out a copy constructor.
        # declarated_t and compound_t both derive from type_t but are
        # otherwise unrelated, which lets us discriminate here.
        return False

    # The argument must be a reference ...
    if not type_traits.is_reference(arg_type):
        return False

    # ... to a const type.
    if not type_traits.is_const(arg_type.base):
        return False

    un_aliased = type_traits.remove_alias(arg_type.base)
    # un_aliased is now a const_t instance.
    if not isinstance(un_aliased.base, cpptypes.declarated_t):
        # We need a declaration here: "MyClass(const int & arg)" is not a
        # copy constructor (un_aliased.base would be cpptypes.int_t).
        return False

    # Finally, the referenced declaration must be the enclosing class.
    return id(un_aliased.base.declaration) == id(constructor.parent)
| 428,901
|
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
def normalize_name(decl):
    """Cached variant of normalize applied to ``decl.name``.

    Args:
        decl (declaration.declaration_t): the declaration

    Returns:
        str: normalized name
    """
    cached = decl.cache.normalized_name
    if cached is None:
        cached = normalize(decl.name)
        decl.cache.normalized_name = cached
    return cached
| 428,982
|
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
def normalize_partial_name(decl):
    """Cached variant of normalize applied to ``decl.partial_name``.

    Args:
        decl (declaration.declaration_t): the declaration

    Returns:
        str: normalized partial name
    """
    cached = decl.cache.normalized_partial_name
    if cached is None:
        cached = normalize(decl.partial_name)
        decl.cache.normalized_partial_name = cached
    return cached
| 428,983
|
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
def normalize_full_name_true(decl):
    """Cached variant of normalize applied to the full name (with defaults).

    Args:
        decl (declaration.declaration_t): the declaration

    Returns:
        str: normalized full name
    """
    cached = decl.cache.normalized_full_name_true
    if cached is None:
        cached = normalize(
            declaration_utils.full_name(decl, with_defaults=True))
        decl.cache.normalized_full_name_true = cached
    return cached
| 428,984
|
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
def normalize_full_name_false(decl):
    """Cached variant of normalize applied to the full name (no defaults).

    Args:
        decl (declaration.declaration_t): the declaration

    Returns:
        str: normalized full name
    """
    cached = decl.cache.normalized_full_name_false
    if cached is None:
        cached = normalize(
            declaration_utils.full_name(decl, with_defaults=False))
        decl.cache.normalized_full_name_false = cached
    return cached
| 428,985
|
Find the container traits type of a declaration.
Args:
cls_or_string (str | declarations.declaration_t): a string
Returns:
declarations.container_traits: a container traits
|
def find_container_traits(cls_or_string):
    """Find the container traits type of a declaration.

    Args:
        cls_or_string (str | declarations.declaration_t): a string or a
            class declaration.

    Returns:
        declarations.container_traits: matching traits, or None.
    """
    if utils.is_str(cls_or_string):
        if not templates.is_instantiation(cls_or_string):
            return None
        name = templates.name(cls_or_string)
        if name.startswith('std::'):
            name = name[len('std::'):]
        # NOTE(review): because "std::" is stripped first, this branch can
        # never match a "std::tr1::..." name — confirm upstream intent.
        if name.startswith('std::tr1::'):
            name = name[len('std::tr1::'):]
        for traits in all_container_traits:
            if traits.name() == name:
                return traits
        return None

    # Declaration path: check the cache first.
    if isinstance(cls_or_string, class_declaration.class_types):
        if cls_or_string.cache.container_traits is not None:
            return cls_or_string.cache.container_traits
    # Otherwise scan all known traits and cache a hit.
    for traits in all_container_traits:
        if traits.is_my_case(cls_or_string):
            if isinstance(cls_or_string, class_declaration.class_types):
                cls_or_string.cache.container_traits = traits
            return traits
    return None
| 429,003
|
Get the global namespace (::) from a declaration tree.
Args:
decls (list[declaration_t]): a list of declarations
Returns:
namespace_t: the global namespace_t object (::)
|
def get_global_namespace(decls):
    """Get the global namespace (::) from a declaration tree.

    Args:
        decls (list[declaration_t]): a list of declarations

    Returns:
        namespace_t: the global namespace_t object (::)

    Raises:
        RuntimeError: if exactly one global namespace is not found.
    """
    matches = [decl for decl in scopedef.make_flatten(decls)
               if decl.name == '::' and isinstance(decl, namespace_t)]
    if len(matches) != 1:
        raise RuntimeError("Unable to find global namespace.")
    return matches[0]
| 429,022
|
Creates an object that describes a C++ namespace declaration.
Args:
name (str): name of the namespace
declarations (list[declaration_t]): list of declarations
|
def __init__(self, name='', declarations=None):
    """Create an object that describes a C++ namespace declaration.

    Args:
        name (str): name of the namespace
        declarations (list[declaration_t]): list of declarations
    """
    scopedef.scopedef_t.__init__(self, name)
    # List of all declarations belonging to this namespace; fall back to a
    # fresh list when None/empty is given.
    self._declarations = declarations if declarations else []
| 429,023
|
Takes parenting from inst and transfers it to self.
Args:
inst (namespace_t): a namespace declaration
|
def take_parenting(self, inst):
    """Move every declaration owned by *inst* under this namespace.

    Args:
        inst (namespace_t): the namespace whose children are adopted.
    """
    if self is inst:
        # Re-parenting onto oneself is a no-op.
        return
    for child in inst.declarations:
        child.parent = self
        self.declarations.append(child)
    inst.declarations = []
| 429,025
|
Returns a list of parent declarations names.
Args:
decl (declaration_t): declaration for which declaration path
should be calculated.
Returns:
list[(str | basestring)]: list of names, where first item is the top
parent name and last item the inputted
declaration name.
|
def declaration_path(decl):
    """Return the list of parent declaration names for *decl*.

    Args:
        decl (declaration_t): declaration whose path is computed.

    Returns:
        list[str]: names from the top-most parent down to *decl* itself.
    """
    if not decl:
        return []
    cached = decl.cache.declaration_path
    if cached:
        return cached
    names = [decl.name]
    node = decl.parent
    while node:
        ancestor_path = node.cache.declaration_path
        if ancestor_path:
            # A cached ancestor path lets us stop walking early.
            names.reverse()
            decl.cache.declaration_path = ancestor_path + names
            return decl.cache.declaration_path
        names.append(node.name)
        node = node.parent
    names.reverse()
    decl.cache.declaration_path = names
    return names
| 429,094
|
Returns a list of parent declarations names without template arguments that
have default value.
Args:
decl (declaration_t): declaration for which the partial declaration
path should be calculated.
Returns:
list[(str | basestring)]: list of names, where first item is the top
parent name and last item the inputted
declaration name.
|
def partial_declaration_path(decl):
    """Return parent declaration names without defaulted template arguments.

    Args:
        decl (declaration_t): declaration whose partial path is computed.

    Returns:
        list[str]: partial names from the top parent down to *decl*.
    """
    # TODO: if the parent cache already has declaration_path, reuse it.
    if not decl:
        return []
    cached = decl.cache.partial_declaration_path
    if cached:
        return cached
    names = [decl.partial_name]
    node = decl.parent
    while node:
        ancestor_path = node.cache.partial_declaration_path
        if ancestor_path:
            # A cached ancestor path lets us stop walking early.
            names.reverse()
            decl.cache.partial_declaration_path = ancestor_path + names
            return decl.cache.partial_declaration_path
        names.append(node.partial_name)
        node = node.parent
    names.reverse()
    decl.cache.partial_declaration_path = names
    return names
| 429,095
|
Returns declaration full qualified name.
If `decl` belongs to anonymous namespace or class, the function will return
C++ illegal qualified name.
Args:
decl (declaration_t): declaration for which the full qualified name
should be calculated.
Returns:
list[(str | basestring)]: full name of the declaration.
|
def full_name(decl, with_defaults=True):
    """Return the fully qualified name of a declaration.

    For declarations inside anonymous namespaces/classes the returned
    qualified name is not legal C++.

    Args:
        decl (declaration_t): declaration whose full name is computed.
        with_defaults (bool): include defaulted template arguments.

    Returns:
        str: full name of the declaration.

    Raises:
        RuntimeError: if *decl* is None.
    """
    if decl is None:
        raise RuntimeError("Unable to generate full name for None object!")
    if with_defaults:
        if not decl.cache.full_name:
            path = declaration_path(decl)
            # Nameless declarations (e.g. anonymous struct instances) are
            # allowed; map them to an empty name.
            decl.cache.full_name = (
                "" if path == [""] else full_name_from_declaration_path(path))
        return decl.cache.full_name
    if not decl.cache.full_partial_name:
        path = partial_declaration_path(decl)
        decl.cache.full_partial_name = (
            "" if path == [""] else full_name_from_declaration_path(path))
    return decl.cache.full_partial_name
| 429,097
|
Returns a reference to a named parent declaration.
Args:
decl (declaration_t): the child declaration
Returns:
declaration_t: the declaration or None if not found.
|
def get_named_parent(decl):
    """Return the nearest ancestor declaration that has a real name.

    Args:
        decl (declaration_t): the child declaration

    Returns:
        declaration_t: the named parent, or None if there is none.
    """
    if not decl:
        return None
    ancestor = decl.parent
    # Skip anonymous parents and the global namespace ("::").
    while ancestor and (not ancestor.name or ancestor.name == '::'):
        ancestor = ancestor.parent
    return ancestor
| 429,098
|
Find the version of pygccxml.
Used by setup.py and the sphinx's conf.py.
Inspired by https://packaging.python.org/single_source_version/
Args:
file_path (str): path to the file containing the version.
|
def find_version(file_path):
    """Find the version of pygccxml.

    Used by setup.py and sphinx's conf.py.
    Inspired by https://packaging.python.org/single_source_version/

    Args:
        file_path (str): path to the file containing the version.

    Returns:
        str: the version string.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found.
    """
    full_path = os.path.join(
        os.path.dirname(__file__), os.path.normpath(file_path))
    with io.open(full_path, encoding="utf8") as fp:
        content = fp.read()
    match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string.")
| 429,184
|
Jordan_Wigner mode.
Args:
n (int): number of modes
|
def _jordan_wigner_mode(n):
    """Build the Jordan-Wigner Pauli pair for each of *n* modes.

    Args:
        n (int): number of modes

    Returns:
        list[tuple[Pauli, Pauli]]: one (X-like, Y-like) Pauli pair per mode.
    """
    pairs = []
    for i in range(n):
        tail = [0] * (n - i - 1)
        xv = np.asarray([1] * i + [0] + tail)
        xw = np.asarray([0] * i + [1] + tail)
        yv = np.asarray([1] * i + [1] + tail)
        yw = np.asarray([0] * i + [1] + tail)
        pairs.append((Pauli(xv, xw), Pauli(yv, yw)))
    return pairs
| 430,844
|
Subroutine for one body mapping.
Args:
a_i (Pauli): pauli at index i
a_j (Pauli): pauli at index j
threshold: (float): threshold to remove a pauli
Returns:
Operator: Operator for those paulis
|
def _one_body_mapping(a_i, a_j, threshold=0.000001):
    """Subroutine for one body mapping.

    Args:
        a_i (tuple[Pauli, Pauli]): Pauli pair at index i
        a_j (tuple[Pauli, Pauli]): Pauli pair at index j
        threshold (float): drop Pauli terms with |coeff| <= threshold

    Returns:
        Operator: Operator built from the surviving Pauli terms.
    """
    terms = []
    for alpha in (0, 1):
        for beta in (0, 1):
            prod, sign = Pauli.sgn_prod(a_i[alpha], a_j[beta])
            coeff = 0.25 * sign * np.power(-1j, alpha) * np.power(1j, beta)
            if np.absolute(coeff) > threshold:
                terms.append([coeff, prod])
    return Operator(paulis=terms)
| 430,845
|
Load kbs (without caching)
Args:
- kbs_files: mapping of kb names to custom paths you can specify to
override the default values
If path starts with "kb:", the kb will be loaded from the database
|
def load_kbs(kbs_files):
    """Load knowledge bases (without caching).

    Args:
        kbs_files: mapping of kb names to file paths; a path starting with
            "kb:" means the kb is loaded from the database.

    Returns:
        dict: loaded knowledge bases keyed by kb name.
    """
    kbs = {}
    kbs['journals_re'] = build_journals_re_kb(kbs_files['journals-re'])
    kbs['journals'] = load_kb(kbs_files['journals'], build_journals_kb)
    kbs['report-numbers'] = build_reportnum_kb(kbs_files['report-numbers'])
    kbs['authors'] = build_authors_kb(kbs_files['authors'])
    kbs['books'] = build_books_kb(kbs_files['books'])
    kbs['publishers'] = load_kb(kbs_files['publishers'], build_publishers_kb)
    kbs['special_journals'] = build_special_journals_kb(
        kbs_files['special-journals'])
    kbs['collaborations'] = load_kb(
        kbs_files['collaborations'], build_collaborations_kb)
    return kbs
| 431,096
|
This function wraps around the bz2, gzip and standard python's open
function to deal intelligently with bzipped, gzipped or standard text
files.
Args:
filename (str/Path): filename or pathlib.Path.
\*args: Standard args for python open(..). E.g., 'r' for read, 'w' for
write.
\*\*kwargs: Standard kwargs for python open(..).
Returns:
File-like object. Supports with context.
|
def zopen(filename, *args, **kwargs):
    """Open a file transparently handling bz2, gzip and plain text.

    Wraps bz2, gzip and the standard open so that bzipped, gzipped or
    ordinary text files are all handled by extension.

    Args:
        filename (str/Path): filename or pathlib.Path.
        *args: standard args for python open(), e.g. 'r', 'w'.
        **kwargs: standard kwargs for python open().

    Returns:
        File-like object; supports the with-statement.
    """
    if Path is not None and isinstance(filename, Path):
        filename = str(filename)
    ext = os.path.splitext(filename)[1].upper()
    if ext == ".BZ2":
        if PY_VERSION[0] >= 3:
            return bz2.open(filename, *args, **kwargs)
        # Python 2: BZ2File has no text mode, so strip any "t" flag.
        args = list(args)
        if args:
            args[0] = "".join(c for c in args[0] if c != "t")
        if "mode" in kwargs:
            kwargs["mode"] = "".join(c for c in kwargs["mode"] if c != "t")
        return bz2.BZ2File(filename, *args, **kwargs)
    if ext in (".GZ", ".Z"):
        return gzip.open(filename, *args, **kwargs)
    return io.open(filename, *args, **kwargs)
| 431,226
|
A much faster reverse read of file by using Python's mmap to generate a
memory-mapped file. It is slower for very small files than
reverse_readline, but at least 2x faster for large files (the primary use
of such a method).
Args:
filename (str):
Name of file to read.
Yields:
Lines from the file in reverse order.
|
def reverse_readfile(filename):
    """Yield the lines of a file in reverse order via an mmap'd view.

    Faster than a pure-Python reverse_readline for large files; compressed
    files fall back to reading everything and reversing in memory.

    Args:
        filename (str): name of file to read.

    Yields:
        str: lines from the file, last line first.
    """
    try:
        with zopen(filename, "rb") as f:
            if isinstance(f, (gzip.GzipFile, bz2.BZ2File)):
                # mmap needs a real fd semantics match; decompress fully.
                for raw in reversed(f.readlines()):
                    yield raw.decode("utf-8").rstrip()
            else:
                fm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
                end = len(fm)
                while end > 0:
                    start = fm.rfind(b"\n", 0, end)
                    yield fm[start + 1:end].decode("utf-8").strip("\n")
                    end = start
    except ValueError:
        # e.g. empty file cannot be mmap'd — yield nothing.
        return
| 431,227
|
Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
Args:
file_name: Name of file to lock.
timeout: Maximum timeout for locking. Defaults to 10.
delay: Delay between each attempt to lock. Defaults to 0.05.
|
def __init__(self, file_name, timeout=10, delay=.05):
    """Prepare the file locker.

    Args:
        file_name: name of file to lock.
        timeout: maximum time to wait for the lock. Defaults to 10.
        delay: delay between lock attempts. Defaults to 0.05.

    Raises:
        ValueError: if delay/timeout are non-positive or delay > timeout.
    """
    abs_path = os.path.abspath(file_name)
    self.file_name = abs_path
    self.lockfile = abs_path + ".lock"
    self.timeout = float(timeout)
    self.delay = float(delay)
    self.is_locked = False
    if not (0 < self.delay <= self.timeout):
        raise ValueError("delay and timeout must be positive with delay "
                         "<= timeout")
| 431,230
|
Initializes a WildCard.
Args:
wildcard (str): String of tokens separated by sep. Each token
represents a pattern.
sep (str): Separator for shell patterns.
|
def __init__(self, wildcard, sep="|"):
    """Initialize a WildCard.

    Args:
        wildcard (str): tokens separated by *sep*; each token is a pattern.
        sep (str): separator for shell patterns.
    """
    # Empty wildcard means "match everything".
    self.pats = wildcard.split(sep) if wildcard else ["*"]
| 431,233
|
Decorator to mark classes or functions as deprecated,
with a possible replacement.
Args:
replacement (callable): A replacement class or method.
message (str): A warning message to be displayed.
Returns:
Original function, but with a warning to use the updated class.
|
def deprecated(replacement=None, message=None):
    """Decorator to mark classes or functions as deprecated,
    with a possible replacement.

    Args:
        replacement (callable): A replacement class or method.
        message (str): A warning message to be displayed.

    Returns:
        Original function, but with a warning to use the updated class,
        and with its ``__name__``/``__doc__`` metadata preserved.
    """
    def wrap(old):
        @functools.wraps(old)  # fix: keep the wrapped callable's metadata
        def wrapped(*args, **kwargs):
            msg = "%s is deprecated" % old.__name__
            if replacement is not None:
                # Unwrap property/classmethod/staticmethod to name the
                # underlying callable in the message.
                if isinstance(replacement, property):
                    r = replacement.fget
                elif isinstance(replacement, (classmethod, staticmethod)):
                    r = replacement.__func__
                else:
                    r = replacement
                msg += "; use %s in %s instead." % (r.__name__, r.__module__)
            if message is not None:
                msg += "\n" + message
            warnings.simplefilter('default')
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return old(*args, **kwargs)
        return wrapped
    return wrap
| 431,236
|
Return the input string centered in a 'marquee'.
Args:
text (str): Input string
width (int): Width of final output string.
mark (str): Character used to fill string.
:Examples:
>>> marquee('A test', width=40)
'**************** A test ****************'
>>> marquee('A test', width=40, mark='-')
'---------------- A test ----------------'
marquee('A test',40, ' ')
' A test '
|
def marquee(text="", width=78, mark='*'):
    """Return the input string centered in a 'marquee'.

    Args:
        text (str): Input string
        width (int): Width of final output string.
        mark (str): Character used to fill string.

    :Examples:

    >>> marquee('A test', width=40)
    '**************** A test ****************'

    >>> marquee('A test', width=40, mark='-')
    '---------------- A test ----------------'

    marquee('A test',40, ' ')
    '                 A test                 '
    """
    if not text:
        return (mark * width)[:width]
    fill_count = max((width - len(text) - 2) // len(mark) // 2, 0)
    fill = mark * fill_count
    return '%s %s %s' % (fill, text, fill)
| 431,246
|
Returns a string in a box
Args:
msg: Input string.
ch: Character used to form the box.
pad: Number of characters ch added before and after msg.
>>> print(boxed("hello", ch="*", pad=2))
***********
** hello **
***********
|
def boxed(msg, ch="=", pad=5):
    """Return *msg* framed in a box of *ch* characters.

    Args:
        msg: Input string.
        ch: Character used to form the box.
        pad: Number of characters ch added before and after msg.

    >>> print(boxed("hello", ch="*", pad=2))
    ***********
    ** hello **
    ***********
    """
    if pad > 0:
        side = ch * pad
        msg = "{0} {1} {0}".format(side, msg.strip())
    border = ch * len(msg)
    return "\n".join([border, msg, border])
| 431,247
|
Useful logging decorator. If a method is logged, the beginning and end of
the method call will be logged at a pre-specified level.
Args:
level: Level to log method at. Defaults to DEBUG.
|
def logged(level=logging.DEBUG):
    """Log the beginning and end of every call to the decorated function.

    Args:
        level: Level to log the method at. Defaults to DEBUG.

    Returns:
        A decorator that preserves the wrapped function's metadata.
    """
    def wrap(f):
        _logger = logging.getLogger("{}.{}".format(f.__module__, f.__name__))

        @functools.wraps(f)  # fix: preserve __name__/__doc__ of f
        def wrapped_f(*args, **kwargs):
            _logger.log(level, "Called at {} with args = {} and kwargs = {}"
                        .format(datetime.datetime.now(), args, kwargs))
            data = f(*args, **kwargs)
            _logger.log(level, "Done at {} with args = {} and kwargs = {}"
                        .format(datetime.datetime.now(), args, kwargs))
            return data
        return wrapped_f
    return wrap
| 431,272
|
This decorator is used to decorate main functions.
It adds the initialization of the logger and an argument parser that allows
one to select the loglevel.
Useful if we are writing simple main functions that call libraries where
the logging module is used
Args:
main:
main function.
|
def enable_logging(main):
    """Decorate a main function with logger setup and a --loglevel option.

    Adds an argument parser that lets the caller select the loglevel.
    Useful for simple main functions that call libraries using the logging
    module.

    Args:
        main: main function.
    """
    @functools.wraps(main)
    def wrapper(*args, **kwargs):
        import argparse
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--loglevel', default="ERROR", type=str,
            help="Set the loglevel. Possible values: CRITICAL, ERROR (default),"
            "WARNING, INFO, DEBUG")
        options = parser.parse_args()
        # Upper-case the value so both --loglevel=DEBUG and --loglevel=debug
        # are accepted.
        numeric_level = getattr(logging, options.loglevel.upper(), None)
        if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % options.loglevel)
        logging.basicConfig(level=numeric_level)
        return main(*args, **kwargs)
    return wrapper
| 431,273
|
Returns full path to a executable.
Args:
cmd (str): Executable command to search for.
Returns:
(str) Full path to command. None if it is not found.
Example::
full_path_to_python = which("python")
|
def which(cmd):
    """Return the full path to an executable.

    Args:
        cmd (str): Executable command to search for.

    Returns:
        (str) Full path to command. None if it is not found.

    Example::

        full_path_to_python = which("python")
    """
    def _is_executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _ = os.path.split(cmd)
    if head:
        # Explicit path given: only that path is checked.
        return cmd if _is_executable(cmd) else None
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, cmd)
        if _is_executable(candidate):
            return candidate
    return None
| 431,274
|
Returns an existing (zipped or unzipped) file path given the unzipped
version. If no path exists, returns the filename unmodified.
Args:
filename: filename without zip extension
Returns:
filename with a zip extension (unless an unzipped version
exists). If filename is not found, the same filename is returned
unchanged.
|
def zpath(filename):
    """Return an existing (zipped or unzipped) path for *filename*.

    Args:
        filename: filename without zip extension.

    Returns:
        The first existing candidate among the bare name and common
        compression extensions; the unmodified filename if none exist.
    """
    for ext in ("", '.gz', '.GZ', '.bz2', '.BZ2', '.z', '.Z'):
        candidate = "{}{}".format(filename, ext)
        if os.path.exists(candidate):
            return candidate
    return filename
| 431,275
|
Prints out a table of data, padded for alignment
Each row must have the same number of columns.
Args:
table: The table to print. A list of lists.
out: Output stream (file-like object)
rstrip: if True, trailing withespaces are removed from the entries.
|
def pprint_table(table, out=sys.stdout, rstrip=False):
    """Print a table of data, padded for alignment.

    Each row must have the same number of columns.

    Args:
        table: The table to print. A list of lists.
        out: Output stream (file-like object).
        rstrip: if True, trailing whitespace is removed from the entries.
    """
    if rstrip:
        # Strip in place, matching the original mutating behavior.
        for idx, row in enumerate(table):
            table[idx] = [cell.rstrip() for cell in row]
    ncols = len(table[0])
    widths = [max(len(row[col]) for row in table) for col in range(ncols)]
    for row in table:
        # First column is left-justified, the rest right-justified.
        out.write(row[0].ljust(widths[0] + 1))
        for cell, width in zip(row[1:], widths[1:]):
            out.write(cell.rjust(width + 2))
        out.write("\n")
| 431,280
|
Returns the greatest common divisor for a sequence of numbers.
Args:
\*numbers: Sequence of numbers.
Returns:
(int) Greatest common divisor of numbers.
|
def gcd(*numbers):
    """Return the greatest common divisor of a sequence of numbers.

    Args:
        *numbers: Sequence of numbers.

    Returns:
        (int) Greatest common divisor of numbers.
    """
    result = numbers[0]
    for value in numbers:
        result = pygcd(result, value)
    return result
| 431,283
|
Return lowest common multiple of a sequence of numbers.
Args:
\*numbers: Sequence of numbers.
Returns:
(int) Lowest common multiple of numbers.
|
def lcm(*numbers):
    """Return the lowest common multiple of a sequence of numbers.

    Args:
        *numbers: Sequence of numbers.

    Returns:
        (int) Lowest common multiple of numbers.
    """
    result = 1
    for value in numbers:
        result = (value * result) // gcd(value, result)
    return result
| 431,284
|
Returns the greatest common divisor for a sequence of numbers.
Uses a numerical tolerance, so can be used on floats
Args:
numbers: Sequence of numbers.
tol: Numerical tolerance
Returns:
(int) Greatest common divisor of numbers.
|
def gcd_float(numbers, tol=1e-8):
    """Return the greatest common divisor of a sequence of numbers.

    Uses a numerical tolerance, so it can be used on floats.

    Args:
        numbers: Sequence of numbers.
        tol: Numerical tolerance.

    Returns:
        Greatest common divisor of numbers.
    """
    def _pair_gcd(x, y):
        # Euclidean algorithm, stopping once the remainder drops below tol.
        while y > tol:
            x, y = y, x % y
        return x

    result = numbers[0]
    for value in numbers:
        result = _pair_gcd(result, value)
    return result
| 431,285
|
A generator that yields the upper triangle of the matrix (items x items)
Args:
items: Iterable object with elements [e0, e1, ...]
diago: False if diagonal matrix elements should be excluded
with_inds: If True, (i,j) (e_i, e_j) is returned else (e_i, e_j)
>>> for (ij, mate) in iuptri([0,1], with_inds=True):
... print("ij:", ij, "mate:", mate)
ij: (0, 0) mate: (0, 0)
ij: (0, 1) mate: (0, 1)
ij: (1, 1) mate: (1, 1)
|
def iuptri(items, diago=True, with_inds=False):
    """Generate the upper triangle of the (items x items) matrix.

    Args:
        items: Iterable object with elements [e0, e1, ...]
        diago: False if diagonal matrix elements should be excluded.
        with_inds: If True, yields ((i, j), (e_i, e_j)) else (e_i, e_j).

    >>> for (ij, mate) in iuptri([0,1], with_inds=True):
    ...     print("ij:", ij, "mate:", mate)
    ij: (0, 0) mate: (0, 0)
    ij: (0, 1) mate: (0, 1)
    ij: (1, 1) mate: (1, 1)
    """
    for row, first in enumerate(items):
        for col, second in enumerate(items):
            # Skip the lower triangle, and the diagonal when diago is False.
            if col < row or (col == row and not diago):
                continue
            if with_inds:
                yield (row, col), (first, second)
            else:
                yield first, second
| 431,288
|
Overriding default method for JSON encoding. This method does two
things: (a) If an object has a to_dict property, return the to_dict
output. (b) If the @module and @class keys are not in the to_dict,
add them to the output automatically. If the object has no to_dict
property, the default Python json encoder default method is called.
Args:
o: Python object.
Return:
Python dict representation.
|
def default(self, o):
    """JSON-encode *o*, honoring ``as_dict`` and stamping @module/@class.

    If the object exposes ``as_dict``, its output is returned with
    ``@module``, ``@class`` and ``@version`` keys filled in when missing.
    Otherwise the standard json encoder default is used.

    Args:
        o: Python object.

    Return:
        Python dict representation.
    """
    if isinstance(o, datetime.datetime):
        return {"@module": "datetime", "@class": "datetime",
                "string": o.__str__()}
    if np is not None:
        if isinstance(o, np.ndarray):
            return {"@module": "numpy",
                    "@class": "array",
                    "dtype": o.dtype.__str__(),
                    "data": o.tolist()}
        if isinstance(o, np.generic):
            return o.item()
    if bson is not None:
        if isinstance(o, bson.objectid.ObjectId):
            return {"@module": "bson.objectid",
                    "@class": "ObjectId",
                    "oid": str(o)}
    try:
        d = o.as_dict()
        if "@module" not in d:
            d["@module"] = u"{}".format(o.__class__.__module__)
        if "@class" not in d:
            d["@class"] = u"{}".format(o.__class__.__name__)
        if "@version" not in d:
            try:
                root_pkg = o.__class__.__module__.split('.')[0]
                d["@version"] = u"{}".format(
                    import_module(root_pkg).__version__)
            except AttributeError:
                # Package has no __version__ attribute.
                d["@version"] = None
        return d
    except AttributeError:
        # No as_dict(): fall back to the stock encoder (raises TypeError).
        return json.JSONEncoder.default(self, o)
| 431,302
|
Calculates nCr.
Args:
n (int): total number of items.
r (int): items to choose
Returns:
nCr.
|
def nCr(n, r):
    """Calculate nCr (binomial coefficient).

    Args:
        n (int): total number of items.
        r (int): items to choose.

    Returns:
        int: nCr.
    """
    f = math.factorial
    # Integer floor division keeps the result exact for large n; the old
    # float division (f(n) / f(r) / f(n-r)) lost precision past ~2**53.
    return f(n) // (f(r) * f(n - r))
| 431,305
|
Calculates nPr.
Args:
n (int): total number of items.
r (int): items to permute
Returns:
nPr.
|
def nPr(n, r):
    """Calculate nPr (number of permutations).

    Args:
        n (int): total number of items.
        r (int): items to permute.

    Returns:
        int: nPr.
    """
    f = math.factorial
    # Integer floor division keeps the result exact for large n; the old
    # float division (f(n) / f(n-r)) lost precision past ~2**53.
    return f(n) // f(n - r)
| 431,306
|
Implements a recursive copy function similar to Unix's "cp -r" command.
Surprisingly, python does not have a real equivalent. shutil.copytree
only works if the destination directory is not present.
Args:
src (str): Source folder to copy.
dst (str): Destination folder.
|
def copy_r(src, dst):
    """Recursively copy *src* into *dst*, like Unix "cp -r".

    Unlike shutil.copytree, the destination directory may already exist.

    Args:
        src (str): Source folder to copy.
        dst (str): Destination folder.
    """
    abssrc = os.path.abspath(src)
    absdst = os.path.abspath(dst)
    try:
        os.makedirs(absdst)
    except OSError:
        # Destination already exists — that's fine.
        pass
    for entry in os.listdir(abssrc):
        entry_path = os.path.join(abssrc, entry)
        if os.path.isfile(entry_path):
            shutil.copy(entry_path, absdst)
        elif not absdst.startswith(entry_path):
            copy_r(entry_path, os.path.join(absdst, entry))
        else:
            # Guard against recursing into a destination nested in src.
            warnings.warn("Cannot copy %s to itself" % entry_path)
| 431,307
|
Gzips all files in a directory. Note that this is different from
shutil.make_archive, which creates a tar archive. The aim of this method
is to create gzipped files that can still be read using common Unix-style
commands like zless or zcat.
Args:
path (str): Path to directory.
compresslevel (int): Level of compression, 1-9. 9 is default for
GzipFile, 6 is default for gzip.
|
def gzip_dir(path, compresslevel=6):
    """Gzip every file directly inside a directory.

    Different from shutil.make_archive (which builds a tar archive): each
    file becomes an individual .gz readable with zless/zcat. Files already
    ending in "gz" are left untouched.

    Args:
        path (str): Path to directory.
        compresslevel (int): Level of compression, 1-9. 9 is default for
            GzipFile, 6 is default for gzip.
    """
    for fname in os.listdir(path):
        full = os.path.join(path, fname)
        if fname.lower().endswith("gz"):
            continue
        gz_name = '{}.gz'.format(full)
        with open(full, 'rb') as src, \
                GzipFile(gz_name, 'wb', compresslevel=compresslevel) as dst:
            shutil.copyfileobj(src, dst)
        # Preserve timestamps/permissions, then drop the original.
        shutil.copystat(full, gz_name)
        os.remove(full)
| 431,308
|
Compresses a file with the correct extension. Functions like standard
Unix command line gzip and bzip2 in the sense that the original
uncompressed files are not retained.
Args:
filepath (str): Path to file.
compression (str): A compression mode. Valid options are "gz" or
"bz2". Defaults to "gz".
|
def compress_file(filepath, compression="gz"):
    """Compress a file in place, removing the uncompressed original.

    Behaves like the gzip/bzip2 command line tools: the original file is
    not retained. Already-compressed files (matching extension) are left
    alone.

    Args:
        filepath (str): Path to file.
        compression (str): A compression mode. Valid options are "gz" or
            "bz2". Defaults to "gz".

    Raises:
        ValueError: for an unsupported compression mode.
    """
    if compression not in ("gz", "bz2"):
        raise ValueError("Supported compression formats are 'gz' and 'bz2'.")
    from monty.io import zopen
    if filepath.lower().endswith(".%s" % compression):
        return
    with open(filepath, 'rb') as f_in, \
            zopen('%s.%s' % (filepath, compression), 'wb') as f_out:
        f_out.writelines(f_in)
    os.remove(filepath)
| 431,309
|
Recursively compresses all files in a directory. Note that this
compresses all files singly, i.e., it does not create a tar archive. For
that, just use Python tarfile class.
Args:
path (str): Path to parent directory.
compression (str): A compression mode. Valid options are "gz" or
"bz2". Defaults to gz.
|
def compress_dir(path, compression="gz"):
    """Recursively compress every file under *path* individually.

    Note: this does not create a tar archive; use the tarfile module for
    that.

    Args:
        path (str): Path to parent directory.
        compression (str): A compression mode. Valid options are "gz" or
            "bz2". Defaults to gz.
    """
    for parent, _subdirs, files in os.walk(path):
        for fname in files:
            compress_file(os.path.join(parent, fname), compression=compression)
| 431,310
|
Decompresses a file with the correct extension. Automatically detects
gz, bz2 or z extension.
Args:
filepath (str): Path to file. The compression format (gz, bz2 or z)
is detected automatically from the file extension.
|
def decompress_file(filepath):
    """Decompress a file in place, removing the compressed original.

    The gz, bz2 or z extension is detected automatically; files without a
    recognized extension are left alone.

    Args:
        filepath (str): Path to file.
    """
    parts = filepath.split(".")
    if parts[-1].upper() not in ("BZ2", "GZ", "Z"):
        return
    from monty.io import zopen
    target = ".".join(parts[:-1])
    with open(target, 'wb') as f_out, \
            zopen(filepath, 'rb') as f_in:
        f_out.writelines(f_in)
    os.remove(filepath)
| 431,311
|
Recursively decompresses all files in a directory.
Args:
path (str): Path to parent directory.
|
def decompress_dir(path):
    """Recursively decompress every file under *path*.

    Args:
        path (str): Path to parent directory.
    """
    for parent, _subdirs, files in os.walk(path):
        for fname in files:
            decompress_file(os.path.join(parent, fname))
| 431,312
|
Implements an remove function that will delete files, folder trees and symlink trees
1.) Remove a file
2.) Remove a symlink and follow into with a recursive rm if follow_symlink
3.) Remove directory with rmtree
Args:
path (str): path to remove
follow_symlink(bool): follow symlinks and removes whatever is in them
|
def remove(path, follow_symlink=False):
    """Delete a file, a symlink, or a whole directory tree.

    1.) Remove a file
    2.) Remove a symlink and, if follow_symlink, recursively remove its
        target as well
    3.) Remove a directory with rmtree

    Args:
        path (str): path to remove.
        follow_symlink (bool): follow symlinks and remove their targets.
    """
    if os.path.isfile(path):
        os.remove(path)
        return
    if os.path.islink(path):
        if follow_symlink:
            remove(os.readlink(path))
        os.unlink(path)
        return
    shutil.rmtree(path)
| 431,313
|
Check if a tunnel is up (remote target's host is reachable on TCP
target's port)
Arguments:
target (tuple):
tuple of type (``str``, ``int``) indicating the listen IP
address and port
Return:
boolean
.. deprecated:: 0.1.0
Replaced by :meth:`.check_tunnels()` and :attr:`.tunnel_is_up`
|
def local_is_up(self, target):
    """
    Check if a tunnel is up (remote target's host is reachable on TCP
    target's port)

    Arguments:
        target (tuple):
            tuple of type (``str``, ``int``) indicating the listen IP
            address and port

    Return:
        boolean

    .. deprecated:: 0.1.0
        Replaced by :meth:`.check_tunnels()` and :attr:`.tunnel_is_up`
    """
    # Reject anything check_address() considers malformed before probing.
    try:
        check_address(target)
    except ValueError:
        self.logger.warning('Target must be a tuple (IP, port), where IP '
                            'is a string (i.e. "192.168.0.1") and port is '
                            'an integer (i.e. 40000). Alternatively '
                            'target can be a valid UNIX domain socket.')
        return False
    # Temporarily disable the skip flag so check_tunnels() really runs,
    # then restore it; the toggle order matters here.
    if self.skip_tunnel_checkup:  # force tunnel check at this point
        self.skip_tunnel_checkup = False
        self.check_tunnels()
        self.skip_tunnel_checkup = True  # roll it back
    # Unknown targets default to True — presumably "assume up"; confirm.
    return self.tunnel_is_up.get(target, True)
| 431,834
|
Load public keys from any available SSH agent
Arguments:
logger (Optional[logging.Logger])
Return:
list
|
def get_agent_keys(logger=None):
    """Load public keys from any available SSH agent.

    Arguments:
        logger (Optional[logging.Logger])

    Return:
        list: keys exposed by the agent.
    """
    keys = list(paramiko.Agent().get_keys())
    if logger:
        logger.info('{0} keys loaded from agent'.format(len(keys)))
    return keys
| 431,839
|
Get SSH Public key from a private key file, given an optional password
Arguments:
pkey_file (str):
File containing a private key (RSA, DSS or ECDSA)
Keyword Arguments:
pkey_password (Optional[str]):
Password to decrypt the private key
logger (Optional[logging.Logger])
Return:
paramiko.Pkey
|
def read_private_key_file(pkey_file,
                          pkey_password=None,
                          key_type=None,
                          logger=None):
    """Get an SSH private key from a file, given an optional password.

    Arguments:
        pkey_file (str):
            File containing a private key (RSA, DSS or ECDSA)

    Keyword Arguments:
        pkey_password (Optional[str]):
            Password to decrypt the private key
        key_type (Optional[type]):
            Specific paramiko key class to try; all known classes otherwise
        logger (Optional[logging.Logger])

    Return:
        paramiko.PKey or None if no candidate class could load the file.
    """
    if key_type:
        candidates = (key_type,)
    else:
        candidates = (paramiko.RSAKey,
                      paramiko.DSSKey,
                      paramiko.ECDSAKey,
                      paramiko.Ed25519Key)
    ssh_pkey = None
    for pkey_class in candidates:
        try:
            ssh_pkey = pkey_class.from_private_key_file(
                pkey_file,
                password=pkey_password
            )
        except paramiko.PasswordRequiredException:
            # Encrypted key and no/bad password: no point trying others.
            if logger:
                logger.error('Password is required for key {0}'
                             .format(pkey_file))
            break
        except paramiko.SSHException:
            # Wrong key type (or bad password); try the next class.
            if logger:
                logger.debug('Private key file ({0}) could not be loaded '
                             'as type {1} or bad password'
                             .format(pkey_file, pkey_class))
            continue
        if logger:
            logger.debug('Private key file ({0}, {1}) successfully '
                         'loaded'.format(pkey_file, pkey_class))
        break
    return ssh_pkey
| 431,848
|
Authentication used by Airtable Class
Args:
api_key (``str``): Airtable API Key. Optional.
If not set, it will look for
enviroment variable ``AIRTABLE_API_KEY``
|
def __init__(self, api_key=None):
    """Authentication used by the Airtable class.

    Args:
        api_key (``str``): Airtable API Key. Optional.
            If not set, it will look for the environment variable
            ``AIRTABLE_API_KEY``.

    Raises:
        KeyError: if no key is given and the env var is unset.
    """
    try:
        self.api_key = api_key or os.environ['AIRTABLE_API_KEY']
    except KeyError:
        raise KeyError('Api Key not found. Pass api_key as a kwarg \
                        or set an env var AIRTABLE_API_KEY with your key')
| 432,972
|
Retrieves a record by its id
>>> record = airtable.get('recwPQIfs4wKPyc9D')
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Record
|
def get(self, record_id):
    """Retrieve a record by its id.

    >>> record = airtable.get('recwPQIfs4wKPyc9D')

    Args:
        record_id(``str``): Airtable record id

    Returns:
        record (``dict``): Record
    """
    return self._get(self.record_url(record_id))
| 432,980
|
Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record
|
def insert(self, fields, typecast=False):
    """Insert a record.

    >>> record = {'Name': 'John'}
    >>> airtable.insert(record)

    Args:
        fields(``dict``): Fields to insert.
            Must be dictionary with Column names as Key.
        typecast(``boolean``): Automatic data conversion from string values.

    Returns:
        record (``dict``): Inserted record
    """
    payload = {"fields": fields, "typecast": typecast}
    return self._post(self.url_table, json_data=payload)
| 432,985
|
Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
To change the rate limit use ``airtable.API_LIMIT = 0.2``
(5 per second)
>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
>>> airtable.batch_insert(records)
Args:
records(``list``): Records to insert
typecast(``boolean``): Automatic data conversion from string values.
Returns:
records (``list``): list of added records
|
def batch_insert(self, records, typecast=False):
    """Call :any:`insert` repetitively, following the API rate limit.

    To change the rate limit use ``airtable.API_LIMIT = 0.2``
    (5 per second)

    >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
    >>> airtable.batch_insert(records)

    Args:
        records(``list``): Records to insert
        typecast(``boolean``): Automatic data conversion from string values.

    Returns:
        records (``list``): list of added records
    """
    from functools import partial
    # fix: the typecast flag was previously dropped; bind it onto insert
    # so _batch_request's per-record call honors it.
    return self._batch_request(partial(self.insert, typecast=typecast),
                               records)
| 432,987
|
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
|
def delete(self, record_id):
    """Delete a record by its id.

    >>> record = airtable.match('Employee Id', 'DD13332454')
    >>> airtable.delete(record['id'])

    Args:
        record_id(``str``): Airtable record id

    Returns:
        record (``dict``): Deleted Record
    """
    return self._delete(self.record_url(record_id))
| 432,992
|
Specify only WANTED labels to minimize get_labels() requests
Args:
- labels: <list> of wanted labels.
Example:
page.wanted_labels(['P18', 'P31'])
|
def wanted_labels(self, labels):
    """Restrict label fetching to the given WANTED labels.

    Specifying only wanted labels minimizes get_labels() requests.

    Args:
        labels (list): wanted labels.

    Raises:
        ValueError: if *labels* is not a list.

    Example:
        page.wanted_labels(['P18', 'P31'])
    """
    if isinstance(labels, list):
        self.user_labels = labels
    else:
        raise ValueError("Input labels must be a list.")
| 433,038
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.