repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
blubberdiblub/eztemplate | eztemplate/__main__.py | dump_engines | python | def dump_engines(target=sys.stderr):
print("Available templating engines:", file=target)
width = max(len(engine) for engine in engines.engines)
for handle, engine in sorted(engines.engines.items()):
description = engine.__doc__.split('\n', 0)[0]
print(" %-*s - %s" % (width, handle, description), file=target) | Print successfully imported templating engines. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L220-L227 | null | #!/usr/bin/env python
"""Provide a simple templating system for text files."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import errno
import os
import os.path
import re
import sys
from . import engines
from . import __version__
def is_filelike(ob):
"""Check for filelikeness of an object.
Needed to distinguish it from file names.
Returns true if it has a read or a write method.
"""
if hasattr(ob, 'read') and callable(ob.read):
return True
if hasattr(ob, 'write') and callable(ob.write):
return True
return False
class _PyArg(str):
"""Wrap a command line python argument.
Makes it distinguishable from a plain text argument.
"""
pass
def parse_args(args=None):
"""Parse command line arguments."""
# The argparse module provides a nice abstraction for argument parsing.
# It automatically builds up the help text, too.
parser = argparse.ArgumentParser(
prog=__package__,
description='Make substitutions in text files.',
)
parser.add_argument('-V', '--version',
action='version',
version="%%(prog)s %s" % (__version__,),
)
group = parser.add_argument_group("Engine")
group.add_argument('-e', '--engine',
dest='engine',
default='string.Template',
help="templating engine",
metavar="ENGINE",
)
group.add_argument('-t', '--tolerant',
action='store_true',
dest='tolerant',
help="don't fail on missing names",
)
group = parser.add_argument_group("Output")
group.add_argument('-s', '--stdout',
action='append_const',
dest='outfiles',
const=sys.stdout,
help="use standard output",
)
group.add_argument('-o', '--outfile',
action='append',
dest='outfiles',
help="output file",
metavar="FILE",
)
group.add_argument('--vary',
action='store_true',
dest='vary',
help="vary output file name according to template",
)
group.add_argument('-r', '--read-old',
action='store_true',
dest='read_old',
help="read preexisting output files and"
"hand the respective content to the template",
)
group.add_argument('-d', '--delete-empty',
action='store_true',
dest='delete_empty',
help="delete file if output is empty",
)
group = parser.add_argument_group("Input")
group.add_argument('--stdin',
action='append_const',
dest='infiles',
const=sys.stdin,
help="use standard input",
)
group.add_argument('-i', '--infile',
action='append',
dest='infiles',
help="any number of input files",
metavar="FILE",
)
group.add_argument('-c', '--concatenate',
action='store_true',
dest='concatenate',
help="concatenate multiple input files into one output",
)
group = parser.add_argument_group("Name-value pairs")
group.add_argument('-a', '--arg',
action='append',
dest='args',
help="any number of name-value pairs",
metavar="NAME=VALUE",
)
group.add_argument('-p', '--pyarg',
action='append',
dest='args',
type=_PyArg,
help="evaluate a python expression",
metavar="NAME=EXPRESSION",
)
group.add_argument('-n', '--next',
action='append_const',
dest='args',
const='--',
help="begin next argument group",
)
parser.add_argument(
dest='remainder',
nargs=argparse.REMAINDER,
help="possible input files and name-value pair groups "
"if not already specified through options",
)
args = parser.parse_args(args)
if args.engine == 'help':
dump_engines()
parser.exit(0)
if args.engine not in engines.engines:
parser.error("Engine '%s' is not available." % (args.engine,))
if args.vary:
if len(args.outfiles) != 1:
parser.error("need exactly one output file template")
if is_filelike(args.outfiles[0]):
parser.error("vary requires an output file template")
elif not args.outfiles:
args.outfiles = [sys.stdout]
if not args.infiles:
if args.args:
infiles = args.remainder
args.remainder = []
try:
infiles.remove('--')
except ValueError:
pass
else:
first = 1 if args.remainder and args.remainder[0] == '--' else 0
last = (len(args.remainder)
if args.vary or args.concatenate
else first + 1)
for split, infile in enumerate(args.remainder[first:last], first):
if infile == '--' or '=' in infile:
break
else:
split = last
infiles = args.remainder[first:split]
args.remainder = args.remainder[split:]
args.infiles = [path if path != '-' else sys.stdin
for path in infiles] if infiles else [sys.stdin]
if args.args:
flat_args = args.args
else:
flat_args = args.remainder
args.remainder = []
if flat_args and flat_args[0] == '--':
flat_args = flat_args[1:]
args.args = []
mapping = {}
for arg in flat_args:
if isinstance(arg, _PyArg):
name_value = arg.split('=', 1)
mapping[name_value[0]] = eval(name_value[1], {}, mapping)
elif arg == '--':
args.args.append(mapping)
mapping = {}
else:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
args.args.append(mapping)
if args.remainder:
parser.error("extraneous arguments left over")
else:
del args.remainder
return args
def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1)
def make_mapping(args):
"""Make a mapping from the name=value pairs."""
mapping = {}
if args:
for arg in args:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
return mapping
def make_path_properties(file_or_path, prefix=''):
"""Build useful properties from a file path."""
is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)
if is_std:
path = '-'
elif is_filelike(file_or_path):
try:
path = str(file_or_path.name)
except AttributeError:
path = None
else:
path = str(file_or_path)
if is_std or not path:
abspath = dirname = basename = stem = ext = None
realpath = realdrive = realdir = realbase = realstem = realext = None
numbers = num = None
else:
abspath = os.path.abspath(path)
dirname, basename = os.path.split(path)
stem, ext = os.path.splitext(basename)
if not dirname:
dirname = os.curdir
realpath = os.path.realpath(path)
realdrive, tail = os.path.splitdrive(realpath)
realdir, realbase = os.path.split(tail)
realstem, realext = os.path.splitext(realbase)
numbers = [int(s) for s in re.findall(r'\d+', basename)]
num = numbers[-1] if numbers else None
return {
prefix + 'path': path,
prefix + 'abspath': abspath,
prefix + 'dirname': dirname,
prefix + 'basename': basename,
prefix + 'stem': stem,
prefix + 'ext': ext,
prefix + 'realpath': realpath,
prefix + 'realdrive': realdrive,
prefix + 'realdir': realdir,
prefix + 'realbase': realbase,
prefix + 'realstem': realstem,
prefix + 'realext': realext,
prefix + 'numbers': numbers,
prefix + 'num': num,
}
def constant_outfile_iterator(outfiles, infiles, arggroups):
"""Iterate over all output files."""
assert len(infiles) == 1
assert len(arggroups) == 1
return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles)
def variable_outfile_iterator(outfiles, infiles, arggroups, engine):
"""Iterate over variable output file name template."""
assert len(outfiles) == 1
template = engine(outfiles[0], tolerant=False)
for infile in infiles:
properties = make_path_properties(infile, prefix='')
for arggroup in arggroups:
outfile = template.apply(dict(arggroup, **properties))
yield (outfile, infile, arggroup)
class CachedTemplateReader(object):
"""Read templates and cache them."""
def __init__(self, engine, tolerant=False):
"""Initialize reader."""
self._engine = engine
self._tolerant = tolerant
self._cached_templates = {}
def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template
def process_combinations(combinations, engine,
tolerant=False,
read_old=False,
delete_empty=False,
):
"""Process outfile-infile-arggroup combinations."""
outfiles = set()
templatereader = CachedTemplateReader(engine, tolerant=tolerant)
for outfile, infile, arggroup in combinations:
template = templatereader.read(infile)
properties = make_path_properties(outfile, prefix='ez_')
if read_old:
if is_filelike(outfile):
raise Exception("cannot read already open output streams")
try:
with open(outfile, 'r') as f:
properties['ez_content'] = f.read()
except IOError:
properties['ez_content'] = None
result = template.apply(dict(arggroup, **properties))
if is_filelike(outfile):
if result:
outfile.write(result)
elif result or not delete_empty:
if outfile in outfiles:
raise IOError("trying to write twice to the same file")
outfiles.add(outfile)
with open(outfile, 'w') as f:
f.write(result)
else:
try:
os.remove(outfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def perform_templating(args):
"""Perform templating according to the given arguments."""
engine = engines.engines[args.engine]
if args.vary:
it = variable_outfile_iterator(args.outfiles,
args.infiles,
args.args,
engine)
else:
it = constant_outfile_iterator(args.outfiles,
args.infiles,
args.args)
process_combinations(it, engine,
tolerant=args.tolerant,
read_old=args.read_old,
delete_empty=args.delete_empty,
)
def main_command():
"""Parse command line arguments and perform main action."""
args = parse_args()
perform_templating(args)
if __name__ == '__main__':
sys.exit(main_command())
|
blubberdiblub/eztemplate | eztemplate/__main__.py | check_engine | python | def check_engine(handle):
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1) | Check availability of requested template engine. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L230-L238 | [
"def dump_engines(target=sys.stderr):\n \"\"\"Print successfully imported templating engines.\"\"\"\n print(\"Available templating engines:\", file=target)\n\n width = max(len(engine) for engine in engines.engines)\n for handle, engine in sorted(engines.engines.items()):\n description = engine.__doc__.split('\\n', 0)[0]\n print(\" %-*s - %s\" % (width, handle, description), file=target)\n"
] | #!/usr/bin/env python
"""Provide a simple templating system for text files."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import errno
import os
import os.path
import re
import sys
from . import engines
from . import __version__
def is_filelike(ob):
"""Check for filelikeness of an object.
Needed to distinguish it from file names.
Returns true if it has a read or a write method.
"""
if hasattr(ob, 'read') and callable(ob.read):
return True
if hasattr(ob, 'write') and callable(ob.write):
return True
return False
class _PyArg(str):
"""Wrap a command line python argument.
Makes it distinguishable from a plain text argument.
"""
pass
def parse_args(args=None):
"""Parse command line arguments."""
# The argparse module provides a nice abstraction for argument parsing.
# It automatically builds up the help text, too.
parser = argparse.ArgumentParser(
prog=__package__,
description='Make substitutions in text files.',
)
parser.add_argument('-V', '--version',
action='version',
version="%%(prog)s %s" % (__version__,),
)
group = parser.add_argument_group("Engine")
group.add_argument('-e', '--engine',
dest='engine',
default='string.Template',
help="templating engine",
metavar="ENGINE",
)
group.add_argument('-t', '--tolerant',
action='store_true',
dest='tolerant',
help="don't fail on missing names",
)
group = parser.add_argument_group("Output")
group.add_argument('-s', '--stdout',
action='append_const',
dest='outfiles',
const=sys.stdout,
help="use standard output",
)
group.add_argument('-o', '--outfile',
action='append',
dest='outfiles',
help="output file",
metavar="FILE",
)
group.add_argument('--vary',
action='store_true',
dest='vary',
help="vary output file name according to template",
)
group.add_argument('-r', '--read-old',
action='store_true',
dest='read_old',
help="read preexisting output files and"
"hand the respective content to the template",
)
group.add_argument('-d', '--delete-empty',
action='store_true',
dest='delete_empty',
help="delete file if output is empty",
)
group = parser.add_argument_group("Input")
group.add_argument('--stdin',
action='append_const',
dest='infiles',
const=sys.stdin,
help="use standard input",
)
group.add_argument('-i', '--infile',
action='append',
dest='infiles',
help="any number of input files",
metavar="FILE",
)
group.add_argument('-c', '--concatenate',
action='store_true',
dest='concatenate',
help="concatenate multiple input files into one output",
)
group = parser.add_argument_group("Name-value pairs")
group.add_argument('-a', '--arg',
action='append',
dest='args',
help="any number of name-value pairs",
metavar="NAME=VALUE",
)
group.add_argument('-p', '--pyarg',
action='append',
dest='args',
type=_PyArg,
help="evaluate a python expression",
metavar="NAME=EXPRESSION",
)
group.add_argument('-n', '--next',
action='append_const',
dest='args',
const='--',
help="begin next argument group",
)
parser.add_argument(
dest='remainder',
nargs=argparse.REMAINDER,
help="possible input files and name-value pair groups "
"if not already specified through options",
)
args = parser.parse_args(args)
if args.engine == 'help':
dump_engines()
parser.exit(0)
if args.engine not in engines.engines:
parser.error("Engine '%s' is not available." % (args.engine,))
if args.vary:
if len(args.outfiles) != 1:
parser.error("need exactly one output file template")
if is_filelike(args.outfiles[0]):
parser.error("vary requires an output file template")
elif not args.outfiles:
args.outfiles = [sys.stdout]
if not args.infiles:
if args.args:
infiles = args.remainder
args.remainder = []
try:
infiles.remove('--')
except ValueError:
pass
else:
first = 1 if args.remainder and args.remainder[0] == '--' else 0
last = (len(args.remainder)
if args.vary or args.concatenate
else first + 1)
for split, infile in enumerate(args.remainder[first:last], first):
if infile == '--' or '=' in infile:
break
else:
split = last
infiles = args.remainder[first:split]
args.remainder = args.remainder[split:]
args.infiles = [path if path != '-' else sys.stdin
for path in infiles] if infiles else [sys.stdin]
if args.args:
flat_args = args.args
else:
flat_args = args.remainder
args.remainder = []
if flat_args and flat_args[0] == '--':
flat_args = flat_args[1:]
args.args = []
mapping = {}
for arg in flat_args:
if isinstance(arg, _PyArg):
name_value = arg.split('=', 1)
mapping[name_value[0]] = eval(name_value[1], {}, mapping)
elif arg == '--':
args.args.append(mapping)
mapping = {}
else:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
args.args.append(mapping)
if args.remainder:
parser.error("extraneous arguments left over")
else:
del args.remainder
return args
def dump_engines(target=sys.stderr):
"""Print successfully imported templating engines."""
print("Available templating engines:", file=target)
width = max(len(engine) for engine in engines.engines)
for handle, engine in sorted(engines.engines.items()):
description = engine.__doc__.split('\n', 0)[0]
print(" %-*s - %s" % (width, handle, description), file=target)
def make_mapping(args):
"""Make a mapping from the name=value pairs."""
mapping = {}
if args:
for arg in args:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
return mapping
def make_path_properties(file_or_path, prefix=''):
"""Build useful properties from a file path."""
is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)
if is_std:
path = '-'
elif is_filelike(file_or_path):
try:
path = str(file_or_path.name)
except AttributeError:
path = None
else:
path = str(file_or_path)
if is_std or not path:
abspath = dirname = basename = stem = ext = None
realpath = realdrive = realdir = realbase = realstem = realext = None
numbers = num = None
else:
abspath = os.path.abspath(path)
dirname, basename = os.path.split(path)
stem, ext = os.path.splitext(basename)
if not dirname:
dirname = os.curdir
realpath = os.path.realpath(path)
realdrive, tail = os.path.splitdrive(realpath)
realdir, realbase = os.path.split(tail)
realstem, realext = os.path.splitext(realbase)
numbers = [int(s) for s in re.findall(r'\d+', basename)]
num = numbers[-1] if numbers else None
return {
prefix + 'path': path,
prefix + 'abspath': abspath,
prefix + 'dirname': dirname,
prefix + 'basename': basename,
prefix + 'stem': stem,
prefix + 'ext': ext,
prefix + 'realpath': realpath,
prefix + 'realdrive': realdrive,
prefix + 'realdir': realdir,
prefix + 'realbase': realbase,
prefix + 'realstem': realstem,
prefix + 'realext': realext,
prefix + 'numbers': numbers,
prefix + 'num': num,
}
def constant_outfile_iterator(outfiles, infiles, arggroups):
"""Iterate over all output files."""
assert len(infiles) == 1
assert len(arggroups) == 1
return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles)
def variable_outfile_iterator(outfiles, infiles, arggroups, engine):
"""Iterate over variable output file name template."""
assert len(outfiles) == 1
template = engine(outfiles[0], tolerant=False)
for infile in infiles:
properties = make_path_properties(infile, prefix='')
for arggroup in arggroups:
outfile = template.apply(dict(arggroup, **properties))
yield (outfile, infile, arggroup)
class CachedTemplateReader(object):
"""Read templates and cache them."""
def __init__(self, engine, tolerant=False):
"""Initialize reader."""
self._engine = engine
self._tolerant = tolerant
self._cached_templates = {}
def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template
def process_combinations(combinations, engine,
tolerant=False,
read_old=False,
delete_empty=False,
):
"""Process outfile-infile-arggroup combinations."""
outfiles = set()
templatereader = CachedTemplateReader(engine, tolerant=tolerant)
for outfile, infile, arggroup in combinations:
template = templatereader.read(infile)
properties = make_path_properties(outfile, prefix='ez_')
if read_old:
if is_filelike(outfile):
raise Exception("cannot read already open output streams")
try:
with open(outfile, 'r') as f:
properties['ez_content'] = f.read()
except IOError:
properties['ez_content'] = None
result = template.apply(dict(arggroup, **properties))
if is_filelike(outfile):
if result:
outfile.write(result)
elif result or not delete_empty:
if outfile in outfiles:
raise IOError("trying to write twice to the same file")
outfiles.add(outfile)
with open(outfile, 'w') as f:
f.write(result)
else:
try:
os.remove(outfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def perform_templating(args):
"""Perform templating according to the given arguments."""
engine = engines.engines[args.engine]
if args.vary:
it = variable_outfile_iterator(args.outfiles,
args.infiles,
args.args,
engine)
else:
it = constant_outfile_iterator(args.outfiles,
args.infiles,
args.args)
process_combinations(it, engine,
tolerant=args.tolerant,
read_old=args.read_old,
delete_empty=args.delete_empty,
)
def main_command():
"""Parse command line arguments and perform main action."""
args = parse_args()
perform_templating(args)
if __name__ == '__main__':
sys.exit(main_command())
|
blubberdiblub/eztemplate | eztemplate/__main__.py | make_mapping | python | def make_mapping(args):
mapping = {}
if args:
for arg in args:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
return mapping | Make a mapping from the name=value pairs. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L241-L252 | null | #!/usr/bin/env python
"""Provide a simple templating system for text files."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import errno
import os
import os.path
import re
import sys
from . import engines
from . import __version__
def is_filelike(ob):
"""Check for filelikeness of an object.
Needed to distinguish it from file names.
Returns true if it has a read or a write method.
"""
if hasattr(ob, 'read') and callable(ob.read):
return True
if hasattr(ob, 'write') and callable(ob.write):
return True
return False
class _PyArg(str):
"""Wrap a command line python argument.
Makes it distinguishable from a plain text argument.
"""
pass
def parse_args(args=None):
"""Parse command line arguments."""
# The argparse module provides a nice abstraction for argument parsing.
# It automatically builds up the help text, too.
parser = argparse.ArgumentParser(
prog=__package__,
description='Make substitutions in text files.',
)
parser.add_argument('-V', '--version',
action='version',
version="%%(prog)s %s" % (__version__,),
)
group = parser.add_argument_group("Engine")
group.add_argument('-e', '--engine',
dest='engine',
default='string.Template',
help="templating engine",
metavar="ENGINE",
)
group.add_argument('-t', '--tolerant',
action='store_true',
dest='tolerant',
help="don't fail on missing names",
)
group = parser.add_argument_group("Output")
group.add_argument('-s', '--stdout',
action='append_const',
dest='outfiles',
const=sys.stdout,
help="use standard output",
)
group.add_argument('-o', '--outfile',
action='append',
dest='outfiles',
help="output file",
metavar="FILE",
)
group.add_argument('--vary',
action='store_true',
dest='vary',
help="vary output file name according to template",
)
group.add_argument('-r', '--read-old',
action='store_true',
dest='read_old',
help="read preexisting output files and"
"hand the respective content to the template",
)
group.add_argument('-d', '--delete-empty',
action='store_true',
dest='delete_empty',
help="delete file if output is empty",
)
group = parser.add_argument_group("Input")
group.add_argument('--stdin',
action='append_const',
dest='infiles',
const=sys.stdin,
help="use standard input",
)
group.add_argument('-i', '--infile',
action='append',
dest='infiles',
help="any number of input files",
metavar="FILE",
)
group.add_argument('-c', '--concatenate',
action='store_true',
dest='concatenate',
help="concatenate multiple input files into one output",
)
group = parser.add_argument_group("Name-value pairs")
group.add_argument('-a', '--arg',
action='append',
dest='args',
help="any number of name-value pairs",
metavar="NAME=VALUE",
)
group.add_argument('-p', '--pyarg',
action='append',
dest='args',
type=_PyArg,
help="evaluate a python expression",
metavar="NAME=EXPRESSION",
)
group.add_argument('-n', '--next',
action='append_const',
dest='args',
const='--',
help="begin next argument group",
)
parser.add_argument(
dest='remainder',
nargs=argparse.REMAINDER,
help="possible input files and name-value pair groups "
"if not already specified through options",
)
args = parser.parse_args(args)
if args.engine == 'help':
dump_engines()
parser.exit(0)
if args.engine not in engines.engines:
parser.error("Engine '%s' is not available." % (args.engine,))
if args.vary:
if len(args.outfiles) != 1:
parser.error("need exactly one output file template")
if is_filelike(args.outfiles[0]):
parser.error("vary requires an output file template")
elif not args.outfiles:
args.outfiles = [sys.stdout]
if not args.infiles:
if args.args:
infiles = args.remainder
args.remainder = []
try:
infiles.remove('--')
except ValueError:
pass
else:
first = 1 if args.remainder and args.remainder[0] == '--' else 0
last = (len(args.remainder)
if args.vary or args.concatenate
else first + 1)
for split, infile in enumerate(args.remainder[first:last], first):
if infile == '--' or '=' in infile:
break
else:
split = last
infiles = args.remainder[first:split]
args.remainder = args.remainder[split:]
args.infiles = [path if path != '-' else sys.stdin
for path in infiles] if infiles else [sys.stdin]
if args.args:
flat_args = args.args
else:
flat_args = args.remainder
args.remainder = []
if flat_args and flat_args[0] == '--':
flat_args = flat_args[1:]
args.args = []
mapping = {}
for arg in flat_args:
if isinstance(arg, _PyArg):
name_value = arg.split('=', 1)
mapping[name_value[0]] = eval(name_value[1], {}, mapping)
elif arg == '--':
args.args.append(mapping)
mapping = {}
else:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
args.args.append(mapping)
if args.remainder:
parser.error("extraneous arguments left over")
else:
del args.remainder
return args
def dump_engines(target=sys.stderr):
"""Print successfully imported templating engines."""
print("Available templating engines:", file=target)
width = max(len(engine) for engine in engines.engines)
for handle, engine in sorted(engines.engines.items()):
description = engine.__doc__.split('\n', 0)[0]
print(" %-*s - %s" % (width, handle, description), file=target)
def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1)
def make_path_properties(file_or_path, prefix=''):
"""Build useful properties from a file path."""
is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)
if is_std:
path = '-'
elif is_filelike(file_or_path):
try:
path = str(file_or_path.name)
except AttributeError:
path = None
else:
path = str(file_or_path)
if is_std or not path:
abspath = dirname = basename = stem = ext = None
realpath = realdrive = realdir = realbase = realstem = realext = None
numbers = num = None
else:
abspath = os.path.abspath(path)
dirname, basename = os.path.split(path)
stem, ext = os.path.splitext(basename)
if not dirname:
dirname = os.curdir
realpath = os.path.realpath(path)
realdrive, tail = os.path.splitdrive(realpath)
realdir, realbase = os.path.split(tail)
realstem, realext = os.path.splitext(realbase)
numbers = [int(s) for s in re.findall(r'\d+', basename)]
num = numbers[-1] if numbers else None
return {
prefix + 'path': path,
prefix + 'abspath': abspath,
prefix + 'dirname': dirname,
prefix + 'basename': basename,
prefix + 'stem': stem,
prefix + 'ext': ext,
prefix + 'realpath': realpath,
prefix + 'realdrive': realdrive,
prefix + 'realdir': realdir,
prefix + 'realbase': realbase,
prefix + 'realstem': realstem,
prefix + 'realext': realext,
prefix + 'numbers': numbers,
prefix + 'num': num,
}
def constant_outfile_iterator(outfiles, infiles, arggroups):
"""Iterate over all output files."""
assert len(infiles) == 1
assert len(arggroups) == 1
return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles)
def variable_outfile_iterator(outfiles, infiles, arggroups, engine):
"""Iterate over variable output file name template."""
assert len(outfiles) == 1
template = engine(outfiles[0], tolerant=False)
for infile in infiles:
properties = make_path_properties(infile, prefix='')
for arggroup in arggroups:
outfile = template.apply(dict(arggroup, **properties))
yield (outfile, infile, arggroup)
class CachedTemplateReader(object):
"""Read templates and cache them."""
def __init__(self, engine, tolerant=False):
"""Initialize reader."""
self._engine = engine
self._tolerant = tolerant
self._cached_templates = {}
def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template
def process_combinations(combinations, engine,
tolerant=False,
read_old=False,
delete_empty=False,
):
"""Process outfile-infile-arggroup combinations."""
outfiles = set()
templatereader = CachedTemplateReader(engine, tolerant=tolerant)
for outfile, infile, arggroup in combinations:
template = templatereader.read(infile)
properties = make_path_properties(outfile, prefix='ez_')
if read_old:
if is_filelike(outfile):
raise Exception("cannot read already open output streams")
try:
with open(outfile, 'r') as f:
properties['ez_content'] = f.read()
except IOError:
properties['ez_content'] = None
result = template.apply(dict(arggroup, **properties))
if is_filelike(outfile):
if result:
outfile.write(result)
elif result or not delete_empty:
if outfile in outfiles:
raise IOError("trying to write twice to the same file")
outfiles.add(outfile)
with open(outfile, 'w') as f:
f.write(result)
else:
try:
os.remove(outfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def perform_templating(args):
"""Perform templating according to the given arguments."""
engine = engines.engines[args.engine]
if args.vary:
it = variable_outfile_iterator(args.outfiles,
args.infiles,
args.args,
engine)
else:
it = constant_outfile_iterator(args.outfiles,
args.infiles,
args.args)
process_combinations(it, engine,
tolerant=args.tolerant,
read_old=args.read_old,
delete_empty=args.delete_empty,
)
def main_command():
"""Parse command line arguments and perform main action."""
args = parse_args()
perform_templating(args)
if __name__ == '__main__':
sys.exit(main_command())
|
blubberdiblub/eztemplate | eztemplate/__main__.py | make_path_properties | python | def make_path_properties(file_or_path, prefix=''):
is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)
if is_std:
path = '-'
elif is_filelike(file_or_path):
try:
path = str(file_or_path.name)
except AttributeError:
path = None
else:
path = str(file_or_path)
if is_std or not path:
abspath = dirname = basename = stem = ext = None
realpath = realdrive = realdir = realbase = realstem = realext = None
numbers = num = None
else:
abspath = os.path.abspath(path)
dirname, basename = os.path.split(path)
stem, ext = os.path.splitext(basename)
if not dirname:
dirname = os.curdir
realpath = os.path.realpath(path)
realdrive, tail = os.path.splitdrive(realpath)
realdir, realbase = os.path.split(tail)
realstem, realext = os.path.splitext(realbase)
numbers = [int(s) for s in re.findall(r'\d+', basename)]
num = numbers[-1] if numbers else None
return {
prefix + 'path': path,
prefix + 'abspath': abspath,
prefix + 'dirname': dirname,
prefix + 'basename': basename,
prefix + 'stem': stem,
prefix + 'ext': ext,
prefix + 'realpath': realpath,
prefix + 'realdrive': realdrive,
prefix + 'realdir': realdir,
prefix + 'realbase': realbase,
prefix + 'realstem': realstem,
prefix + 'realext': realext,
prefix + 'numbers': numbers,
prefix + 'num': num,
} | Build useful properties from a file path. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L255-L305 | [
"def is_filelike(ob):\n \"\"\"Check for filelikeness of an object.\n\n Needed to distinguish it from file names.\n Returns true if it has a read or a write method.\n \"\"\"\n if hasattr(ob, 'read') and callable(ob.read):\n return True\n\n if hasattr(ob, 'write') and callable(ob.write):\n return True\n\n return False\n"
] | #!/usr/bin/env python
"""Provide a simple templating system for text files."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import errno
import os
import os.path
import re
import sys
from . import engines
from . import __version__
def is_filelike(ob):
"""Check for filelikeness of an object.
Needed to distinguish it from file names.
Returns true if it has a read or a write method.
"""
if hasattr(ob, 'read') and callable(ob.read):
return True
if hasattr(ob, 'write') and callable(ob.write):
return True
return False
class _PyArg(str):
"""Wrap a command line python argument.
Makes it distinguishable from a plain text argument.
"""
pass
def parse_args(args=None):
"""Parse command line arguments."""
# The argparse module provides a nice abstraction for argument parsing.
# It automatically builds up the help text, too.
parser = argparse.ArgumentParser(
prog=__package__,
description='Make substitutions in text files.',
)
parser.add_argument('-V', '--version',
action='version',
version="%%(prog)s %s" % (__version__,),
)
group = parser.add_argument_group("Engine")
group.add_argument('-e', '--engine',
dest='engine',
default='string.Template',
help="templating engine",
metavar="ENGINE",
)
group.add_argument('-t', '--tolerant',
action='store_true',
dest='tolerant',
help="don't fail on missing names",
)
group = parser.add_argument_group("Output")
group.add_argument('-s', '--stdout',
action='append_const',
dest='outfiles',
const=sys.stdout,
help="use standard output",
)
group.add_argument('-o', '--outfile',
action='append',
dest='outfiles',
help="output file",
metavar="FILE",
)
group.add_argument('--vary',
action='store_true',
dest='vary',
help="vary output file name according to template",
)
group.add_argument('-r', '--read-old',
action='store_true',
dest='read_old',
help="read preexisting output files and"
"hand the respective content to the template",
)
group.add_argument('-d', '--delete-empty',
action='store_true',
dest='delete_empty',
help="delete file if output is empty",
)
group = parser.add_argument_group("Input")
group.add_argument('--stdin',
action='append_const',
dest='infiles',
const=sys.stdin,
help="use standard input",
)
group.add_argument('-i', '--infile',
action='append',
dest='infiles',
help="any number of input files",
metavar="FILE",
)
group.add_argument('-c', '--concatenate',
action='store_true',
dest='concatenate',
help="concatenate multiple input files into one output",
)
group = parser.add_argument_group("Name-value pairs")
group.add_argument('-a', '--arg',
action='append',
dest='args',
help="any number of name-value pairs",
metavar="NAME=VALUE",
)
group.add_argument('-p', '--pyarg',
action='append',
dest='args',
type=_PyArg,
help="evaluate a python expression",
metavar="NAME=EXPRESSION",
)
group.add_argument('-n', '--next',
action='append_const',
dest='args',
const='--',
help="begin next argument group",
)
parser.add_argument(
dest='remainder',
nargs=argparse.REMAINDER,
help="possible input files and name-value pair groups "
"if not already specified through options",
)
args = parser.parse_args(args)
if args.engine == 'help':
dump_engines()
parser.exit(0)
if args.engine not in engines.engines:
parser.error("Engine '%s' is not available." % (args.engine,))
if args.vary:
if len(args.outfiles) != 1:
parser.error("need exactly one output file template")
if is_filelike(args.outfiles[0]):
parser.error("vary requires an output file template")
elif not args.outfiles:
args.outfiles = [sys.stdout]
if not args.infiles:
if args.args:
infiles = args.remainder
args.remainder = []
try:
infiles.remove('--')
except ValueError:
pass
else:
first = 1 if args.remainder and args.remainder[0] == '--' else 0
last = (len(args.remainder)
if args.vary or args.concatenate
else first + 1)
for split, infile in enumerate(args.remainder[first:last], first):
if infile == '--' or '=' in infile:
break
else:
split = last
infiles = args.remainder[first:split]
args.remainder = args.remainder[split:]
args.infiles = [path if path != '-' else sys.stdin
for path in infiles] if infiles else [sys.stdin]
if args.args:
flat_args = args.args
else:
flat_args = args.remainder
args.remainder = []
if flat_args and flat_args[0] == '--':
flat_args = flat_args[1:]
args.args = []
mapping = {}
for arg in flat_args:
if isinstance(arg, _PyArg):
name_value = arg.split('=', 1)
mapping[name_value[0]] = eval(name_value[1], {}, mapping)
elif arg == '--':
args.args.append(mapping)
mapping = {}
else:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
args.args.append(mapping)
if args.remainder:
parser.error("extraneous arguments left over")
else:
del args.remainder
return args
def dump_engines(target=sys.stderr):
"""Print successfully imported templating engines."""
print("Available templating engines:", file=target)
width = max(len(engine) for engine in engines.engines)
for handle, engine in sorted(engines.engines.items()):
description = engine.__doc__.split('\n', 0)[0]
print(" %-*s - %s" % (width, handle, description), file=target)
def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1)
def make_mapping(args):
"""Make a mapping from the name=value pairs."""
mapping = {}
if args:
for arg in args:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
return mapping
def constant_outfile_iterator(outfiles, infiles, arggroups):
"""Iterate over all output files."""
assert len(infiles) == 1
assert len(arggroups) == 1
return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles)
def variable_outfile_iterator(outfiles, infiles, arggroups, engine):
"""Iterate over variable output file name template."""
assert len(outfiles) == 1
template = engine(outfiles[0], tolerant=False)
for infile in infiles:
properties = make_path_properties(infile, prefix='')
for arggroup in arggroups:
outfile = template.apply(dict(arggroup, **properties))
yield (outfile, infile, arggroup)
class CachedTemplateReader(object):
"""Read templates and cache them."""
def __init__(self, engine, tolerant=False):
"""Initialize reader."""
self._engine = engine
self._tolerant = tolerant
self._cached_templates = {}
def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template
def process_combinations(combinations, engine,
tolerant=False,
read_old=False,
delete_empty=False,
):
"""Process outfile-infile-arggroup combinations."""
outfiles = set()
templatereader = CachedTemplateReader(engine, tolerant=tolerant)
for outfile, infile, arggroup in combinations:
template = templatereader.read(infile)
properties = make_path_properties(outfile, prefix='ez_')
if read_old:
if is_filelike(outfile):
raise Exception("cannot read already open output streams")
try:
with open(outfile, 'r') as f:
properties['ez_content'] = f.read()
except IOError:
properties['ez_content'] = None
result = template.apply(dict(arggroup, **properties))
if is_filelike(outfile):
if result:
outfile.write(result)
elif result or not delete_empty:
if outfile in outfiles:
raise IOError("trying to write twice to the same file")
outfiles.add(outfile)
with open(outfile, 'w') as f:
f.write(result)
else:
try:
os.remove(outfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def perform_templating(args):
"""Perform templating according to the given arguments."""
engine = engines.engines[args.engine]
if args.vary:
it = variable_outfile_iterator(args.outfiles,
args.infiles,
args.args,
engine)
else:
it = constant_outfile_iterator(args.outfiles,
args.infiles,
args.args)
process_combinations(it, engine,
tolerant=args.tolerant,
read_old=args.read_old,
delete_empty=args.delete_empty,
)
def main_command():
"""Parse command line arguments and perform main action."""
args = parse_args()
perform_templating(args)
if __name__ == '__main__':
sys.exit(main_command())
|
blubberdiblub/eztemplate | eztemplate/__main__.py | constant_outfile_iterator | python | def constant_outfile_iterator(outfiles, infiles, arggroups):
assert len(infiles) == 1
assert len(arggroups) == 1
return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles) | Iterate over all output files. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L308-L313 | null | #!/usr/bin/env python
"""Provide a simple templating system for text files."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import errno
import os
import os.path
import re
import sys
from . import engines
from . import __version__
def is_filelike(ob):
"""Check for filelikeness of an object.
Needed to distinguish it from file names.
Returns true if it has a read or a write method.
"""
if hasattr(ob, 'read') and callable(ob.read):
return True
if hasattr(ob, 'write') and callable(ob.write):
return True
return False
class _PyArg(str):
"""Wrap a command line python argument.
Makes it distinguishable from a plain text argument.
"""
pass
def parse_args(args=None):
"""Parse command line arguments."""
# The argparse module provides a nice abstraction for argument parsing.
# It automatically builds up the help text, too.
parser = argparse.ArgumentParser(
prog=__package__,
description='Make substitutions in text files.',
)
parser.add_argument('-V', '--version',
action='version',
version="%%(prog)s %s" % (__version__,),
)
group = parser.add_argument_group("Engine")
group.add_argument('-e', '--engine',
dest='engine',
default='string.Template',
help="templating engine",
metavar="ENGINE",
)
group.add_argument('-t', '--tolerant',
action='store_true',
dest='tolerant',
help="don't fail on missing names",
)
group = parser.add_argument_group("Output")
group.add_argument('-s', '--stdout',
action='append_const',
dest='outfiles',
const=sys.stdout,
help="use standard output",
)
group.add_argument('-o', '--outfile',
action='append',
dest='outfiles',
help="output file",
metavar="FILE",
)
group.add_argument('--vary',
action='store_true',
dest='vary',
help="vary output file name according to template",
)
group.add_argument('-r', '--read-old',
action='store_true',
dest='read_old',
help="read preexisting output files and"
"hand the respective content to the template",
)
group.add_argument('-d', '--delete-empty',
action='store_true',
dest='delete_empty',
help="delete file if output is empty",
)
group = parser.add_argument_group("Input")
group.add_argument('--stdin',
action='append_const',
dest='infiles',
const=sys.stdin,
help="use standard input",
)
group.add_argument('-i', '--infile',
action='append',
dest='infiles',
help="any number of input files",
metavar="FILE",
)
group.add_argument('-c', '--concatenate',
action='store_true',
dest='concatenate',
help="concatenate multiple input files into one output",
)
group = parser.add_argument_group("Name-value pairs")
group.add_argument('-a', '--arg',
action='append',
dest='args',
help="any number of name-value pairs",
metavar="NAME=VALUE",
)
group.add_argument('-p', '--pyarg',
action='append',
dest='args',
type=_PyArg,
help="evaluate a python expression",
metavar="NAME=EXPRESSION",
)
group.add_argument('-n', '--next',
action='append_const',
dest='args',
const='--',
help="begin next argument group",
)
parser.add_argument(
dest='remainder',
nargs=argparse.REMAINDER,
help="possible input files and name-value pair groups "
"if not already specified through options",
)
args = parser.parse_args(args)
if args.engine == 'help':
dump_engines()
parser.exit(0)
if args.engine not in engines.engines:
parser.error("Engine '%s' is not available." % (args.engine,))
if args.vary:
if len(args.outfiles) != 1:
parser.error("need exactly one output file template")
if is_filelike(args.outfiles[0]):
parser.error("vary requires an output file template")
elif not args.outfiles:
args.outfiles = [sys.stdout]
if not args.infiles:
if args.args:
infiles = args.remainder
args.remainder = []
try:
infiles.remove('--')
except ValueError:
pass
else:
first = 1 if args.remainder and args.remainder[0] == '--' else 0
last = (len(args.remainder)
if args.vary or args.concatenate
else first + 1)
for split, infile in enumerate(args.remainder[first:last], first):
if infile == '--' or '=' in infile:
break
else:
split = last
infiles = args.remainder[first:split]
args.remainder = args.remainder[split:]
args.infiles = [path if path != '-' else sys.stdin
for path in infiles] if infiles else [sys.stdin]
if args.args:
flat_args = args.args
else:
flat_args = args.remainder
args.remainder = []
if flat_args and flat_args[0] == '--':
flat_args = flat_args[1:]
args.args = []
mapping = {}
for arg in flat_args:
if isinstance(arg, _PyArg):
name_value = arg.split('=', 1)
mapping[name_value[0]] = eval(name_value[1], {}, mapping)
elif arg == '--':
args.args.append(mapping)
mapping = {}
else:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
args.args.append(mapping)
if args.remainder:
parser.error("extraneous arguments left over")
else:
del args.remainder
return args
def dump_engines(target=sys.stderr):
"""Print successfully imported templating engines."""
print("Available templating engines:", file=target)
width = max(len(engine) for engine in engines.engines)
for handle, engine in sorted(engines.engines.items()):
description = engine.__doc__.split('\n', 0)[0]
print(" %-*s - %s" % (width, handle, description), file=target)
def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1)
def make_mapping(args):
"""Make a mapping from the name=value pairs."""
mapping = {}
if args:
for arg in args:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
return mapping
def make_path_properties(file_or_path, prefix=''):
"""Build useful properties from a file path."""
is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)
if is_std:
path = '-'
elif is_filelike(file_or_path):
try:
path = str(file_or_path.name)
except AttributeError:
path = None
else:
path = str(file_or_path)
if is_std or not path:
abspath = dirname = basename = stem = ext = None
realpath = realdrive = realdir = realbase = realstem = realext = None
numbers = num = None
else:
abspath = os.path.abspath(path)
dirname, basename = os.path.split(path)
stem, ext = os.path.splitext(basename)
if not dirname:
dirname = os.curdir
realpath = os.path.realpath(path)
realdrive, tail = os.path.splitdrive(realpath)
realdir, realbase = os.path.split(tail)
realstem, realext = os.path.splitext(realbase)
numbers = [int(s) for s in re.findall(r'\d+', basename)]
num = numbers[-1] if numbers else None
return {
prefix + 'path': path,
prefix + 'abspath': abspath,
prefix + 'dirname': dirname,
prefix + 'basename': basename,
prefix + 'stem': stem,
prefix + 'ext': ext,
prefix + 'realpath': realpath,
prefix + 'realdrive': realdrive,
prefix + 'realdir': realdir,
prefix + 'realbase': realbase,
prefix + 'realstem': realstem,
prefix + 'realext': realext,
prefix + 'numbers': numbers,
prefix + 'num': num,
}
def variable_outfile_iterator(outfiles, infiles, arggroups, engine):
"""Iterate over variable output file name template."""
assert len(outfiles) == 1
template = engine(outfiles[0], tolerant=False)
for infile in infiles:
properties = make_path_properties(infile, prefix='')
for arggroup in arggroups:
outfile = template.apply(dict(arggroup, **properties))
yield (outfile, infile, arggroup)
class CachedTemplateReader(object):
"""Read templates and cache them."""
def __init__(self, engine, tolerant=False):
"""Initialize reader."""
self._engine = engine
self._tolerant = tolerant
self._cached_templates = {}
def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template
def process_combinations(combinations, engine,
tolerant=False,
read_old=False,
delete_empty=False,
):
"""Process outfile-infile-arggroup combinations."""
outfiles = set()
templatereader = CachedTemplateReader(engine, tolerant=tolerant)
for outfile, infile, arggroup in combinations:
template = templatereader.read(infile)
properties = make_path_properties(outfile, prefix='ez_')
if read_old:
if is_filelike(outfile):
raise Exception("cannot read already open output streams")
try:
with open(outfile, 'r') as f:
properties['ez_content'] = f.read()
except IOError:
properties['ez_content'] = None
result = template.apply(dict(arggroup, **properties))
if is_filelike(outfile):
if result:
outfile.write(result)
elif result or not delete_empty:
if outfile in outfiles:
raise IOError("trying to write twice to the same file")
outfiles.add(outfile)
with open(outfile, 'w') as f:
f.write(result)
else:
try:
os.remove(outfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def perform_templating(args):
"""Perform templating according to the given arguments."""
engine = engines.engines[args.engine]
if args.vary:
it = variable_outfile_iterator(args.outfiles,
args.infiles,
args.args,
engine)
else:
it = constant_outfile_iterator(args.outfiles,
args.infiles,
args.args)
process_combinations(it, engine,
tolerant=args.tolerant,
read_old=args.read_old,
delete_empty=args.delete_empty,
)
def main_command():
"""Parse command line arguments and perform main action."""
args = parse_args()
perform_templating(args)
if __name__ == '__main__':
sys.exit(main_command())
|
blubberdiblub/eztemplate | eztemplate/__main__.py | variable_outfile_iterator | python | def variable_outfile_iterator(outfiles, infiles, arggroups, engine):
assert len(outfiles) == 1
template = engine(outfiles[0], tolerant=False)
for infile in infiles:
properties = make_path_properties(infile, prefix='')
for arggroup in arggroups:
outfile = template.apply(dict(arggroup, **properties))
yield (outfile, infile, arggroup) | Iterate over variable output file name template. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L316-L327 | [
"def make_path_properties(file_or_path, prefix=''):\n \"\"\"Build useful properties from a file path.\"\"\"\n is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)\n\n if is_std:\n path = '-'\n elif is_filelike(file_or_path):\n try:\n path = str(file_or_path.name)\n except AttributeError:\n path = None\n else:\n path = str(file_or_path)\n\n if is_std or not path:\n abspath = dirname = basename = stem = ext = None\n realpath = realdrive = realdir = realbase = realstem = realext = None\n numbers = num = None\n else:\n abspath = os.path.abspath(path)\n\n dirname, basename = os.path.split(path)\n stem, ext = os.path.splitext(basename)\n\n if not dirname:\n dirname = os.curdir\n\n realpath = os.path.realpath(path)\n realdrive, tail = os.path.splitdrive(realpath)\n realdir, realbase = os.path.split(tail)\n realstem, realext = os.path.splitext(realbase)\n\n numbers = [int(s) for s in re.findall(r'\\d+', basename)]\n num = numbers[-1] if numbers else None\n\n return {\n prefix + 'path': path,\n prefix + 'abspath': abspath,\n prefix + 'dirname': dirname,\n prefix + 'basename': basename,\n prefix + 'stem': stem,\n prefix + 'ext': ext,\n prefix + 'realpath': realpath,\n prefix + 'realdrive': realdrive,\n prefix + 'realdir': realdir,\n prefix + 'realbase': realbase,\n prefix + 'realstem': realstem,\n prefix + 'realext': realext,\n prefix + 'numbers': numbers,\n prefix + 'num': num,\n }\n"
] | #!/usr/bin/env python
"""Provide a simple templating system for text files."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import errno
import os
import os.path
import re
import sys
from . import engines
from . import __version__
def is_filelike(ob):
"""Check for filelikeness of an object.
Needed to distinguish it from file names.
Returns true if it has a read or a write method.
"""
if hasattr(ob, 'read') and callable(ob.read):
return True
if hasattr(ob, 'write') and callable(ob.write):
return True
return False
class _PyArg(str):
"""Wrap a command line python argument.
Makes it distinguishable from a plain text argument.
"""
pass
def parse_args(args=None):
"""Parse command line arguments."""
# The argparse module provides a nice abstraction for argument parsing.
# It automatically builds up the help text, too.
parser = argparse.ArgumentParser(
prog=__package__,
description='Make substitutions in text files.',
)
parser.add_argument('-V', '--version',
action='version',
version="%%(prog)s %s" % (__version__,),
)
group = parser.add_argument_group("Engine")
group.add_argument('-e', '--engine',
dest='engine',
default='string.Template',
help="templating engine",
metavar="ENGINE",
)
group.add_argument('-t', '--tolerant',
action='store_true',
dest='tolerant',
help="don't fail on missing names",
)
group = parser.add_argument_group("Output")
group.add_argument('-s', '--stdout',
action='append_const',
dest='outfiles',
const=sys.stdout,
help="use standard output",
)
group.add_argument('-o', '--outfile',
action='append',
dest='outfiles',
help="output file",
metavar="FILE",
)
group.add_argument('--vary',
action='store_true',
dest='vary',
help="vary output file name according to template",
)
group.add_argument('-r', '--read-old',
action='store_true',
dest='read_old',
help="read preexisting output files and"
"hand the respective content to the template",
)
group.add_argument('-d', '--delete-empty',
action='store_true',
dest='delete_empty',
help="delete file if output is empty",
)
group = parser.add_argument_group("Input")
group.add_argument('--stdin',
action='append_const',
dest='infiles',
const=sys.stdin,
help="use standard input",
)
group.add_argument('-i', '--infile',
action='append',
dest='infiles',
help="any number of input files",
metavar="FILE",
)
group.add_argument('-c', '--concatenate',
action='store_true',
dest='concatenate',
help="concatenate multiple input files into one output",
)
group = parser.add_argument_group("Name-value pairs")
group.add_argument('-a', '--arg',
action='append',
dest='args',
help="any number of name-value pairs",
metavar="NAME=VALUE",
)
group.add_argument('-p', '--pyarg',
action='append',
dest='args',
type=_PyArg,
help="evaluate a python expression",
metavar="NAME=EXPRESSION",
)
group.add_argument('-n', '--next',
action='append_const',
dest='args',
const='--',
help="begin next argument group",
)
parser.add_argument(
dest='remainder',
nargs=argparse.REMAINDER,
help="possible input files and name-value pair groups "
"if not already specified through options",
)
args = parser.parse_args(args)
if args.engine == 'help':
dump_engines()
parser.exit(0)
if args.engine not in engines.engines:
parser.error("Engine '%s' is not available." % (args.engine,))
if args.vary:
if len(args.outfiles) != 1:
parser.error("need exactly one output file template")
if is_filelike(args.outfiles[0]):
parser.error("vary requires an output file template")
elif not args.outfiles:
args.outfiles = [sys.stdout]
if not args.infiles:
if args.args:
infiles = args.remainder
args.remainder = []
try:
infiles.remove('--')
except ValueError:
pass
else:
first = 1 if args.remainder and args.remainder[0] == '--' else 0
last = (len(args.remainder)
if args.vary or args.concatenate
else first + 1)
for split, infile in enumerate(args.remainder[first:last], first):
if infile == '--' or '=' in infile:
break
else:
split = last
infiles = args.remainder[first:split]
args.remainder = args.remainder[split:]
args.infiles = [path if path != '-' else sys.stdin
for path in infiles] if infiles else [sys.stdin]
if args.args:
flat_args = args.args
else:
flat_args = args.remainder
args.remainder = []
if flat_args and flat_args[0] == '--':
flat_args = flat_args[1:]
args.args = []
mapping = {}
for arg in flat_args:
if isinstance(arg, _PyArg):
name_value = arg.split('=', 1)
mapping[name_value[0]] = eval(name_value[1], {}, mapping)
elif arg == '--':
args.args.append(mapping)
mapping = {}
else:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
args.args.append(mapping)
if args.remainder:
parser.error("extraneous arguments left over")
else:
del args.remainder
return args
def dump_engines(target=sys.stderr):
"""Print successfully imported templating engines."""
print("Available templating engines:", file=target)
width = max(len(engine) for engine in engines.engines)
for handle, engine in sorted(engines.engines.items()):
description = engine.__doc__.split('\n', 0)[0]
print(" %-*s - %s" % (width, handle, description), file=target)
def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1)
def make_mapping(args):
"""Make a mapping from the name=value pairs."""
mapping = {}
if args:
for arg in args:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
return mapping
def make_path_properties(file_or_path, prefix=''):
"""Build useful properties from a file path."""
is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)
if is_std:
path = '-'
elif is_filelike(file_or_path):
try:
path = str(file_or_path.name)
except AttributeError:
path = None
else:
path = str(file_or_path)
if is_std or not path:
abspath = dirname = basename = stem = ext = None
realpath = realdrive = realdir = realbase = realstem = realext = None
numbers = num = None
else:
abspath = os.path.abspath(path)
dirname, basename = os.path.split(path)
stem, ext = os.path.splitext(basename)
if not dirname:
dirname = os.curdir
realpath = os.path.realpath(path)
realdrive, tail = os.path.splitdrive(realpath)
realdir, realbase = os.path.split(tail)
realstem, realext = os.path.splitext(realbase)
numbers = [int(s) for s in re.findall(r'\d+', basename)]
num = numbers[-1] if numbers else None
return {
prefix + 'path': path,
prefix + 'abspath': abspath,
prefix + 'dirname': dirname,
prefix + 'basename': basename,
prefix + 'stem': stem,
prefix + 'ext': ext,
prefix + 'realpath': realpath,
prefix + 'realdrive': realdrive,
prefix + 'realdir': realdir,
prefix + 'realbase': realbase,
prefix + 'realstem': realstem,
prefix + 'realext': realext,
prefix + 'numbers': numbers,
prefix + 'num': num,
}
def constant_outfile_iterator(outfiles, infiles, arggroups):
"""Iterate over all output files."""
assert len(infiles) == 1
assert len(arggroups) == 1
return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles)
class CachedTemplateReader(object):
"""Read templates and cache them."""
def __init__(self, engine, tolerant=False):
"""Initialize reader."""
self._engine = engine
self._tolerant = tolerant
self._cached_templates = {}
def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template
def process_combinations(combinations, engine,
tolerant=False,
read_old=False,
delete_empty=False,
):
"""Process outfile-infile-arggroup combinations."""
outfiles = set()
templatereader = CachedTemplateReader(engine, tolerant=tolerant)
for outfile, infile, arggroup in combinations:
template = templatereader.read(infile)
properties = make_path_properties(outfile, prefix='ez_')
if read_old:
if is_filelike(outfile):
raise Exception("cannot read already open output streams")
try:
with open(outfile, 'r') as f:
properties['ez_content'] = f.read()
except IOError:
properties['ez_content'] = None
result = template.apply(dict(arggroup, **properties))
if is_filelike(outfile):
if result:
outfile.write(result)
elif result or not delete_empty:
if outfile in outfiles:
raise IOError("trying to write twice to the same file")
outfiles.add(outfile)
with open(outfile, 'w') as f:
f.write(result)
else:
try:
os.remove(outfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def perform_templating(args):
"""Perform templating according to the given arguments."""
engine = engines.engines[args.engine]
if args.vary:
it = variable_outfile_iterator(args.outfiles,
args.infiles,
args.args,
engine)
else:
it = constant_outfile_iterator(args.outfiles,
args.infiles,
args.args)
process_combinations(it, engine,
tolerant=args.tolerant,
read_old=args.read_old,
delete_empty=args.delete_empty,
)
def main_command():
"""Parse command line arguments and perform main action."""
args = parse_args()
perform_templating(args)
if __name__ == '__main__':
sys.exit(main_command())
|
blubberdiblub/eztemplate | eztemplate/__main__.py | process_combinations | python | def process_combinations(combinations, engine,
tolerant=False,
read_old=False,
delete_empty=False,
):
outfiles = set()
templatereader = CachedTemplateReader(engine, tolerant=tolerant)
for outfile, infile, arggroup in combinations:
template = templatereader.read(infile)
properties = make_path_properties(outfile, prefix='ez_')
if read_old:
if is_filelike(outfile):
raise Exception("cannot read already open output streams")
try:
with open(outfile, 'r') as f:
properties['ez_content'] = f.read()
except IOError:
properties['ez_content'] = None
result = template.apply(dict(arggroup, **properties))
if is_filelike(outfile):
if result:
outfile.write(result)
elif result or not delete_empty:
if outfile in outfiles:
raise IOError("trying to write twice to the same file")
outfiles.add(outfile)
with open(outfile, 'w') as f:
f.write(result)
else:
try:
os.remove(outfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise | Process outfile-infile-arggroup combinations. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L361-L400 | [
"def is_filelike(ob):\n \"\"\"Check for filelikeness of an object.\n\n Needed to distinguish it from file names.\n Returns true if it has a read or a write method.\n \"\"\"\n if hasattr(ob, 'read') and callable(ob.read):\n return True\n\n if hasattr(ob, 'write') and callable(ob.write):\n return True\n\n return False\n",
"def make_path_properties(file_or_path, prefix=''):\n \"\"\"Build useful properties from a file path.\"\"\"\n is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)\n\n if is_std:\n path = '-'\n elif is_filelike(file_or_path):\n try:\n path = str(file_or_path.name)\n except AttributeError:\n path = None\n else:\n path = str(file_or_path)\n\n if is_std or not path:\n abspath = dirname = basename = stem = ext = None\n realpath = realdrive = realdir = realbase = realstem = realext = None\n numbers = num = None\n else:\n abspath = os.path.abspath(path)\n\n dirname, basename = os.path.split(path)\n stem, ext = os.path.splitext(basename)\n\n if not dirname:\n dirname = os.curdir\n\n realpath = os.path.realpath(path)\n realdrive, tail = os.path.splitdrive(realpath)\n realdir, realbase = os.path.split(tail)\n realstem, realext = os.path.splitext(realbase)\n\n numbers = [int(s) for s in re.findall(r'\\d+', basename)]\n num = numbers[-1] if numbers else None\n\n return {\n prefix + 'path': path,\n prefix + 'abspath': abspath,\n prefix + 'dirname': dirname,\n prefix + 'basename': basename,\n prefix + 'stem': stem,\n prefix + 'ext': ext,\n prefix + 'realpath': realpath,\n prefix + 'realdrive': realdrive,\n prefix + 'realdir': realdir,\n prefix + 'realbase': realbase,\n prefix + 'realstem': realstem,\n prefix + 'realext': realext,\n prefix + 'numbers': numbers,\n prefix + 'num': num,\n }\n",
"def read(self, file_or_path):\n \"\"\"Read template from cache or file.\"\"\"\n if file_or_path in self._cached_templates:\n return self._cached_templates[file_or_path]\n\n if is_filelike(file_or_path):\n template = file_or_path.read()\n dirname = None\n else:\n with open(file_or_path, 'r') as f:\n template = f.read()\n dirname = os.path.dirname(file_or_path)\n\n template = self._engine(template,\n dirname=dirname,\n tolerant=self._tolerant)\n\n self._cached_templates[file_or_path] = template\n return template\n"
] | #!/usr/bin/env python
"""Provide a simple templating system for text files."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import errno
import os
import os.path
import re
import sys
from . import engines
from . import __version__
def is_filelike(ob):
"""Check for filelikeness of an object.
Needed to distinguish it from file names.
Returns true if it has a read or a write method.
"""
if hasattr(ob, 'read') and callable(ob.read):
return True
if hasattr(ob, 'write') and callable(ob.write):
return True
return False
class _PyArg(str):
"""Wrap a command line python argument.
Makes it distinguishable from a plain text argument.
"""
pass
def parse_args(args=None):
"""Parse command line arguments."""
# The argparse module provides a nice abstraction for argument parsing.
# It automatically builds up the help text, too.
parser = argparse.ArgumentParser(
prog=__package__,
description='Make substitutions in text files.',
)
parser.add_argument('-V', '--version',
action='version',
version="%%(prog)s %s" % (__version__,),
)
group = parser.add_argument_group("Engine")
group.add_argument('-e', '--engine',
dest='engine',
default='string.Template',
help="templating engine",
metavar="ENGINE",
)
group.add_argument('-t', '--tolerant',
action='store_true',
dest='tolerant',
help="don't fail on missing names",
)
group = parser.add_argument_group("Output")
group.add_argument('-s', '--stdout',
action='append_const',
dest='outfiles',
const=sys.stdout,
help="use standard output",
)
group.add_argument('-o', '--outfile',
action='append',
dest='outfiles',
help="output file",
metavar="FILE",
)
group.add_argument('--vary',
action='store_true',
dest='vary',
help="vary output file name according to template",
)
group.add_argument('-r', '--read-old',
action='store_true',
dest='read_old',
help="read preexisting output files and"
"hand the respective content to the template",
)
group.add_argument('-d', '--delete-empty',
action='store_true',
dest='delete_empty',
help="delete file if output is empty",
)
group = parser.add_argument_group("Input")
group.add_argument('--stdin',
action='append_const',
dest='infiles',
const=sys.stdin,
help="use standard input",
)
group.add_argument('-i', '--infile',
action='append',
dest='infiles',
help="any number of input files",
metavar="FILE",
)
group.add_argument('-c', '--concatenate',
action='store_true',
dest='concatenate',
help="concatenate multiple input files into one output",
)
group = parser.add_argument_group("Name-value pairs")
group.add_argument('-a', '--arg',
action='append',
dest='args',
help="any number of name-value pairs",
metavar="NAME=VALUE",
)
group.add_argument('-p', '--pyarg',
action='append',
dest='args',
type=_PyArg,
help="evaluate a python expression",
metavar="NAME=EXPRESSION",
)
group.add_argument('-n', '--next',
action='append_const',
dest='args',
const='--',
help="begin next argument group",
)
parser.add_argument(
dest='remainder',
nargs=argparse.REMAINDER,
help="possible input files and name-value pair groups "
"if not already specified through options",
)
args = parser.parse_args(args)
if args.engine == 'help':
dump_engines()
parser.exit(0)
if args.engine not in engines.engines:
parser.error("Engine '%s' is not available." % (args.engine,))
if args.vary:
if len(args.outfiles) != 1:
parser.error("need exactly one output file template")
if is_filelike(args.outfiles[0]):
parser.error("vary requires an output file template")
elif not args.outfiles:
args.outfiles = [sys.stdout]
if not args.infiles:
if args.args:
infiles = args.remainder
args.remainder = []
try:
infiles.remove('--')
except ValueError:
pass
else:
first = 1 if args.remainder and args.remainder[0] == '--' else 0
last = (len(args.remainder)
if args.vary or args.concatenate
else first + 1)
for split, infile in enumerate(args.remainder[first:last], first):
if infile == '--' or '=' in infile:
break
else:
split = last
infiles = args.remainder[first:split]
args.remainder = args.remainder[split:]
args.infiles = [path if path != '-' else sys.stdin
for path in infiles] if infiles else [sys.stdin]
if args.args:
flat_args = args.args
else:
flat_args = args.remainder
args.remainder = []
if flat_args and flat_args[0] == '--':
flat_args = flat_args[1:]
args.args = []
mapping = {}
for arg in flat_args:
if isinstance(arg, _PyArg):
name_value = arg.split('=', 1)
mapping[name_value[0]] = eval(name_value[1], {}, mapping)
elif arg == '--':
args.args.append(mapping)
mapping = {}
else:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
args.args.append(mapping)
if args.remainder:
parser.error("extraneous arguments left over")
else:
del args.remainder
return args
def dump_engines(target=sys.stderr):
"""Print successfully imported templating engines."""
print("Available templating engines:", file=target)
width = max(len(engine) for engine in engines.engines)
for handle, engine in sorted(engines.engines.items()):
description = engine.__doc__.split('\n', 0)[0]
print(" %-*s - %s" % (width, handle, description), file=target)
def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1)
def make_mapping(args):
"""Make a mapping from the name=value pairs."""
mapping = {}
if args:
for arg in args:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
return mapping
def make_path_properties(file_or_path, prefix=''):
"""Build useful properties from a file path."""
is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)
if is_std:
path = '-'
elif is_filelike(file_or_path):
try:
path = str(file_or_path.name)
except AttributeError:
path = None
else:
path = str(file_or_path)
if is_std or not path:
abspath = dirname = basename = stem = ext = None
realpath = realdrive = realdir = realbase = realstem = realext = None
numbers = num = None
else:
abspath = os.path.abspath(path)
dirname, basename = os.path.split(path)
stem, ext = os.path.splitext(basename)
if not dirname:
dirname = os.curdir
realpath = os.path.realpath(path)
realdrive, tail = os.path.splitdrive(realpath)
realdir, realbase = os.path.split(tail)
realstem, realext = os.path.splitext(realbase)
numbers = [int(s) for s in re.findall(r'\d+', basename)]
num = numbers[-1] if numbers else None
return {
prefix + 'path': path,
prefix + 'abspath': abspath,
prefix + 'dirname': dirname,
prefix + 'basename': basename,
prefix + 'stem': stem,
prefix + 'ext': ext,
prefix + 'realpath': realpath,
prefix + 'realdrive': realdrive,
prefix + 'realdir': realdir,
prefix + 'realbase': realbase,
prefix + 'realstem': realstem,
prefix + 'realext': realext,
prefix + 'numbers': numbers,
prefix + 'num': num,
}
def constant_outfile_iterator(outfiles, infiles, arggroups):
"""Iterate over all output files."""
assert len(infiles) == 1
assert len(arggroups) == 1
return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles)
def variable_outfile_iterator(outfiles, infiles, arggroups, engine):
"""Iterate over variable output file name template."""
assert len(outfiles) == 1
template = engine(outfiles[0], tolerant=False)
for infile in infiles:
properties = make_path_properties(infile, prefix='')
for arggroup in arggroups:
outfile = template.apply(dict(arggroup, **properties))
yield (outfile, infile, arggroup)
class CachedTemplateReader(object):
"""Read templates and cache them."""
def __init__(self, engine, tolerant=False):
"""Initialize reader."""
self._engine = engine
self._tolerant = tolerant
self._cached_templates = {}
def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template
def perform_templating(args):
"""Perform templating according to the given arguments."""
engine = engines.engines[args.engine]
if args.vary:
it = variable_outfile_iterator(args.outfiles,
args.infiles,
args.args,
engine)
else:
it = constant_outfile_iterator(args.outfiles,
args.infiles,
args.args)
process_combinations(it, engine,
tolerant=args.tolerant,
read_old=args.read_old,
delete_empty=args.delete_empty,
)
def main_command():
"""Parse command line arguments and perform main action."""
args = parse_args()
perform_templating(args)
if __name__ == '__main__':
sys.exit(main_command())
|
blubberdiblub/eztemplate | eztemplate/__main__.py | perform_templating | python | def perform_templating(args):
engine = engines.engines[args.engine]
if args.vary:
it = variable_outfile_iterator(args.outfiles,
args.infiles,
args.args,
engine)
else:
it = constant_outfile_iterator(args.outfiles,
args.infiles,
args.args)
process_combinations(it, engine,
tolerant=args.tolerant,
read_old=args.read_old,
delete_empty=args.delete_empty,
) | Perform templating according to the given arguments. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L403-L421 | [
"def constant_outfile_iterator(outfiles, infiles, arggroups):\n \"\"\"Iterate over all output files.\"\"\"\n assert len(infiles) == 1\n assert len(arggroups) == 1\n\n return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles)\n",
"def variable_outfile_iterator(outfiles, infiles, arggroups, engine):\n \"\"\"Iterate over variable output file name template.\"\"\"\n assert len(outfiles) == 1\n\n template = engine(outfiles[0], tolerant=False)\n\n for infile in infiles:\n properties = make_path_properties(infile, prefix='')\n\n for arggroup in arggroups:\n outfile = template.apply(dict(arggroup, **properties))\n yield (outfile, infile, arggroup)\n",
"def process_combinations(combinations, engine,\n tolerant=False,\n read_old=False,\n delete_empty=False,\n ):\n \"\"\"Process outfile-infile-arggroup combinations.\"\"\"\n outfiles = set()\n\n templatereader = CachedTemplateReader(engine, tolerant=tolerant)\n\n for outfile, infile, arggroup in combinations:\n template = templatereader.read(infile)\n properties = make_path_properties(outfile, prefix='ez_')\n\n if read_old:\n if is_filelike(outfile):\n raise Exception(\"cannot read already open output streams\")\n try:\n with open(outfile, 'r') as f:\n properties['ez_content'] = f.read()\n except IOError:\n properties['ez_content'] = None\n\n result = template.apply(dict(arggroup, **properties))\n\n if is_filelike(outfile):\n if result:\n outfile.write(result)\n elif result or not delete_empty:\n if outfile in outfiles:\n raise IOError(\"trying to write twice to the same file\")\n outfiles.add(outfile)\n with open(outfile, 'w') as f:\n f.write(result)\n else:\n try:\n os.remove(outfile)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n"
] | #!/usr/bin/env python
"""Provide a simple templating system for text files."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import errno
import os
import os.path
import re
import sys
from . import engines
from . import __version__
def is_filelike(ob):
"""Check for filelikeness of an object.
Needed to distinguish it from file names.
Returns true if it has a read or a write method.
"""
if hasattr(ob, 'read') and callable(ob.read):
return True
if hasattr(ob, 'write') and callable(ob.write):
return True
return False
class _PyArg(str):
"""Wrap a command line python argument.
Makes it distinguishable from a plain text argument.
"""
pass
def parse_args(args=None):
"""Parse command line arguments."""
# The argparse module provides a nice abstraction for argument parsing.
# It automatically builds up the help text, too.
parser = argparse.ArgumentParser(
prog=__package__,
description='Make substitutions in text files.',
)
parser.add_argument('-V', '--version',
action='version',
version="%%(prog)s %s" % (__version__,),
)
group = parser.add_argument_group("Engine")
group.add_argument('-e', '--engine',
dest='engine',
default='string.Template',
help="templating engine",
metavar="ENGINE",
)
group.add_argument('-t', '--tolerant',
action='store_true',
dest='tolerant',
help="don't fail on missing names",
)
group = parser.add_argument_group("Output")
group.add_argument('-s', '--stdout',
action='append_const',
dest='outfiles',
const=sys.stdout,
help="use standard output",
)
group.add_argument('-o', '--outfile',
action='append',
dest='outfiles',
help="output file",
metavar="FILE",
)
group.add_argument('--vary',
action='store_true',
dest='vary',
help="vary output file name according to template",
)
group.add_argument('-r', '--read-old',
action='store_true',
dest='read_old',
help="read preexisting output files and"
"hand the respective content to the template",
)
group.add_argument('-d', '--delete-empty',
action='store_true',
dest='delete_empty',
help="delete file if output is empty",
)
group = parser.add_argument_group("Input")
group.add_argument('--stdin',
action='append_const',
dest='infiles',
const=sys.stdin,
help="use standard input",
)
group.add_argument('-i', '--infile',
action='append',
dest='infiles',
help="any number of input files",
metavar="FILE",
)
group.add_argument('-c', '--concatenate',
action='store_true',
dest='concatenate',
help="concatenate multiple input files into one output",
)
group = parser.add_argument_group("Name-value pairs")
group.add_argument('-a', '--arg',
action='append',
dest='args',
help="any number of name-value pairs",
metavar="NAME=VALUE",
)
group.add_argument('-p', '--pyarg',
action='append',
dest='args',
type=_PyArg,
help="evaluate a python expression",
metavar="NAME=EXPRESSION",
)
group.add_argument('-n', '--next',
action='append_const',
dest='args',
const='--',
help="begin next argument group",
)
parser.add_argument(
dest='remainder',
nargs=argparse.REMAINDER,
help="possible input files and name-value pair groups "
"if not already specified through options",
)
args = parser.parse_args(args)
if args.engine == 'help':
dump_engines()
parser.exit(0)
if args.engine not in engines.engines:
parser.error("Engine '%s' is not available." % (args.engine,))
if args.vary:
if len(args.outfiles) != 1:
parser.error("need exactly one output file template")
if is_filelike(args.outfiles[0]):
parser.error("vary requires an output file template")
elif not args.outfiles:
args.outfiles = [sys.stdout]
if not args.infiles:
if args.args:
infiles = args.remainder
args.remainder = []
try:
infiles.remove('--')
except ValueError:
pass
else:
first = 1 if args.remainder and args.remainder[0] == '--' else 0
last = (len(args.remainder)
if args.vary or args.concatenate
else first + 1)
for split, infile in enumerate(args.remainder[first:last], first):
if infile == '--' or '=' in infile:
break
else:
split = last
infiles = args.remainder[first:split]
args.remainder = args.remainder[split:]
args.infiles = [path if path != '-' else sys.stdin
for path in infiles] if infiles else [sys.stdin]
if args.args:
flat_args = args.args
else:
flat_args = args.remainder
args.remainder = []
if flat_args and flat_args[0] == '--':
flat_args = flat_args[1:]
args.args = []
mapping = {}
for arg in flat_args:
if isinstance(arg, _PyArg):
name_value = arg.split('=', 1)
mapping[name_value[0]] = eval(name_value[1], {}, mapping)
elif arg == '--':
args.args.append(mapping)
mapping = {}
else:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
args.args.append(mapping)
if args.remainder:
parser.error("extraneous arguments left over")
else:
del args.remainder
return args
def dump_engines(target=sys.stderr):
"""Print successfully imported templating engines."""
print("Available templating engines:", file=target)
width = max(len(engine) for engine in engines.engines)
for handle, engine in sorted(engines.engines.items()):
description = engine.__doc__.split('\n', 0)[0]
print(" %-*s - %s" % (width, handle, description), file=target)
def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1)
def make_mapping(args):
"""Make a mapping from the name=value pairs."""
mapping = {}
if args:
for arg in args:
name_value = arg.split('=', 1)
mapping[name_value[0]] = (name_value[1]
if len(name_value) > 1
else None)
return mapping
def make_path_properties(file_or_path, prefix=''):
"""Build useful properties from a file path."""
is_std = file_or_path in (sys.stdin, sys.stdout, sys.stderr)
if is_std:
path = '-'
elif is_filelike(file_or_path):
try:
path = str(file_or_path.name)
except AttributeError:
path = None
else:
path = str(file_or_path)
if is_std or not path:
abspath = dirname = basename = stem = ext = None
realpath = realdrive = realdir = realbase = realstem = realext = None
numbers = num = None
else:
abspath = os.path.abspath(path)
dirname, basename = os.path.split(path)
stem, ext = os.path.splitext(basename)
if not dirname:
dirname = os.curdir
realpath = os.path.realpath(path)
realdrive, tail = os.path.splitdrive(realpath)
realdir, realbase = os.path.split(tail)
realstem, realext = os.path.splitext(realbase)
numbers = [int(s) for s in re.findall(r'\d+', basename)]
num = numbers[-1] if numbers else None
return {
prefix + 'path': path,
prefix + 'abspath': abspath,
prefix + 'dirname': dirname,
prefix + 'basename': basename,
prefix + 'stem': stem,
prefix + 'ext': ext,
prefix + 'realpath': realpath,
prefix + 'realdrive': realdrive,
prefix + 'realdir': realdir,
prefix + 'realbase': realbase,
prefix + 'realstem': realstem,
prefix + 'realext': realext,
prefix + 'numbers': numbers,
prefix + 'num': num,
}
def constant_outfile_iterator(outfiles, infiles, arggroups):
"""Iterate over all output files."""
assert len(infiles) == 1
assert len(arggroups) == 1
return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles)
def variable_outfile_iterator(outfiles, infiles, arggroups, engine):
"""Iterate over variable output file name template."""
assert len(outfiles) == 1
template = engine(outfiles[0], tolerant=False)
for infile in infiles:
properties = make_path_properties(infile, prefix='')
for arggroup in arggroups:
outfile = template.apply(dict(arggroup, **properties))
yield (outfile, infile, arggroup)
class CachedTemplateReader(object):
"""Read templates and cache them."""
def __init__(self, engine, tolerant=False):
"""Initialize reader."""
self._engine = engine
self._tolerant = tolerant
self._cached_templates = {}
def read(self, file_or_path):
"""Read template from cache or file."""
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template
def process_combinations(combinations, engine,
tolerant=False,
read_old=False,
delete_empty=False,
):
"""Process outfile-infile-arggroup combinations."""
outfiles = set()
templatereader = CachedTemplateReader(engine, tolerant=tolerant)
for outfile, infile, arggroup in combinations:
template = templatereader.read(infile)
properties = make_path_properties(outfile, prefix='ez_')
if read_old:
if is_filelike(outfile):
raise Exception("cannot read already open output streams")
try:
with open(outfile, 'r') as f:
properties['ez_content'] = f.read()
except IOError:
properties['ez_content'] = None
result = template.apply(dict(arggroup, **properties))
if is_filelike(outfile):
if result:
outfile.write(result)
elif result or not delete_empty:
if outfile in outfiles:
raise IOError("trying to write twice to the same file")
outfiles.add(outfile)
with open(outfile, 'w') as f:
f.write(result)
else:
try:
os.remove(outfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def main_command():
"""Parse command line arguments and perform main action."""
args = parse_args()
perform_templating(args)
if __name__ == '__main__':
sys.exit(main_command())
|
blubberdiblub/eztemplate | eztemplate/__main__.py | CachedTemplateReader.read | python | def read(self, file_or_path):
if file_or_path in self._cached_templates:
return self._cached_templates[file_or_path]
if is_filelike(file_or_path):
template = file_or_path.read()
dirname = None
else:
with open(file_or_path, 'r') as f:
template = f.read()
dirname = os.path.dirname(file_or_path)
template = self._engine(template,
dirname=dirname,
tolerant=self._tolerant)
self._cached_templates[file_or_path] = template
return template | Read template from cache or file. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L340-L358 | [
"def is_filelike(ob):\n \"\"\"Check for filelikeness of an object.\n\n Needed to distinguish it from file names.\n Returns true if it has a read or a write method.\n \"\"\"\n if hasattr(ob, 'read') and callable(ob.read):\n return True\n\n if hasattr(ob, 'write') and callable(ob.write):\n return True\n\n return False\n"
] | class CachedTemplateReader(object):
"""Read templates and cache them."""
def __init__(self, engine, tolerant=False):
"""Initialize reader."""
self._engine = engine
self._tolerant = tolerant
self._cached_templates = {}
|
blubberdiblub/eztemplate | eztemplate/engines/empy_engine.py | SubsystemWrapper.open | python | def open(self, name, *args, **kwargs):
if self.basedir is not None:
name = os.path.join(self.basedir, name)
return em.Subsystem.open(self, name, *args, **kwargs) | Open file, possibly relative to a base directory. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/engines/empy_engine.py#L32-L37 | null | class SubsystemWrapper(em.Subsystem):
"""Wrap EmPy's Subsystem class.
Allows to open files relative to a base directory.
"""
def __init__(self, basedir=None, **kwargs):
"""Initialize Subsystem plus a possible base directory."""
em.Subsystem.__init__(self, **kwargs)
self.basedir = basedir
|
blubberdiblub/eztemplate | eztemplate/engines/empy_engine.py | EmpyEngine.apply | python | def apply(self, mapping):
self.output.seek(0)
self.output.truncate(0)
self.interpreter.string(self.template, locals=mapping)
return self.output.getvalue() | Apply a mapping of name-value-pairs to a template. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/engines/empy_engine.py#L59-L64 | null | class EmpyEngine(Engine):
"""Empy templating engine."""
handle = 'empy'
def __init__(self, template, dirname=None, **kwargs):
"""Initialize empy template."""
super(EmpyEngine, self).__init__(**kwargs)
if dirname is not None:
# FIXME: This is a really bad idea, as it works like a global.
# Blame EmPy.
em.theSubsystem = SubsystemWrapper(basedir=dirname)
self.output = StringIO()
self.interpreter = em.Interpreter(output=self.output)
self.template = template
|
blubberdiblub/eztemplate | setup.py | get_version | python | def get_version():
try:
f = open('eztemplate/version.py', 'r')
except IOError as e:
if e.errno != errno.ENOENT:
raise
m = None
else:
m = re.match('^\s*__version__\s*=\s*(?P<version>.*)$', f.read(), re.M)
f.close()
__version__ = ast.literal_eval(m.group('version')) if m else None
try:
git_version = subprocess.check_output(['git', 'describe', '--dirty'])
except:
if __version__ is None:
raise ValueError("cannot determine version number")
return __version__
m = re.match(r'^\s*'
r'(?P<version>\S+?)'
r'(-(?P<post>\d+)-(?P<commit>g[0-9a-f]+))?'
r'(-(?P<dirty>dirty))?'
r'\s*$', git_version.decode())
if not m:
raise ValueError("cannot parse git describe output")
git_version = m.group('version')
post = m.group('post')
commit = m.group('commit')
dirty = m.group('dirty')
local = []
if post:
post = int(post)
if post:
git_version += '.post%d' % (post,)
if commit:
local.append(commit)
if dirty:
local.append(dirty)
if local:
git_version += '+' + '.'.join(local)
if git_version != __version__:
with open('eztemplate/version.py', 'w') as f:
f.write("__version__ = %r\n" % (str(git_version),))
return git_version | Build version number from git repository tag. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/setup.py#L15-L68 | null | #!/usr/bin/env python
"""Setup for eztemplate."""
import ast
import errno
import os
import os.path
import re
import subprocess
from setuptools import setup, find_packages
def get_long_description():
"""Provide README.md converted to reStructuredText format."""
try:
with open('README.md', 'r') as f:
description = f.read()
except OSError as e:
if e.errno != errno.ENOENT:
raise
return None
try:
process = subprocess.Popen([
'pandoc',
'-f', 'markdown_github',
'-t', 'rst',
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
)
except OSError as e:
if e.errno == errno.ENOENT:
return None
raise
description, __ = process.communicate(input=description)
if process.poll() is None:
process.kill()
raise Exception("pandoc did not terminate")
if process.poll():
raise Exception("pandoc terminated abnormally")
return description
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__))
setup(
name='eztemplate',
version=get_version(),
author='Niels Boehm',
author_email='blubberdiblub@gmail.com',
description="Simple templating program to generate plain text"
" (like config files) from name-value pairs.",
long_description=get_long_description(),
license='MIT',
keywords=[
'templating',
'text',
],
url='https://github.com/blubberdiblub/eztemplate/',
install_requires=[
'argparse',
],
extras_require={
'empy': ['empy'],
'mako': ['mako'],
},
test_suite='tests',
packages=find_packages(exclude=[
'tests',
'tests.*',
'*.tests',
'*.tests.*',
]),
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'eztemplate = eztemplate.__main__:main_command',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: System :: Systems Administration',
'Topic :: Text Processing :: General',
'Topic :: Utilities',
],
)
|
blubberdiblub/eztemplate | setup.py | get_long_description | python | def get_long_description():
try:
with open('README.md', 'r') as f:
description = f.read()
except OSError as e:
if e.errno != errno.ENOENT:
raise
return None
try:
process = subprocess.Popen([
'pandoc',
'-f', 'markdown_github',
'-t', 'rst',
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
)
except OSError as e:
if e.errno == errno.ENOENT:
return None
raise
description, __ = process.communicate(input=description)
if process.poll() is None:
process.kill()
raise Exception("pandoc did not terminate")
if process.poll():
raise Exception("pandoc terminated abnormally")
return description | Provide README.md converted to reStructuredText format. | train | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/setup.py#L71-L103 | null | #!/usr/bin/env python
"""Setup for eztemplate."""
import ast
import errno
import os
import os.path
import re
import subprocess
from setuptools import setup, find_packages
def get_version():
"""Build version number from git repository tag."""
try:
f = open('eztemplate/version.py', 'r')
except IOError as e:
if e.errno != errno.ENOENT:
raise
m = None
else:
m = re.match('^\s*__version__\s*=\s*(?P<version>.*)$', f.read(), re.M)
f.close()
__version__ = ast.literal_eval(m.group('version')) if m else None
try:
git_version = subprocess.check_output(['git', 'describe', '--dirty'])
except:
if __version__ is None:
raise ValueError("cannot determine version number")
return __version__
m = re.match(r'^\s*'
r'(?P<version>\S+?)'
r'(-(?P<post>\d+)-(?P<commit>g[0-9a-f]+))?'
r'(-(?P<dirty>dirty))?'
r'\s*$', git_version.decode())
if not m:
raise ValueError("cannot parse git describe output")
git_version = m.group('version')
post = m.group('post')
commit = m.group('commit')
dirty = m.group('dirty')
local = []
if post:
post = int(post)
if post:
git_version += '.post%d' % (post,)
if commit:
local.append(commit)
if dirty:
local.append(dirty)
if local:
git_version += '+' + '.'.join(local)
if git_version != __version__:
with open('eztemplate/version.py', 'w') as f:
f.write("__version__ = %r\n" % (str(git_version),))
return git_version
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__))
setup(
name='eztemplate',
version=get_version(),
author='Niels Boehm',
author_email='blubberdiblub@gmail.com',
description="Simple templating program to generate plain text"
" (like config files) from name-value pairs.",
long_description=get_long_description(),
license='MIT',
keywords=[
'templating',
'text',
],
url='https://github.com/blubberdiblub/eztemplate/',
install_requires=[
'argparse',
],
extras_require={
'empy': ['empy'],
'mako': ['mako'],
},
test_suite='tests',
packages=find_packages(exclude=[
'tests',
'tests.*',
'*.tests',
'*.tests.*',
]),
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'eztemplate = eztemplate.__main__:main_command',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: System :: Systems Administration',
'Topic :: Text Processing :: General',
'Topic :: Utilities',
],
)
|
zeaphoo/budoc | budoc/pydoc.py | import_module | python | def import_module(module_name):
if import_path != sys.path:
# Such a kludge. Only restrict imports if the `import_path` has
# been changed. We don't want to always restrict imports, since
# providing a path to `imp.find_module` stops it from searching
# in special locations for built ins or frozen modules.
#
# The problem here is that this relies on the `sys.path` not being
# independently changed since the initialization of this module.
# If it is changed, then some packages may fail.
#
# Any other options available?
# Raises an exception if the parent module cannot be imported.
# This hopefully ensures that we only explicitly import modules
# contained in `pydoc.import_path`.
imp.find_module(module_name.split('.')[0], import_path)
if module_name in sys.modules:
return sys.modules[module_name]
else:
__import__(module_name)
return sys.modules[module_name] | Imports a module. A single point of truth for importing modules to
be documented by `pydoc`. In particular, it makes sure that the top
module in `module_name` can be imported by using only the paths in
`pydoc.import_path`.
If a module has already been imported, then its corresponding entry
in `sys.modules` is returned. This means that modules that have
changed on disk cannot be re-imported in the same process and have
its documentation updated. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L26-L59 | null |
from __future__ import absolute_import, division, print_function
import ast
import imp
import inspect
import os
import os.path as path
import pkgutil
import re
import sys
import types
import_path = sys.path[:]
def _is_exported(ident_name):
"""
Returns `True` if `ident_name` matches the export criteria for an
identifier name.
This should not be used by clients. Instead, use
`pydoc.Module.is_public`.
"""
return not ident_name.startswith('_')
def _source(obj):
"""
Returns the source code of the Python object `obj` as a list of
lines. This tries to extract the source from the special
`__wrapped__` attribute if it exists. Otherwise, it falls back
to `inspect.getsourcelines`.
If neither works, then the empty list is returned.
"""
try:
return inspect.getsourcelines(obj.__wrapped__)[0]
except:
pass
try:
return inspect.getsourcelines(obj)[0]
except:
return []
def _safe_import(module_name):
    """
    Import `module_name`, suppressing all errors and silencing anything
    the module writes to `stdout` or `stderr` while it loads.  Returns
    the module object, or `None` if the import failed for any reason.

    The obligation is on the caller to close `stdin` beforehand so that
    impolite modules cannot block on it when imported.
    """
    class _DevNull(object):
        def write(self, *_):
            pass

    saved_streams = (sys.stdout, sys.stderr)
    sys.stdout, sys.stderr = _DevNull(), _DevNull()
    try:
        imported = import_module(module_name)
    except:
        imported = None
    sys.stdout, sys.stderr = saved_streams
    return imported
def _var_docstrings(tree, module, cls=None, init=False):
    """
    Extracts variable docstrings given `tree` as the abstract syntax,
    `module` as a `pydoc.Module` containing `tree` and an option `cls`
    as a `pydoc.Class` corresponding to the tree. In particular, `cls`
    should be specified when extracting docstrings from a class or an
    `__init__` method. Finally, `init` should be `True` when searching
    the AST of an `__init__` method so that `_var_docstrings` will only
    accept variables starting with `self.` as instance variables.

    A dictionary mapping variable name to a `pydoc.Variable` object is
    returned.
    """
    vs = {}
    # Walk direct children only; a docstring is recognized as the string
    # expression statement immediately following an assignment.
    children = list(ast.iter_child_nodes(tree))
    for i, child in enumerate(children):
        # Only single-target assignments can carry an attribute docstring.
        if isinstance(child, ast.Assign) and len(child.targets) == 1:
            if not init and isinstance(child.targets[0], ast.Name):
                # Module/class-level assignment: `name = ...`
                name = child.targets[0].id
            elif (isinstance(child.targets[0], ast.Attribute)
                    and isinstance(child.targets[0].value, ast.Name)
                    and child.targets[0].value.id == 'self'):
                # Instance variable inside __init__: `self.name = ...`
                name = child.targets[0].attr
            else:
                continue
            # NOTE(review): `module` here is a pydoc.Module wrapper (see
            # callers), which does not define __all__, so this getattr
            # likely always yields [] — confirm whether the underlying
            # Python module's __all__ was intended.
            if not _is_exported(name) \
                    and name not in getattr(module, '__all__', []):
                continue
            docstring = ''
            # NOTE(review): ast.Str is deprecated since Python 3.8
            # (ast.Constant is the replacement) — confirm target versions.
            if (i+1 < len(children)
                    and isinstance(children[i+1], ast.Expr)
                    and isinstance(children[i+1].value, ast.Str)):
                docstring = children[i+1].value.s
            vs[name] = Variable(name, module, docstring, cls=cls)
    return vs
class Doc (object):
    """
    A base class for all documentation objects.

    A documentation object corresponds to *something* in a Python module
    that has a docstring associated with it. Typically, this only includes
    modules, classes, functions and methods. However, `pydoc` adds support
    for extracting docstrings from the abstract syntax tree, which means
    that variables (module, class or instance) are supported too.

    A special type of documentation object `pydoc.External` is used to
    represent identifiers that are not part of the public interface of
    a module. (The name "External" is a bit of a misnomer, since it can
    also correspond to unexported members of the module, particularly in
    a class's ancestor list.)
    """
    def __init__(self, name, module, docstring):
        """
        Initializes a documentation object, where `name` is the public
        identifier name, `module` is a `budoc.pydoc.Module` object, and
        `docstring` is a string containing the docstring for `name`.
        """
        # The bare string literals after each assignment below are
        # attribute docstrings — they are harvested by _var_docstrings
        # when this tool documents itself, so they are kept verbatim.
        self.module = module
        """
        The module documentation object that this object was defined
        in.
        """

        self.name = name
        """
        The identifier name for this object.
        """

        self.docstring = inspect.cleandoc(docstring or '')
        """
        The docstring for this object. It has already been cleaned
        by `inspect.cleandoc`.
        """

    @property
    def source(self):
        """
        Returns the source code of the Python object `obj` as a list of
        lines. This tries to extract the source from the special
        `__wrapped__` attribute if it exists. Otherwise, it falls back
        to `inspect.getsourcelines`.

        If neither works, then the empty list is returned.
        """
        # NOTE(review): `assert` is stripped when Python runs with -O,
        # turning this abstract member into a silent no-op returning None;
        # raising NotImplementedError would be safer.
        assert False, 'subclass responsibility'

    @property
    def refname(self):
        """
        Returns an appropriate reference name for this documentation
        object. Usually this is its fully qualified path. Every
        documentation object must provide this property.
        """
        assert False, 'subclass responsibility'

    def __lt__(self, other):
        """Order documentation objects alphabetically by name."""
        return self.name < other.name

    def is_empty(self):
        """
        Returns true if the docstring for this object is empty.
        """
        return len(self.docstring.strip()) == 0
class External (Doc):
    """
    A representation of an external identifier. The textual
    representation is the same as an internal identifier, but without
    any context. (Usually this makes linking more difficult.)

    External identifiers are also used to represent something that is
    not exported but appears somewhere in the public interface (like
    the ancestor list of a class).
    """

    # NOTE(review): these class-level __budoc__ entries look like docstring
    # overrides for this class's own attributes, but the override machinery
    # visible in this file only reads a *module*-level __budoc__ — confirm
    # they are actually consumed.
    __budoc__ = {}
    __budoc__['External.docstring'] = \
        """
        An empty string. External identifiers do not have
        docstrings.
        """
    __budoc__['External.module'] = \
        """
        Always `None`. External identifiers have no associated
        `pydoc.Module`.
        """
    __budoc__['External.name'] = \
        """
        Always equivalent to `pydoc.External.refname` since external
        identifiers are always expressed in their fully qualified
        form.
        """
    def __init__(self, name):
        """
        Initializes an external identifier with `name`, where `name`
        should be a fully qualified name.
        """
        super(External, self).__init__(name, None, '')

    @property
    def source(self):
        # External objects never have retrievable source.
        return []

    @property
    def refname(self):
        # The name is already fully qualified.
        return self.name
class Module (Doc):
    """
    Representation of a module's documentation.
    """

    __budoc__ = {}
    __budoc__['Module.module'] = 'The Python module object.'
    __budoc__['Module.name'] = \
        """
        The name of this module with respect to the context in which
        it was imported. It is always an absolute import path.
        """

    def __init__(self, module, docfilter=None, allsubmodules=False):
        """
        Creates a `Module` documentation object given the actual
        module Python object.

        `docfilter` is an optional predicate that controls which
        documentation objects are returned in the following
        methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
        `pydoc.Module.variables` and `pydoc.Module.submodules`. The
        filter is propagated to the analogous methods on a `pydoc.Class`
        object.

        `allsubmodules`, when `True`, documents unexported submodules of
        a package as well. It defaults to `False`, matching the previous
        behaviour of documenting exported submodules only.
        """
        name = getattr(module, '__budoc_module_name', module.__name__)
        super(Module, self).__init__(name, module, inspect.getdoc(module))

        self._filtering = docfilter is not None
        self._docfilter = (lambda _: True) if docfilter is None else docfilter
        # BUG FIX: this used to be hard-coded to False while
        # __new_submodule passed an `allsubmodules` keyword argument that
        # __init__ did not accept, raising TypeError for any package with
        # documented submodules.
        self._allsubmodules = allsubmodules

        self.doc = {}
        """A mapping from identifier name to a documentation object."""

        self.refdoc = {}
        """
        The same as `pydoc.Module.doc`, but maps fully qualified
        identifier names to documentation objects.
        """

        vardocs = {}
        try:
            tree = ast.parse(inspect.getsource(self.module))
            vardocs = _var_docstrings(tree, self, cls=None)
        except:
            # Best effort: not all modules have retrievable source.
            pass
        self._declared_variables = vardocs.keys()

        public = self.__public_objs()
        for name, obj in public.items():
            # Skip any identifiers that already have doco.
            if name in self.doc and not self.doc[name].is_empty():
                continue

            # Functions and some weird builtins?, plus methods, classes,
            # modules and module level variables.
            if inspect.isfunction(obj) or inspect.isbuiltin(obj):
                self.doc[name] = Function(name, self, obj)
            elif inspect.ismethod(obj):
                self.doc[name] = Function(name, self, obj)
            elif inspect.isclass(obj):
                self.doc[name] = Class(name, self, obj)
            elif inspect.ismodule(obj):
                # Only document modules that are submodules or are
                # forcefully exported by __all__.
                if obj is not self.module and \
                        (self.__is_exported(name, obj)
                         or self.is_submodule(obj.__name__)):
                    self.doc[name] = self.__new_submodule(name, obj)
            elif name in vardocs:
                self.doc[name] = vardocs[name]
            else:
                # Catch all for variables.
                self.doc[name] = Variable(name, self, '', cls=None)

        # Now scan the directory if this is a package for all modules.
        if not hasattr(self.module, '__path__') \
                and not hasattr(self.module, '__file__'):
            pkgdir = []
        else:
            pkgdir = getattr(self.module, '__path__',
                             [path.dirname(self.module.__file__)])
        if self.is_package():
            for (_, root, _) in pkgutil.iter_modules(pkgdir):
                # Ignore if this module was already doc'd.
                if root in self.doc:
                    continue

                # Ignore if it isn't exported, unless we've specifically
                # requested to document all submodules.
                if not self._allsubmodules \
                        and not self.__is_exported(root, self.module):
                    continue

                fullname = '%s.%s' % (self.name, root)
                m = _safe_import(fullname)
                if m is None:
                    continue

                self.doc[root] = self.__new_submodule(root, m)

        # Now see if we can grab inheritance relationships between
        # classes.
        for docobj in self.doc.values():
            if isinstance(docobj, Class):
                docobj._fill_inheritance()

        # Build the reference name dictionary.
        for basename, docobj in self.doc.items():
            self.refdoc[docobj.refname] = docobj
            if isinstance(docobj, Class):
                for v in docobj.class_variables():
                    self.refdoc[v.refname] = v
                for v in docobj.instance_variables():
                    self.refdoc[v.refname] = v
                for f in docobj.methods():
                    self.refdoc[f.refname] = f
                for f in docobj.functions():
                    self.refdoc[f.refname] = f

        # Finally look for more docstrings in the __budoc__ override.
        for name, docstring in getattr(self.module, '__budoc__', {}).items():
            refname = '%s.%s' % (self.refname, name)
            if docstring is None:
                # A None override explicitly removes the member from the
                # documentation.
                self.doc.pop(name, None)
                self.refdoc.pop(refname, None)
                continue
            dobj = self.find_ident(refname)
            if isinstance(dobj, External):
                continue
            dobj.docstring = inspect.cleandoc(docstring)

    def is_package(self):
        """
        Returns `True` if this module is a package.

        Works by checking if `__package__` is not `None` and whether it
        has the `__path__` attribute.
        """
        return hasattr(self.module, '__path__')

    @property
    def source(self):
        return _source(self.module)

    @property
    def refname(self):
        return self.name

    def mro(self, cls):
        """
        Returns a method resolution list of documentation objects
        for `cls`, which must be a documentation object.

        The list will contain objects belonging to `pydoc.Class` or
        `pydoc.External`. Objects belonging to the former are exported
        classes either in this module or in one of its sub-modules.
        """
        ups = inspect.getmro(cls.cls)
        return list(map(lambda c: self.find_class(c), ups))

    def descendents(self, cls):
        """
        Returns a descendent list of documentation objects for `cls`,
        which must be a documentation object.

        The list will contain objects belonging to `pydoc.Class` or
        `pydoc.External`. Objects belonging to the former are exported
        classes either in this module or in one of its sub-modules.
        """
        if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
            # Is this right?
            return []
        downs = cls.cls.__subclasses__()
        return list(map(lambda c: self.find_class(c), downs))

    def is_public(self, name):
        """
        Returns `True` if and only if an identifier with name `name` is
        part of the public interface of this module. While the names
        of sub-modules are included, identifiers only exported by
        sub-modules are not checked.

        `name` should be a fully qualified name, e.g.,
        <code>pydoc.Module.is_public</code>.
        """
        return name in self.refdoc

    def find_class(self, cls):
        """
        Given a Python `cls` object, try to find it in this module
        or in any of the exported identifiers of the submodules.
        """
        for doc_cls in self.classes():
            if cls is doc_cls.cls:
                return doc_cls
        for module in self.submodules():
            doc_cls = module.find_class(cls)
            if not isinstance(doc_cls, External):
                return doc_cls
        return External('%s.%s' % (cls.__module__, cls.__name__))

    def find_ident(self, name):
        """
        Searches this module and **all** of its sub-modules for an
        identifier with name `name` in its list of exported
        identifiers according to `pydoc`. Note that unexported
        sub-modules are searched.

        A bare identifier (without `.` separators) will only be checked
        for in this module.

        The documentation object corresponding to the identifier is
        returned. If one cannot be found, then an instance of
        `External` is returned populated with the given identifier.
        """
        if name in self.refdoc:
            return self.refdoc[name]
        for module in self.submodules():
            o = module.find_ident(name)
            if not isinstance(o, External):
                return o
        return External(name)

    def variables(self):
        """
        Returns all documented module level variables in the module
        sorted alphabetically as a list of `pydoc.Variable`.
        """
        p = lambda o: isinstance(o, Variable) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def classes(self):
        """
        Returns all documented module level classes in the module
        sorted alphabetically as a list of `pydoc.Class`.
        """
        p = lambda o: isinstance(o, Class) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def functions(self):
        """
        Returns all documented module level functions in the module
        sorted alphabetically as a list of `pydoc.Function`.
        """
        p = lambda o: isinstance(o, Function) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def submodules(self):
        """
        Returns all documented sub-modules in the module sorted
        alphabetically as a list of `pydoc.Module`.
        """
        p = lambda o: isinstance(o, Module) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def is_submodule(self, name):
        """
        Returns `True` if and only if `name` starts with the full
        import path of `self` and has length at least one greater than
        `len(self.name)`.
        """
        return self.name != name and name.startswith(self.name)

    def __is_exported(self, name, module):
        """
        Returns `True` if and only if `pydoc` considers `name` to be
        a public identifier for this module where `name` was defined
        in the Python module `module`.

        If this module has an `__all__` attribute, then `name` is
        considered to be exported if and only if it is a member of
        this module's `__all__` list.

        If `__all__` is not set, then whether `name` is exported or
        not is heuristically determined. Firstly, if `name` starts
        with an underscore, it will not be considered exported.
        Secondly, if `name` was defined in a module other than this
        one, it will not be considered exported. In all other cases,
        `name` will be considered exported.
        """
        if hasattr(self.module, '__all__'):
            return name in self.module.__all__
        if not _is_exported(name):
            return False
        if module is None:
            return False
        if module is not None and self.module.__name__ != module.__name__:
            return name in self._declared_variables
        return True

    def __public_objs(self):
        """
        Returns a dictionary mapping a public identifier name to a
        Python object.
        """
        members = dict(inspect.getmembers(self.module))
        return dict([(name, obj)
                     for name, obj in members.items()
                     if self.__is_exported(name, inspect.getmodule(obj))])

    def __new_submodule(self, name, obj):
        """
        Create a new submodule documentation object for this `obj`,
        which must by a Python module object and pass along any
        settings in this module.
        """
        # Forcefully set the module name so that it is always the absolute
        # import path. We can't rely on `obj.__name__`, since it doesn't
        # necessarily correspond to the public exported name of the module.
        obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
        return Module(obj,
                      docfilter=self._docfilter,
                      allsubmodules=self._allsubmodules)
class Class (Doc):
    """
    Representation of a class's documentation.
    """

    def __init__(self, name, module, class_obj):
        """
        Same as `pydoc.Doc.__init__`, except `class_obj` must be a
        Python class object. The docstring is gathered automatically.
        """
        super(Class, self).__init__(name, module, inspect.getdoc(class_obj))

        self.cls = class_obj
        """The class Python object."""

        self.doc = {}
        """A mapping from identifier name to a `pydoc.Doc` objects."""

        self.doc_init = {}
        """
        A special version of `pydoc.Class.doc` that contains
        documentation for instance variables found in the `__init__`
        method.
        """

        public = self.__public_objs()
        try:
            # First try and find docstrings for class variables.
            # Then move on to finding docstrings for instance variables.
            # This must be optional, since not all modules have source
            # code available.
            cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
            self.doc = _var_docstrings(cls_ast, self.module, cls=self)
            for n in (cls_ast.body if '__init__' in public else []):
                if isinstance(n, ast.FunctionDef) and n.name == '__init__':
                    self.doc_init = _var_docstrings(n, self.module,
                                                    cls=self, init=True)
                    break
        except:
            # Best effort: source may be unavailable.
            pass

        # Convert the public Python objects to documentation objects.
        for name, obj in public.items():
            # Skip any identifiers that already have doco.
            if name in self.doc and not self.doc[name].is_empty():
                continue
            if name in self.doc_init:
                # Let instance members override class members.
                continue

            # NOTE(review): on Python 3, plain methods of a class are
            # reported by inspect.isfunction, not inspect.ismethod, so most
            # methods take the second branch with method=False — confirm
            # whether the methods()/functions() classification below is
            # still intended.
            if inspect.ismethod(obj):
                self.doc[name] = Function(name, self.module, obj.__func__,
                                          cls=self, method=True)
            elif inspect.isfunction(obj):
                self.doc[name] = Function(name, self.module, obj,
                                          cls=self, method=False)
            elif isinstance(obj, property):
                docstring = getattr(obj, '__doc__', '')
                self.doc_init[name] = Variable(name, self.module, docstring,
                                               cls=self)
            elif not inspect.isbuiltin(obj) \
                    and not inspect.isroutine(obj):
                if name in getattr(self.cls, '__slots__', []):
                    self.doc_init[name] = Variable(name, self.module,
                                                   '', cls=self)
                else:
                    self.doc[name] = Variable(name, self.module, '', cls=self)

    @property
    def source(self):
        return _source(self.cls)

    @property
    def refname(self):
        return '%s.%s' % (self.module.refname, self.cls.__name__)

    def class_variables(self):
        """
        Returns the documented class variables of this class as an
        iterable of `pydoc.Variable`.
        """
        p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
        return filter(p, self.doc.values())

    def instance_variables(self):
        """
        Returns the instance variables of this class as an iterable of
        `pydoc.Variable`. Instance variables are attributes of `self`
        defined in a class's `__init__` method.
        """
        p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
        return filter(p, self.doc_init.values())

    def methods(self):
        """
        Returns the documented methods of this class as an iterable of
        `pydoc.Function`.

        Unfortunately, this also includes class methods.
        """
        p = lambda o: (isinstance(o, Function)
                       and o.method
                       and self.module._docfilter(o))
        return filter(p, self.doc.values())

    def functions(self):
        """
        Returns the documented static functions of this class as an
        iterable of `pydoc.Function`.
        """
        p = lambda o: (isinstance(o, Function)
                       and not o.method
                       and self.module._docfilter(o))
        return filter(p, self.doc.values())

    def init_method(self):
        """
        Returns the documented `__init__` method of this class as a
        `pydoc.Function`, or `None` if there is none.
        """
        p = lambda o: (isinstance(o, Function)
                       and o.method and o.name == '__init__'
                       and self.module._docfilter(o))
        # BUG FIX: `filter(...)` returns an unsubscriptable, always-truthy
        # iterator on Python 3; materialize it before indexing.
        fn = [o for o in self.doc.values() if p(o)]
        return fn[0] if fn else None

    def _fill_inheritance(self):
        """
        Traverses this class's ancestor list and attempts to fill in
        missing documentation from its ancestor's documentation.

        The first pass connects variables, methods and functions with
        their inherited counterparts. (The templates will decide how to
        display docstrings.) The second pass attempts to add instance
        variables to this class that were only explicitly declared in
        a parent class. This second pass is necessary since instance
        variables are only discoverable by traversing the abstract
        syntax tree.
        """
        # BUG FIX: this was a `filter(...)` object, which on Python 3 is a
        # one-shot iterator; it was consumed by the first `search` call and
        # appeared empty for every later pass. Materialize it as a list.
        mro = [c for c in self.module.mro(self)
               if c != self and isinstance(c, Class)]

        def search(d, fdoc):
            for c in mro:
                doc = fdoc(c)
                if d.name in doc and isinstance(d, type(doc[d.name])):
                    return doc[d.name]
            return None
        for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
            for d in fdoc(self).values():
                dinherit = search(d, fdoc)
                if dinherit is not None:
                    d.inherits = dinherit

        # Since instance variables aren't part of a class's members,
        # we need to manually deduce inheritance. Oh lawdy.
        for c in mro:
            for name in filter(lambda n: n not in self.doc_init, c.doc_init):
                d = c.doc_init[name]
                self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
                self.doc_init[name].inherits = d

    def __public_objs(self):
        """
        Returns a dictionary mapping a public identifier name to a
        Python object. This counts the `__init__` method as being
        public.
        """
        _budoc = getattr(self.module.module, '__budoc__', {})

        def forced_out(name):
            # A `None` override in __budoc__ hides the member entirely.
            return _budoc.get('%s.%s' % (self.name, name), False) is None

        def exported(name):
            exported = name == '__init__' or _is_exported(name)
            return not forced_out(name) and exported

        idents = dict(inspect.getmembers(self.cls))
        return dict([(n, o) for n, o in idents.items() if exported(n)])
class Function (Doc):
    """
    Representation of documentation for a Python function or method.
    """

    def __init__(self, name, module, func_obj, cls=None, method=False):
        """
        Same as `pydoc.Doc.__init__`, except `func_obj` must be a
        Python function object. The docstring is gathered automatically.

        `cls` should be set when this is a method or a static function
        belonging to a class. `cls` should be a `pydoc.Class` object.

        `method` should be `True` when the function is a method. In
        all other cases, it should be `False`.
        """
        super(Function, self).__init__(name, module, inspect.getdoc(func_obj))

        self.func = func_obj
        """The Python function object."""

        self.cls = cls
        """
        The `pydoc.Class` documentation object if this is a method. If
        not, this is None.
        """

        self.method = method
        """
        Whether this function is a method or not.

        In particular, static class methods have this set to False.
        """

    @property
    def source(self):
        return _source(self.func)

    @property
    def refname(self):
        if self.cls is None:
            return '%s.%s' % (self.module.refname, self.name)
        else:
            return '%s.%s' % (self.cls.refname, self.name)

    def spec(self):
        """
        Returns a nicely formatted spec of the function's parameter
        list as a string. This includes argument lists, keyword
        arguments and default values.
        """
        return ', '.join(self.params())

    def params(self):
        """
        Returns a list where each element is a nicely formatted
        parameter of this function. This includes argument lists,
        keyword arguments and default values.
        """
        def fmt_param(el):
            # BUG FIX: the old check also referenced `unicode`, which does
            # not exist on Python 3 and raised NameError for tuple
            # parameters.
            if isinstance(el, str):
                return el
            else:
                return '(%s)' % (', '.join(map(fmt_param, el)))
        try:
            getspec = getattr(inspect, 'getfullargspec', inspect.getargspec)
            s = getspec(self.func)
        except TypeError:
            # I guess this is for C builtin functions?
            return ['...']
        params = []
        for i, param in enumerate(s.args):
            if param.lower() == 'self':
                continue

            if s.defaults is not None and len(s.args) - i <= len(s.defaults):
                defind = len(s.defaults) - (len(s.args) - i)
                default_value = s.defaults[defind]
                value = repr(default_value).strip()
                if value[0] == '<' and value[-1] == '>':
                    # BUG FIX: `types.TypeType` / `types.ObjectType` are
                    # Python 2 only and raised AttributeError here on
                    # Python 3.
                    if inspect.isclass(default_value):
                        value = default_value.__name__
                    else:
                        # Instances without a literal repr are shown as a
                        # zero-argument constructor call.
                        value = '%s()' % (default_value.__class__.__name__,)
                params.append('%s=%s' % (param, value))
            else:
                params.append(fmt_param(param))
        if s.varargs is not None:
            params.append('*%s' % s.varargs)

        # `getfullargspec` calls it `varkw`; the legacy `getargspec`
        # used `keywords`.
        keywords = getattr(s, 'varkw', getattr(s, 'keywords', None))
        if keywords is not None:
            params.append('**%s' % keywords)
        return params

    def __lt__(self, other):
        # Push __init__ to the top of any sorted listing.
        if '__init__' in (self.name, other.name):
            return self.name != other.name and self.name == '__init__'
        else:
            return self.name < other.name
class Variable (Doc):
    """
    Representation of a variable's documentation. This includes
    module, class and instance variables.
    """

    def __init__(self, name, module, docstring, cls=None):
        """
        Same as `pydoc.Doc.__init__`, except `cls` should be provided
        as a `pydoc.Class` object when this is a class or instance
        variable.
        """
        super(Variable, self).__init__(name, module, docstring)

        self.cls = cls
        """
        The `pydoc.Class` object if this is a class or instance
        variable. If not, this is None.
        """

    @property
    def source(self):
        # Variables never carry retrievable source lines.
        return []

    @property
    def refname(self):
        owner = self.module if self.cls is None else self.cls
        return '%s.%s' % (owner.refname, self.name)
|
zeaphoo/budoc | budoc/pydoc.py | _safe_import | python | def _safe_import(module_name):
class _Null (object):
def write(self, *_):
pass
sout, serr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = _Null(), _Null()
try:
m = import_module(module_name)
except:
m = None
sys.stdout, sys.stderr = sout, serr
return m | A function for safely importing `module_name`, where errors are
suppressed and `stdout` and `stderr` are redirected to a null
device. The obligation is on the caller to close `stdin` in order
to avoid impolite modules from blocking on `stdin` when imported. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L80-L98 | null |
from __future__ import absolute_import, division, print_function
import ast
import imp
import inspect
import os
import os.path as path
import pkgutil
import re
import sys
import types
import_path = sys.path[:]
def _is_exported(ident_name):
"""
Returns `True` if `ident_name` matches the export criteria for an
identifier name.
This should not be used by clients. Instead, use
`pydoc.Module.is_public`.
"""
return not ident_name.startswith('_')
def import_module(module_name):
    """
    Imports a module. A single point of truth for importing modules to
    be documented by `pydoc`. In particular, it makes sure that the top
    module in `module_name` can be imported by using only the paths in
    `pydoc.import_path`.

    If a module has already been imported, then its corresponding entry
    in `sys.modules` is returned. This means that modules that have
    changed on disk cannot be re-imported in the same process and have
    its documentation updated.
    """
    # NOTE(review): the `imp` module is deprecated since Python 3.4;
    # `importlib.util.find_spec` is the modern equivalent — confirm
    # before migrating, since the path-restriction semantics differ.
    if import_path != sys.path:
        # Such a kludge. Only restrict imports if the `import_path` has
        # been changed. We don't want to always restrict imports, since
        # providing a path to `imp.find_module` stops it from searching
        # in special locations for built ins or frozen modules.
        #
        # The problem here is that this relies on the `sys.path` not being
        # independently changed since the initialization of this module.
        # If it is changed, then some packages may fail.
        #
        # Any other options available?

        # Raises an exception if the parent module cannot be imported.
        # This hopefully ensures that we only explicitly import modules
        # contained in `pydoc.import_path`.
        imp.find_module(module_name.split('.')[0], import_path)

    if module_name in sys.modules:
        # Reuse the cached module rather than re-importing.
        return sys.modules[module_name]
    else:
        # `__import__` returns the top-level package, so fetch the actual
        # submodule object from `sys.modules` afterwards.
        __import__(module_name)
        return sys.modules[module_name]
def _source(obj):
"""
Returns the source code of the Python object `obj` as a list of
lines. This tries to extract the source from the special
`__wrapped__` attribute if it exists. Otherwise, it falls back
to `inspect.getsourcelines`.
If neither works, then the empty list is returned.
"""
try:
return inspect.getsourcelines(obj.__wrapped__)[0]
except:
pass
try:
return inspect.getsourcelines(obj)[0]
except:
return []
def _var_docstrings(tree, module, cls=None, init=False):
    """
    Extracts variable docstrings given `tree` as the abstract syntax,
    `module` as a `pydoc.Module` containing `tree` and an option `cls`
    as a `pydoc.Class` corresponding to the tree. In particular, `cls`
    should be specified when extracting docstrings from a class or an
    `__init__` method. Finally, `init` should be `True` when searching
    the AST of an `__init__` method so that `_var_docstrings` will only
    accept variables starting with `self.` as instance variables.

    A dictionary mapping variable name to a `pydoc.Variable` object is
    returned.
    """
    vs = {}
    # Walk direct children only; a docstring is recognized as the string
    # expression statement immediately following an assignment.
    children = list(ast.iter_child_nodes(tree))
    for i, child in enumerate(children):
        # Only single-target assignments can carry an attribute docstring.
        if isinstance(child, ast.Assign) and len(child.targets) == 1:
            if not init and isinstance(child.targets[0], ast.Name):
                # Module/class-level assignment: `name = ...`
                name = child.targets[0].id
            elif (isinstance(child.targets[0], ast.Attribute)
                    and isinstance(child.targets[0].value, ast.Name)
                    and child.targets[0].value.id == 'self'):
                # Instance variable inside __init__: `self.name = ...`
                name = child.targets[0].attr
            else:
                continue
            # NOTE(review): `module` here is a pydoc.Module wrapper (see
            # callers), which does not define __all__, so this getattr
            # likely always yields [] — confirm whether the underlying
            # Python module's __all__ was intended.
            if not _is_exported(name) \
                    and name not in getattr(module, '__all__', []):
                continue
            docstring = ''
            # NOTE(review): ast.Str is deprecated since Python 3.8
            # (ast.Constant is the replacement) — confirm target versions.
            if (i+1 < len(children)
                    and isinstance(children[i+1], ast.Expr)
                    and isinstance(children[i+1].value, ast.Str)):
                docstring = children[i+1].value.s
            vs[name] = Variable(name, module, docstring, cls=cls)
    return vs
class Doc (object):
    """
    A base class for all documentation objects.

    A documentation object corresponds to *something* in a Python module
    that has a docstring associated with it. Typically, this only includes
    modules, classes, functions and methods. However, `pydoc` adds support
    for extracting docstrings from the abstract syntax tree, which means
    that variables (module, class or instance) are supported too.

    A special type of documentation object `pydoc.External` is used to
    represent identifiers that are not part of the public interface of
    a module. (The name "External" is a bit of a misnomer, since it can
    also correspond to unexported members of the module, particularly in
    a class's ancestor list.)
    """
    def __init__(self, name, module, docstring):
        """
        Initializes a documentation object, where `name` is the public
        identifier name, `module` is a `budoc.pydoc.Module` object, and
        `docstring` is a string containing the docstring for `name`.
        """
        # The bare string literals after each assignment below are
        # attribute docstrings — they are harvested by _var_docstrings
        # when this tool documents itself, so they are kept verbatim.
        self.module = module
        """
        The module documentation object that this object was defined
        in.
        """

        self.name = name
        """
        The identifier name for this object.
        """

        self.docstring = inspect.cleandoc(docstring or '')
        """
        The docstring for this object. It has already been cleaned
        by `inspect.cleandoc`.
        """

    @property
    def source(self):
        """
        Returns the source code of the Python object `obj` as a list of
        lines. This tries to extract the source from the special
        `__wrapped__` attribute if it exists. Otherwise, it falls back
        to `inspect.getsourcelines`.

        If neither works, then the empty list is returned.
        """
        # NOTE(review): `assert` is stripped when Python runs with -O,
        # turning this abstract member into a silent no-op returning None;
        # raising NotImplementedError would be safer.
        assert False, 'subclass responsibility'

    @property
    def refname(self):
        """
        Returns an appropriate reference name for this documentation
        object. Usually this is its fully qualified path. Every
        documentation object must provide this property.
        """
        assert False, 'subclass responsibility'

    def __lt__(self, other):
        """Order documentation objects alphabetically by name."""
        return self.name < other.name

    def is_empty(self):
        """
        Returns true if the docstring for this object is empty.
        """
        return len(self.docstring.strip()) == 0
class External (Doc):
    """
    A representation of an external identifier. The textual
    representation is the same as an internal identifier, but without
    any context. (Usually this makes linking more difficult.)

    External identifiers are also used to represent something that is
    not exported but appears somewhere in the public interface (like
    the ancestor list of a class).
    """

    # NOTE(review): these class-level __budoc__ entries look like docstring
    # overrides for this class's own attributes, but the override machinery
    # visible in this file only reads a *module*-level __budoc__ — confirm
    # they are actually consumed.
    __budoc__ = {}
    __budoc__['External.docstring'] = \
        """
        An empty string. External identifiers do not have
        docstrings.
        """
    __budoc__['External.module'] = \
        """
        Always `None`. External identifiers have no associated
        `pydoc.Module`.
        """
    __budoc__['External.name'] = \
        """
        Always equivalent to `pydoc.External.refname` since external
        identifiers are always expressed in their fully qualified
        form.
        """
    def __init__(self, name):
        """
        Initializes an external identifier with `name`, where `name`
        should be a fully qualified name.
        """
        super(External, self).__init__(name, None, '')

    @property
    def source(self):
        # External objects never have retrievable source.
        return []

    @property
    def refname(self):
        # The name is already fully qualified.
        return self.name
class Module (Doc):
    """
    Representation of a module's documentation.
    """

    __budoc__ = {}
    __budoc__['Module.module'] = 'The Python module object.'
    __budoc__['Module.name'] = \
        """
        The name of this module with respect to the context in which
        it was imported. It is always an absolute import path.
        """

    def __init__(self, module, docfilter=None, allsubmodules=False):
        """
        Creates a `Module` documentation object given the actual
        module Python object.

        `docfilter` is an optional predicate that controls which
        documentation objects are returned in the following
        methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
        `pydoc.Module.variables` and `pydoc.Module.submodules`. The
        filter is propagated to the analogous methods on a `pydoc.Class`
        object.

        When `allsubmodules` is `True`, every submodule discovered in a
        package is documented, even unexported ones. (BUG FIX: this
        parameter was previously missing from the signature although
        `__new_submodule` passes `allsubmodules=...` to `Module(...)`,
        so documenting any package raised a TypeError.)
        """
        name = getattr(module, '__budoc_module_name', module.__name__)
        super(Module, self).__init__(name, module, inspect.getdoc(module))

        self._filtering = docfilter is not None
        self._docfilter = (lambda _: True) if docfilter is None else docfilter
        self._allsubmodules = allsubmodules

        self.doc = {}
        """A mapping from identifier name to a documentation object."""

        self.refdoc = {}
        """
        The same as `pydoc.Module.doc`, but maps fully qualified
        identifier names to documentation objects.
        """

        # Module-level variable docstrings can only be recovered from the
        # AST; source may be unavailable (e.g. C extensions), so this is
        # best-effort.
        vardocs = {}
        try:
            tree = ast.parse(inspect.getsource(self.module))
            vardocs = _var_docstrings(tree, self, cls=None)
        except:
            pass
        self._declared_variables = vardocs.keys()

        public = self.__public_objs()
        for name, obj in public.items():
            # Skip any identifiers that already have doco.
            if name in self.doc and not self.doc[name].is_empty():
                continue

            # Functions and some weird builtins?, plus methods, classes,
            # modules and module level variables.
            if inspect.isfunction(obj) or inspect.isbuiltin(obj):
                self.doc[name] = Function(name, self, obj)
            elif inspect.ismethod(obj):
                self.doc[name] = Function(name, self, obj)
            elif inspect.isclass(obj):
                self.doc[name] = Class(name, self, obj)
            elif inspect.ismodule(obj):
                # Only document modules that are submodules or are
                # forcefully exported by __all__.
                if obj is not self.module and \
                        (self.__is_exported(name, obj)
                         or self.is_submodule(obj.__name__)):
                    self.doc[name] = self.__new_submodule(name, obj)
            elif name in vardocs:
                self.doc[name] = vardocs[name]
            else:
                # Catch all for variables.
                self.doc[name] = Variable(name, self, '', cls=None)

        # Now scan the directory if this is a package for all modules.
        if not hasattr(self.module, '__path__') \
                and not hasattr(self.module, '__file__'):
            pkgdir = []
        else:
            pkgdir = getattr(self.module, '__path__',
                             [path.dirname(self.module.__file__)])
        if self.is_package():
            for (_, root, _) in pkgutil.iter_modules(pkgdir):
                # Ignore if this module was already doc'd.
                if root in self.doc:
                    continue

                # Ignore if it isn't exported, unless we've specifically
                # requested to document all submodules.
                if not self._allsubmodules \
                        and not self.__is_exported(root, self.module):
                    continue

                fullname = '%s.%s' % (self.name, root)
                m = _safe_import(fullname)
                if m is None:
                    continue

                self.doc[root] = self.__new_submodule(root, m)

        # Now see if we can grab inheritance relationships between classes.
        for docobj in self.doc.values():
            if isinstance(docobj, Class):
                docobj._fill_inheritance()

        # Build the reference name dictionary.
        for basename, docobj in self.doc.items():
            self.refdoc[docobj.refname] = docobj
            if isinstance(docobj, Class):
                for v in docobj.class_variables():
                    self.refdoc[v.refname] = v
                for v in docobj.instance_variables():
                    self.refdoc[v.refname] = v
                for f in docobj.methods():
                    self.refdoc[f.refname] = f
                for f in docobj.functions():
                    self.refdoc[f.refname] = f

        # Finally look for more docstrings in the __budoc__ override.
        for name, docstring in getattr(self.module, '__budoc__', {}).items():
            refname = '%s.%s' % (self.refname, name)
            if docstring is None:
                # Mapping a name to None removes it from the documentation.
                self.doc.pop(name, None)
                self.refdoc.pop(refname, None)
                continue
            dobj = self.find_ident(refname)
            if isinstance(dobj, External):
                continue
            dobj.docstring = inspect.cleandoc(docstring)

    def is_package(self):
        """
        Returns `True` if this module is a package.

        Works by checking whether the module has a `__path__`
        attribute.
        """
        return hasattr(self.module, '__path__')

    @property
    def source(self):
        """Source lines of the module, or `[]` when unavailable."""
        return _source(self.module)

    @property
    def refname(self):
        """A module's reference name is its absolute import path."""
        return self.name

    def mro(self, cls):
        """
        Returns a method resolution list of documentation objects
        for `cls`, which must be a documentation object.

        The list will contain objects belonging to `pydoc.Class` or
        `pydoc.External`. Objects belonging to the former are exported
        classes either in this module or in one of its sub-modules.
        """
        ups = inspect.getmro(cls.cls)
        return list(map(lambda c: self.find_class(c), ups))

    def descendents(self, cls):
        """
        Returns a descendent list of documentation objects for `cls`,
        which must be a documentation object.

        The list will contain objects belonging to `pydoc.Class` or
        `pydoc.External`. Objects belonging to the former are exported
        classes either in this module or in one of its sub-modules.
        """
        if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
            # `type` itself (and odd builtins) cannot enumerate subclasses.
            return []
        downs = cls.cls.__subclasses__()
        return list(map(lambda c: self.find_class(c), downs))

    def is_public(self, name):
        """
        Returns `True` if and only if an identifier with name `name` is
        part of the public interface of this module. While the names
        of sub-modules are included, identifiers only exported by
        sub-modules are not checked.

        `name` should be a fully qualified name, e.g.,
        <code>pydoc.Module.is_public</code>.
        """
        return name in self.refdoc

    def find_class(self, cls):
        """
        Given a Python `cls` object, try to find it in this module
        or in any of the exported identifiers of the submodules.
        """
        for doc_cls in self.classes():
            if cls is doc_cls.cls:
                return doc_cls
        for module in self.submodules():
            doc_cls = module.find_class(cls)
            if not isinstance(doc_cls, External):
                return doc_cls
        return External('%s.%s' % (cls.__module__, cls.__name__))

    def find_ident(self, name):
        """
        Searches this module and **all** of its sub-modules for an
        identifier with name `name` in its list of exported
        identifiers according to `pydoc`. Note that unexported
        sub-modules are searched.

        A bare identifier (without `.` separators) will only be checked
        for in this module.

        The documentation object corresponding to the identifier is
        returned. If one cannot be found, then an instance of
        `External` is returned populated with the given identifier.
        """
        if name in self.refdoc:
            return self.refdoc[name]
        for module in self.submodules():
            o = module.find_ident(name)
            if not isinstance(o, External):
                return o
        return External(name)

    def variables(self):
        """
        Returns all documented module level variables in the module
        sorted alphabetically as a list of `pydoc.Variable`.
        """
        p = lambda o: isinstance(o, Variable) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def classes(self):
        """
        Returns all documented module level classes in the module
        sorted alphabetically as a list of `pydoc.Class`.
        """
        p = lambda o: isinstance(o, Class) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def functions(self):
        """
        Returns all documented module level functions in the module
        sorted alphabetically as a list of `pydoc.Function`.
        """
        p = lambda o: isinstance(o, Function) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def submodules(self):
        """
        Returns all documented sub-modules in the module sorted
        alphabetically as a list of `pydoc.Module`.
        """
        p = lambda o: isinstance(o, Module) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def is_submodule(self, name):
        """
        Returns `True` if and only if `name` starts with the full
        import path of `self` and has length at least one greater than
        `len(self.name)`.
        """
        return self.name != name and name.startswith(self.name)

    def __is_exported(self, name, module):
        """
        Returns `True` if and only if `pydoc` considers `name` to be
        a public identifier for this module where `name` was defined
        in the Python module `module`.

        If this module has an `__all__` attribute, then `name` is
        considered to be exported if and only if it is a member of
        this module's `__all__` list.

        If `__all__` is not set, then whether `name` is exported or
        not is heuristically determined. Firstly, if `name` starts
        with an underscore, it will not be considered exported.
        Secondly, if `name` was defined in a module other than this
        one, it will not be considered exported. In all other cases,
        `name` will be considered exported.
        """
        if hasattr(self.module, '__all__'):
            return name in self.module.__all__
        if not _is_exported(name):
            return False
        if module is None:
            return False
        if module is not None and self.module.__name__ != module.__name__:
            # Defined elsewhere; only export it if it was (re)declared
            # as a variable at this module's top level.
            return name in self._declared_variables
        return True

    def __public_objs(self):
        """
        Returns a dictionary mapping a public identifier name to a
        Python object.
        """
        members = dict(inspect.getmembers(self.module))
        return dict([(name, obj)
                     for name, obj in members.items()
                     if self.__is_exported(name, inspect.getmodule(obj))])

    def __new_submodule(self, name, obj):
        """
        Create a new submodule documentation object for this `obj`,
        which must by a Python module object and pass along any
        settings in this module.
        """
        # Forcefully set the module name so that it is always the absolute
        # import path. We can't rely on `obj.__name__`, since it doesn't
        # necessarily correspond to the public exported name of the module.
        obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
        return Module(obj,
                      docfilter=self._docfilter,
                      allsubmodules=self._allsubmodules)
class Class (Doc):
    """
    Representation of a class's documentation.
    """

    def __init__(self, name, module, class_obj):
        """
        Same as `pydoc.Doc.__init__`, except `class_obj` must be a
        Python class object. The docstring is gathered automatically.
        """
        super(Class, self).__init__(name, module, inspect.getdoc(class_obj))

        self.cls = class_obj
        """The class Python object."""

        self.doc = {}
        """A mapping from identifier name to a `pydoc.Doc` objects."""

        self.doc_init = {}
        """
        A special version of `pydoc.Class.doc` that contains
        documentation for instance variables found in the `__init__`
        method.
        """

        public = self.__public_objs()
        try:
            # First try and find docstrings for class variables.
            # Then move on to finding docstrings for instance variables.
            # This must be optional, since not all modules have source
            # code available.
            cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
            self.doc = _var_docstrings(cls_ast, self.module, cls=self)

            for n in (cls_ast.body if '__init__' in public else []):
                if isinstance(n, ast.FunctionDef) and n.name == '__init__':
                    self.doc_init = _var_docstrings(n, self.module,
                                                    cls=self, init=True)
                    break
        except:
            pass

        # Convert the public Python objects to documentation objects.
        for name, obj in public.items():
            # Skip any identifiers that already have doco.
            if name in self.doc and not self.doc[name].is_empty():
                continue
            if name in self.doc_init:
                # Let instance members override class members.
                continue

            if inspect.ismethod(obj):
                self.doc[name] = Function(name, self.module, obj.__func__,
                                          cls=self, method=True)
            elif inspect.isfunction(obj):
                self.doc[name] = Function(name, self.module, obj,
                                          cls=self, method=False)
            elif isinstance(obj, property):
                docstring = getattr(obj, '__doc__', '')
                self.doc_init[name] = Variable(name, self.module, docstring,
                                               cls=self)
            elif not inspect.isbuiltin(obj) \
                    and not inspect.isroutine(obj):
                if name in getattr(self.cls, '__slots__', []):
                    self.doc_init[name] = Variable(name, self.module,
                                                   '', cls=self)
                else:
                    self.doc[name] = Variable(name, self.module, '', cls=self)

    @property
    def source(self):
        """Source lines of the class, or `[]` when unavailable."""
        return _source(self.cls)

    @property
    def refname(self):
        """Fully qualified name: `<module refname>.<class name>`."""
        return '%s.%s' % (self.module.refname, self.cls.__name__)

    def class_variables(self):
        """
        Returns the documented class variables of this class, filtered
        through the module's doc filter, as `pydoc.Variable` objects.
        """
        p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
        return filter(p, self.doc.values())

    def instance_variables(self):
        """
        Returns the instance variables of this class as
        `pydoc.Variable` objects. Instance variables are attributes of
        `self` defined in a class's `__init__` method.
        """
        p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
        return filter(p, self.doc_init.values())

    def methods(self):
        """
        Returns the documented methods of this class as
        `pydoc.Function` objects. (`pydoc.Function.__lt__` places
        `__init__` first when the result is sorted.)

        Unfortunately, this also includes class methods.
        """
        p = lambda o: (isinstance(o, Function)
                       and o.method
                       and self.module._docfilter(o))
        return filter(p, self.doc.values())

    def functions(self):
        """
        Returns the documented static functions of this class as
        `pydoc.Function` objects.
        """
        p = lambda o: (isinstance(o, Function)
                       and not o.method
                       and self.module._docfilter(o))
        return filter(p, self.doc.values())

    def init_method(self):
        """
        Returns the documentation object for this class's `__init__`
        method, or `None` if there is none (or it is filtered out).
        """
        p = lambda o: (isinstance(o, Function)
                       and o.method and o.name == '__init__'
                       and self.module._docfilter(o))
        # BUG FIX: on Python 3 `filter` returns an iterator, which is
        # not subscriptable and is always truthy; materialize it first.
        fn = list(filter(p, self.doc.values()))
        return fn[0] if fn else None

    def _fill_inheritance(self):
        """
        Traverses this class's ancestor list and attempts to fill in
        missing documentation from its ancestor's documentation.

        The first pass connects variables, methods and functions with
        their inherited couterparts. (The templates will decide how to
        display docstrings.) The second pass attempts to add instance
        variables to this class that were only explicitly declared in
        a parent class. This second pass is necessary since instance
        variables are only discoverable by traversing the abstract
        syntax tree.
        """
        # BUG FIX: `mro` is iterated several times below (in `search`
        # and in the final loop); on Python 3 a `filter` object is a
        # one-shot iterator, so it must be a list here.
        mro = [c for c in self.module.mro(self)
               if c != self and isinstance(c, Class)]

        def search(d, fdoc):
            # Find the closest ancestor providing a doc of the same kind.
            for c in mro:
                doc = fdoc(c)
                if d.name in doc and isinstance(d, type(doc[d.name])):
                    return doc[d.name]
            return None

        for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
            for d in fdoc(self).values():
                dinherit = search(d, fdoc)
                if dinherit is not None:
                    d.inherits = dinherit

        # Since instance variables aren't part of a class's members,
        # we need to manually deduce inheritance. Oh lawdy.
        for c in mro:
            for name in filter(lambda n: n not in self.doc_init, c.doc_init):
                d = c.doc_init[name]
                self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
                self.doc_init[name].inherits = d

    def __public_objs(self):
        """
        Returns a dictionary mapping a public identifier name to a
        Python object. This counts the `__init__` method as being
        public.
        """
        _budoc = getattr(self.module.module, '__budoc__', {})

        def forced_out(name):
            # A `__budoc__` entry mapped to None hides the member.
            return _budoc.get('%s.%s' % (self.name, name), False) is None

        def exported(name):
            exported = name == '__init__' or _is_exported(name)
            return not forced_out(name) and exported

        idents = dict(inspect.getmembers(self.cls))
        return dict([(n, o) for n, o in idents.items() if exported(n)])
class Function (Doc):
    """
    Representation of documentation for a Python function or method.
    """

    def __init__(self, name, module, func_obj, cls=None, method=False):
        """
        Same as `pydoc.Doc.__init__`, except `func_obj` must be a
        Python function object. The docstring is gathered automatically.

        `cls` should be set when this is a method or a static function
        belonging to a class. `cls` should be a `pydoc.Class` object.

        `method` should be `True` when the function is a method. In
        all other cases, it should be `False`.
        """
        super(Function, self).__init__(name, module, inspect.getdoc(func_obj))

        self.func = func_obj
        """The Python function object."""

        self.cls = cls
        """
        The `pydoc.Class` documentation object if this is a method. If
        not, this is None.
        """

        self.method = method
        """
        Whether this function is a method or not.

        In particular, static class methods have this set to False.
        """

    @property
    def source(self):
        """Source lines of the function, or `[]` when unavailable."""
        return _source(self.func)

    @property
    def refname(self):
        """Fully qualified name, scoped to the class when a method."""
        if self.cls is None:
            return '%s.%s' % (self.module.refname, self.name)
        else:
            return '%s.%s' % (self.cls.refname, self.name)

    def spec(self):
        """
        Returns a nicely formatted spec of the function's parameter
        list as a string. This includes argument lists, keyword
        arguments and default values.
        """
        return ', '.join(self.params())

    def params(self):
        """
        Returns a list where each element is a nicely formatted
        parameter of this function. This includes argument lists,
        keyword arguments and default values.
        """
        # BUG FIX: `unicode` does not exist on Python 3 and raised a
        # NameError in `fmt_param`; fall back to `str` only there.
        try:
            string_types = (str, unicode)  # noqa: F821 (Python 2 only)
        except NameError:
            string_types = (str,)

        def fmt_param(el):
            # `el` is a parameter name or (Python 2 only) a nested
            # tuple of names.
            if isinstance(el, string_types):
                return el
            else:
                return '(%s)' % (', '.join(map(fmt_param, el)))

        try:
            getspec = getattr(inspect, 'getfullargspec', inspect.getargspec)
            s = getspec(self.func)
        except TypeError:
            # I guess this is for C builtin functions?
            return ['...']

        params = []
        for i, param in enumerate(s.args):
            if param.lower() == 'self':
                continue

            if s.defaults is not None and len(s.args) - i <= len(s.defaults):
                defind = len(s.defaults) - (len(s.args) - i)
                default_value = s.defaults[defind]
                value = repr(default_value).strip()
                if value and value[0] == '<' and value[-1] == '>':
                    # BUG FIX: `types.TypeType` and `types.ObjectType`
                    # were removed in Python 3 and raised AttributeError
                    # here. `isinstance(x, type)` covers classes and
                    # `type(x) is object` covers bare object() instances.
                    if isinstance(default_value, type):
                        value = default_value.__name__
                    elif type(default_value) is object:
                        value = '%s()' % (default_value.__class__.__name__)
                params.append('%s=%s' % (param, value))
            else:
                params.append(fmt_param(param))

        if s.varargs is not None:
            params.append('*%s' % s.varargs)

        # TODO: This needs to be adjusted in Python 3. There's more stuff
        # returned from getfullargspec than what we're looking at here.
        keywords = getattr(s, 'varkw', getattr(s, 'keywords', None))
        if keywords is not None:
            params.append('**%s' % keywords)
        return params

    def __lt__(self, other):
        # Push __init__ to the top.
        if '__init__' in (self.name, other.name):
            return self.name != other.name and self.name == '__init__'
        else:
            return self.name < other.name
class Variable (Doc):
    """
    Representation of a variable's documentation. This includes
    module, class and instance variables.
    """

    def __init__(self, name, module, docstring, cls=None):
        """
        Same as `pydoc.Doc.__init__`, except `cls` should be provided
        as a `pydoc.Class` object when this is a class or instance
        variable.
        """
        super(Variable, self).__init__(name, module, docstring)

        self.cls = cls
        """
        The `pydoc.Class` object if this is a class or instance
        variable. If not, this is None.
        """

    @property
    def source(self):
        """Variables have no retrievable source of their own."""
        return list()

    @property
    def refname(self):
        """Fully qualified name, scoped to the class when applicable."""
        owner = self.module if self.cls is None else self.cls
        return '%s.%s' % (owner.refname, self.name)
|
def _var_docstrings(tree, module, cls=None, init=False):
    """
    Extracts variable docstrings given `tree` as the abstract syntax,
    `module` as a `pydoc.Module` containing `tree` and an option `cls`
    as a `pydoc.Class` corresponding to the tree. In particular, `cls`
    should be specified when extracting docstrings from a class or an
    `__init__` method. Finally, `init` should be `True` when searching
    the AST of an `__init__` method so that `_var_docstrings` will only
    accept variables starting with `self.` as instance variables.

    A dictionary mapping variable name to a `pydoc.Variable` object is
    returned.
    """
    vs = {}
    children = list(ast.iter_child_nodes(tree))
    for i, child in enumerate(children):
        if isinstance(child, ast.Assign) and len(child.targets) == 1:
            # Accept `name = ...` at module/class level, or
            # `self.name = ...` inside an `__init__` method.
            if not init and isinstance(child.targets[0], ast.Name):
                name = child.targets[0].id
            elif (isinstance(child.targets[0], ast.Attribute)
                    and isinstance(child.targets[0].value, ast.Name)
                    and child.targets[0].value.id == 'self'):
                name = child.targets[0].attr
            else:
                continue
            if not _is_exported(name) \
                    and name not in getattr(module, '__all__', []):
                continue

            # A bare string expression immediately following the
            # assignment is treated as the variable's docstring.
            docstring = ''
            if (i+1 < len(children)
                    and isinstance(children[i+1], ast.Expr)
                    and isinstance(children[i+1].value, ast.Str)):
                docstring = children[i+1].value.s

            vs[name] = Variable(name, module, docstring, cls=cls)
    return vs
from __future__ import absolute_import, division, print_function
import ast
import imp
import inspect
import os
import os.path as path
import pkgutil
import re
import sys
import types
import_path = sys.path[:]
def _is_exported(ident_name):
    """
    Returns `True` if `ident_name` matches the export criteria for an
    identifier name: any name beginning with an underscore is private.

    This should not be used by clients. Instead, use
    `pydoc.Module.is_public`.
    """
    return ident_name[:1] != '_'
def import_module(module_name):
    """
    Imports a module. A single point of truth for importing modules to
    be documented by `pydoc`. In particular, it makes sure that the top
    module in `module_name` can be imported by using only the paths in
    `pydoc.import_path`.

    If a module has already been imported, then its corresponding entry
    in `sys.modules` is returned. This means that modules that have
    changed on disk cannot be re-imported in the same process and have
    its documentation updated.
    """
    if import_path != sys.path:
        # Such a kludge. Only restrict imports when `import_path` has
        # been changed: passing a path to `imp.find_module` stops it
        # from searching the special locations used for built-in or
        # frozen modules.
        #
        # This relies on `sys.path` not being independently changed
        # since this module was initialized; if it is changed, some
        # packages may fail to import.
        #
        # `find_module` raises when the top-level package cannot be
        # located, which ensures we only explicitly import modules
        # reachable from `pydoc.import_path`.
        top_level = module_name.split('.')[0]
        imp.find_module(top_level, import_path)

    if module_name not in sys.modules:
        __import__(module_name)
    return sys.modules[module_name]
def _source(obj):
    """
    Returns the source code of the Python object `obj` as a list of
    lines. This tries to extract the source from the special
    `__wrapped__` attribute if it exists. Otherwise, it falls back
    to `inspect.getsourcelines`.

    If neither works, then the empty list is returned.
    """
    # Prefer the unwrapped callable (e.g. under functools.wraps),
    # then the object itself; give up with [] if both attempts fail.
    for attr in ('__wrapped__', None):
        try:
            target = getattr(obj, attr) if attr else obj
            return inspect.getsourcelines(target)[0]
        except:
            pass
    return []
def _safe_import(module_name):
    """
    A function for safely importing `module_name`, where errors are
    suppressed and `stdout` and `stderr` are redirected to a null
    device. The obligation is on the caller to close `stdin` in order
    to avoid impolite modules from blocking on `stdin` when imported.
    """
    class _DevNull (object):
        def write(self, *_):
            pass

    saved_streams = (sys.stdout, sys.stderr)
    sys.stdout = sys.stderr = _DevNull()
    try:
        module = import_module(module_name)
    except:
        module = None
    finally:
        # Always restore the real streams, even on success.
        sys.stdout, sys.stderr = saved_streams
    return module
class Doc (object):
    """
    A base class for all documentation objects.

    A documentation object corresponds to *something* in a Python module
    that has a docstring associated with it. Typically, this only includes
    modules, classes, functions and methods. However, `pydoc` adds support
    for extracting docstrings from the abstract syntax tree, which means
    that variables (module, class or instance) are supported too.

    A special type of documentation object `pydoc.External` is used to
    represent identifiers that are not part of the public interface of
    a module. (The name "External" is a bit of a misnomer, since it can
    also correspond to unexported members of the module, particularly in
    a class's ancestor list.)
    """

    def __init__(self, name, module, docstring):
        """
        Initializes a documentation object, where `name` is the public
        identifier name, `module` is a `budoc.pydoc.Module` object, and
        `docstring` is a string containing the docstring for `name`.
        """
        self.name = name
        """
        The identifier name for this object.
        """

        self.module = module
        """
        The module documentation object that this object was defined
        in.
        """

        self.docstring = inspect.cleandoc(docstring or '')
        """
        The docstring for this object. It has already been cleaned
        by `inspect.cleandoc`.
        """

    @property
    def source(self):
        """
        The source code of the underlying Python object as a list of
        lines, or the empty list when unavailable. Every concrete
        documentation object must provide this property.
        """
        assert False, 'subclass responsibility'

    @property
    def refname(self):
        """
        An appropriate reference name for this documentation object,
        usually its fully qualified path. Every concrete documentation
        object must provide this property.
        """
        assert False, 'subclass responsibility'

    def __lt__(self, other):
        # Documentation objects order alphabetically by identifier name.
        return self.name < other.name

    def is_empty(self):
        """
        Returns true if the docstring for this object is empty.
        """
        return not self.docstring.strip()
class External (Doc):
    """
    A representation of an external identifier. The textual
    representation is the same as an internal identifier, but without
    any context. (Usually this makes linking more difficult.)

    External identifiers are also used to represent something that is
    not exported but appears somewhere in the public interface (like
    the ancestor list of a class).
    """

    # Documentation overrides for this class's own members.
    __budoc__ = {
        'External.docstring':
            """
            An empty string. External identifiers do not have
            docstrings.
            """,
        'External.module':
            """
            Always `None`. External identifiers have no associated
            `pydoc.Module`.
            """,
        'External.name':
            """
            Always equivalent to `pydoc.External.refname` since external
            identifiers are always expressed in their fully qualified
            form.
            """,
    }

    def __init__(self, name):
        """
        Initializes an external identifier with `name`, where `name`
        should be a fully qualified name.
        """
        Doc.__init__(self, name, None, '')

    @property
    def source(self):
        """External identifiers never have source code available."""
        return list()

    @property
    def refname(self):
        """External names are already fully qualified."""
        return self.name
class Module (Doc):
    """
    Representation of a module's documentation.
    """

    __budoc__ = {}
    __budoc__['Module.module'] = 'The Python module object.'
    __budoc__['Module.name'] = \
        """
        The name of this module with respect to the context in which
        it was imported. It is always an absolute import path.
        """

    def __init__(self, module, docfilter=None, allsubmodules=False):
        """
        Creates a `Module` documentation object given the actual
        module Python object.

        `docfilter` is an optional predicate that controls which
        documentation objects are returned in the following
        methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
        `pydoc.Module.variables` and `pydoc.Module.submodules`. The
        filter is propagated to the analogous methods on a `pydoc.Class`
        object.

        When `allsubmodules` is `True`, every submodule discovered in a
        package is documented, even unexported ones. (BUG FIX: this
        parameter was previously missing from the signature although
        `__new_submodule` passes `allsubmodules=...` to `Module(...)`,
        so documenting any package raised a TypeError.)
        """
        name = getattr(module, '__budoc_module_name', module.__name__)
        super(Module, self).__init__(name, module, inspect.getdoc(module))

        self._filtering = docfilter is not None
        self._docfilter = (lambda _: True) if docfilter is None else docfilter
        self._allsubmodules = allsubmodules

        self.doc = {}
        """A mapping from identifier name to a documentation object."""

        self.refdoc = {}
        """
        The same as `pydoc.Module.doc`, but maps fully qualified
        identifier names to documentation objects.
        """

        # Module-level variable docstrings can only be recovered from the
        # AST; source may be unavailable (e.g. C extensions), so this is
        # best-effort.
        vardocs = {}
        try:
            tree = ast.parse(inspect.getsource(self.module))
            vardocs = _var_docstrings(tree, self, cls=None)
        except:
            pass
        self._declared_variables = vardocs.keys()

        public = self.__public_objs()
        for name, obj in public.items():
            # Skip any identifiers that already have doco.
            if name in self.doc and not self.doc[name].is_empty():
                continue

            # Functions and some weird builtins?, plus methods, classes,
            # modules and module level variables.
            if inspect.isfunction(obj) or inspect.isbuiltin(obj):
                self.doc[name] = Function(name, self, obj)
            elif inspect.ismethod(obj):
                self.doc[name] = Function(name, self, obj)
            elif inspect.isclass(obj):
                self.doc[name] = Class(name, self, obj)
            elif inspect.ismodule(obj):
                # Only document modules that are submodules or are
                # forcefully exported by __all__.
                if obj is not self.module and \
                        (self.__is_exported(name, obj)
                         or self.is_submodule(obj.__name__)):
                    self.doc[name] = self.__new_submodule(name, obj)
            elif name in vardocs:
                self.doc[name] = vardocs[name]
            else:
                # Catch all for variables.
                self.doc[name] = Variable(name, self, '', cls=None)

        # Now scan the directory if this is a package for all modules.
        if not hasattr(self.module, '__path__') \
                and not hasattr(self.module, '__file__'):
            pkgdir = []
        else:
            pkgdir = getattr(self.module, '__path__',
                             [path.dirname(self.module.__file__)])
        if self.is_package():
            for (_, root, _) in pkgutil.iter_modules(pkgdir):
                # Ignore if this module was already doc'd.
                if root in self.doc:
                    continue

                # Ignore if it isn't exported, unless we've specifically
                # requested to document all submodules.
                if not self._allsubmodules \
                        and not self.__is_exported(root, self.module):
                    continue

                fullname = '%s.%s' % (self.name, root)
                m = _safe_import(fullname)
                if m is None:
                    continue

                self.doc[root] = self.__new_submodule(root, m)

        # Now see if we can grab inheritance relationships between classes.
        for docobj in self.doc.values():
            if isinstance(docobj, Class):
                docobj._fill_inheritance()

        # Build the reference name dictionary.
        for basename, docobj in self.doc.items():
            self.refdoc[docobj.refname] = docobj
            if isinstance(docobj, Class):
                for v in docobj.class_variables():
                    self.refdoc[v.refname] = v
                for v in docobj.instance_variables():
                    self.refdoc[v.refname] = v
                for f in docobj.methods():
                    self.refdoc[f.refname] = f
                for f in docobj.functions():
                    self.refdoc[f.refname] = f

        # Finally look for more docstrings in the __budoc__ override.
        for name, docstring in getattr(self.module, '__budoc__', {}).items():
            refname = '%s.%s' % (self.refname, name)
            if docstring is None:
                # Mapping a name to None removes it from the documentation.
                self.doc.pop(name, None)
                self.refdoc.pop(refname, None)
                continue
            dobj = self.find_ident(refname)
            if isinstance(dobj, External):
                continue
            dobj.docstring = inspect.cleandoc(docstring)

    def is_package(self):
        """
        Returns `True` if this module is a package.

        Works by checking whether the module has a `__path__`
        attribute.
        """
        return hasattr(self.module, '__path__')

    @property
    def source(self):
        """Source lines of the module, or `[]` when unavailable."""
        return _source(self.module)

    @property
    def refname(self):
        """A module's reference name is its absolute import path."""
        return self.name

    def mro(self, cls):
        """
        Returns a method resolution list of documentation objects
        for `cls`, which must be a documentation object.

        The list will contain objects belonging to `pydoc.Class` or
        `pydoc.External`. Objects belonging to the former are exported
        classes either in this module or in one of its sub-modules.
        """
        ups = inspect.getmro(cls.cls)
        return list(map(lambda c: self.find_class(c), ups))

    def descendents(self, cls):
        """
        Returns a descendent list of documentation objects for `cls`,
        which must be a documentation object.

        The list will contain objects belonging to `pydoc.Class` or
        `pydoc.External`. Objects belonging to the former are exported
        classes either in this module or in one of its sub-modules.
        """
        if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
            # `type` itself (and odd builtins) cannot enumerate subclasses.
            return []
        downs = cls.cls.__subclasses__()
        return list(map(lambda c: self.find_class(c), downs))

    def is_public(self, name):
        """
        Returns `True` if and only if an identifier with name `name` is
        part of the public interface of this module. While the names
        of sub-modules are included, identifiers only exported by
        sub-modules are not checked.

        `name` should be a fully qualified name, e.g.,
        <code>pydoc.Module.is_public</code>.
        """
        return name in self.refdoc

    def find_class(self, cls):
        """
        Given a Python `cls` object, try to find it in this module
        or in any of the exported identifiers of the submodules.
        """
        for doc_cls in self.classes():
            if cls is doc_cls.cls:
                return doc_cls
        for module in self.submodules():
            doc_cls = module.find_class(cls)
            if not isinstance(doc_cls, External):
                return doc_cls
        return External('%s.%s' % (cls.__module__, cls.__name__))

    def find_ident(self, name):
        """
        Searches this module and **all** of its sub-modules for an
        identifier with name `name` in its list of exported
        identifiers according to `pydoc`. Note that unexported
        sub-modules are searched.

        A bare identifier (without `.` separators) will only be checked
        for in this module.

        The documentation object corresponding to the identifier is
        returned. If one cannot be found, then an instance of
        `External` is returned populated with the given identifier.
        """
        if name in self.refdoc:
            return self.refdoc[name]
        for module in self.submodules():
            o = module.find_ident(name)
            if not isinstance(o, External):
                return o
        return External(name)

    def variables(self):
        """
        Returns all documented module level variables in the module
        sorted alphabetically as a list of `pydoc.Variable`.
        """
        p = lambda o: isinstance(o, Variable) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def classes(self):
        """
        Returns all documented module level classes in the module
        sorted alphabetically as a list of `pydoc.Class`.
        """
        p = lambda o: isinstance(o, Class) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def functions(self):
        """
        Returns all documented module level functions in the module
        sorted alphabetically as a list of `pydoc.Function`.
        """
        p = lambda o: isinstance(o, Function) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def submodules(self):
        """
        Returns all documented sub-modules in the module sorted
        alphabetically as a list of `pydoc.Module`.
        """
        p = lambda o: isinstance(o, Module) and self._docfilter(o)
        return sorted(filter(p, self.doc.values()))

    def is_submodule(self, name):
        """
        Returns `True` if and only if `name` starts with the full
        import path of `self` and has length at least one greater than
        `len(self.name)`.
        """
        return self.name != name and name.startswith(self.name)

    def __is_exported(self, name, module):
        """
        Returns `True` if and only if `pydoc` considers `name` to be
        a public identifier for this module where `name` was defined
        in the Python module `module`.

        If this module has an `__all__` attribute, then `name` is
        considered to be exported if and only if it is a member of
        this module's `__all__` list.

        If `__all__` is not set, then whether `name` is exported or
        not is heuristically determined. Firstly, if `name` starts
        with an underscore, it will not be considered exported.
        Secondly, if `name` was defined in a module other than this
        one, it will not be considered exported. In all other cases,
        `name` will be considered exported.
        """
        if hasattr(self.module, '__all__'):
            return name in self.module.__all__
        if not _is_exported(name):
            return False
        if module is None:
            return False
        if module is not None and self.module.__name__ != module.__name__:
            # Defined elsewhere; only export it if it was (re)declared
            # as a variable at this module's top level.
            return name in self._declared_variables
        return True

    def __public_objs(self):
        """
        Returns a dictionary mapping a public identifier name to a
        Python object.
        """
        members = dict(inspect.getmembers(self.module))
        return dict([(name, obj)
                     for name, obj in members.items()
                     if self.__is_exported(name, inspect.getmodule(obj))])

    def __new_submodule(self, name, obj):
        """
        Create a new submodule documentation object for this `obj`,
        which must by a Python module object and pass along any
        settings in this module.
        """
        # Forcefully set the module name so that it is always the absolute
        # import path. We can't rely on `obj.__name__`, since it doesn't
        # necessarily correspond to the public exported name of the module.
        obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
        return Module(obj,
                      docfilter=self._docfilter,
                      allsubmodules=self._allsubmodules)
class Class (Doc):
"""
Representation of a class's documentation.
"""
def __init__(self, name, module, class_obj):
"""
Same as `pydoc.Doc.__init__`, except `class_obj` must be a
Python class object. The docstring is gathered automatically.
"""
super(Class, self).__init__(name, module, inspect.getdoc(class_obj))
self.cls = class_obj
"""The class Python object."""
self.doc = {}
"""A mapping from identifier name to a `pydoc.Doc` objects."""
self.doc_init = {}
"""
A special version of `pydoc.Class.doc` that contains
documentation for instance variables found in the `__init__`
method.
"""
public = self.__public_objs()
try:
# First try and find docstrings for class variables.
# Then move on to finding docstrings for instance variables.
# This must be optional, since not all modules have source
# code available.
cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
self.doc = _var_docstrings(cls_ast, self.module, cls=self)
for n in (cls_ast.body if '__init__' in public else []):
if isinstance(n, ast.FunctionDef) and n.name == '__init__':
self.doc_init = _var_docstrings(n, self.module,
cls=self, init=True)
break
except:
pass
# Convert the public Python objects to documentation objects.
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
if name in self.doc_init:
# Let instance members override class members.
continue
if inspect.ismethod(obj):
self.doc[name] = Function(name, self.module, obj.__func__,
cls=self, method=True)
elif inspect.isfunction(obj):
self.doc[name] = Function(name, self.module, obj,
cls=self, method=False)
elif isinstance(obj, property):
docstring = getattr(obj, '__doc__', '')
self.doc_init[name] = Variable(name, self.module, docstring,
cls=self)
elif not inspect.isbuiltin(obj) \
and not inspect.isroutine(obj):
if name in getattr(self.cls, '__slots__', []):
self.doc_init[name] = Variable(name, self.module,
'', cls=self)
else:
self.doc[name] = Variable(name, self.module, '', cls=self)
@property
def source(self):
return _source(self.cls)
@property
def refname(self):
return '%s.%s' % (self.module.refname, self.cls.__name__)
def class_variables(self):
"""
Returns all documented class variables in the class, sorted
alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc.values())
def instance_variables(self):
"""
Returns all instance variables in the class, sorted
alphabetically as a list of `pydoc.Variable`. Instance variables
are attributes of `self` defined in a class's `__init__`
method.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc_init.values())
def methods(self):
"""
Returns all documented methods as `pydoc.Function` objects in
the class, sorted alphabetically with `__init__` always coming
first.
Unfortunately, this also includes class methods.
"""
p = lambda o: (isinstance(o, Function)
and o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def functions(self):
"""
Returns all documented static functions as `pydoc.Function`
objects in the class, sorted alphabetically.
"""
p = lambda o: (isinstance(o, Function)
and not o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def init_method(self):
p = lambda o: (isinstance(o, Function)
and o.method and o.name == '__init__'
and self.module._docfilter(o))
fn = filter(p, self.doc.values())
return fn[0] if fn else None
def _fill_inheritance(self):
"""
Traverses this class's ancestor list and attempts to fill in
missing documentation from its ancestor's documentation.
The first pass connects variables, methods and functions with
their inherited couterparts. (The templates will decide how to
display docstrings.) The second pass attempts to add instance
variables to this class that were only explicitly declared in
a parent class. This second pass is necessary since instance
variables are only discoverable by traversing the abstract
syntax tree.
"""
mro = filter(lambda c: c != self and isinstance(c, Class),
self.module.mro(self))
def search(d, fdoc):
for c in mro:
doc = fdoc(c)
if d.name in doc and isinstance(d, type(doc[d.name])):
return doc[d.name]
return None
for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
for d in fdoc(self).values():
dinherit = search(d, fdoc)
if dinherit is not None:
d.inherits = dinherit
# Since instance variables aren't part of a class's members,
# we need to manually deduce inheritance. Oh lawdy.
for c in mro:
for name in filter(lambda n: n not in self.doc_init, c.doc_init):
d = c.doc_init[name]
self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
self.doc_init[name].inherits = d
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object. This counts the `__init__` method as being
public.
"""
_budoc = getattr(self.module.module, '__budoc__', {})
def forced_out(name):
return _budoc.get('%s.%s' % (self.name, name), False) is None
def exported(name):
exported = name == '__init__' or _is_exported(name)
return not forced_out(name) and exported
idents = dict(inspect.getmembers(self.cls))
return dict([(n, o) for n, o in idents.items() if exported(n)])
class Function (Doc):
"""
Representation of documentation for a Python function or method.
"""
def __init__(self, name, module, func_obj, cls=None, method=False):
"""
Same as `pydoc.Doc.__init__`, except `func_obj` must be a
Python function object. The docstring is gathered automatically.
`cls` should be set when this is a method or a static function
beloing to a class. `cls` should be a `pydoc.Class` object.
`method` should be `True` when the function is a method. In
all other cases, it should be `False`.
"""
super(Function, self).__init__(name, module, inspect.getdoc(func_obj))
self.func = func_obj
"""The Python function object."""
self.cls = cls
"""
The `pydoc.Class` documentation object if this is a method. If
not, this is None.
"""
self.method = method
"""
Whether this function is a method or not.
In particular, static class methods have this set to False.
"""
@property
def source(self):
return _source(self.func)
@property
def refname(self):
if self.cls is None:
return '%s.%s' % (self.module.refname, self.name)
else:
return '%s.%s' % (self.cls.refname, self.name)
def spec(self):
"""
Returns a nicely formatted spec of the function's parameter
list as a string. This includes argument lists, keyword
arguments and default values.
"""
return ', '.join(self.params())
def params(self):
"""
Returns a list where each element is a nicely formatted
parameter of this function. This includes argument lists,
keyword arguments and default values.
"""
def fmt_param(el):
if isinstance(el, str) or isinstance(el, unicode):
return el
else:
return '(%s)' % (', '.join(map(fmt_param, el)))
try:
getspec = getattr(inspect, 'getfullargspec', inspect.getargspec)
s = getspec(self.func)
except TypeError:
# I guess this is for C builtin functions?
return ['...']
params = []
for i, param in enumerate(s.args):
if param.lower() == 'self':
continue
if s.defaults is not None and len(s.args) - i <= len(s.defaults):
defind = len(s.defaults) - (len(s.args) - i)
default_value = s.defaults[defind]
value = repr(default_value).strip()
if value[0] == '<' and value[-1] == '>':
if type(default_value) == types.TypeType:
value = default_value.__name__
elif type(default_value) == types.ObjectType:
value = '%s()'%(default_value.__class__.__name__)
params.append('%s=%s' % (param, value))
else:
params.append(fmt_param(param))
if s.varargs is not None:
params.append('*%s' % s.varargs)
# TODO: This needs to be adjusted in Python 3. There's more stuff
# returned from getfullargspec than what we're looking at here.
keywords = getattr(s, 'varkw', getattr(s, 'keywords', None))
if keywords is not None:
params.append('**%s' % keywords)
return params
def __lt__(self, other):
# Push __init__ to the top.
if '__init__' in (self.name, other.name):
return self.name != other.name and self.name == '__init__'
else:
return self.name < other.name
class Variable (Doc):
"""
Representation of a variable's documentation. This includes
module, class and instance variables.
"""
def __init__(self, name, module, docstring, cls=None):
"""
Same as `pydoc.Doc.__init__`, except `cls` should be provided
as a `pydoc.Class` object when this is a class or instance
variable.
"""
super(Variable, self).__init__(name, module, docstring)
self.cls = cls
"""
The `podc.Class` object if this is a class or instance
variable. If not, this is None.
"""
@property
def source(self):
return []
@property
def refname(self):
if self.cls is None:
return '%s.%s' % (self.module.refname, self.name)
else:
return '%s.%s' % (self.cls.refname, self.name)
|
zeaphoo/budoc | budoc/pydoc.py | Module.mro | python | def mro(self, cls):
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups)) | Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L400-L410 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
which must by a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.descendents | python | def descendents(self, cls):
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs)) | Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L412-L426 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
which must by a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.find_class | python | def find_class(self, cls):
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__)) | Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L440-L452 | [
"def classes(self):\n \"\"\"\n Returns all documented module level classes in the module\n sorted alphabetically as a list of `pydoc.Class`.\n \"\"\"\n p = lambda o: isinstance(o, Class) and self._docfilter(o)\n return sorted(filter(p, self.doc.values()))\n",
"def submodules(self):\n \"\"\"\n Returns all documented sub-modules in the module sorted\n alphabetically as a list of `pydoc.Module`.\n \"\"\"\n p = lambda o: isinstance(o, Module) and self._docfilter(o)\n return sorted(filter(p, self.doc.values()))\n"
] | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
which must by a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.find_ident | python | def find_ident(self, name):
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name) | Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L454-L474 | [
"def submodules(self):\n \"\"\"\n Returns all documented sub-modules in the module sorted\n alphabetically as a list of `pydoc.Module`.\n \"\"\"\n p = lambda o: isinstance(o, Module) and self._docfilter(o)\n return sorted(filter(p, self.doc.values()))\n"
] | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
which must by a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.variables | python | def variables(self):
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values())) | Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L476-L482 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
which must by a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.classes | python | def classes(self):
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values())) | Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L484-L490 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
which must by a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.functions | python | def functions(self):
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values())) | Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L492-L498 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
which must by a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.submodules | python | def submodules(self):
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values())) | Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L500-L506 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
which must by a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.is_submodule | python | def is_submodule(self, name):
return self.name != name and name.startswith(self.name) | Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L508-L514 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
        which must be a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.__is_exported | python | def __is_exported(self, name, module):
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True | Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L516-L541 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
        which must be a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.__public_objs | python | def __public_objs(self):
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))]) | Returns a dictionary mapping a public identifier name to a
Python object. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L543-L551 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __new_submodule(self, name, obj):
"""
Create a new submodule documentation object for this `obj`,
        which must be a Python module object and pass along any
settings in this module.
"""
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules)
|
zeaphoo/budoc | budoc/pydoc.py | Module.__new_submodule | python | def __new_submodule(self, name, obj):
# Forcefully set the module name so that it is always the absolute
# import path. We can't rely on `obj.__name__`, since it doesn't
# necessarily correspond to the public exported name of the module.
obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
return Module(obj,
docfilter=self._docfilter,
allsubmodules=self._allsubmodules) | Create a new submodule documentation object for this `obj`,
which must be a Python module object and pass along any
settings in this module. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L553-L565 | null | class Module (Doc):
"""
Representation of a module's documentation.
"""
__budoc__ = {}
__budoc__['Module.module'] = 'The Python module object.'
__budoc__['Module.name'] = \
"""
The name of this module with respect to the context in which
it was imported. It is always an absolute import path.
"""
def __init__(self, module, docfilter=None):
"""
Creates a `Module` documentation object given the actual
module Python object.
`docfilter` is an optional predicate that controls which
documentation objects are returned in the following
methods: `pydoc.Module.classes`, `pydoc.Module.functions`,
`pydoc.Module.variables` and `pydoc.Module.submodules`. The
filter is propagated to the analogous methods on a `pydoc.Class`
object.
"""
name = getattr(module, '__budoc_module_name', module.__name__)
super(Module, self).__init__(name, module, inspect.getdoc(module))
self._filtering = docfilter is not None
self._docfilter = (lambda _: True) if docfilter is None else docfilter
self._allsubmodules = False
self.doc = {}
"""A mapping from identifier name to a documentation object."""
self.refdoc = {}
"""
The same as `pydoc.Module.doc`, but maps fully qualified
identifier names to documentation objects.
"""
vardocs = {}
try:
tree = ast.parse(inspect.getsource(self.module))
vardocs = _var_docstrings(tree, self, cls=None)
except:
pass
self._declared_variables = vardocs.keys()
public = self.__public_objs()
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
# Functions and some weird builtins?, plus methods, classes,
# modules and module level variables.
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.ismethod(obj):
self.doc[name] = Function(name, self, obj)
elif inspect.isclass(obj):
self.doc[name] = Class(name, self, obj)
elif inspect.ismodule(obj):
# Only document modules that are submodules or are forcefully
# exported by __all__.
if obj is not self.module and \
(self.__is_exported(name, obj)
or self.is_submodule(obj.__name__)):
self.doc[name] = self.__new_submodule(name, obj)
elif name in vardocs:
self.doc[name] = vardocs[name]
else:
# Catch all for variables.
self.doc[name] = Variable(name, self, '', cls=None)
# Now scan the directory if this is a package for all modules.
if not hasattr(self.module, '__path__') \
and not hasattr(self.module, '__file__'):
pkgdir = []
else:
pkgdir = getattr(self.module, '__path__',
[path.dirname(self.module.__file__)])
if self.is_package():
for (_, root, _) in pkgutil.iter_modules(pkgdir):
# Ignore if this module was already doc'd.
if root in self.doc:
continue
# Ignore if it isn't exported, unless we've specifically
# requested to document all submodules.
if not self._allsubmodules \
and not self.__is_exported(root, self.module):
continue
fullname = '%s.%s' % (self.name, root)
m = _safe_import(fullname)
if m is None:
continue
self.doc[root] = self.__new_submodule(root, m)
# Now see if we can grab inheritance relationships between classes.
for docobj in self.doc.values():
if isinstance(docobj, Class):
docobj._fill_inheritance()
# Build the reference name dictionary.
for basename, docobj in self.doc.items():
self.refdoc[docobj.refname] = docobj
if isinstance(docobj, Class):
for v in docobj.class_variables():
self.refdoc[v.refname] = v
for v in docobj.instance_variables():
self.refdoc[v.refname] = v
for f in docobj.methods():
self.refdoc[f.refname] = f
for f in docobj.functions():
self.refdoc[f.refname] = f
# Finally look for more docstrings in the __budoc__ override.
for name, docstring in getattr(self.module, '__budoc__', {}).items():
refname = '%s.%s' % (self.refname, name)
if docstring is None:
self.doc.pop(name, None)
self.refdoc.pop(refname, None)
continue
dobj = self.find_ident(refname)
if isinstance(dobj, External):
continue
dobj.docstring = inspect.cleandoc(docstring)
def is_package(self):
"""
Returns `True` if this module is a package.
Works by checking if `__package__` is not `None` and whether it
has the `__path__` attribute.
"""
return hasattr(self.module, '__path__')
@property
def source(self):
return _source(self.module)
@property
def refname(self):
return self.name
def mro(self, cls):
"""
Returns a method resolution list of documentation objects
for `cls`, which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
ups = inspect.getmro(cls.cls)
return list(map(lambda c: self.find_class(c), ups))
def descendents(self, cls):
"""
Returns a descendent list of documentation objects for `cls`,
which must be a documentation object.
The list will contain objects belonging to `pydoc.Class` or
`pydoc.External`. Objects belonging to the former are exported
classes either in this module or in one of its sub-modules.
"""
if cls.cls == type or not hasattr(cls.cls, '__subclasses__'):
# Is this right?
return []
downs = cls.cls.__subclasses__()
return list(map(lambda c: self.find_class(c), downs))
def is_public(self, name):
"""
Returns `True` if and only if an identifier with name `name` is
part of the public interface of this module. While the names
of sub-modules are included, identifiers only exported by
sub-modules are not checked.
`name` should be a fully qualified name, e.g.,
<code>pydoc.Module.is_public</code>.
"""
return name in self.refdoc
def find_class(self, cls):
"""
Given a Python `cls` object, try to find it in this module
or in any of the exported identifiers of the submodules.
"""
for doc_cls in self.classes():
if cls is doc_cls.cls:
return doc_cls
for module in self.submodules():
doc_cls = module.find_class(cls)
if not isinstance(doc_cls, External):
return doc_cls
return External('%s.%s' % (cls.__module__, cls.__name__))
def find_ident(self, name):
"""
Searches this module and **all** of its sub-modules for an
identifier with name `name` in its list of exported
identifiers according to `pydoc`. Note that unexported
sub-modules are searched.
A bare identifier (without `.` separators) will only be checked
for in this module.
The documentation object corresponding to the identifier is
returned. If one cannot be found, then an instance of
`External` is returned populated with the given identifier.
"""
if name in self.refdoc:
return self.refdoc[name]
for module in self.submodules():
o = module.find_ident(name)
if not isinstance(o, External):
return o
return External(name)
def variables(self):
"""
Returns all documented module level variables in the module
sorted alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def classes(self):
"""
Returns all documented module level classes in the module
sorted alphabetically as a list of `pydoc.Class`.
"""
p = lambda o: isinstance(o, Class) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def functions(self):
"""
Returns all documented module level functions in the module
sorted alphabetically as a list of `pydoc.Function`.
"""
p = lambda o: isinstance(o, Function) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def submodules(self):
"""
Returns all documented sub-modules in the module sorted
alphabetically as a list of `pydoc.Module`.
"""
p = lambda o: isinstance(o, Module) and self._docfilter(o)
return sorted(filter(p, self.doc.values()))
def is_submodule(self, name):
"""
Returns `True` if and only if `name` starts with the full
import path of `self` and has length at least one greater than
`len(self.name)`.
"""
return self.name != name and name.startswith(self.name)
def __is_exported(self, name, module):
"""
Returns `True` if and only if `pydoc` considers `name` to be
a public identifier for this module where `name` was defined
in the Python module `module`.
If this module has an `__all__` attribute, then `name` is
considered to be exported if and only if it is a member of
this module's `__all__` list.
If `__all__` is not set, then whether `name` is exported or
not is heuristically determined. Firstly, if `name` starts
with an underscore, it will not be considered exported.
Secondly, if `name` was defined in a module other than this
one, it will not be considered exported. In all other cases,
`name` will be considered exported.
"""
if hasattr(self.module, '__all__'):
return name in self.module.__all__
if not _is_exported(name):
return False
if module is None:
return False
if module is not None and self.module.__name__ != module.__name__:
return name in self._declared_variables
return True
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object.
"""
members = dict(inspect.getmembers(self.module))
return dict([(name, obj)
for name, obj in members.items()
if self.__is_exported(name, inspect.getmodule(obj))])
|
zeaphoo/budoc | budoc/pydoc.py | Class.class_variables | python | def class_variables(self):
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc.values()) | Returns all documented class variables in the class, sorted
alphabetically as a list of `pydoc.Variable`. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L645-L651 | null | class Class (Doc):
"""
Representation of a class's documentation.
"""
def __init__(self, name, module, class_obj):
"""
Same as `pydoc.Doc.__init__`, except `class_obj` must be a
Python class object. The docstring is gathered automatically.
"""
super(Class, self).__init__(name, module, inspect.getdoc(class_obj))
self.cls = class_obj
"""The class Python object."""
self.doc = {}
"""A mapping from identifier name to a `pydoc.Doc` objects."""
self.doc_init = {}
"""
A special version of `pydoc.Class.doc` that contains
documentation for instance variables found in the `__init__`
method.
"""
public = self.__public_objs()
try:
# First try and find docstrings for class variables.
# Then move on to finding docstrings for instance variables.
# This must be optional, since not all modules have source
# code available.
cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
self.doc = _var_docstrings(cls_ast, self.module, cls=self)
for n in (cls_ast.body if '__init__' in public else []):
if isinstance(n, ast.FunctionDef) and n.name == '__init__':
self.doc_init = _var_docstrings(n, self.module,
cls=self, init=True)
break
except:
pass
# Convert the public Python objects to documentation objects.
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
if name in self.doc_init:
# Let instance members override class members.
continue
if inspect.ismethod(obj):
self.doc[name] = Function(name, self.module, obj.__func__,
cls=self, method=True)
elif inspect.isfunction(obj):
self.doc[name] = Function(name, self.module, obj,
cls=self, method=False)
elif isinstance(obj, property):
docstring = getattr(obj, '__doc__', '')
self.doc_init[name] = Variable(name, self.module, docstring,
cls=self)
elif not inspect.isbuiltin(obj) \
and not inspect.isroutine(obj):
if name in getattr(self.cls, '__slots__', []):
self.doc_init[name] = Variable(name, self.module,
'', cls=self)
else:
self.doc[name] = Variable(name, self.module, '', cls=self)
@property
def source(self):
return _source(self.cls)
@property
def refname(self):
return '%s.%s' % (self.module.refname, self.cls.__name__)
def instance_variables(self):
"""
Returns all instance variables in the class, sorted
alphabetically as a list of `pydoc.Variable`. Instance variables
are attributes of `self` defined in a class's `__init__`
method.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc_init.values())
def methods(self):
"""
Returns all documented methods as `pydoc.Function` objects in
the class, sorted alphabetically with `__init__` always coming
first.
Unfortunately, this also includes class methods.
"""
p = lambda o: (isinstance(o, Function)
and o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def functions(self):
"""
Returns all documented static functions as `pydoc.Function`
objects in the class, sorted alphabetically.
"""
p = lambda o: (isinstance(o, Function)
and not o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def init_method(self):
p = lambda o: (isinstance(o, Function)
and o.method and o.name == '__init__'
and self.module._docfilter(o))
fn = filter(p, self.doc.values())
return fn[0] if fn else None
def _fill_inheritance(self):
"""
Traverses this class's ancestor list and attempts to fill in
missing documentation from its ancestor's documentation.
The first pass connects variables, methods and functions with
their inherited couterparts. (The templates will decide how to
display docstrings.) The second pass attempts to add instance
variables to this class that were only explicitly declared in
a parent class. This second pass is necessary since instance
variables are only discoverable by traversing the abstract
syntax tree.
"""
mro = filter(lambda c: c != self and isinstance(c, Class),
self.module.mro(self))
def search(d, fdoc):
for c in mro:
doc = fdoc(c)
if d.name in doc and isinstance(d, type(doc[d.name])):
return doc[d.name]
return None
for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
for d in fdoc(self).values():
dinherit = search(d, fdoc)
if dinherit is not None:
d.inherits = dinherit
# Since instance variables aren't part of a class's members,
# we need to manually deduce inheritance. Oh lawdy.
for c in mro:
for name in filter(lambda n: n not in self.doc_init, c.doc_init):
d = c.doc_init[name]
self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
self.doc_init[name].inherits = d
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object. This counts the `__init__` method as being
public.
"""
_budoc = getattr(self.module.module, '__budoc__', {})
def forced_out(name):
return _budoc.get('%s.%s' % (self.name, name), False) is None
def exported(name):
exported = name == '__init__' or _is_exported(name)
return not forced_out(name) and exported
idents = dict(inspect.getmembers(self.cls))
return dict([(n, o) for n, o in idents.items() if exported(n)])
|
zeaphoo/budoc | budoc/pydoc.py | Class.instance_variables | python | def instance_variables(self):
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc_init.values()) | Returns all instance variables in the class, sorted
alphabetically as a list of `pydoc.Variable`. Instance variables
are attributes of `self` defined in a class's `__init__`
method. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L653-L661 | null | class Class (Doc):
"""
Representation of a class's documentation.
"""
def __init__(self, name, module, class_obj):
"""
Same as `pydoc.Doc.__init__`, except `class_obj` must be a
Python class object. The docstring is gathered automatically.
"""
super(Class, self).__init__(name, module, inspect.getdoc(class_obj))
self.cls = class_obj
"""The class Python object."""
self.doc = {}
"""A mapping from identifier name to a `pydoc.Doc` objects."""
self.doc_init = {}
"""
A special version of `pydoc.Class.doc` that contains
documentation for instance variables found in the `__init__`
method.
"""
public = self.__public_objs()
try:
# First try and find docstrings for class variables.
# Then move on to finding docstrings for instance variables.
# This must be optional, since not all modules have source
# code available.
cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
self.doc = _var_docstrings(cls_ast, self.module, cls=self)
for n in (cls_ast.body if '__init__' in public else []):
if isinstance(n, ast.FunctionDef) and n.name == '__init__':
self.doc_init = _var_docstrings(n, self.module,
cls=self, init=True)
break
except:
pass
# Convert the public Python objects to documentation objects.
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
if name in self.doc_init:
# Let instance members override class members.
continue
if inspect.ismethod(obj):
self.doc[name] = Function(name, self.module, obj.__func__,
cls=self, method=True)
elif inspect.isfunction(obj):
self.doc[name] = Function(name, self.module, obj,
cls=self, method=False)
elif isinstance(obj, property):
docstring = getattr(obj, '__doc__', '')
self.doc_init[name] = Variable(name, self.module, docstring,
cls=self)
elif not inspect.isbuiltin(obj) \
and not inspect.isroutine(obj):
if name in getattr(self.cls, '__slots__', []):
self.doc_init[name] = Variable(name, self.module,
'', cls=self)
else:
self.doc[name] = Variable(name, self.module, '', cls=self)
@property
def source(self):
return _source(self.cls)
@property
def refname(self):
return '%s.%s' % (self.module.refname, self.cls.__name__)
def class_variables(self):
"""
Returns all documented class variables in the class, sorted
alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc.values())
def methods(self):
"""
Returns all documented methods as `pydoc.Function` objects in
the class, sorted alphabetically with `__init__` always coming
first.
Unfortunately, this also includes class methods.
"""
p = lambda o: (isinstance(o, Function)
and o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def functions(self):
"""
Returns all documented static functions as `pydoc.Function`
objects in the class, sorted alphabetically.
"""
p = lambda o: (isinstance(o, Function)
and not o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def init_method(self):
p = lambda o: (isinstance(o, Function)
and o.method and o.name == '__init__'
and self.module._docfilter(o))
fn = filter(p, self.doc.values())
return fn[0] if fn else None
def _fill_inheritance(self):
"""
Traverses this class's ancestor list and attempts to fill in
missing documentation from its ancestor's documentation.
The first pass connects variables, methods and functions with
their inherited counterparts. (The templates will decide how to
display docstrings.) The second pass attempts to add instance
variables to this class that were only explicitly declared in
a parent class. This second pass is necessary since instance
variables are only discoverable by traversing the abstract
syntax tree.
"""
mro = filter(lambda c: c != self and isinstance(c, Class),
self.module.mro(self))
def search(d, fdoc):
for c in mro:
doc = fdoc(c)
if d.name in doc and isinstance(d, type(doc[d.name])):
return doc[d.name]
return None
for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
for d in fdoc(self).values():
dinherit = search(d, fdoc)
if dinherit is not None:
d.inherits = dinherit
# Since instance variables aren't part of a class's members,
# we need to manually deduce inheritance. Oh lawdy.
for c in mro:
for name in filter(lambda n: n not in self.doc_init, c.doc_init):
d = c.doc_init[name]
self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
self.doc_init[name].inherits = d
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object. This counts the `__init__` method as being
public.
"""
_budoc = getattr(self.module.module, '__budoc__', {})
def forced_out(name):
return _budoc.get('%s.%s' % (self.name, name), False) is None
def exported(name):
exported = name == '__init__' or _is_exported(name)
return not forced_out(name) and exported
idents = dict(inspect.getmembers(self.cls))
return dict([(n, o) for n, o in idents.items() if exported(n)])
|
zeaphoo/budoc | budoc/pydoc.py | Class.methods | python | def methods(self):
p = lambda o: (isinstance(o, Function)
and o.method
and self.module._docfilter(o))
return filter(p, self.doc.values()) | Returns all documented methods as `pydoc.Function` objects in
the class, sorted alphabetically with `__init__` always coming
first.
Unfortunately, this also includes class methods. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L663-L674 | null | class Class (Doc):
"""
Representation of a class's documentation.
"""
def __init__(self, name, module, class_obj):
"""
Same as `pydoc.Doc.__init__`, except `class_obj` must be a
Python class object. The docstring is gathered automatically.
"""
super(Class, self).__init__(name, module, inspect.getdoc(class_obj))
self.cls = class_obj
"""The class Python object."""
self.doc = {}
"""A mapping from identifier name to a `pydoc.Doc` objects."""
self.doc_init = {}
"""
A special version of `pydoc.Class.doc` that contains
documentation for instance variables found in the `__init__`
method.
"""
public = self.__public_objs()
try:
# First try and find docstrings for class variables.
# Then move on to finding docstrings for instance variables.
# This must be optional, since not all modules have source
# code available.
cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
self.doc = _var_docstrings(cls_ast, self.module, cls=self)
for n in (cls_ast.body if '__init__' in public else []):
if isinstance(n, ast.FunctionDef) and n.name == '__init__':
self.doc_init = _var_docstrings(n, self.module,
cls=self, init=True)
break
except:
pass
# Convert the public Python objects to documentation objects.
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
if name in self.doc_init:
# Let instance members override class members.
continue
if inspect.ismethod(obj):
self.doc[name] = Function(name, self.module, obj.__func__,
cls=self, method=True)
elif inspect.isfunction(obj):
self.doc[name] = Function(name, self.module, obj,
cls=self, method=False)
elif isinstance(obj, property):
docstring = getattr(obj, '__doc__', '')
self.doc_init[name] = Variable(name, self.module, docstring,
cls=self)
elif not inspect.isbuiltin(obj) \
and not inspect.isroutine(obj):
if name in getattr(self.cls, '__slots__', []):
self.doc_init[name] = Variable(name, self.module,
'', cls=self)
else:
self.doc[name] = Variable(name, self.module, '', cls=self)
@property
def source(self):
return _source(self.cls)
@property
def refname(self):
return '%s.%s' % (self.module.refname, self.cls.__name__)
def class_variables(self):
"""
Returns all documented class variables in the class, sorted
alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc.values())
def instance_variables(self):
"""
Returns all instance variables in the class, sorted
alphabetically as a list of `pydoc.Variable`. Instance variables
are attributes of `self` defined in a class's `__init__`
method.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc_init.values())
def functions(self):
"""
Returns all documented static functions as `pydoc.Function`
objects in the class, sorted alphabetically.
"""
p = lambda o: (isinstance(o, Function)
and not o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def init_method(self):
p = lambda o: (isinstance(o, Function)
and o.method and o.name == '__init__'
and self.module._docfilter(o))
fn = filter(p, self.doc.values())
return fn[0] if fn else None
def _fill_inheritance(self):
"""
Traverses this class's ancestor list and attempts to fill in
missing documentation from its ancestor's documentation.
The first pass connects variables, methods and functions with
their inherited counterparts. (The templates will decide how to
display docstrings.) The second pass attempts to add instance
variables to this class that were only explicitly declared in
a parent class. This second pass is necessary since instance
variables are only discoverable by traversing the abstract
syntax tree.
"""
mro = filter(lambda c: c != self and isinstance(c, Class),
self.module.mro(self))
def search(d, fdoc):
for c in mro:
doc = fdoc(c)
if d.name in doc and isinstance(d, type(doc[d.name])):
return doc[d.name]
return None
for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
for d in fdoc(self).values():
dinherit = search(d, fdoc)
if dinherit is not None:
d.inherits = dinherit
# Since instance variables aren't part of a class's members,
# we need to manually deduce inheritance. Oh lawdy.
for c in mro:
for name in filter(lambda n: n not in self.doc_init, c.doc_init):
d = c.doc_init[name]
self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
self.doc_init[name].inherits = d
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object. This counts the `__init__` method as being
public.
"""
_budoc = getattr(self.module.module, '__budoc__', {})
def forced_out(name):
return _budoc.get('%s.%s' % (self.name, name), False) is None
def exported(name):
exported = name == '__init__' or _is_exported(name)
return not forced_out(name) and exported
idents = dict(inspect.getmembers(self.cls))
return dict([(n, o) for n, o in idents.items() if exported(n)])
|
zeaphoo/budoc | budoc/pydoc.py | Class._fill_inheritance | python | def _fill_inheritance(self):
mro = filter(lambda c: c != self and isinstance(c, Class),
self.module.mro(self))
def search(d, fdoc):
for c in mro:
doc = fdoc(c)
if d.name in doc and isinstance(d, type(doc[d.name])):
return doc[d.name]
return None
for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
for d in fdoc(self).values():
dinherit = search(d, fdoc)
if dinherit is not None:
d.inherits = dinherit
# Since instance variables aren't part of a class's members,
# we need to manually deduce inheritance. Oh lawdy.
for c in mro:
for name in filter(lambda n: n not in self.doc_init, c.doc_init):
d = c.doc_init[name]
self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
self.doc_init[name].inherits = d | Traverses this class's ancestor list and attempts to fill in
missing documentation from its ancestor's documentation.
The first pass connects variables, methods and functions with
their inherited couterparts. (The templates will decide how to
display docstrings.) The second pass attempts to add instance
variables to this class that were only explicitly declared in
a parent class. This second pass is necessary since instance
variables are only discoverable by traversing the abstract
syntax tree. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L694-L728 | null | class Class (Doc):
"""
Representation of a class's documentation.
"""
def __init__(self, name, module, class_obj):
"""
Same as `pydoc.Doc.__init__`, except `class_obj` must be a
Python class object. The docstring is gathered automatically.
"""
super(Class, self).__init__(name, module, inspect.getdoc(class_obj))
self.cls = class_obj
"""The class Python object."""
self.doc = {}
"""A mapping from identifier name to a `pydoc.Doc` objects."""
self.doc_init = {}
"""
A special version of `pydoc.Class.doc` that contains
documentation for instance variables found in the `__init__`
method.
"""
public = self.__public_objs()
try:
# First try and find docstrings for class variables.
# Then move on to finding docstrings for instance variables.
# This must be optional, since not all modules have source
# code available.
cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
self.doc = _var_docstrings(cls_ast, self.module, cls=self)
for n in (cls_ast.body if '__init__' in public else []):
if isinstance(n, ast.FunctionDef) and n.name == '__init__':
self.doc_init = _var_docstrings(n, self.module,
cls=self, init=True)
break
except:
pass
# Convert the public Python objects to documentation objects.
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
if name in self.doc_init:
# Let instance members override class members.
continue
if inspect.ismethod(obj):
self.doc[name] = Function(name, self.module, obj.__func__,
cls=self, method=True)
elif inspect.isfunction(obj):
self.doc[name] = Function(name, self.module, obj,
cls=self, method=False)
elif isinstance(obj, property):
docstring = getattr(obj, '__doc__', '')
self.doc_init[name] = Variable(name, self.module, docstring,
cls=self)
elif not inspect.isbuiltin(obj) \
and not inspect.isroutine(obj):
if name in getattr(self.cls, '__slots__', []):
self.doc_init[name] = Variable(name, self.module,
'', cls=self)
else:
self.doc[name] = Variable(name, self.module, '', cls=self)
@property
def source(self):
return _source(self.cls)
@property
def refname(self):
return '%s.%s' % (self.module.refname, self.cls.__name__)
def class_variables(self):
"""
Returns all documented class variables in the class, sorted
alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc.values())
def instance_variables(self):
"""
Returns all instance variables in the class, sorted
alphabetically as a list of `pydoc.Variable`. Instance variables
are attributes of `self` defined in a class's `__init__`
method.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc_init.values())
def methods(self):
"""
Returns all documented methods as `pydoc.Function` objects in
the class, sorted alphabetically with `__init__` always coming
first.
Unfortunately, this also includes class methods.
"""
p = lambda o: (isinstance(o, Function)
and o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def functions(self):
"""
Returns all documented static functions as `pydoc.Function`
objects in the class, sorted alphabetically.
"""
p = lambda o: (isinstance(o, Function)
and not o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def init_method(self):
p = lambda o: (isinstance(o, Function)
and o.method and o.name == '__init__'
and self.module._docfilter(o))
fn = filter(p, self.doc.values())
return fn[0] if fn else None
def __public_objs(self):
"""
Returns a dictionary mapping a public identifier name to a
Python object. This counts the `__init__` method as being
public.
"""
_budoc = getattr(self.module.module, '__budoc__', {})
def forced_out(name):
return _budoc.get('%s.%s' % (self.name, name), False) is None
def exported(name):
exported = name == '__init__' or _is_exported(name)
return not forced_out(name) and exported
idents = dict(inspect.getmembers(self.cls))
return dict([(n, o) for n, o in idents.items() if exported(n)])
|
zeaphoo/budoc | budoc/pydoc.py | Class.__public_objs | python | def __public_objs(self):
_budoc = getattr(self.module.module, '__budoc__', {})
def forced_out(name):
return _budoc.get('%s.%s' % (self.name, name), False) is None
def exported(name):
exported = name == '__init__' or _is_exported(name)
return not forced_out(name) and exported
idents = dict(inspect.getmembers(self.cls))
return dict([(n, o) for n, o in idents.items() if exported(n)]) | Returns a dictionary mapping a public identifier name to a
Python object. This counts the `__init__` method as being
public. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L730-L746 | null | class Class (Doc):
"""
Representation of a class's documentation.
"""
def __init__(self, name, module, class_obj):
"""
Same as `pydoc.Doc.__init__`, except `class_obj` must be a
Python class object. The docstring is gathered automatically.
"""
super(Class, self).__init__(name, module, inspect.getdoc(class_obj))
self.cls = class_obj
"""The class Python object."""
self.doc = {}
"""A mapping from identifier name to a `pydoc.Doc` objects."""
self.doc_init = {}
"""
A special version of `pydoc.Class.doc` that contains
documentation for instance variables found in the `__init__`
method.
"""
public = self.__public_objs()
try:
# First try and find docstrings for class variables.
# Then move on to finding docstrings for instance variables.
# This must be optional, since not all modules have source
# code available.
cls_ast = ast.parse(inspect.getsource(self.cls)).body[0]
self.doc = _var_docstrings(cls_ast, self.module, cls=self)
for n in (cls_ast.body if '__init__' in public else []):
if isinstance(n, ast.FunctionDef) and n.name == '__init__':
self.doc_init = _var_docstrings(n, self.module,
cls=self, init=True)
break
except:
pass
# Convert the public Python objects to documentation objects.
for name, obj in public.items():
# Skip any identifiers that already have doco.
if name in self.doc and not self.doc[name].is_empty():
continue
if name in self.doc_init:
# Let instance members override class members.
continue
if inspect.ismethod(obj):
self.doc[name] = Function(name, self.module, obj.__func__,
cls=self, method=True)
elif inspect.isfunction(obj):
self.doc[name] = Function(name, self.module, obj,
cls=self, method=False)
elif isinstance(obj, property):
docstring = getattr(obj, '__doc__', '')
self.doc_init[name] = Variable(name, self.module, docstring,
cls=self)
elif not inspect.isbuiltin(obj) \
and not inspect.isroutine(obj):
if name in getattr(self.cls, '__slots__', []):
self.doc_init[name] = Variable(name, self.module,
'', cls=self)
else:
self.doc[name] = Variable(name, self.module, '', cls=self)
@property
def source(self):
return _source(self.cls)
@property
def refname(self):
return '%s.%s' % (self.module.refname, self.cls.__name__)
def class_variables(self):
"""
Returns all documented class variables in the class, sorted
alphabetically as a list of `pydoc.Variable`.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc.values())
def instance_variables(self):
"""
Returns all instance variables in the class, sorted
alphabetically as a list of `pydoc.Variable`. Instance variables
are attributes of `self` defined in a class's `__init__`
method.
"""
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc_init.values())
def methods(self):
"""
Returns all documented methods as `pydoc.Function` objects in
the class, sorted alphabetically with `__init__` always coming
first.
Unfortunately, this also includes class methods.
"""
p = lambda o: (isinstance(o, Function)
and o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def functions(self):
"""
Returns all documented static functions as `pydoc.Function`
objects in the class, sorted alphabetically.
"""
p = lambda o: (isinstance(o, Function)
and not o.method
and self.module._docfilter(o))
return filter(p, self.doc.values())
def init_method(self):
p = lambda o: (isinstance(o, Function)
and o.method and o.name == '__init__'
and self.module._docfilter(o))
fn = filter(p, self.doc.values())
return fn[0] if fn else None
def _fill_inheritance(self):
"""
Traverses this class's ancestor list and attempts to fill in
missing documentation from its ancestor's documentation.
The first pass connects variables, methods and functions with
their inherited couterparts. (The templates will decide how to
display docstrings.) The second pass attempts to add instance
variables to this class that were only explicitly declared in
a parent class. This second pass is necessary since instance
variables are only discoverable by traversing the abstract
syntax tree.
"""
mro = filter(lambda c: c != self and isinstance(c, Class),
self.module.mro(self))
def search(d, fdoc):
for c in mro:
doc = fdoc(c)
if d.name in doc and isinstance(d, type(doc[d.name])):
return doc[d.name]
return None
for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
for d in fdoc(self).values():
dinherit = search(d, fdoc)
if dinherit is not None:
d.inherits = dinherit
# Since instance variables aren't part of a class's members,
# we need to manually deduce inheritance. Oh lawdy.
for c in mro:
for name in filter(lambda n: n not in self.doc_init, c.doc_init):
d = c.doc_init[name]
self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
self.doc_init[name].inherits = d
|
zeaphoo/budoc | budoc/pydoc.py | Function.params | python | def params(self):
def fmt_param(el):
if isinstance(el, str) or isinstance(el, unicode):
return el
else:
return '(%s)' % (', '.join(map(fmt_param, el)))
try:
getspec = getattr(inspect, 'getfullargspec', inspect.getargspec)
s = getspec(self.func)
except TypeError:
# I guess this is for C builtin functions?
return ['...']
params = []
for i, param in enumerate(s.args):
if param.lower() == 'self':
continue
if s.defaults is not None and len(s.args) - i <= len(s.defaults):
defind = len(s.defaults) - (len(s.args) - i)
default_value = s.defaults[defind]
value = repr(default_value).strip()
if value[0] == '<' and value[-1] == '>':
if type(default_value) == types.TypeType:
value = default_value.__name__
elif type(default_value) == types.ObjectType:
value = '%s()'%(default_value.__class__.__name__)
params.append('%s=%s' % (param, value))
else:
params.append(fmt_param(param))
if s.varargs is not None:
params.append('*%s' % s.varargs)
# TODO: This needs to be adjusted in Python 3. There's more stuff
# returned from getfullargspec than what we're looking at here.
keywords = getattr(s, 'varkw', getattr(s, 'keywords', None))
if keywords is not None:
params.append('**%s' % keywords)
return params | Returns a list where each element is a nicely formatted
parameter of this function. This includes argument lists,
keyword arguments and default values. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L802-L844 | [
"def fmt_param(el):\n if isinstance(el, str) or isinstance(el, unicode):\n return el\n else:\n return '(%s)' % (', '.join(map(fmt_param, el)))\n"
] | class Function (Doc):
"""
Representation of documentation for a Python function or method.
"""
def __init__(self, name, module, func_obj, cls=None, method=False):
    """
    Same as `pydoc.Doc.__init__`, except `func_obj` must be a
    Python function object. The docstring is gathered automatically.

    `cls` should be set when this is a method or a static function
    belonging to a class. `cls` should be a `pydoc.Class` object.

    `method` should be `True` when the function is a method. In
    all other cases, it should be `False`.
    """
    super(Function, self).__init__(name, module, inspect.getdoc(func_obj))

    self.func = func_obj
    """The Python function object."""

    self.cls = cls
    """
    The `pydoc.Class` documentation object if this is a method. If
    not, this is None.
    """

    self.method = method
    """
    Whether this function is a method or not.

    In particular, static class methods have this set to False.
    """
@property
def source(self):
    """Raw source code of the function, if it can be retrieved."""
    return _source(self.func)
@property
def refname(self):
    """Fully qualified dotted name of this function.

    Module-level functions are qualified by their module's refname;
    methods and static functions by their owning class's refname.
    """
    owner = self.module if self.cls is None else self.cls
    return '%s.%s' % (owner.refname, self.name)
def spec(self):
    """
    Return the function's parameter list rendered as one
    comma-separated string, including argument names, keyword
    arguments and default values.
    """
    parts = self.params()
    return ', '.join(parts)
def __lt__(self, other):
    """Order functions alphabetically, with `__init__` always first."""
    if '__init__' not in (self.name, other.name):
        return self.name < other.name
    # At least one side is __init__: self sorts first only when it is
    # the __init__ and the other is not (equal names are not "less").
    return self.name != other.name and self.name == '__init__'
|
zeaphoo/budoc | budoc/budoc.py | indent | python | def indent(s, spaces=4):
new = re.sub('(\n+)', '\\1%s' % (' ' * spaces), s)
return (' ' * spaces) + new.strip() | Inserts `spaces` after each string of new lines in `s`
and before the start of the string. | train | https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/budoc.py#L15-L21 | null |
from __future__ import absolute_import, division, print_function
import ast
import imp
import inspect
import os
import os.path as path
import pkgutil
import re
import sys
from . import pydoc
import re
def ensure_dir(f):
    """Create the parent directory of file path *f* if it doesn't exist.

    Only the dirname of *f* is created (recursively); *f* itself is not
    touched. Fixed: a bare filename has an empty dirname, for which
    ``os.makedirs('')`` would raise — such paths are now a no-op.
    """
    d = os.path.dirname(f)
    if d and not os.path.exists(d):
        os.makedirs(d)
def output(text):
    """Write *text* to standard output and flush it immediately.

    Used for incremental progress reporting while docs are generated.
    """
    stream = sys.stdout
    stream.write(text)
    stream.flush()
def budoc_all(bu_config, ident_name = None, **kwargs):
    """Generate markdown docs for every entry of *bu_config*.

    Each entry of ``bu_config.docs`` is a dict with keys:
      - ``module``: import path or file/directory path to document
      - ``ident`` (optional): identifier filter passed to `budoc_one`
      - ``dest`` (optional): file path the markdown is written to

    A failure in one entry is reported via `output` and does not stop
    processing of the remaining entries.

    Fixed: the destination file is opened in binary mode, but
    `budoc_one` returns text on Python 3 — the markdown is now encoded
    to UTF-8 before writing (previously the TypeError was silently
    swallowed and docs were never written). The bare ``except:`` clauses
    are narrowed to ``except Exception`` so Ctrl-C still interrupts.
    """
    for doc in bu_config.docs:
        module_name = doc['module']
        ident = doc.get('ident')
        dest = doc.get('dest')
        output('Generating %s%s api docs to %s\n'%(module_name, ':%s'%(ident) if ident else '', dest))
        try:
            md = budoc_one(module_name, ident_name=ident)
        except Exception:
            output(' Error in generating.\n')
            continue
        output(' OK.\n')
        if dest and md:
            try:
                output(' Writing to %s.\n'%(dest))
                ensure_dir(dest)
                # open(..., 'wb') requires bytes; encode text results.
                data = md if isinstance(md, bytes) else md.encode('utf-8')
                with open(dest, 'wb') as f:
                    f.write(data)
                output(' Done.\n')
            except Exception:
                output(' Error in writing.\n')
                continue
def budoc_one(module_name, ident_name = None, **kwargs):
    """Generate the markdown documentation for a single module.

    `module_name` may be a Python import path, a ``.py`` file path, or a
    package directory. `ident_name` optionally restricts output to
    identifiers matching that name. Keyword options: ``stdout`` (echo
    the result to stdout) and ``show_module`` (emit a module heading).

    Returns the generated markdown produced by `MarkdownGenerator.gen`.
    """
    stdout = kwargs.get('stdout', False)
    show_module = kwargs.get('show_module', False)
    docfilter = None
    if ident_name and len(ident_name.strip()) > 0:
        search = ident_name.strip()

        # Keep a doc object when its refname contains the search term
        # (or vice versa); for classes, also match member names.
        def docfilter(o):
            rname = o.refname
            if rname.find(search) > -1 or search.find(o.name) > -1:
                return True
            if isinstance(o, pydoc.Class):
                return search in o.doc or search in o.doc_init
            return False
    # Try to do a real import first. I think it's better to prefer
    # import paths over files. If a file is really necessary, then
    # specify the absolute path, which is guaranteed not to be a
    # Python import path.
    try:
        module = pydoc.import_module(module_name)
    except Exception as e:
        module = None
    # Get the module that we're documenting. Accommodate for import paths,
    # files and directories.
    if module is None:
        isdir = path.isdir(module_name)
        isfile = path.isfile(module_name)
        if isdir or isfile:
            fp = path.realpath(module_name)
            # From here on, module_name is just the basename (package or
            # file name without extension).
            module_name = path.basename(fp)
            if isdir:
                fp = path.join(fp, '__init__.py')
            else:
                module_name, _ = path.splitext(module_name)
            # Use a special module name to avoid import conflicts.
            # It is hidden from view via the `Module` class.
            # NOTE(review): `imp` is deprecated since Python 3.4 —
            # importlib would be the modern替代; confirm target version.
            with open(fp) as f:
                module = imp.load_source('__budoc_file_module__', fp, f)
                if isdir:
                    # NOTE(review): realpath(module_name) resolves the
                    # *basename* against the CWD — this looks like it
                    # should be the original directory path; verify.
                    module.__path__ = [path.realpath(module_name)]
                module.__pydoc_module_name = module_name
        else:
            module = pydoc.import_module(module_name)
    module = pydoc.Module(module, docfilter=docfilter)
    doc = MarkdownGenerator(module).gen(module_doc=show_module)
    if stdout:
        sys.stdout.write(doc)
        sys.stdout.flush()
    return doc
class NoneFunction(object):
    """Null-object stand-in for an absent function (e.g. a missing
    ``__init__``).

    Exposes the same minimal surface the markdown generator reads from a
    real function wrapper: an empty docstring and an empty argument spec.
    """

    def __init__(self):
        # A function that does not exist has no documentation.
        self.docstring = ''

    def spec(self):
        """Return the (empty) argument specification string."""
        return ''
class MarkdownGenerator(object):
    """Render a pydoc-style module tree as a single Markdown document.

    The generator walks the wrapped module's variables, functions and
    classes, appending Markdown lines to an internal buffer; :meth:`gen`
    returns the joined document.
    """

    def __init__(self, module):
        # Output buffer; ``write`` appends one line to it.
        self.lines = []
        self.write = self.lines.append
        self.module = module

    def _emit_variable(self, var, title_level):
        # One "var" entry: blank line, heading, blank line, docstring.
        self.write('')
        self.write('%svar **%s**'%('#'*title_level, var.name))
        self.write('')
        self.write(var.docstring)

    def _emit_function(self, func, title_level):
        # One "def" entry: blank line, heading with the argument spec,
        # blank line, docstring.
        self.write('')
        self.write('%sdef **%s**(%s)'%('#'*title_level, func.name, func.spec()))
        self.write('')
        self.write(func.docstring)

    def gen_variable(self, var, title_level=2):
        """Emit the Markdown block for a module-level variable."""
        self._emit_variable(var, title_level)

    def gen_function(self, func, title_level=2):
        """Emit the Markdown block for a module-level function."""
        self._emit_function(func, title_level)

    def gen_class(self, aclass, title_level=2):
        """Emit the Markdown block for a class and all of its members."""
        write = self.write
        # A class without an explicit __init__ is rendered with an empty
        # constructor spec/doc via the NoneFunction null object.
        init_method = aclass.init_method() or NoneFunction()
        write('%sclass %s(%s)'%('#'*title_level, aclass.name, init_method.spec()))
        write('')
        write(aclass.docstring)
        write(init_method.docstring)
        # Collect members first (preserving the original accessor-call
        # order), then render them one heading level below the class:
        # class vars, instance vars, static methods, regular methods.
        class_vars = aclass.class_variables()
        static_methods = aclass.functions()
        methods = aclass.methods()
        inst_vars = aclass.instance_variables()
        for var in class_vars:
            self._emit_variable(var, title_level + 1)
        for var in inst_vars:
            self._emit_variable(var, title_level + 1)
        for func in static_methods:
            self._emit_function(func, title_level + 1)
        for func in methods:
            if func.name == '__init__':
                # The constructor is already folded into the class heading.
                continue
            self._emit_function(func, title_level + 1)

    def gen(self, module_doc=True):
        """Build and return the complete Markdown document.

        :param module_doc: when true, prefix the output with a module
            heading (plus the module docstring unless the module is
            being filtered) and nest member headings one level deeper.
        """
        module = self.module
        write = self.write
        if module_doc:
            write('#Module %s'%(module.name))
            if not module._filtering:
                write(module.docstring)
        title_level = 2 if module_doc else 1
        for var in module.variables():
            self.gen_variable(var, title_level=title_level)
        for func in module.functions():
            self.gen_function(func, title_level=title_level)
        for aclass in module.classes():
            self.gen_class(aclass, title_level=title_level)
        return '\n'.join(self.lines)
|
cbrand/vpnchooser | src/vpnchooser/db/user.py | User.check | python | def check(self, password: str) -> bool:
return (
pbkdf2_sha512.verify(password, self.password) or
pbkdf2_sha512.verify(password,
pbkdf2_sha512.encrypt(self.api_key))
) | Checks the given password with the one stored
in the database | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/db/user.py#L31-L40 | null | class User(db.Model):
"""
A user resource to be able to authenticate.
"""
__tablename__ = 'user'
name = db.Column(db.Unicode(255), primary_key=True)
_password = db.Column('password', db.Unicode(512), nullable=False)
@hybrid_property
def password(self):
return self._password
@password.setter
def password(self, password: str):
self._password = pbkdf2_sha512.encrypt(password)
is_admin = db.Column(db.Boolean, default=False,
server_default=db.text('false'))
_api_key = db.Column('api_key', db.Unicode(512), nullable=False)
def generate_api_key(self):
self._api_key = hashlib.new('sha512',
os.urandom(512)).hexdigest()
@hybrid_property
def api_key(self):
if self._api_key is None:
self.generate_api_key()
return self._api_key
@api_key.setter
def api_key(self, api_key):
self._api_key = api_key
@api_key.expression
def api_key(cls):
return cls._api_key
|
cbrand/vpnchooser | src/vpnchooser/connection/client.py | Client.connect | python | def connect(self):
key = paramiko.RSAKey(data=base64.b64decode(
app.config['SSH_HOST_KEY']
))
client = paramiko.SSHClient()
client.get_host_keys().add(
app.config['SSH_HOST'],
'ssh-rsa',
key
)
client.connect(
app.config['SSH_HOST'],
username=app.config['SSH_USER'],
password=app.config['SSH_PASSWORD'],
)
return client | Connects the client to the server and
returns it. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L20-L39 | null | class Client(object):
"""
Client to connect to the router and
synchronise with the data.
"""
def __init__(self):
self.rule_location = '/tmp/vpnchooser_rules/rules.txt'
def _create_directory_structure(self):
sftp = self.client.open_sftp()
rules = [
rule_string
for rule_string in
self.rule_location.split("/")
if len(rule_string) > 0
]
checked_rules = []
for rule in rules[:-1]:
check_path = '/' + '/'.join(checked_rules)
checked_rules.append(rule)
if rule not in sftp.listdir(
check_path
):
sftp.mkdir('/' + '/'.join(checked_rules))
sftp.close()
@property
def server_rules(self):
"""
Reads the server rules from the client
and returns it.
"""
sftp = self.client.open_sftp()
try:
rule_path = self.rule_location
try:
stat_entry = sftp.stat(rule_path)
if stat.S_ISDIR(stat_entry.st_mode):
sftp.rmdir(rule_path)
return []
except IOError:
return []
with sftp.open(rule_path, 'r') as file_handle:
data = file_handle.read()
return self._parse(data)
finally:
sftp.close()
@staticmethod
def _parse(data: str) -> list:
"""
Parses the given data string and returns
a list of rule objects.
"""
if isinstance(data, bytes):
data = data.decode('utf-8')
lines = (
item for item in
(item.strip() for item in data.split('\n'))
if len(item) and not item.startswith('#')
)
rules = []
for line in lines:
rules.append(
Rule.parse(line)
)
return rules
def sync(self, rules: list):
"""
Synchronizes the given ruleset with the
one on the server and adds the not yet
existing rules to the server.
:type rules: collections.Iterable[Rule]
"""
self.client = self.connect()
try:
server_rules = set(self.server_rules)
rules = set(rules)
to_remove_rules = server_rules.difference(rules)
to_add_rules = rules.difference(server_rules)
for to_remove_rule in to_remove_rules:
stdin, stdout, stderr = self.client.exec_command(
to_remove_rule.remove_command
)
stdout.read()
stderr.read()
for to_add_rule in to_add_rules:
stdin, stdout, stderr = self.client.exec_command(
to_add_rule.add_command
)
stdout.read()
stderr.read()
if len(to_remove_rules) or len(to_add_rules):
self._write_to_server(rules)
stdin, stdout, stderr = self.client.exec_command(
'ip route flush cache'
)
stdout.read()
stderr.read()
finally:
self.client.close()
def _write_to_server(self, rules: list):
"""
Writes the given ruleset to the
server configuration file.
:type rules: collections.Iterable[Rule]
"""
self._create_directory_structure()
config_data = '\n'.join(rule.config_string for rule in rules)
sftp = self.client.open_sftp()
try:
with sftp.open(self.rule_location, 'w') as file_handle:
file_handle.write(config_data)
file_handle.write('\n')
finally:
sftp.close()
|
cbrand/vpnchooser | src/vpnchooser/connection/client.py | Client.server_rules | python | def server_rules(self):
sftp = self.client.open_sftp()
try:
rule_path = self.rule_location
try:
stat_entry = sftp.stat(rule_path)
if stat.S_ISDIR(stat_entry.st_mode):
sftp.rmdir(rule_path)
return []
except IOError:
return []
with sftp.open(rule_path, 'r') as file_handle:
data = file_handle.read()
return self._parse(data)
finally:
sftp.close() | Reads the server rules from the client
and returns it. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L60-L79 | [
"def _parse(data: str) -> list:\n \"\"\"\n Parses the given data string and returns\n a list of rule objects.\n \"\"\"\n if isinstance(data, bytes):\n data = data.decode('utf-8')\n lines = (\n item for item in\n (item.strip() for item in data.split('\\n'))\n if len(item) and not item.startswith('#')\n )\n rules = []\n for line in lines:\n rules.append(\n Rule.parse(line)\n )\n return rules\n"
] | class Client(object):
"""
Client to connect to the router and
synchronise with the data.
"""
def __init__(self):
self.rule_location = '/tmp/vpnchooser_rules/rules.txt'
def connect(self):
"""
Connects the client to the server and
returns it.
"""
key = paramiko.RSAKey(data=base64.b64decode(
app.config['SSH_HOST_KEY']
))
client = paramiko.SSHClient()
client.get_host_keys().add(
app.config['SSH_HOST'],
'ssh-rsa',
key
)
client.connect(
app.config['SSH_HOST'],
username=app.config['SSH_USER'],
password=app.config['SSH_PASSWORD'],
)
return client
def _create_directory_structure(self):
sftp = self.client.open_sftp()
rules = [
rule_string
for rule_string in
self.rule_location.split("/")
if len(rule_string) > 0
]
checked_rules = []
for rule in rules[:-1]:
check_path = '/' + '/'.join(checked_rules)
checked_rules.append(rule)
if rule not in sftp.listdir(
check_path
):
sftp.mkdir('/' + '/'.join(checked_rules))
sftp.close()
@property
@staticmethod
def _parse(data: str) -> list:
"""
Parses the given data string and returns
a list of rule objects.
"""
if isinstance(data, bytes):
data = data.decode('utf-8')
lines = (
item for item in
(item.strip() for item in data.split('\n'))
if len(item) and not item.startswith('#')
)
rules = []
for line in lines:
rules.append(
Rule.parse(line)
)
return rules
def sync(self, rules: list):
"""
Synchronizes the given ruleset with the
one on the server and adds the not yet
existing rules to the server.
:type rules: collections.Iterable[Rule]
"""
self.client = self.connect()
try:
server_rules = set(self.server_rules)
rules = set(rules)
to_remove_rules = server_rules.difference(rules)
to_add_rules = rules.difference(server_rules)
for to_remove_rule in to_remove_rules:
stdin, stdout, stderr = self.client.exec_command(
to_remove_rule.remove_command
)
stdout.read()
stderr.read()
for to_add_rule in to_add_rules:
stdin, stdout, stderr = self.client.exec_command(
to_add_rule.add_command
)
stdout.read()
stderr.read()
if len(to_remove_rules) or len(to_add_rules):
self._write_to_server(rules)
stdin, stdout, stderr = self.client.exec_command(
'ip route flush cache'
)
stdout.read()
stderr.read()
finally:
self.client.close()
def _write_to_server(self, rules: list):
"""
Writes the given ruleset to the
server configuration file.
:type rules: collections.Iterable[Rule]
"""
self._create_directory_structure()
config_data = '\n'.join(rule.config_string for rule in rules)
sftp = self.client.open_sftp()
try:
with sftp.open(self.rule_location, 'w') as file_handle:
file_handle.write(config_data)
file_handle.write('\n')
finally:
sftp.close()
|
cbrand/vpnchooser | src/vpnchooser/connection/client.py | Client._parse | python | def _parse(data: str) -> list:
if isinstance(data, bytes):
data = data.decode('utf-8')
lines = (
item for item in
(item.strip() for item in data.split('\n'))
if len(item) and not item.startswith('#')
)
rules = []
for line in lines:
rules.append(
Rule.parse(line)
)
return rules | Parses the given data string and returns
a list of rule objects. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L82-L99 | [
"def parse(cls, line: str):\n match = parse_rule.match(line)\n if match is None:\n return None\n\n rule = cls()\n rule.ip = match.group('ip')\n rule.table = match.group('table')\n return rule\n"
] | class Client(object):
"""
Client to connect to the router and
synchronise with the data.
"""
def __init__(self):
self.rule_location = '/tmp/vpnchooser_rules/rules.txt'
def connect(self):
"""
Connects the client to the server and
returns it.
"""
key = paramiko.RSAKey(data=base64.b64decode(
app.config['SSH_HOST_KEY']
))
client = paramiko.SSHClient()
client.get_host_keys().add(
app.config['SSH_HOST'],
'ssh-rsa',
key
)
client.connect(
app.config['SSH_HOST'],
username=app.config['SSH_USER'],
password=app.config['SSH_PASSWORD'],
)
return client
def _create_directory_structure(self):
sftp = self.client.open_sftp()
rules = [
rule_string
for rule_string in
self.rule_location.split("/")
if len(rule_string) > 0
]
checked_rules = []
for rule in rules[:-1]:
check_path = '/' + '/'.join(checked_rules)
checked_rules.append(rule)
if rule not in sftp.listdir(
check_path
):
sftp.mkdir('/' + '/'.join(checked_rules))
sftp.close()
@property
def server_rules(self):
"""
Reads the server rules from the client
and returns it.
"""
sftp = self.client.open_sftp()
try:
rule_path = self.rule_location
try:
stat_entry = sftp.stat(rule_path)
if stat.S_ISDIR(stat_entry.st_mode):
sftp.rmdir(rule_path)
return []
except IOError:
return []
with sftp.open(rule_path, 'r') as file_handle:
data = file_handle.read()
return self._parse(data)
finally:
sftp.close()
@staticmethod
def sync(self, rules: list):
"""
Synchronizes the given ruleset with the
one on the server and adds the not yet
existing rules to the server.
:type rules: collections.Iterable[Rule]
"""
self.client = self.connect()
try:
server_rules = set(self.server_rules)
rules = set(rules)
to_remove_rules = server_rules.difference(rules)
to_add_rules = rules.difference(server_rules)
for to_remove_rule in to_remove_rules:
stdin, stdout, stderr = self.client.exec_command(
to_remove_rule.remove_command
)
stdout.read()
stderr.read()
for to_add_rule in to_add_rules:
stdin, stdout, stderr = self.client.exec_command(
to_add_rule.add_command
)
stdout.read()
stderr.read()
if len(to_remove_rules) or len(to_add_rules):
self._write_to_server(rules)
stdin, stdout, stderr = self.client.exec_command(
'ip route flush cache'
)
stdout.read()
stderr.read()
finally:
self.client.close()
def _write_to_server(self, rules: list):
"""
Writes the given ruleset to the
server configuration file.
:type rules: collections.Iterable[Rule]
"""
self._create_directory_structure()
config_data = '\n'.join(rule.config_string for rule in rules)
sftp = self.client.open_sftp()
try:
with sftp.open(self.rule_location, 'w') as file_handle:
file_handle.write(config_data)
file_handle.write('\n')
finally:
sftp.close()
|
cbrand/vpnchooser | src/vpnchooser/connection/client.py | Client.sync | python | def sync(self, rules: list):
self.client = self.connect()
try:
server_rules = set(self.server_rules)
rules = set(rules)
to_remove_rules = server_rules.difference(rules)
to_add_rules = rules.difference(server_rules)
for to_remove_rule in to_remove_rules:
stdin, stdout, stderr = self.client.exec_command(
to_remove_rule.remove_command
)
stdout.read()
stderr.read()
for to_add_rule in to_add_rules:
stdin, stdout, stderr = self.client.exec_command(
to_add_rule.add_command
)
stdout.read()
stderr.read()
if len(to_remove_rules) or len(to_add_rules):
self._write_to_server(rules)
stdin, stdout, stderr = self.client.exec_command(
'ip route flush cache'
)
stdout.read()
stderr.read()
finally:
self.client.close() | Synchronizes the given ruleset with the
one on the server and adds the not yet
existing rules to the server.
:type rules: collections.Iterable[Rule] | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L101-L135 | [
"def connect(self):\n \"\"\"\n Connects the client to the server and\n returns it.\n \"\"\"\n key = paramiko.RSAKey(data=base64.b64decode(\n app.config['SSH_HOST_KEY']\n ))\n client = paramiko.SSHClient()\n client.get_host_keys().add(\n app.config['SSH_HOST'],\n 'ssh-rsa',\n key\n )\n client.connect(\n app.config['SSH_HOST'],\n username=app.config['SSH_USER'],\n password=app.config['SSH_PASSWORD'],\n )\n return client\n",
"def _write_to_server(self, rules: list):\n \"\"\"\n Writes the given ruleset to the\n server configuration file.\n :type rules: collections.Iterable[Rule]\n \"\"\"\n self._create_directory_structure()\n config_data = '\\n'.join(rule.config_string for rule in rules)\n sftp = self.client.open_sftp()\n try:\n with sftp.open(self.rule_location, 'w') as file_handle:\n file_handle.write(config_data)\n file_handle.write('\\n')\n finally:\n sftp.close()\n"
] | class Client(object):
"""
Client to connect to the router and
synchronise with the data.
"""
def __init__(self):
self.rule_location = '/tmp/vpnchooser_rules/rules.txt'
def connect(self):
"""
Connects the client to the server and
returns it.
"""
key = paramiko.RSAKey(data=base64.b64decode(
app.config['SSH_HOST_KEY']
))
client = paramiko.SSHClient()
client.get_host_keys().add(
app.config['SSH_HOST'],
'ssh-rsa',
key
)
client.connect(
app.config['SSH_HOST'],
username=app.config['SSH_USER'],
password=app.config['SSH_PASSWORD'],
)
return client
def _create_directory_structure(self):
sftp = self.client.open_sftp()
rules = [
rule_string
for rule_string in
self.rule_location.split("/")
if len(rule_string) > 0
]
checked_rules = []
for rule in rules[:-1]:
check_path = '/' + '/'.join(checked_rules)
checked_rules.append(rule)
if rule not in sftp.listdir(
check_path
):
sftp.mkdir('/' + '/'.join(checked_rules))
sftp.close()
@property
def server_rules(self):
"""
Reads the server rules from the client
and returns it.
"""
sftp = self.client.open_sftp()
try:
rule_path = self.rule_location
try:
stat_entry = sftp.stat(rule_path)
if stat.S_ISDIR(stat_entry.st_mode):
sftp.rmdir(rule_path)
return []
except IOError:
return []
with sftp.open(rule_path, 'r') as file_handle:
data = file_handle.read()
return self._parse(data)
finally:
sftp.close()
@staticmethod
def _parse(data: str) -> list:
"""
Parses the given data string and returns
a list of rule objects.
"""
if isinstance(data, bytes):
data = data.decode('utf-8')
lines = (
item for item in
(item.strip() for item in data.split('\n'))
if len(item) and not item.startswith('#')
)
rules = []
for line in lines:
rules.append(
Rule.parse(line)
)
return rules
def _write_to_server(self, rules: list):
"""
Writes the given ruleset to the
server configuration file.
:type rules: collections.Iterable[Rule]
"""
self._create_directory_structure()
config_data = '\n'.join(rule.config_string for rule in rules)
sftp = self.client.open_sftp()
try:
with sftp.open(self.rule_location, 'w') as file_handle:
file_handle.write(config_data)
file_handle.write('\n')
finally:
sftp.close()
|
cbrand/vpnchooser | src/vpnchooser/connection/client.py | Client._write_to_server | python | def _write_to_server(self, rules: list):
self._create_directory_structure()
config_data = '\n'.join(rule.config_string for rule in rules)
sftp = self.client.open_sftp()
try:
with sftp.open(self.rule_location, 'w') as file_handle:
file_handle.write(config_data)
file_handle.write('\n')
finally:
sftp.close() | Writes the given ruleset to the
server configuration file.
:type rules: collections.Iterable[Rule] | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L137-L151 | [
"def _create_directory_structure(self):\n sftp = self.client.open_sftp()\n rules = [\n rule_string\n for rule_string in\n self.rule_location.split(\"/\")\n if len(rule_string) > 0\n ]\n checked_rules = []\n for rule in rules[:-1]:\n check_path = '/' + '/'.join(checked_rules)\n checked_rules.append(rule)\n if rule not in sftp.listdir(\n check_path\n ):\n sftp.mkdir('/' + '/'.join(checked_rules))\n sftp.close()\n"
] | class Client(object):
"""
Client to connect to the router and
synchronise with the data.
"""
def __init__(self):
self.rule_location = '/tmp/vpnchooser_rules/rules.txt'
def connect(self):
"""
Connects the client to the server and
returns it.
"""
key = paramiko.RSAKey(data=base64.b64decode(
app.config['SSH_HOST_KEY']
))
client = paramiko.SSHClient()
client.get_host_keys().add(
app.config['SSH_HOST'],
'ssh-rsa',
key
)
client.connect(
app.config['SSH_HOST'],
username=app.config['SSH_USER'],
password=app.config['SSH_PASSWORD'],
)
return client
def _create_directory_structure(self):
sftp = self.client.open_sftp()
rules = [
rule_string
for rule_string in
self.rule_location.split("/")
if len(rule_string) > 0
]
checked_rules = []
for rule in rules[:-1]:
check_path = '/' + '/'.join(checked_rules)
checked_rules.append(rule)
if rule not in sftp.listdir(
check_path
):
sftp.mkdir('/' + '/'.join(checked_rules))
sftp.close()
@property
def server_rules(self):
"""
Reads the server rules from the client
and returns it.
"""
sftp = self.client.open_sftp()
try:
rule_path = self.rule_location
try:
stat_entry = sftp.stat(rule_path)
if stat.S_ISDIR(stat_entry.st_mode):
sftp.rmdir(rule_path)
return []
except IOError:
return []
with sftp.open(rule_path, 'r') as file_handle:
data = file_handle.read()
return self._parse(data)
finally:
sftp.close()
@staticmethod
def _parse(data: str) -> list:
"""
Parses the given data string and returns
a list of rule objects.
"""
if isinstance(data, bytes):
data = data.decode('utf-8')
lines = (
item for item in
(item.strip() for item in data.split('\n'))
if len(item) and not item.startswith('#')
)
rules = []
for line in lines:
rules.append(
Rule.parse(line)
)
return rules
def sync(self, rules: list):
"""
Synchronizes the given ruleset with the
one on the server and adds the not yet
existing rules to the server.
:type rules: collections.Iterable[Rule]
"""
self.client = self.connect()
try:
server_rules = set(self.server_rules)
rules = set(rules)
to_remove_rules = server_rules.difference(rules)
to_add_rules = rules.difference(server_rules)
for to_remove_rule in to_remove_rules:
stdin, stdout, stderr = self.client.exec_command(
to_remove_rule.remove_command
)
stdout.read()
stderr.read()
for to_add_rule in to_add_rules:
stdin, stdout, stderr = self.client.exec_command(
to_add_rule.add_command
)
stdout.read()
stderr.read()
if len(to_remove_rules) or len(to_add_rules):
self._write_to_server(rules)
stdin, stdout, stderr = self.client.exec_command(
'ip route flush cache'
)
stdout.read()
stderr.read()
finally:
self.client.close()
|
cbrand/vpnchooser | src/vpnchooser/resources/device.py | DeviceResource.put | python | def put(self, device_id: int) -> Device:
device = self._get_or_abort(device_id)
self.update(device)
session.commit()
session.add(device)
return device | Updates the Device Resource with the
name. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/device.py#L114-L123 | null | class DeviceResource(AbstractDeviceResource):
"""
The resource to access a device resource.
"""
@staticmethod
def _get_by_id(device_id: int) -> Device:
return session.query(Device).filter(
Device.id == device_id
).first()
def _get_or_abort(self, device_id: int):
device = self._get_by_id(device_id)
if device is None:
abort(404)
else:
pass
return device
@require_login
@marshal_with(resource_fields)
def get(self, device_id: int) -> Device:
"""
Gets the Device Resource.
"""
return self._get_or_abort(device_id)
@require_login
@marshal_with(resource_fields)
@require_login
def delete(self, device_id: int):
"""
Deletes the resource with the given name.
"""
device = self._get_or_abort(device_id)
session.delete(device)
session.commit()
return '', 204
|
cbrand/vpnchooser | src/vpnchooser/resources/device.py | DeviceResource.delete | python | def delete(self, device_id: int):
device = self._get_or_abort(device_id)
session.delete(device)
session.commit()
return '', 204 | Deletes the resource with the given name. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/device.py#L126-L133 | null | class DeviceResource(AbstractDeviceResource):
"""
The resource to access a device resource.
"""
@staticmethod
def _get_by_id(device_id: int) -> Device:
return session.query(Device).filter(
Device.id == device_id
).first()
def _get_or_abort(self, device_id: int):
device = self._get_by_id(device_id)
if device is None:
abort(404)
else:
pass
return device
@require_login
@marshal_with(resource_fields)
def get(self, device_id: int) -> Device:
"""
Gets the Device Resource.
"""
return self._get_or_abort(device_id)
@require_login
@marshal_with(resource_fields)
def put(self, device_id: int) -> Device:
"""
Updates the Device Resource with the
name.
"""
device = self._get_or_abort(device_id)
self.update(device)
session.commit()
session.add(device)
return device
@require_login
|
cbrand/vpnchooser | src/vpnchooser/query/rules_query.py | RulesQuery._load_rules | python | def _load_rules(self):
with self._sftp_connection.open(self.RULE_PATH) as file:
data = file.read()
lines = (
line.strip()
for line in data.split('\n')
)
rule_strings = (
line for line in lines
if len(line) > 0
)
rules = (
Rule.parse(rule_string)
for rule_string in rule_strings
)
self._rules = [
rule
for rule in rules
if rule is not None
] | Loads the rules from the SSH-Connection | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/query/rules_query.py#L17-L39 | null | class RulesQuery(object):
RULE_PATH = None
def __init__(self, connection: SSHClient):
self._ssh = connection
self._sftp = None
self._reset()
def _reset(self):
"""
Resets the internal state to download
the data from the router of the currently
valid rules.
"""
self._rules = None
@property
def rules(self):
if self._rules is None:
self._load_rules()
return self._rules
@property
def _sftp_connection(self) -> SFTPClient:
if self._sftp is None:
self._sftp = self._ssh.open_sftp()
return self._sftp
def __del__(self):
try:
self._sftp.close()
except Exception:
pass
try:
self._ssh.close()
except Exception:
pass
def _exec_command(self, command: str):
"""
Executes the command and closes the handles
afterwards.
"""
stdin, stdout, stderr = self._ssh.exec_command(command)
# Clearing the buffers
stdout.read()
stderr.read()
stdin.close()
def sync(self, rules: list):
"""
Synchronizes the given rules with the server
and ensures that there are no old rules active
which are not in the given list.
"""
self._reset()
old_rules = self.rules
to_delete_rules = [
rule for rule in old_rules
if rule not in rules
]
new_rules = [
rule for rule in rules
if rule not in old_rules
]
for new_rule in new_rules:
assert isinstance(new_rule, Rule)
self._exec_command(new_rule.add_command)
for to_delete_rule in to_delete_rules:
assert isinstance(to_delete_rule, Rule)
self._exec_command(
to_delete_rule.remove_command
)
self._update(rules)
def _update(self, rules: list):
"""
Updates the given rules and stores
them on the router.
"""
self._rules = rules
to_store = '\n'.join(
rule.config_string
for rule in rules
)
sftp_connection = self._sftp_connection
with sftp_connection.open(self.RULE_PATH, mode='w') as file_handle:
file_handle.write(to_store)
|
cbrand/vpnchooser | src/vpnchooser/query/rules_query.py | RulesQuery._exec_command | python | def _exec_command(self, command: str):
stdin, stdout, stderr = self._ssh.exec_command(command)
# Clearing the buffers
stdout.read()
stderr.read()
stdin.close() | Executes the command and closes the handles
afterwards. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/query/rules_query.py#L71-L80 | null | class RulesQuery(object):
RULE_PATH = None
def __init__(self, connection: SSHClient):
self._ssh = connection
self._sftp = None
self._reset()
def _load_rules(self):
"""
Loads the rules from the SSH-Connection
"""
with self._sftp_connection.open(self.RULE_PATH) as file:
data = file.read()
lines = (
line.strip()
for line in data.split('\n')
)
rule_strings = (
line for line in lines
if len(line) > 0
)
rules = (
Rule.parse(rule_string)
for rule_string in rule_strings
)
self._rules = [
rule
for rule in rules
if rule is not None
]
def _reset(self):
"""
Resets the internal state to download
the data from the router of the currently
valid rules.
"""
self._rules = None
@property
def rules(self):
if self._rules is None:
self._load_rules()
return self._rules
@property
def _sftp_connection(self) -> SFTPClient:
if self._sftp is None:
self._sftp = self._ssh.open_sftp()
return self._sftp
def __del__(self):
try:
self._sftp.close()
except Exception:
pass
try:
self._ssh.close()
except Exception:
pass
def sync(self, rules: list):
"""
Synchronizes the given rules with the server
and ensures that there are no old rules active
which are not in the given list.
"""
self._reset()
old_rules = self.rules
to_delete_rules = [
rule for rule in old_rules
if rule not in rules
]
new_rules = [
rule for rule in rules
if rule not in old_rules
]
for new_rule in new_rules:
assert isinstance(new_rule, Rule)
self._exec_command(new_rule.add_command)
for to_delete_rule in to_delete_rules:
assert isinstance(to_delete_rule, Rule)
self._exec_command(
to_delete_rule.remove_command
)
self._update(rules)
def _update(self, rules: list):
"""
Updates the given rules and stores
them on the router.
"""
self._rules = rules
to_store = '\n'.join(
rule.config_string
for rule in rules
)
sftp_connection = self._sftp_connection
with sftp_connection.open(self.RULE_PATH, mode='w') as file_handle:
file_handle.write(to_store)
|
cbrand/vpnchooser | src/vpnchooser/query/rules_query.py | RulesQuery.sync | python | def sync(self, rules: list):
self._reset()
old_rules = self.rules
to_delete_rules = [
rule for rule in old_rules
if rule not in rules
]
new_rules = [
rule for rule in rules
if rule not in old_rules
]
for new_rule in new_rules:
assert isinstance(new_rule, Rule)
self._exec_command(new_rule.add_command)
for to_delete_rule in to_delete_rules:
assert isinstance(to_delete_rule, Rule)
self._exec_command(
to_delete_rule.remove_command
)
self._update(rules) | Synchronizes the given rules with the server
and ensures that there are no old rules active
which are not in the given list. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/query/rules_query.py#L82-L107 | [
"def _reset(self):\n \"\"\"\n Resets the internal state to download\n the data from the router of the currently\n valid rules.\n \"\"\"\n self._rules = None\n",
"def _exec_command(self, command: str):\n \"\"\"\n Executes the command and closes the handles\n afterwards.\n \"\"\"\n stdin, stdout, stderr = self._ssh.exec_command(command)\n # Clearing the buffers\n stdout.read()\n stderr.read()\n stdin.close()\n",
"def _update(self, rules: list):\n \"\"\"\n Updates the given rules and stores\n them on the router.\n \"\"\"\n self._rules = rules\n to_store = '\\n'.join(\n rule.config_string\n for rule in rules\n )\n sftp_connection = self._sftp_connection\n with sftp_connection.open(self.RULE_PATH, mode='w') as file_handle:\n file_handle.write(to_store)\n"
] | class RulesQuery(object):
RULE_PATH = None
def __init__(self, connection: SSHClient):
self._ssh = connection
self._sftp = None
self._reset()
def _load_rules(self):
"""
Loads the rules from the SSH-Connection
"""
with self._sftp_connection.open(self.RULE_PATH) as file:
data = file.read()
lines = (
line.strip()
for line in data.split('\n')
)
rule_strings = (
line for line in lines
if len(line) > 0
)
rules = (
Rule.parse(rule_string)
for rule_string in rule_strings
)
self._rules = [
rule
for rule in rules
if rule is not None
]
def _reset(self):
"""
Resets the internal state to download
the data from the router of the currently
valid rules.
"""
self._rules = None
@property
def rules(self):
if self._rules is None:
self._load_rules()
return self._rules
@property
def _sftp_connection(self) -> SFTPClient:
if self._sftp is None:
self._sftp = self._ssh.open_sftp()
return self._sftp
def __del__(self):
try:
self._sftp.close()
except Exception:
pass
try:
self._ssh.close()
except Exception:
pass
def _exec_command(self, command: str):
"""
Executes the command and closes the handles
afterwards.
"""
stdin, stdout, stderr = self._ssh.exec_command(command)
# Clearing the buffers
stdout.read()
stderr.read()
stdin.close()
def _update(self, rules: list):
"""
Updates the given rules and stores
them on the router.
"""
self._rules = rules
to_store = '\n'.join(
rule.config_string
for rule in rules
)
sftp_connection = self._sftp_connection
with sftp_connection.open(self.RULE_PATH, mode='w') as file_handle:
file_handle.write(to_store)
|
cbrand/vpnchooser | src/vpnchooser/query/rules_query.py | RulesQuery._update | python | def _update(self, rules: list):
self._rules = rules
to_store = '\n'.join(
rule.config_string
for rule in rules
)
sftp_connection = self._sftp_connection
with sftp_connection.open(self.RULE_PATH, mode='w') as file_handle:
file_handle.write(to_store) | Updates the given rules and stores
them on the router. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/query/rules_query.py#L109-L121 | null | class RulesQuery(object):
RULE_PATH = None
def __init__(self, connection: SSHClient):
self._ssh = connection
self._sftp = None
self._reset()
def _load_rules(self):
"""
Loads the rules from the SSH-Connection
"""
with self._sftp_connection.open(self.RULE_PATH) as file:
data = file.read()
lines = (
line.strip()
for line in data.split('\n')
)
rule_strings = (
line for line in lines
if len(line) > 0
)
rules = (
Rule.parse(rule_string)
for rule_string in rule_strings
)
self._rules = [
rule
for rule in rules
if rule is not None
]
def _reset(self):
"""
Resets the internal state to download
the data from the router of the currently
valid rules.
"""
self._rules = None
@property
def rules(self):
if self._rules is None:
self._load_rules()
return self._rules
@property
def _sftp_connection(self) -> SFTPClient:
if self._sftp is None:
self._sftp = self._ssh.open_sftp()
return self._sftp
def __del__(self):
try:
self._sftp.close()
except Exception:
pass
try:
self._ssh.close()
except Exception:
pass
def _exec_command(self, command: str):
"""
Executes the command and closes the handles
afterwards.
"""
stdin, stdout, stderr = self._ssh.exec_command(command)
# Clearing the buffers
stdout.read()
stderr.read()
stdin.close()
def sync(self, rules: list):
"""
Synchronizes the given rules with the server
and ensures that there are no old rules active
which are not in the given list.
"""
self._reset()
old_rules = self.rules
to_delete_rules = [
rule for rule in old_rules
if rule not in rules
]
new_rules = [
rule for rule in rules
if rule not in old_rules
]
for new_rule in new_rules:
assert isinstance(new_rule, Rule)
self._exec_command(new_rule.add_command)
for to_delete_rule in to_delete_rules:
assert isinstance(to_delete_rule, Rule)
self._exec_command(
to_delete_rule.remove_command
)
self._update(rules)
|
cbrand/vpnchooser | src/vpnchooser/cli/configuration_generator.py | ConfigurationGenerator.missing_host_key | python | def missing_host_key(self, client, hostname, key):
self.host_key = key.get_base64()
print("Fetched key is: %s" % self.host_key)
return | Called when an `.SSHClient` receives a server key for a server that
isn't in either the system or local `.HostKeys` object. To accept
the key, simply return. To reject, raised an exception (which will
be passed to the calling application). | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/cli/configuration_generator.py#L77-L86 | null | class ConfigurationGenerator(MissingHostKeyPolicy):
"""
Generates a config file from given input.
"""
def __init__(self, file_location):
self.file_location = file_location
def request_host(self):
self.host = input("Router SSH host: ")
def request_username(self):
self.username = input("Router SSH username: ") or "root"
def request_password(self):
self.password = getpass("Router SSH password: ")
def request_database(self):
print("(see http://docs.sqlalchemy.org/en/latest/core/engines.html )")
self.database = input("Database url (sqlite:///test.db): ")
def request_redis(self):
redis = input("Host and Port for redis (127.0.0.1:6379): ")
self.redis = redis or "127.0.0.1:6379"
def request_data(self):
self.request_host()
self.request_username()
self.request_password()
self.request_database()
self.request_redis()
def generate(self):
print("Generating Configuration file")
self.request_data()
print("Connecting host to get ssh host key and check your "
"credentials.")
self.verify()
self.write_config()
def write_config(self):
config_bytes = resource_string('vpnchooser', 'template.cfg')
config = config_bytes.decode('utf-8')
written_config = config % dict(
username=self.username,
password=self.password,
host=self.host,
host_key=self.host_key,
broker_url=self.redis,
database=self.database
)
with open(self.file_location, 'w') as f:
f.write(written_config)
def verify(self):
client = SSHClient()
client.set_missing_host_key_policy(self)
client.connect(
self.host,
username=self.username,
password=self.password,
)
|
cbrand/vpnchooser | src/vpnchooser/resources/vpn.py | VpnResource.put | python | def put(self, vpn_id: int) -> Vpn:
vpn = self._get_or_abort(vpn_id)
self.update(vpn)
session.commit()
return vpn | Updates the Vpn Resource with the
name. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/vpn.py#L88-L96 | null | class VpnResource(AbstractVpnResource):
"""
The resource to access a vpn resource.
"""
@staticmethod
def _get_by_name(vpn_id: int) -> Vpn:
return session.query(Vpn).filter(
Vpn.id == vpn_id
).first()
def _get_or_abort(self, vpn_id: int):
vpn = self._get_by_name(vpn_id)
if vpn is None:
abort(404)
else:
pass
return vpn
@require_login
@marshal_with(resource_fields)
def get(self, vpn_id: int) -> Vpn:
"""
Gets the VPN Resource.
"""
return self._get_or_abort(vpn_id)
@require_admin
@marshal_with(resource_fields)
@require_admin
def delete(self, vpn_id: int):
"""
Deletes the resource with the given name.
"""
vpn = self._get_or_abort(vpn_id)
session.delete(vpn)
session.commit()
return '', 204
|
cbrand/vpnchooser | src/vpnchooser/resources/vpn.py | VpnResource.delete | python | def delete(self, vpn_id: int):
vpn = self._get_or_abort(vpn_id)
session.delete(vpn)
session.commit()
return '', 204 | Deletes the resource with the given name. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/vpn.py#L99-L106 | null | class VpnResource(AbstractVpnResource):
"""
The resource to access a vpn resource.
"""
@staticmethod
def _get_by_name(vpn_id: int) -> Vpn:
return session.query(Vpn).filter(
Vpn.id == vpn_id
).first()
def _get_or_abort(self, vpn_id: int):
vpn = self._get_by_name(vpn_id)
if vpn is None:
abort(404)
else:
pass
return vpn
@require_login
@marshal_with(resource_fields)
def get(self, vpn_id: int) -> Vpn:
"""
Gets the VPN Resource.
"""
return self._get_or_abort(vpn_id)
@require_admin
@marshal_with(resource_fields)
def put(self, vpn_id: int) -> Vpn:
"""
Updates the Vpn Resource with the
name.
"""
vpn = self._get_or_abort(vpn_id)
self.update(vpn)
session.commit()
return vpn
@require_admin
|
cbrand/vpnchooser | src/vpnchooser/resources/vpn.py | VpnListResource.post | python | def post(self) -> Vpn:
vpn = Vpn()
session.add(vpn)
self.update(vpn)
session.flush()
session.commit()
return vpn, 201, {
'Location': url_for('vpn', vpn_id=vpn.id)
} | Creates the vpn with the given data. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/vpn.py#L121-L132 | null | class VpnListResource(AbstractVpnResource):
"""
List resource for the vpn.
"""
@require_login
@marshal_with(resource_fields)
def get(self):
return list(session.query(Vpn))
@require_admin
@marshal_with(resource_fields)
|
cbrand/vpnchooser | src/vpnchooser/helpers/parser.py | id_from_url | python | def id_from_url(url, param_name: str) -> int:
if url is None:
raise ValueError('url is none')
elif isinstance(url, int):
# Seems to already be the url.
return url
if not url:
raise ValueError('Seems to be empty')
try:
return int(url)
except ValueError:
pass
parsed = urlparse(url)
try:
resource_url = app.url_map.bind(parsed.netloc).match(
parsed.path
)
except NotFound:
raise ValueError('No URL found')
if param_name in resource_url[1]:
return resource_url[1][param_name]
else:
raise ValueError(
'Parameter {name} could not be extracted'.format(
name=param_name
)
) | Parses an object and tries to extract a url.
Tries to parse if a resource_url has been given
it as a url.
:raise ValueError: If no id could be extracted. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/helpers/parser.py#L9-L44 | null | # -*- encoding: utf-8 -*-
from urllib.parse import urlparse
from werkzeug.exceptions import NotFound
from vpnchooser.applicaton import app
|
cbrand/vpnchooser | src/vpnchooser/resources/user.py | UserResource.get | python | def get(self, user_name: str) -> User:
user = current_user()
if user.is_admin or user.name == user_name:
return self._get_or_abort(user_name)
else:
abort(403) | Gets the User Resource. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/user.py#L83-L91 | [
"def current_user() -> User:\n return getattr(g, 'user', None)\n"
] | class UserResource(AbstractUserResource):
"""
The resource to access a user resource.
"""
def _get_or_abort(self, user_name: str):
user = self._get_by_username(user_name)
if user is None:
abort(404)
else:
pass
return user
@require_login
@marshal_with(resource_fields)
@require_login
@marshal_with(resource_fields)
def put(self, user_name: str) -> User:
"""
Updates the User Resource with the
name.
"""
current = current_user()
if current.name == user_name or current.is_admin:
user = self._get_or_abort(user_name)
self.update(user)
session.commit()
session.add(user)
return user
else:
abort(403)
@require_admin
def delete(self, user_name: str):
"""
Deletes the resource with the given name.
"""
user = self._get_or_abort(user_name)
session.delete(user)
session.commit()
return '', 204
|
cbrand/vpnchooser | src/vpnchooser/resources/user.py | UserResource.put | python | def put(self, user_name: str) -> User:
current = current_user()
if current.name == user_name or current.is_admin:
user = self._get_or_abort(user_name)
self.update(user)
session.commit()
session.add(user)
return user
else:
abort(403) | Updates the User Resource with the
name. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/user.py#L95-L108 | [
"def current_user() -> User:\n return getattr(g, 'user', None)\n"
] | class UserResource(AbstractUserResource):
"""
The resource to access a user resource.
"""
def _get_or_abort(self, user_name: str):
user = self._get_by_username(user_name)
if user is None:
abort(404)
else:
pass
return user
@require_login
@marshal_with(resource_fields)
def get(self, user_name: str) -> User:
"""
Gets the User Resource.
"""
user = current_user()
if user.is_admin or user.name == user_name:
return self._get_or_abort(user_name)
else:
abort(403)
@require_login
@marshal_with(resource_fields)
@require_admin
def delete(self, user_name: str):
"""
Deletes the resource with the given name.
"""
user = self._get_or_abort(user_name)
session.delete(user)
session.commit()
return '', 204
|
cbrand/vpnchooser | src/vpnchooser/resources/user.py | UserResource.delete | python | def delete(self, user_name: str):
user = self._get_or_abort(user_name)
session.delete(user)
session.commit()
return '', 204 | Deletes the resource with the given name. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/resources/user.py#L111-L118 | null | class UserResource(AbstractUserResource):
"""
The resource to access a user resource.
"""
def _get_or_abort(self, user_name: str):
user = self._get_by_username(user_name)
if user is None:
abort(404)
else:
pass
return user
@require_login
@marshal_with(resource_fields)
def get(self, user_name: str) -> User:
"""
Gets the User Resource.
"""
user = current_user()
if user.is_admin or user.name == user_name:
return self._get_or_abort(user_name)
else:
abort(403)
@require_login
@marshal_with(resource_fields)
def put(self, user_name: str) -> User:
"""
Updates the User Resource with the
name.
"""
current = current_user()
if current.name == user_name or current.is_admin:
user = self._get_or_abort(user_name)
self.update(user)
session.commit()
session.add(user)
return user
else:
abort(403)
@require_admin
|
cbrand/vpnchooser | src/vpnchooser/helpers/decorators.py | require_login | python | def require_login(func):
@wraps(func)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth:
return authenticate()
user = session.query(User).filter(
User.name == auth.username
).first()
if user and user.check(auth.password):
g.user = user
return func(*args, **kwargs)
else:
return authenticate()
return decorated | Function wrapper to signalize that a login is required. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/helpers/decorators.py#L21-L41 | null | # -*- encoding: utf-8 -*-
from functools import wraps
from flask import request, Response, g
from vpnchooser.db import session, User
from .auth import current_user
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
# {'WWW-Authenticate': 'Basic realm="Login Required"'}
{}
)
def require_admin(func):
"""
Requires an admin user to access this resource.
"""
@wraps(func)
@require_login
def decorated(*args, **kwargs):
user = current_user()
if user and user.is_admin:
return func(*args, **kwargs)
else:
return Response(
'Forbidden', 403
)
return decorated
|
cbrand/vpnchooser | src/vpnchooser/helpers/decorators.py | require_admin | python | def require_admin(func):
@wraps(func)
@require_login
def decorated(*args, **kwargs):
user = current_user()
if user and user.is_admin:
return func(*args, **kwargs)
else:
return Response(
'Forbidden', 403
)
return decorated | Requires an admin user to access this resource. | train | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/helpers/decorators.py#L44-L60 | [
"def require_login(func):\n \"\"\"\n Function wrapper to signalize that a login is required.\n \"\"\"\n\n @wraps(func)\n def decorated(*args, **kwargs):\n auth = request.authorization\n if not auth:\n return authenticate()\n\n user = session.query(User).filter(\n User.name == auth.username\n ).first()\n if user and user.check(auth.password):\n g.user = user\n return func(*args, **kwargs)\n else:\n return authenticate()\n\n return decorated\n"
] | # -*- encoding: utf-8 -*-
from functools import wraps
from flask import request, Response, g
from vpnchooser.db import session, User
from .auth import current_user
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
# {'WWW-Authenticate': 'Basic realm="Login Required"'}
{}
)
def require_login(func):
"""
Function wrapper to signalize that a login is required.
"""
@wraps(func)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth:
return authenticate()
user = session.query(User).filter(
User.name == auth.username
).first()
if user and user.check(auth.password):
g.user = user
return func(*args, **kwargs)
else:
return authenticate()
return decorated
|
Cologler/fsoopify-python | fsoopify/paths.py | Path.from_caller_file | python | def from_caller_file():
'''return a `Path` from the path of caller file'''
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
filename = calframe[1].filename
if not os.path.isfile(filename):
raise RuntimeError('caller is not a file')
return Path(filename) | return a `Path` from the path of caller file | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/paths.py#L152-L160 | null | class Path(PathComponent):
join = staticmethod(_join)
def __new__(cls, value):
if not isinstance(value, str):
raise TypeError
if cls is Path:
if _is_abspath(value):
cls = _AbsPath
else:
cls = _RelPath
path = str.__new__(cls, value)
return path
def __init__(self, val):
super().__init__(val)
# sub attrs
self._dirname = None
self._name = None
@staticmethod
def from_cwd():
'''return a `Path` from `os.getcwd()`'''
return Path(os.getcwd())
@staticmethod
def from_home():
'''return a `Path` from `os.path.expanduser("~")`'''
return Path(os.path.expanduser("~"))
@staticmethod
def from_argv(index=0):
'''return a `Path` from `sys.argv`'''
return Path(sys.argv[index])
@staticmethod
@staticmethod
def from_caller_module_root():
'''return a `Path` from module root which include the caller'''
import inspect
all_stack = list(inspect.stack())
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
module = inspect.getmodule(calframe[1].frame)
if not module:
raise RuntimeError('caller is not a module')
root_module_name = module.__name__.partition('.')[0]
fullpath = sys.modules[root_module_name].__file__
return Path(fullpath)
def __repr__(self):
return 'Path(\'{}\')'.format(self)
def __truediv__(self, right: str):
if not isinstance(right, str):
raise TypeError
path = type(self)(os.path.join(self, right))
dn, fn = os.path.split(right)
if not dn:
path._dirname = self
path._name = Name(right)
return path
@property
def dirname(self):
'''
get directory component from path.
return `None` if no parent.
'''
self._init_dirname_attr()
return self._dirname
@property
def name(self) -> Name:
''' get name component from path. '''
self._init_dirname_attr()
return self._name
@property
def pure_name(self) -> PathComponent:
''' get name without ext from path. '''
return self.name.pure_name
@property
def ext(self) -> PathComponent:
''' get ext from path. '''
return self.name.ext
def replace_dirname(self, val):
if not isinstance(val, str):
raise TypeError
return Path(os.path.join(val, self.name))
def replace_name(self, val):
if not isinstance(val, str):
raise TypeError
return Path(os.path.join(self.dirname, val))
def replace_pure_name(self, val):
return Path(os.path.join(self.dirname, self.name.replace_pure_name(val)))
def replace_ext(self, val):
return Path(os.path.join(self.dirname, self.name.replace_ext(val)))
def as_file(self):
from .nodes import FileInfo
return FileInfo(self)
def as_dir(self):
from .nodes import DirectoryInfo
return DirectoryInfo(self)
def get_parent(self, level: int = 1):
if not isinstance(level, int):
raise TypeError
if level < 1:
raise ValueError('level must large then 1')
return self._get_parent(level)
def _get_parent(self, level: int):
raise NotImplementedError
def _init_dirname_attr(self):
raise NotImplementedError
def is_abspath(self):
raise NotImplementedError
def get_abspath(self):
raise NotImplementedError
|
Cologler/fsoopify-python | fsoopify/paths.py | Path.from_caller_module_root | python | def from_caller_module_root():
'''return a `Path` from module root which include the caller'''
import inspect
all_stack = list(inspect.stack())
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
module = inspect.getmodule(calframe[1].frame)
if not module:
raise RuntimeError('caller is not a module')
root_module_name = module.__name__.partition('.')[0]
fullpath = sys.modules[root_module_name].__file__
return Path(fullpath) | return a `Path` from module root which include the caller | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/paths.py#L163-L174 | null | class Path(PathComponent):
join = staticmethod(_join)
def __new__(cls, value):
if not isinstance(value, str):
raise TypeError
if cls is Path:
if _is_abspath(value):
cls = _AbsPath
else:
cls = _RelPath
path = str.__new__(cls, value)
return path
def __init__(self, val):
super().__init__(val)
# sub attrs
self._dirname = None
self._name = None
@staticmethod
def from_cwd():
'''return a `Path` from `os.getcwd()`'''
return Path(os.getcwd())
@staticmethod
def from_home():
'''return a `Path` from `os.path.expanduser("~")`'''
return Path(os.path.expanduser("~"))
@staticmethod
def from_argv(index=0):
'''return a `Path` from `sys.argv`'''
return Path(sys.argv[index])
@staticmethod
def from_caller_file():
'''return a `Path` from the path of caller file'''
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
filename = calframe[1].filename
if not os.path.isfile(filename):
raise RuntimeError('caller is not a file')
return Path(filename)
@staticmethod
def __repr__(self):
return 'Path(\'{}\')'.format(self)
def __truediv__(self, right: str):
if not isinstance(right, str):
raise TypeError
path = type(self)(os.path.join(self, right))
dn, fn = os.path.split(right)
if not dn:
path._dirname = self
path._name = Name(right)
return path
@property
def dirname(self):
'''
get directory component from path.
return `None` if no parent.
'''
self._init_dirname_attr()
return self._dirname
@property
def name(self) -> Name:
''' get name component from path. '''
self._init_dirname_attr()
return self._name
@property
def pure_name(self) -> PathComponent:
''' get name without ext from path. '''
return self.name.pure_name
@property
def ext(self) -> PathComponent:
''' get ext from path. '''
return self.name.ext
def replace_dirname(self, val):
if not isinstance(val, str):
raise TypeError
return Path(os.path.join(val, self.name))
def replace_name(self, val):
if not isinstance(val, str):
raise TypeError
return Path(os.path.join(self.dirname, val))
def replace_pure_name(self, val):
return Path(os.path.join(self.dirname, self.name.replace_pure_name(val)))
def replace_ext(self, val):
return Path(os.path.join(self.dirname, self.name.replace_ext(val)))
def as_file(self):
from .nodes import FileInfo
return FileInfo(self)
def as_dir(self):
from .nodes import DirectoryInfo
return DirectoryInfo(self)
def get_parent(self, level: int = 1):
if not isinstance(level, int):
raise TypeError
if level < 1:
raise ValueError('level must large then 1')
return self._get_parent(level)
def _get_parent(self, level: int):
raise NotImplementedError
def _init_dirname_attr(self):
raise NotImplementedError
def is_abspath(self):
raise NotImplementedError
def get_abspath(self):
raise NotImplementedError
|
Cologler/fsoopify-python | fsoopify/nodes.py | NodeInfo.rename | python | def rename(self, dest_path: str):
'''
use `os.rename()` to move the node.
'''
if not isinstance(dest_path, str):
raise TypeError
os.rename(self._path, dest_path)
self._path = Path(dest_path).get_abspath() | use `os.rename()` to move the node. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L43-L50 | null | class NodeInfo(ABC):
''' the abstract base class for file system node. '''
def __init__(self, path):
# path alwasys be abs
self._path: Path = Path(path).get_abspath()
def __str__(self):
return str(self._path)
def __repr__(self):
return '{}(\'{}\')'.format(type(self).__name__, self._path)
@property
def path(self) -> Path:
'''
return a Path object.
'''
return self._path
def get_parent(self, level=1):
'''
get parent dir as a `DirectoryInfo`.
return `None` if self is top.
'''
try:
parent_path = self.path.get_parent(level)
except ValueError: # abspath cannot get parent
return None
assert parent_path
return DirectoryInfo(parent_path)
@staticmethod
def from_path(path):
'''
create from path.
return `None` if path is not exists.
'''
if os.path.isdir(path):
return DirectoryInfo(path)
if os.path.isfile(path):
return FileInfo(path)
return None
@staticmethod
def from_cwd():
'''
get a `DirectoryInfo` by `os.getcwd()`
'''
return DirectoryInfo(os.getcwd())
@staticmethod
def from_argv0():
'''
get a `FileInfo` by `sys.argv[0]`
'''
return FileInfo(sys.argv[0])
# common methods
@property
@abstractmethod
def node_type(self):
raise NotImplementedError
def is_exists(self):
'''
get whether the node is exists on disk.
'''
return os.path.exists(self._path)
def is_directory(self):
'''
get whether the node is a exists directory.
'''
return False
def is_file(self):
'''
get whether the node is a exists file.
'''
return False
# abstract methods
@abstractmethod
def delete(self):
''' remove the node from disk. '''
raise NotImplementedError
@abstractmethod
def create_hardlink(self, dest_path: str):
''' create hardlink for the node. '''
raise NotImplementedError
|
Cologler/fsoopify-python | fsoopify/nodes.py | NodeInfo.get_parent | python | def get_parent(self, level=1):
'''
get parent dir as a `DirectoryInfo`.
return `None` if self is top.
'''
try:
parent_path = self.path.get_parent(level)
except ValueError: # abspath cannot get parent
return None
assert parent_path
return DirectoryInfo(parent_path) | get parent dir as a `DirectoryInfo`.
return `None` if self is top. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L52-L63 | null | class NodeInfo(ABC):
''' the abstract base class for file system node. '''
def __init__(self, path):
# path alwasys be abs
self._path: Path = Path(path).get_abspath()
def __str__(self):
return str(self._path)
def __repr__(self):
return '{}(\'{}\')'.format(type(self).__name__, self._path)
@property
def path(self) -> Path:
'''
return a Path object.
'''
return self._path
def rename(self, dest_path: str):
'''
use `os.rename()` to move the node.
'''
if not isinstance(dest_path, str):
raise TypeError
os.rename(self._path, dest_path)
self._path = Path(dest_path).get_abspath()
@staticmethod
def from_path(path):
'''
create from path.
return `None` if path is not exists.
'''
if os.path.isdir(path):
return DirectoryInfo(path)
if os.path.isfile(path):
return FileInfo(path)
return None
@staticmethod
def from_cwd():
'''
get a `DirectoryInfo` by `os.getcwd()`
'''
return DirectoryInfo(os.getcwd())
@staticmethod
def from_argv0():
'''
get a `FileInfo` by `sys.argv[0]`
'''
return FileInfo(sys.argv[0])
# common methods
@property
@abstractmethod
def node_type(self):
raise NotImplementedError
def is_exists(self):
'''
get whether the node is exists on disk.
'''
return os.path.exists(self._path)
def is_directory(self):
'''
get whether the node is a exists directory.
'''
return False
def is_file(self):
'''
get whether the node is a exists file.
'''
return False
# abstract methods
@abstractmethod
def delete(self):
''' remove the node from disk. '''
raise NotImplementedError
@abstractmethod
def create_hardlink(self, dest_path: str):
''' create hardlink for the node. '''
raise NotImplementedError
|
Cologler/fsoopify-python | fsoopify/nodes.py | NodeInfo.from_path | python | def from_path(path):
'''
create from path.
return `None` if path is not exists.
'''
if os.path.isdir(path):
return DirectoryInfo(path)
if os.path.isfile(path):
return FileInfo(path)
return None | create from path.
return `None` if path is not exists. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L66-L78 | null | class NodeInfo(ABC):
''' the abstract base class for file system node. '''
def __init__(self, path):
# path alwasys be abs
self._path: Path = Path(path).get_abspath()
def __str__(self):
return str(self._path)
def __repr__(self):
return '{}(\'{}\')'.format(type(self).__name__, self._path)
@property
def path(self) -> Path:
'''
return a Path object.
'''
return self._path
def rename(self, dest_path: str):
'''
use `os.rename()` to move the node.
'''
if not isinstance(dest_path, str):
raise TypeError
os.rename(self._path, dest_path)
self._path = Path(dest_path).get_abspath()
def get_parent(self, level=1):
'''
get parent dir as a `DirectoryInfo`.
return `None` if self is top.
'''
try:
parent_path = self.path.get_parent(level)
except ValueError: # abspath cannot get parent
return None
assert parent_path
return DirectoryInfo(parent_path)
@staticmethod
@staticmethod
def from_cwd():
'''
get a `DirectoryInfo` by `os.getcwd()`
'''
return DirectoryInfo(os.getcwd())
@staticmethod
def from_argv0():
'''
get a `FileInfo` by `sys.argv[0]`
'''
return FileInfo(sys.argv[0])
# common methods
@property
@abstractmethod
def node_type(self):
raise NotImplementedError
def is_exists(self):
'''
get whether the node is exists on disk.
'''
return os.path.exists(self._path)
def is_directory(self):
'''
get whether the node is a exists directory.
'''
return False
def is_file(self):
'''
get whether the node is a exists file.
'''
return False
# abstract methods
@abstractmethod
def delete(self):
''' remove the node from disk. '''
raise NotImplementedError
@abstractmethod
def create_hardlink(self, dest_path: str):
''' create hardlink for the node. '''
raise NotImplementedError
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.open | python | def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd) | open the file. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L134-L141 | null | class FileInfo(NodeInfo):
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data)
def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read()
def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding)
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode)
def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer)
def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read()
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs)
def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs)
# hash system
def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms)
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.write | python | def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data) | write data into the file. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L148-L153 | [
"def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):\n ''' open the file. '''\n return open(self._path,\n mode=mode,\n buffering=buffering,\n encoding=encoding,\n newline=newline,\n closefd=closefd)\n"
] | class FileInfo(NodeInfo):
def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd)
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read()
def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding)
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode)
def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer)
def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read()
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs)
def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs)
# hash system
def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms)
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.read | python | def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read() | read data from the file. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L155-L158 | [
"def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):\n ''' open the file. '''\n return open(self._path,\n mode=mode,\n buffering=buffering,\n encoding=encoding,\n newline=newline,\n closefd=closefd)\n"
] | class FileInfo(NodeInfo):
def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd)
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data)
def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding)
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode)
def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer)
def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read()
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs)
def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs)
# hash system
def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms)
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.write_text | python | def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding) | write text into the file. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L160-L163 | [
"def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):\n ''' write data into the file. '''\n if mode is None:\n mode = 'w' if isinstance(data, str) else 'wb'\n with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:\n return fp.write(data)\n"
] | class FileInfo(NodeInfo):
def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd)
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data)
def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read()
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode)
def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer)
def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read()
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs)
def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs)
# hash system
def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms)
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.write_bytes | python | def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode) | write bytes into the file. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L165-L168 | [
"def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):\n ''' write data into the file. '''\n if mode is None:\n mode = 'w' if isinstance(data, str) else 'wb'\n with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:\n return fp.write(data)\n"
] | class FileInfo(NodeInfo):
def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd)
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data)
def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read()
def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding)
def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer)
def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read()
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs)
def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs)
# hash system
def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms)
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.copy_to | python | def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer) | copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L170-L191 | null | class FileInfo(NodeInfo):
def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd)
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data)
def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read()
def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding)
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode)
def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read()
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs)
def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs)
# hash system
def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms)
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.read_text | python | def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read() | read all text into memory. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L193-L196 | [
"def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):\n ''' open the file. '''\n return open(self._path,\n mode=mode,\n buffering=buffering,\n encoding=encoding,\n newline=newline,\n closefd=closefd)\n"
] | class FileInfo(NodeInfo):
def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd)
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data)
def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read()
def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding)
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode)
def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer)
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs)
def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs)
# hash system
def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms)
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.load | python | def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs) | deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L228-L238 | [
"def load(file_info, format=None, *, kwargs={}):\n if format is None:\n format = _detect_format(file_info)\n serializer = _load_serializer(format)\n try:\n return serializer.load(file_info, kwargs)\n except Exception as err:\n raise SerializeError(err)\n"
] | class FileInfo(NodeInfo):
def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd)
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data)
def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read()
def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding)
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode)
def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer)
def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read()
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs)
# hash system
def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms)
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.dump | python | def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs) | serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L240-L247 | [
"def dump(file_info, obj, format=None, *, kwargs={}):\n if format is None:\n format = _detect_format(file_info)\n serializer = _load_serializer(format)\n try:\n return serializer.dump(file_info, obj, kwargs)\n except NotImplementedError:\n raise\n except Exception as err:\n raise SerializeError(err)\n"
] | class FileInfo(NodeInfo):
def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd)
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data)
def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read()
def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding)
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode)
def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer)
def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read()
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs)
# hash system
def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms)
|
Cologler/fsoopify-python | fsoopify/nodes.py | FileInfo.get_file_hash | python | def get_file_hash(self, *algorithms: str):
'''
get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')`
'''
from .hashs import hashfile_hexdigest
return hashfile_hexdigest(self._path, algorithms) | get lower case hash of file.
return value is a tuple, you may need to unpack it.
for example: `get_file_hash('md5', 'sha1')` return `('XXXX1', 'XXXX2')` | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L251-L260 | [
"def hashfile_hexdigest(path: str, algorithms: tuple, *, blocksize=1024 * 64):\n for algorithm in algorithms:\n if not algorithm in ALGORITHMS:\n raise ValueError(f'unsupport algorithm: {algorithm}')\n ms = [_create(x) for x in algorithms]\n with open(path, 'rb') as stream:\n while True:\n buffer = stream.read(blocksize)\n if not buffer:\n break\n for m in ms:\n m.update(buffer)\n return tuple(m.hexdigest() for m in ms)\n"
] | class FileInfo(NodeInfo):
def open(self, mode='r', *, buffering=-1, encoding=None, newline=None, closefd=True):
''' open the file. '''
return open(self._path,
mode=mode,
buffering=buffering,
encoding=encoding,
newline=newline,
closefd=closefd)
@property
def size(self):
''' get file size. '''
return Size(os.path.getsize(self.path))
def write(self, data, *, mode=None, buffering=-1, encoding=None, newline=None):
''' write data into the file. '''
if mode is None:
mode = 'w' if isinstance(data, str) else 'wb'
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.write(data)
def read(self, mode='r', *, buffering=-1, encoding=None, newline=None):
''' read data from the file. '''
with self.open(mode=mode, buffering=buffering, encoding=encoding, newline=newline) as fp:
return fp.read()
def write_text(self, text: str, *, encoding='utf-8', append=True):
''' write text into the file. '''
mode = 'a' if append else 'w'
return self.write(text, mode=mode, encoding=encoding)
def write_bytes(self, data: bytes, *, append=True):
''' write bytes into the file. '''
mode = 'ab' if append else 'wb'
return self.write(data, mode=mode)
def copy_to(self, dest, buffering: int = -1):
'''
copy the file to dest path.
`dest` canbe `str`, `FileInfo` or `DirectoryInfo`.
if `dest` is `DirectoryInfo`, that mean copy into the dir with same name.
'''
if isinstance(dest, str):
dest_path = dest
elif isinstance(dest, FileInfo):
dest_path = dest.path
elif isinstance(dest, DirectoryInfo):
dest_path = dest.path / self.path.name
else:
raise TypeError('dest is not one of `str`, `FileInfo`, `DirectoryInfo`')
with open(self._path, 'rb', buffering=buffering) as source:
# use x mode to ensure dest does not exists.
with open(dest_path, 'xb') as dest_file:
for buffer in source:
dest_file.write(buffer)
def read_text(self, encoding='utf-8') -> str:
''' read all text into memory. '''
with self.open('r', encoding=encoding) as fp:
return fp.read()
def read_bytes(self) -> bytes:
''' read all bytes into memory. '''
with self.open('rb') as fp:
return fp.read()
# override common methods
@property
def node_type(self):
return NodeType.file
def is_exists(self) -> bool:
return self.is_file()
def is_file(self) -> bool:
''' check if this is a exists file. '''
return os.path.isfile(self._path)
# override @abstractmethod
def delete(self):
''' remove the file from disk. '''
os.remove(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the file. '''
os.link(self._path, dest_path)
# load/dump system.
def load(self, format=None, *, kwargs={}):
'''
deserialize object from the file.
auto detect format by file extension name if `format` is None.
for example, `.json` will detect as `json`.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return load(self, format=format, kwargs=kwargs)
def dump(self, obj, format=None, *, kwargs={}):
'''
serialize the `obj` into file.
* raise `FormatNotFoundError` on unknown format.
* raise `SerializeError` on any serialize exceptions.
'''
return dump(self, obj, format=format, kwargs=kwargs)
# hash system
|
Cologler/fsoopify-python | fsoopify/nodes.py | DirectoryInfo.iter_items | python | def iter_items(self, depth: int = 1):
'''
get items from directory.
'''
if depth is not None and not isinstance(depth, int):
raise TypeError
def itor(root, d):
if d is not None:
d -= 1
if d < 0:
return
for name in os.listdir(root):
path = os.path.join(root, name)
node = NodeInfo.from_path(path)
yield node
if isinstance(node, DirectoryInfo):
yield from itor(path, d)
yield from itor(self._path, depth) | get items from directory. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L274-L291 | [
"def itor(root, d):\n if d is not None:\n d -= 1\n if d < 0:\n return\n for name in os.listdir(root):\n path = os.path.join(root, name)\n node = NodeInfo.from_path(path)\n yield node\n if isinstance(node, DirectoryInfo):\n yield from itor(path, d)\n"
] | class DirectoryInfo(NodeInfo):
def create(self):
''' create directory. '''
os.mkdir(self.path)
def ensure_created(self):
''' ensure the directory was created. '''
if not self.is_directory():
self.create()
def list_items(self, depth: int = 1):
'''
get items from directory.
'''
return list(self.iter_items(depth))
def has_file(self, name: str):
'''
check whether this directory contains the file.
'''
return os.path.isfile(self._path / name)
def has_directory(self, name: str):
'''
check whether this directory contains the directory.
'''
return os.path.isdir(self._path / name)
def get_fileinfo(self, name: str):
'''
get a `FileInfo` for a file (without create actual file).
'''
return FileInfo(os.path.join(self._path, name))
def get_dirinfo(self, name: str):
'''
get a `DirectoryInfo` for a directory (without create actual directory).
'''
return DirectoryInfo(os.path.join(self._path, name))
def create_file(self, name: str, generate_unique_name: bool = False):
'''
create a `FileInfo` for a new file.
if the file was exists, and `generate_unique_name` if `False`, raise `FileExistsError`.
the op does mean the file is created on disk.
'''
def enumerate_name():
yield name
index = 0
while True:
index += 1
yield f'{name} ({index})'
for n in enumerate_name():
path = os.path.join(self._path, n)
if os.path.exists(path):
if not generate_unique_name:
raise FileExistsError
return FileInfo(path)
create_fileinfo = create_file # keep old name
# override common methods
@property
def node_type(self):
return NodeType.dir
def is_exists(self) -> bool:
return self.is_directory()
def is_directory(self) -> bool:
''' check if this is a exists directory. '''
return os.path.isdir(self._path)
# override @abstractmethod
def delete(self):
''' remove the directory from disk. '''
os.rmdir(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the directory (includes childs). '''
# self
dirinfo = DirectoryInfo(dest_path)
dirinfo.ensure_created()
# child
for item in self.list_items():
item.create_hardlink(os.path.join(dest_path, item.path.name))
|
Cologler/fsoopify-python | fsoopify/nodes.py | DirectoryInfo.has_file | python | def has_file(self, name: str):
'''
check whether this directory contains the file.
'''
return os.path.isfile(self._path / name) | check whether this directory contains the file. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L299-L303 | null | class DirectoryInfo(NodeInfo):
def create(self):
''' create directory. '''
os.mkdir(self.path)
def ensure_created(self):
''' ensure the directory was created. '''
if not self.is_directory():
self.create()
def iter_items(self, depth: int = 1):
'''
get items from directory.
'''
if depth is not None and not isinstance(depth, int):
raise TypeError
def itor(root, d):
if d is not None:
d -= 1
if d < 0:
return
for name in os.listdir(root):
path = os.path.join(root, name)
node = NodeInfo.from_path(path)
yield node
if isinstance(node, DirectoryInfo):
yield from itor(path, d)
yield from itor(self._path, depth)
def list_items(self, depth: int = 1):
'''
get items from directory.
'''
return list(self.iter_items(depth))
def has_directory(self, name: str):
'''
check whether this directory contains the directory.
'''
return os.path.isdir(self._path / name)
def get_fileinfo(self, name: str):
'''
get a `FileInfo` for a file (without create actual file).
'''
return FileInfo(os.path.join(self._path, name))
def get_dirinfo(self, name: str):
'''
get a `DirectoryInfo` for a directory (without create actual directory).
'''
return DirectoryInfo(os.path.join(self._path, name))
def create_file(self, name: str, generate_unique_name: bool = False):
'''
create a `FileInfo` for a new file.
if the file was exists, and `generate_unique_name` if `False`, raise `FileExistsError`.
the op does mean the file is created on disk.
'''
def enumerate_name():
yield name
index = 0
while True:
index += 1
yield f'{name} ({index})'
for n in enumerate_name():
path = os.path.join(self._path, n)
if os.path.exists(path):
if not generate_unique_name:
raise FileExistsError
return FileInfo(path)
create_fileinfo = create_file # keep old name
# override common methods
@property
def node_type(self):
return NodeType.dir
def is_exists(self) -> bool:
return self.is_directory()
def is_directory(self) -> bool:
''' check if this is a exists directory. '''
return os.path.isdir(self._path)
# override @abstractmethod
def delete(self):
''' remove the directory from disk. '''
os.rmdir(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the directory (includes childs). '''
# self
dirinfo = DirectoryInfo(dest_path)
dirinfo.ensure_created()
# child
for item in self.list_items():
item.create_hardlink(os.path.join(dest_path, item.path.name))
|
Cologler/fsoopify-python | fsoopify/nodes.py | DirectoryInfo.has_directory | python | def has_directory(self, name: str):
'''
check whether this directory contains the directory.
'''
return os.path.isdir(self._path / name) | check whether this directory contains the directory. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L305-L309 | null | class DirectoryInfo(NodeInfo):
def create(self):
''' create directory. '''
os.mkdir(self.path)
def ensure_created(self):
''' ensure the directory was created. '''
if not self.is_directory():
self.create()
def iter_items(self, depth: int = 1):
'''
get items from directory.
'''
if depth is not None and not isinstance(depth, int):
raise TypeError
def itor(root, d):
if d is not None:
d -= 1
if d < 0:
return
for name in os.listdir(root):
path = os.path.join(root, name)
node = NodeInfo.from_path(path)
yield node
if isinstance(node, DirectoryInfo):
yield from itor(path, d)
yield from itor(self._path, depth)
def list_items(self, depth: int = 1):
'''
get items from directory.
'''
return list(self.iter_items(depth))
def has_file(self, name: str):
'''
check whether this directory contains the file.
'''
return os.path.isfile(self._path / name)
def get_fileinfo(self, name: str):
'''
get a `FileInfo` for a file (without create actual file).
'''
return FileInfo(os.path.join(self._path, name))
def get_dirinfo(self, name: str):
'''
get a `DirectoryInfo` for a directory (without create actual directory).
'''
return DirectoryInfo(os.path.join(self._path, name))
def create_file(self, name: str, generate_unique_name: bool = False):
'''
create a `FileInfo` for a new file.
if the file was exists, and `generate_unique_name` if `False`, raise `FileExistsError`.
the op does mean the file is created on disk.
'''
def enumerate_name():
yield name
index = 0
while True:
index += 1
yield f'{name} ({index})'
for n in enumerate_name():
path = os.path.join(self._path, n)
if os.path.exists(path):
if not generate_unique_name:
raise FileExistsError
return FileInfo(path)
create_fileinfo = create_file # keep old name
# override common methods
@property
def node_type(self):
return NodeType.dir
def is_exists(self) -> bool:
return self.is_directory()
def is_directory(self) -> bool:
''' check if this is a exists directory. '''
return os.path.isdir(self._path)
# override @abstractmethod
def delete(self):
''' remove the directory from disk. '''
os.rmdir(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the directory (includes childs). '''
# self
dirinfo = DirectoryInfo(dest_path)
dirinfo.ensure_created()
# child
for item in self.list_items():
item.create_hardlink(os.path.join(dest_path, item.path.name))
|
Cologler/fsoopify-python | fsoopify/nodes.py | DirectoryInfo.get_fileinfo | python | def get_fileinfo(self, name: str):
'''
get a `FileInfo` for a file (without create actual file).
'''
return FileInfo(os.path.join(self._path, name)) | get a `FileInfo` for a file (without create actual file). | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L311-L315 | null | class DirectoryInfo(NodeInfo):
def create(self):
''' create directory. '''
os.mkdir(self.path)
def ensure_created(self):
''' ensure the directory was created. '''
if not self.is_directory():
self.create()
def iter_items(self, depth: int = 1):
'''
get items from directory.
'''
if depth is not None and not isinstance(depth, int):
raise TypeError
def itor(root, d):
if d is not None:
d -= 1
if d < 0:
return
for name in os.listdir(root):
path = os.path.join(root, name)
node = NodeInfo.from_path(path)
yield node
if isinstance(node, DirectoryInfo):
yield from itor(path, d)
yield from itor(self._path, depth)
def list_items(self, depth: int = 1):
'''
get items from directory.
'''
return list(self.iter_items(depth))
def has_file(self, name: str):
'''
check whether this directory contains the file.
'''
return os.path.isfile(self._path / name)
def has_directory(self, name: str):
'''
check whether this directory contains the directory.
'''
return os.path.isdir(self._path / name)
def get_dirinfo(self, name: str):
'''
get a `DirectoryInfo` for a directory (without create actual directory).
'''
return DirectoryInfo(os.path.join(self._path, name))
def create_file(self, name: str, generate_unique_name: bool = False):
'''
create a `FileInfo` for a new file.
if the file was exists, and `generate_unique_name` if `False`, raise `FileExistsError`.
the op does mean the file is created on disk.
'''
def enumerate_name():
yield name
index = 0
while True:
index += 1
yield f'{name} ({index})'
for n in enumerate_name():
path = os.path.join(self._path, n)
if os.path.exists(path):
if not generate_unique_name:
raise FileExistsError
return FileInfo(path)
create_fileinfo = create_file # keep old name
# override common methods
@property
def node_type(self):
return NodeType.dir
def is_exists(self) -> bool:
return self.is_directory()
def is_directory(self) -> bool:
''' check if this is a exists directory. '''
return os.path.isdir(self._path)
# override @abstractmethod
def delete(self):
''' remove the directory from disk. '''
os.rmdir(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the directory (includes childs). '''
# self
dirinfo = DirectoryInfo(dest_path)
dirinfo.ensure_created()
# child
for item in self.list_items():
item.create_hardlink(os.path.join(dest_path, item.path.name))
|
Cologler/fsoopify-python | fsoopify/nodes.py | DirectoryInfo.get_dirinfo | python | def get_dirinfo(self, name: str):
'''
get a `DirectoryInfo` for a directory (without create actual directory).
'''
return DirectoryInfo(os.path.join(self._path, name)) | get a `DirectoryInfo` for a directory (without create actual directory). | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L317-L321 | null | class DirectoryInfo(NodeInfo):
def create(self):
''' create directory. '''
os.mkdir(self.path)
def ensure_created(self):
''' ensure the directory was created. '''
if not self.is_directory():
self.create()
def iter_items(self, depth: int = 1):
'''
get items from directory.
'''
if depth is not None and not isinstance(depth, int):
raise TypeError
def itor(root, d):
if d is not None:
d -= 1
if d < 0:
return
for name in os.listdir(root):
path = os.path.join(root, name)
node = NodeInfo.from_path(path)
yield node
if isinstance(node, DirectoryInfo):
yield from itor(path, d)
yield from itor(self._path, depth)
def list_items(self, depth: int = 1):
'''
get items from directory.
'''
return list(self.iter_items(depth))
def has_file(self, name: str):
'''
check whether this directory contains the file.
'''
return os.path.isfile(self._path / name)
def has_directory(self, name: str):
'''
check whether this directory contains the directory.
'''
return os.path.isdir(self._path / name)
def get_fileinfo(self, name: str):
'''
get a `FileInfo` for a file (without create actual file).
'''
return FileInfo(os.path.join(self._path, name))
def create_file(self, name: str, generate_unique_name: bool = False):
'''
create a `FileInfo` for a new file.
if the file was exists, and `generate_unique_name` if `False`, raise `FileExistsError`.
the op does mean the file is created on disk.
'''
def enumerate_name():
yield name
index = 0
while True:
index += 1
yield f'{name} ({index})'
for n in enumerate_name():
path = os.path.join(self._path, n)
if os.path.exists(path):
if not generate_unique_name:
raise FileExistsError
return FileInfo(path)
create_fileinfo = create_file # keep old name
# override common methods
@property
def node_type(self):
return NodeType.dir
def is_exists(self) -> bool:
return self.is_directory()
def is_directory(self) -> bool:
''' check if this is a exists directory. '''
return os.path.isdir(self._path)
# override @abstractmethod
def delete(self):
''' remove the directory from disk. '''
os.rmdir(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the directory (includes childs). '''
# self
dirinfo = DirectoryInfo(dest_path)
dirinfo.ensure_created()
# child
for item in self.list_items():
item.create_hardlink(os.path.join(dest_path, item.path.name))
|
Cologler/fsoopify-python | fsoopify/nodes.py | DirectoryInfo.create_file | python | def create_file(self, name: str, generate_unique_name: bool = False):
'''
create a `FileInfo` for a new file.
if the file was exists, and `generate_unique_name` if `False`, raise `FileExistsError`.
the op does mean the file is created on disk.
'''
def enumerate_name():
yield name
index = 0
while True:
index += 1
yield f'{name} ({index})'
for n in enumerate_name():
path = os.path.join(self._path, n)
if os.path.exists(path):
if not generate_unique_name:
raise FileExistsError
return FileInfo(path) | create a `FileInfo` for a new file.
if the file was exists, and `generate_unique_name` if `False`, raise `FileExistsError`.
the op does mean the file is created on disk. | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L323-L342 | [
"def enumerate_name():\n yield name\n index = 0\n while True:\n index += 1\n yield f'{name} ({index})'\n"
] | class DirectoryInfo(NodeInfo):
def create(self):
''' create directory. '''
os.mkdir(self.path)
def ensure_created(self):
''' ensure the directory was created. '''
if not self.is_directory():
self.create()
def iter_items(self, depth: int = 1):
'''
get items from directory.
'''
if depth is not None and not isinstance(depth, int):
raise TypeError
def itor(root, d):
if d is not None:
d -= 1
if d < 0:
return
for name in os.listdir(root):
path = os.path.join(root, name)
node = NodeInfo.from_path(path)
yield node
if isinstance(node, DirectoryInfo):
yield from itor(path, d)
yield from itor(self._path, depth)
def list_items(self, depth: int = 1):
'''
get items from directory.
'''
return list(self.iter_items(depth))
def has_file(self, name: str):
'''
check whether this directory contains the file.
'''
return os.path.isfile(self._path / name)
def has_directory(self, name: str):
'''
check whether this directory contains the directory.
'''
return os.path.isdir(self._path / name)
def get_fileinfo(self, name: str):
'''
get a `FileInfo` for a file (without create actual file).
'''
return FileInfo(os.path.join(self._path, name))
def get_dirinfo(self, name: str):
'''
get a `DirectoryInfo` for a directory (without create actual directory).
'''
return DirectoryInfo(os.path.join(self._path, name))
create_fileinfo = create_file # keep old name
# override common methods
@property
def node_type(self):
return NodeType.dir
def is_exists(self) -> bool:
return self.is_directory()
def is_directory(self) -> bool:
''' check if this is a exists directory. '''
return os.path.isdir(self._path)
# override @abstractmethod
def delete(self):
''' remove the directory from disk. '''
os.rmdir(self._path)
def create_hardlink(self, dest_path: str):
''' create hardlink for the directory (includes childs). '''
# self
dirinfo = DirectoryInfo(dest_path)
dirinfo.ensure_created()
# child
for item in self.list_items():
item.create_hardlink(os.path.join(dest_path, item.path.name))
|
Cologler/fsoopify-python | fsoopify/nodes.py | DirectoryInfo.create_hardlink | python | def create_hardlink(self, dest_path: str):
''' create hardlink for the directory (includes childs). '''
# self
dirinfo = DirectoryInfo(dest_path)
dirinfo.ensure_created()
# child
for item in self.list_items():
item.create_hardlink(os.path.join(dest_path, item.path.name)) | create hardlink for the directory (includes childs). | train | https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L365-L374 | [
"def ensure_created(self):\n ''' ensure the directory was created. '''\n if not self.is_directory():\n self.create()\n",
"def list_items(self, depth: int = 1):\n '''\n get items from directory.\n '''\n return list(self.iter_items(depth))\n"
] | class DirectoryInfo(NodeInfo):
def create(self):
''' create directory. '''
os.mkdir(self.path)
def ensure_created(self):
''' ensure the directory was created. '''
if not self.is_directory():
self.create()
def iter_items(self, depth: int = 1):
'''
get items from directory.
'''
if depth is not None and not isinstance(depth, int):
raise TypeError
def itor(root, d):
if d is not None:
d -= 1
if d < 0:
return
for name in os.listdir(root):
path = os.path.join(root, name)
node = NodeInfo.from_path(path)
yield node
if isinstance(node, DirectoryInfo):
yield from itor(path, d)
yield from itor(self._path, depth)
def list_items(self, depth: int = 1):
'''
get items from directory.
'''
return list(self.iter_items(depth))
def has_file(self, name: str):
'''
check whether this directory contains the file.
'''
return os.path.isfile(self._path / name)
def has_directory(self, name: str):
'''
check whether this directory contains the directory.
'''
return os.path.isdir(self._path / name)
def get_fileinfo(self, name: str):
'''
get a `FileInfo` for a file (without create actual file).
'''
return FileInfo(os.path.join(self._path, name))
def get_dirinfo(self, name: str):
'''
get a `DirectoryInfo` for a directory (without create actual directory).
'''
return DirectoryInfo(os.path.join(self._path, name))
def create_file(self, name: str, generate_unique_name: bool = False):
'''
create a `FileInfo` for a new file.
if the file was exists, and `generate_unique_name` if `False`, raise `FileExistsError`.
the op does mean the file is created on disk.
'''
def enumerate_name():
yield name
index = 0
while True:
index += 1
yield f'{name} ({index})'
for n in enumerate_name():
path = os.path.join(self._path, n)
if os.path.exists(path):
if not generate_unique_name:
raise FileExistsError
return FileInfo(path)
create_fileinfo = create_file # keep old name
# override common methods
@property
def node_type(self):
return NodeType.dir
def is_exists(self) -> bool:
return self.is_directory()
def is_directory(self) -> bool:
''' check if this is a exists directory. '''
return os.path.isdir(self._path)
# override @abstractmethod
def delete(self):
''' remove the directory from disk. '''
os.rmdir(self._path)
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/__init__.py | get_driver | python | def get_driver(driver='ASCII_RS232', *args, **keywords):
if driver.upper() == 'ASCII_RS232':
return drivers.ASCII_RS232(*args, **keywords)
else:
raise NotImplementedError('Driver not supported: '
+ str(driver)) | Gets a driver for a Parker Motion Gemini drive.
Gets and connects a particular driver in ``drivers`` to a Parker
Motion Gemini GV-6 or GT-6 servo/stepper motor drive.
The only driver currently supported is the ``'ASCII_RS232'`` driver
which corresponds to ``drivers.ASCII_RS232``.
Parameters
----------
driver : str, optional
The driver to communicate to the particular driver with, which
includes the hardware connection and possibly the communications
protocol. The only driver currently supported is the
``'ASCII_RS232'`` driver which corresponds to
``drivers.ASCII_RS232``.
*args : additional positional arguments
Additional positional arguments to pass onto the constructor for
the driver.
**keywords : additional keyword arguments
Additional keyword arguments to pass onto the constructor for
the driver.
Returns
-------
drivers : drivers
The connected drivers class that is connected to the drive.
Raises
------
NotImplementedError
If the `driver` is not supported.
See Also
--------
drivers
drivers.ASCII_RS232 | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L41-L85 | null | # Copyright 2014-2016 Freja Nordsiek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the GeminiMotorDrive package, a Python package for controlling a
Parker Hannifin Gemini GV-6 and GT-6 servo and stepper motor drives.
Version 0.2
"""
__version__ = "0.2"
import math
import copy
import re
from . import drivers, utilities
class GeminiError(IOError):
"""Base exception class for this module."""
pass
class CommandError(GeminiError):
"""Exception executing a command."""
pass
class GeminiG6(object):
""" Controller for a Parker Motion Gemini GV-6 or GT-6.
An object to connect to and control a Parker Motion Gemini GV-6 or
GT-6 servo/stepper motor drive already connected to with a
particular `driver`.
Parameters
----------
driver : driver
Connected instance of a class in ``drivers``. Use ``get_driver``
to load one. Is stored in the attribute ``driver``.
Raises
------
GeminiError
If the attached device is not a Gemini GV-6 or GT-6.
Attributes
----------
driver : driver
Driver for communicating to the drive.
energized : bool
denergize_on_kill : bool
encoder_resolution : int
electrical_pitch : float
max_velocity : float
motion_commanded : bool
See Also
--------
get_driver
"""
def __init__(self, driver):
#: Driver for communicating to the drive.
#:
#: driver
#:
#: A class from ``GeminiMotorDriver.drivers``. Can be loaded
#: using ``get_driver``.
#:
#: See Also
#: --------
#: get_driver
self.driver = driver
# Make sure that it is indeed a GV/T6, and throw an exception
# otherwise. It should respond to the 'TREV' command with 'TREV'
# echoed and '*TREV-GV6-L3E_D1.50_F1.00' where everything after
# the 'GV6' (possibly replaced with a 'GT6') part is model
# dependent.
response = self.driver.send_command('TREV', timeout=1.0,
immediate=True)
if re.search('^!TREV\r\\*TREV-G[VT]{1}6', response[1]) is None:
raise GeminiError('Not a valid Gemini GV-6 or GT-6 device.')
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError('Couldn''t retrieve parameter '
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str)
def _set_parameter(self, name, value, tp, timeout=1.0,
max_retries=2):
""" Sets the specified drive parameter.
Sets a parameter on the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to set. It is always the command to
set it when followed by the value.
value : bool, int, or float
Value to set the parameter to.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the last attempt to set the parameter was successful
(``True``) or not (``False`` meaning it had an error).
See Also
--------
_get_parameter : Get a parameter.
"""
# Return False if tp isn't one of the valid types.
if tp not in (bool, int, float):
return False
# Convert value to the string that the drive will expect. value
# must first be converted to the proper type before getting
# converted to str in the usual fasion. As bools need to be a
# '1' or a '0', it must be converted to int before going through
# str.
if tp == bool:
value_str = str(int(bool(value)))
elif tp == int:
value_str = str(int(value))
elif tp == float:
value_str = str(float(value))
# Immediately set the named parameter of the drive. The command
# is just the parameter name followed by the value string.
response = self.driver.send_command(name+value_str, \
timeout=timeout, immediate=True, max_retries=max_retries)
# Return whether the setting was successful or not.
return not self.driver.command_error(response)
def pause(self, max_retries=0):
""" Pauses the drive (execution of commands).
Causes the drive to pause execution of commands till it is
unpaused. Commands will be queued until it is unpaused. Motion
is not stopped.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to pause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last pause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
unpause : Unpause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('PS', timeout=1.0,
immediate=True, max_retries=max_retries)))
def unpause(self, max_retries=0):
""" Unpauses the drive.
Unpauses the drive. Commands queued while it is paused will then
be executed.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to unpause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last unpause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
pause : Pause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('C',
timeout=1.0, immediate=True, max_retries=max_retries)))
def stop(self, max_retries=0):
    """ Stops motion.

    The drive stops the motor.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to stop the motor in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last stop command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is '!S1'.

    """
    # Send the stop command ('S1') immediately and report success as
    # the absence of a command error. (The parameter description
    # previously said "kill the drive" -- a copy-paste slip; this
    # method only stops motion.)
    response = self.driver.send_command('S1', timeout=1.0,
                                        immediate=True,
                                        max_retries=max_retries)
    return not self.driver.command_error(response)
def kill(self, max_retries=0):
    """ Kills the drive.

    The drive stops the motor and any running program. Whether the
    motor de-energizes afterwards depends on the state of
    ``denergize_on_kill``.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to kill the drive in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last kill command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is '!K'.

    See Also
    --------
    denergize_on_kill : Controls whether the motor de-energizes
        after the drive is killed or not.

    """
    # Immediately send 'K'; success is the absence of a command
    # error in the drive's response.
    rsp = self.driver.send_command('K', timeout=1.0, immediate=True,
                                   max_retries=max_retries)
    return not self.driver.command_error(rsp)
def reset(self, max_retries=0):
    """ Resets the drive.

    Resets the drive, which is equivalent to a power cycling.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to reset the drive in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last reset command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is '!RESET'.

    """
    # A reset is equivalent to a power cycle, so a generous 10 s
    # timeout is used instead of the usual 1 s.
    rsp = self.driver.send_command('RESET', timeout=10.0,
                                   immediate=True,
                                   max_retries=max_retries)
    return not self.driver.command_error(rsp)
def get_program(self, n, timeout=2.0, max_retries=2):
    """ Get a program from the drive.

    Gets program 'n' from the drive and returns its commands.

    Parameters
    ----------
    n : int
        Which program to get.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.

    Returns
    -------
    commands : list of str
        ``list`` of ``str`` commands making up the program. The
        trailing 'END' is removed. Empty if there was an error.

    Notes
    -----
    The command sent to the drive is '!TPROG PROGn'.

    See Also
    --------
    set_program_profile : Sets a program or profile.
    run_program_profile : Runs a program or profile.

    """
    # Ask the drive to transfer program n back to us.
    rsp = self.driver.send_command('TPROG PROG' + str(int(n)),
                                   timeout=timeout, immediate=True,
                                   max_retries=max_retries)
    # An error or an empty body means nothing usable came back.
    if self.driver.command_error(rsp) or not rsp[4]:
        return []
    # Drop the 'END' terminator (echoed back as '*END') if present,
    # then strip the leading '*' that prefixes each echoed line.
    lines = rsp[4]
    if '*END' in lines:
        lines.remove('*END')
    return [ln[1:] for ln in lines]
def set_program_profile(self, n, commands,
program_or_profile='program',
timeout=1.0, max_retries=0):
""" Sets a program/profile on the drive.
Sets program or profile 'n' on the drive to the sequence of
commands in 'commands'. If the existing program is identical, it
is not overwritten (can't check this for a profile). Returns
whether the program or profile was successfully set or not (if
the existing one is identical, it is considered a success).
Parameters
----------
n : int
Which program to set.
commands : list or tuple of strings
``list`` or ``tuple`` of commands to send to the drive. Each
command must be a string.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the program or profile was successfully set or not
(an identical program already existing on the drive is
considered a success).
Notes
-----
'commands' gets wrapped between ['DEL PROGn', 'DEF PROGn'] and
'END' or the equivalent profile ones.
See Also
--------
get_program : Gets a program.
run_program_profile : Runs a program or profile.
"""
# Grab the n'th program on the drive and strip commands. If we
# are doing a profile, None will be used as a placeholder.
if program_or_profile != 'profile':
current_program = self.get_program(n, timeout=timeout, \
max_retries=max_retries+2)
else:
current_program = None
# utilities.strip_commands is defined elsewhere in this module;
# it presumably normalizes/strips the raw command strings --
# verify its exact behavior there.
stripped_commands = utilities.strip_commands(commands)
# If the two are identical and we are doing a program, then
# nothing needs to be done and the program is already set
# (return True). Otherwise, it needs to be overwritten. If there
# were no errors on the last command, then it was written
# successfully. Otherwise, the program or profile needs to be
# terminated and then deleted.
if current_program is not None \
and current_program == stripped_commands:
return True
else:
# Construct the End Of Responses for each command that will
# be sent. They are '\n' for deletion and ending, but are
# '\n- ' for the rest.
# (2 header commands + the commands + 'END' gives len+3
# commands, matching the 1 + (1+len) + 1 EORs built here.)
eor = ['\n'] + (['\n- '] * (1 + len(stripped_commands))) \
+ ['\n']
# The commands consist of a header that tells which program
# or profile to set, the stripped commands, followed by an
# 'END'.
if program_or_profile != 'profile':
header = ['DEL PROG'+str(int(n)),
'DEF PROG'+str(int(n))]
else:
header = ['DEL PROF'+str(int(n)),
'DEF PROF'+str(int(n))]
responses = self.driver.send_commands(\
header + stripped_commands + ['END'], \
timeout=timeout, max_retries=max_retries, eor=eor)
# Check to see if it was set successfully. If it was (the
# last command had no errors), return True. Otherwise, the
# program or profile needs to be ended and deleted before
# returning False.
if not self.driver.command_error(responses[-1]):
return True
else:
# Clean up: close the half-written definition with 'END'
# and then delete it so the drive is not left with a
# partial program/profile.
if program_or_profile != 'profile':
cmds = ['END', 'DEL PROG'+str(int(n))]
else:
cmds = ['END', 'DEL PROF'+str(int(n))]
self.driver.send_commands(cmds, timeout=timeout,
max_retries=max_retries+2)
return False
def run_program_profile(self, n, program_or_profile='program',
                        timeout=10.0):
    """ Runs a program/profile on the drive.

    Runs program or profile 'n' on the drive, grabs its output, and
    processes the output. The response from the drive is broken
    down into the echoed command (drive echoes it back), any error
    returned by the drive (leading '*' is stripped), and the
    different lines of the response; which are all returned.

    It is **VERY IMPORTANT** that 'timeout' is long enough for the
    program to run if all the output from the drive is to be
    collected.

    Parameters
    ----------
    n : int
        Which program to get.
    program_or_profile : {'program', 'profile'}, optional
        Whether to run a program or a profile. Anything other than
        these two values implies the default.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response for running a program (set to 1.0 for a profile
        regardless of what is given). A negative value or ``None``
        indicates that an infinite timeout should be used.

    Returns
    -------
    output : list
        A 5-element ``list``: the sanitized command (``str``), the
        full response (``str``), the echoed command (``str``), any
        error response (``None`` if none, or the ``str`` of the
        error), and the remaining response lines (``list`` of
        ``str`` with newlines stripped).

    Notes
    -----
    Sends 'RUN PROGn' (program) or 'PRUN PROFn' (profile) as the
    command to the drive. For a profile, the only output is that
    command echoed back. For a program, it will echo back each
    command in the program (preceeded by an '*' and followed by a
    line feed as opposed to a carriage return).

    See Also
    --------
    get_program : Gets a program.
    set_program_profile : Sets a program or profile.

    """
    # A profile produces only its own echo, so a short fixed 1 s
    # timeout suffices; a program streams back every command it
    # executes until '*END', so the caller-supplied timeout and an
    # explicit end-of-response marker are used instead.
    if program_or_profile == 'profile':
        return self.driver.send_command('PRUN PROF' + str(int(n)),
                                        timeout=1.0, immediate=True)
    return self.driver.send_command('RUN PROG' + str(int(n)),
                                    timeout=timeout, immediate=True,
                                    eor='*END\n')
@property
def energized(self):
    """ Energized state of the motor.

    ``bool``, ``True`` meaning the motor is energized.

    Assigning to it sends an immediate command to the drive to
    energize or de-energize the motor.

    Notes
    -----
    This uses the 'DRIVE' command.

    """
    # Query the DRIVE flag from the drive as a bool.
    return self._get_parameter('DRIVE', bool)

@energized.setter
def energized(self, value):
    # Write the DRIVE flag on the drive immediately.
    self._set_parameter('DRIVE', value, bool)
@property
def denergize_on_kill(self):
    """ De-energize motor when the drive is killed.

    ``bool``. When ``True``, the motor de-energizes whenever the
    drive is given the kill signal.

    Assigning to it sends an immediate command to the drive.

    Notes
    -----
    This uses the 'KDRIVE' command.

    See Also
    --------
    energized : Get/set the motor energized state.
    kill : Kill the drive.

    """
    # Query the KDRIVE flag from the drive as a bool.
    return self._get_parameter('KDRIVE', bool)

@denergize_on_kill.setter
def denergize_on_kill(self, value):
    # Write the KDRIVE flag on the drive immediately.
    self._set_parameter('KDRIVE', value, bool)
@property
def encoder_resolution(self):
    """ Encoder/Resolver resolution.

    ``int`` with units counts/rev (servo) or counts/pitch (linear).

    Assigning to it sends an immediate command to the drive to
    change the resolution and then resets the drive.

    Notes
    -----
    This uses the 'ERES' command.

    """
    # Query the ERES parameter from the drive as an int.
    return self._get_parameter('ERES', int)

@encoder_resolution.setter
def encoder_resolution(self, value):
    self._set_parameter('ERES', value, int)
    # The drive is reset (equivalent to a power cycle) after
    # changing ERES -- presumably required for the new resolution
    # to take effect.
    self.reset()
@property
def electrical_pitch(self):
    """ The motor's electrical pitch.

    ``float`` with units of mm.

    It gives the spacing between two magnets (full magnetic cycle)
    on a linear motor. Velocities and accelerations are in units of
    pitches/s and pitches/s^2, so it is important.

    Assigning to it sends an immediate command to the drive to
    change the electrical pitch and then resets the drive.

    Notes
    -----
    This uses the 'DMEPIT' command.

    """
    # Query the DMEPIT parameter from the drive as a float.
    return self._get_parameter('DMEPIT', float)

@electrical_pitch.setter
def electrical_pitch(self, value):
    self._set_parameter('DMEPIT', value, float)
    # The drive is reset (equivalent to a power cycle) after
    # changing DMEPIT -- presumably required for the new pitch to
    # take effect.
    self.reset()
@property
def max_velocity(self):
    """ The motor's velocity limit.

    ``float`` in motor units.

    Notes
    -----
    This uses the 'DMVLIM' command.

    """
    # Query the DMVLIM parameter from the drive as a float.
    return self._get_parameter('DMVLIM', float)

@max_velocity.setter
def max_velocity(self, value):
    # Write the DMVLIM parameter on the drive immediately.
    self._set_parameter('DMVLIM', value, float)
@property
def motion_commanded(self):
    """ Whether motion is commanded or not.

    ``bool``. Read-only.

    Notes
    -----
    It is the value of the first bit of the 'TAS' command.

    """
    rsp = self.driver.send_command('TAS', immediate=True)
    # Anything other than a clean response consisting of exactly
    # one '*TAS...' line is treated as "no motion commanded".
    if self.driver.command_error(rsp):
        return False
    lines = rsp[4]
    if len(lines) != 1 or not lines[0].startswith('*TAS'):
        return False
    # The first status bit directly follows the '*TAS' prefix.
    return lines[0][4] == '1'
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/__init__.py | GeminiG6._get_parameter | python | def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError('Couldn''t retrieve parameter '
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str) | Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L145-L219 | null | class GeminiG6(object):
""" Controller for a Parker Motion Gemini GV-6 or GT-6.
An object to connect to and control a Parker Motion Gemini GV-6 or
GT-6 servo/stepper motor drive already connected to with a
particular `driver`.
Parameters
----------
driver : driver
Connected instance of a class in ``drivers``. Use ``get_driver``
to load one. Is stored in the attribute ``driver``.
Raises
------
GeminiError
If the attached device is not a Gemini GV-6 or GT-6.
Attributes
----------
driver : driver
Driver for communicating to the drive.
energized : bool
denergize_on_kill : bool
encoder_resolution : int
electrical_pitch : float
max_velocity : float
motion_commanded : bool
See Also
--------
get_driver
"""
def __init__(self, driver):
#: Driver for communicating to the drive.
#:
#: driver
#:
#: A class from ``GeminiMotorDriver.drivers``. Can be loaded
#: using ``get_driver``.
#:
#: See Also
#: --------
#: get_driver
self.driver = driver
# Make sure that it is indeed a GV/T6, and throw an exception
# otherwise. It should respond to the 'TREV' command with 'TREV'
# echoed and '*TREV-GV6-L3E_D1.50_F1.00' where everything after
# the 'GV6' (possibly replaced with a 'GT6') part is model
# dependent.
response = self.driver.send_command('TREV', timeout=1.0,
immediate=True)
if re.search('^!TREV\r\\*TREV-G[VT]{1}6', response[1]) is None:
raise GeminiError('Not a valid Gemini GV-6 or GT-6 device.')
def _set_parameter(self, name, value, tp, timeout=1.0,
max_retries=2):
""" Sets the specified drive parameter.
Sets a parameter on the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to set. It is always the command to
set it when followed by the value.
value : bool, int, or float
Value to set the parameter to.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the last attempt to set the parameter was successful
(``True``) or not (``False`` meaning it had an error).
See Also
--------
_get_parameter : Get a parameter.
"""
# Return False if tp isn't one of the valid types.
if tp not in (bool, int, float):
return False
# Convert value to the string that the drive will expect. value
# must first be converted to the proper type before getting
# converted to str in the usual fasion. As bools need to be a
# '1' or a '0', it must be converted to int before going through
# str.
if tp == bool:
value_str = str(int(bool(value)))
elif tp == int:
value_str = str(int(value))
elif tp == float:
value_str = str(float(value))
# Immediately set the named parameter of the drive. The command
# is just the parameter name followed by the value string.
response = self.driver.send_command(name+value_str, \
timeout=timeout, immediate=True, max_retries=max_retries)
# Return whether the setting was successful or not.
return not self.driver.command_error(response)
def pause(self, max_retries=0):
""" Pauses the drive (execution of commands).
Causes the drive to pause execution of commands till it is
unpaused. Commands will be queued until it is unpaused. Motion
is not stopped.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to pause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last pause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
unpause : Unpause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('PS', timeout=1.0,
immediate=True, max_retries=max_retries)))
def unpause(self, max_retries=0):
""" Unpauses the drive.
Unpauses the drive. Commands queued while it is paused will then
be executed.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to unpause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last unpause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
pause : Pause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('C',
timeout=1.0, immediate=True, max_retries=max_retries)))
def stop(self, max_retries=0):
""" Stops motion.
The drive stops the motor.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last stop command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!S1'.
"""
return (not self.driver.command_error(
self.driver.send_command('S1',
timeout=1.0, immediate=True, max_retries=max_retries)))
def kill(self, max_retries=0):
""" Kills the drive.
The drive stops the motor and any running program. The motor
will de-energize depending on the state of
``denergize_on_kill``.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last kill command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!K'.
See Also
--------
denergize_on_kill : Controls whether the motor de-energizes
after the drive is killed or not.
"""
return (not self.driver.command_error(
self.driver.send_command('K',
timeout=1.0, immediate=True, max_retries=max_retries)))
def reset(self, max_retries=0):
""" Resets the drive.
Resets the drive, which is equivalent to a power cycling.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to reset the drive in the
case of errors.
Returns
-------
success : bool
Whether the last reset command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!RESET'.
"""
return (not self.driver.command_error(
self.driver.send_command('RESET',
timeout=10.0, immediate=True, max_retries=max_retries)))
def get_program(self, n, timeout=2.0, max_retries=2):
""" Get a program from the drive.
Gets program 'n' from the drive and returns its commands.
Parameters
----------
n : int
Which program to get.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the program. The
trailing 'END' is removed. Empty if there was an error.
Notes
-----
The command sent to the drive is '!TPROG PROGn'.
See Also
--------
set_program_profile : Sets a program or profile.
run_program_profile : Runs a program or profile.
"""
# Send the 'TPROG PROGn' command to read the program.
response = self.driver.send_command( \
'TPROG PROG' + str(int(n)), timeout=timeout, \
immediate=True, max_retries=max_retries)
# If there was an error, then return empty. Otherwise, return
# the response lines but strip the leading '*' first and the
# 'END' at the end of the list.
if self.driver.command_error(response) \
or len(response[4]) == 0:
return []
else:
if '*END' in response[4]:
response[4].remove('*END')
return [line[1:] for line in response[4]]
def set_program_profile(self, n, commands,
program_or_profile='program',
timeout=1.0, max_retries=0):
""" Sets a program/profile on the drive.
Sets program or profile 'n' on the drive to the sequence of
commands in 'commands'. If the existing program is identical, it
is not overwritten (can't check this for a profile). Returns
whether the program or profile was successfully set or not (if
the existing one is identical, it is considered a success).
Parameters
----------
n : int
Which program to set.
commands : list or tuple of strings
``list`` or ``tuple`` of commands to send to the drive. Each
command must be a string.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the program or profile was successfully set or not
(an identical program already existing on the drive is
considered a success).
Notes
-----
'commands' gets wrapped between ['DEL PROGn', 'DEF PROGn'] and
'END' or the equivalent profile ones.
See Also
--------
get_program : Gets a program.
run_program_profile : Runs a program or profile.
"""
# Grab the n'th program on the drive and strip commands. If we
# are doing a profile, None will be used as a placeholder.
if program_or_profile != 'profile':
current_program = self.get_program(n, timeout=timeout, \
max_retries=max_retries+2)
else:
current_program = None
stripped_commands = utilities.strip_commands(commands)
# If the two are identical and we are doing a program, then
# nothing needs to be done and the program is already set
# (return True). Otherwise, it needs to be overwritten. If there
# were no errors on the last command, then it was written
# successfully. Otherwise, the program or profile needs to be
# terminated and then deleted.
if current_program is not None \
and current_program == stripped_commands:
return True
else:
# Construct the End Of Responses for each command that will
# be sent. They are '\n' for deletion and ending, but are
# '\n- ' for the rest.
eor = ['\n'] + (['\n- '] * (1 + len(stripped_commands))) \
+ ['\n']
# The commands consist of a header that tells which program
# or profile to set, the stripped commands, followed by an
# 'END'.
if program_or_profile != 'profile':
header = ['DEL PROG'+str(int(n)),
'DEF PROG'+str(int(n))]
else:
header = ['DEL PROF'+str(int(n)),
'DEF PROF'+str(int(n))]
responses = self.driver.send_commands(\
header + stripped_commands + ['END'], \
timeout=timeout, max_retries=max_retries, eor=eor)
# Check to see if it was set successfully. If it was (the
# last command had no errors), return True. Otherwise, the
# program or profile needs to be ended and deleted before
# returning False.
if not self.driver.command_error(responses[-1]):
return True
else:
if program_or_profile != 'profile':
cmds = ['END', 'DEL PROG'+str(int(n))]
else:
cmds = ['END', 'DEL PROF'+str(int(n))]
self.driver.send_commands(cmds, timeout=timeout,
max_retries=max_retries+2)
return False
def run_program_profile(self, n, program_or_profile='program',
timeout=10.0):
""" Runs a program/profile on the drive.
Runs program or profile 'n' on the drive, grabs its output, and
processes the output. The response from the drive is broken down
into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response; which are all returned.
It is **VERY IMPORTANT** that 'timeout' is long enough for the
program to run if all the output from the drive is to be
collected.
Parameters
----------
n : int
Which program to get.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response for running a program (set to 1.0 for a profile
regardless of what is given). A negative value or ``None``
indicates that the an infinite timeout should be used.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
Notes
-----
Sends 'RUN PROGn' (program) or 'PRUN PROFn' (profile) as the
command to the drive. For a profile, the only output is that
command echoed back. For a program, it will echo back each
command in the program (preceeded by an '*' and followed by a
line feed as opposed to a carriage return).
See Also
--------
get_program : Gets a program.
set_program_profile : Sets a program or profile.
"""
if program_or_profile != 'profile':
return self.driver.send_command('RUN PROG' + str(int(n)), \
timeout=timeout, immediate=True, eor='*END\n')
else:
return self.driver.send_command( \
'PRUN PROF' + str(int(n)), timeout=1.0, immediate=True)
@property
def energized(self):
""" Energized state of the motor.
``bool`` with energized being ``True``.
Setting it sends an immediate command to the drive to energize
the motor.
Notes
-----
This uses the 'DRIVE' command.
"""
return self._get_parameter('DRIVE', bool)
@energized.setter
def energized(self, value):
self._set_parameter('DRIVE', value, bool)
@property
def denergize_on_kill(self):
""" De-energize motor when the drive is killed.
``bool`` with ``True`` meaning that whenever the drive is given
the kill signal, the motor will de-energize.
Setting it sends an immediate command to the drive to set it.
Notes
-----
This uses the 'KDRIVE' command.
See Also
--------
energized : Get/set the motor energized state.
kill : Kill the drive.
"""
return self._get_parameter('KDRIVE', bool)
@denergize_on_kill.setter
def denergize_on_kill(self, value):
self._set_parameter('KDRIVE', value, bool)
@property
def encoder_resolution(self):
""" Encoder/Resolver resolution.
``int`` with units counts/rev (servo) or counts/pitch (linear)
Setting it sends an immediate command to the drive to change the
encoder/resolver resolution.
Notes
-----
This uses the 'ERES' command.
"""
return self._get_parameter('ERES', int)
@encoder_resolution.setter
def encoder_resolution(self, value):
self._set_parameter('ERES', value, int)
self.reset()
@property
def electrical_pitch(self):
""" The motor's electrical pitch.
float with units of mm
It gives the spacing between two magnets (full magnetic cycle)
on a linear motor. Velocities and accelerations are in units of
pitches/s and pitches/s^2, so it is important.
Setting it sends an immediate command to the drive to change the
electrical pitch.
Notes
-----
This uses the 'DMEPIT' command.
"""
return self._get_parameter('DMEPIT', float)
@electrical_pitch.setter
def electrical_pitch(self, value):
self._set_parameter('DMEPIT', value, float)
self.reset()
@property
def max_velocity(self):
""" The motor's velocity limit.
``float`` in motor units
Notes
-----
This uses the 'DMVLIM' command.
"""
return self._get_parameter('DMVLIM', float)
@max_velocity.setter
def max_velocity(self, value):
self._set_parameter('DMVLIM', value, float)
@property
def motion_commanded(self):
""" Whether motion is commanded or not.
``bool``
Can't be set.
Notes
-----
It is the value of the first bit of the 'TAS' command.
"""
rsp = self.driver.send_command('TAS', immediate=True)
if self.driver.command_error(rsp) or len(rsp[4]) != 1 \
or rsp[4][0][0:4] != '*TAS':
return False
else:
return (rsp[4][0][4] == '1')
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/__init__.py | GeminiG6._set_parameter | python | def _set_parameter(self, name, value, tp, timeout=1.0,
max_retries=2):
# Return False if tp isn't one of the valid types.
if tp not in (bool, int, float):
return False
# Convert value to the string that the drive will expect. value
# must first be converted to the proper type before getting
# converted to str in the usual fasion. As bools need to be a
# '1' or a '0', it must be converted to int before going through
# str.
if tp == bool:
value_str = str(int(bool(value)))
elif tp == int:
value_str = str(int(value))
elif tp == float:
value_str = str(float(value))
# Immediately set the named parameter of the drive. The command
# is just the parameter name followed by the value string.
response = self.driver.send_command(name+value_str, \
timeout=timeout, immediate=True, max_retries=max_retries)
# Return whether the setting was successful or not.
return not self.driver.command_error(response) | Sets the specified drive parameter.
Sets a parameter on the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to set. It is always the command to
set it when followed by the value.
value : bool, int, or float
Value to set the parameter to.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the last attempt to set the parameter was successful
(``True``) or not (``False`` meaning it had an error).
See Also
--------
_get_parameter : Get a parameter. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L221-L278 | null | class GeminiG6(object):
""" Controller for a Parker Motion Gemini GV-6 or GT-6.
An object to connect to and control a Parker Motion Gemini GV-6 or
GT-6 servo/stepper motor drive already connected to with a
particular `driver`.
Parameters
----------
driver : driver
Connected instance of a class in ``drivers``. Use ``get_driver``
to load one. Is stored in the attribute ``driver``.
Raises
------
GeminiError
If the attached device is not a Gemini GV-6 or GT-6.
Attributes
----------
driver : driver
Driver for communicating to the drive.
energized : bool
denergize_on_kill : bool
encoder_resolution : int
electrical_pitch : float
max_velocity : float
motion_commanded : bool
See Also
--------
get_driver
"""
def __init__(self, driver):
#: Driver for communicating to the drive.
#:
#: driver
#:
#: A class from ``GeminiMotorDriver.drivers``. Can be loaded
#: using ``get_driver``.
#:
#: See Also
#: --------
#: get_driver
self.driver = driver
# Make sure that it is indeed a GV/T6, and throw an exception
# otherwise. It should respond to the 'TREV' command with 'TREV'
# echoed and '*TREV-GV6-L3E_D1.50_F1.00' where everything after
# the 'GV6' (possibly replaced with a 'GT6') part is model
# dependent.
response = self.driver.send_command('TREV', timeout=1.0,
immediate=True)
if re.search('^!TREV\r\\*TREV-G[VT]{1}6', response[1]) is None:
raise GeminiError('Not a valid Gemini GV-6 or GT-6 device.')
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
    """ Gets the specified drive parameter.

    Gets a parameter from the drive. Only supports ``bool``,
    ``int``, and ``float`` parameters.

    Parameters
    ----------
    name : str
        Name of the parameter to check. It is always the command to
        set it but without the value.
    tp : type {bool, int, float}
        The type of the parameter.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.

    Returns
    -------
    value : bool, int, or float
        The value of the specified parameter.

    Raises
    ------
    TypeError
        If `tp` is not an allowed type (``bool``, ``int``,
        ``float``).
    CommandError
        If the command to retrieve the parameter returned an error.
    ValueError
        If the value returned by the drive cannot be converted to
        the proper type.

    See Also
    --------
    _set_parameter : Set a parameter.

    """
    # Raise a TypeError if tp isn't one of the valid types.
    if tp not in (bool, int, float):
        raise TypeError('Only supports bool, int, and float; not '
                        + str(tp))
    # Sending the bare parameter name queries its current value. The
    # drive responds with the name preceded by an '*' and followed
    # by the value text, which must then be converted.
    response = self.driver.send_command(name, timeout=timeout,
                                        immediate=True,
                                        max_retries=max_retries)
    # A command error, an empty response body, or a first response
    # line that isn't '*'+name all mean the query failed.
    if self.driver.command_error(response) \
            or len(response[4]) == 0 \
            or not response[4][0].startswith('*' + name):
        # Bug fix: the original wrote 'Couldn''t ...', which in
        # Python is adjacent-literal concatenation and produced the
        # message "Couldnt retrieve parameter ..."; use a real
        # apostrophe.
        raise CommandError("Couldn't retrieve parameter " + name)
    # Extract the string representation of the value, which follows
    # the '*'+name prefix.
    value_str = response[4][0][(len(name) + 1):]
    # Convert the value string to the requested type. Booleans are
    # reported by the drive as '1'/'0'.
    if tp == bool:
        return (value_str == '1')
    elif tp == int:
        return int(value_str)
    elif tp == float:
        return float(value_str)
def pause(self, max_retries=0):
    """ Pauses the drive (execution of commands).

    Causes the drive to pause execution of commands until it is
    unpaused; commands received meanwhile are queued. Motion that is
    already underway is not stopped.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to pause the drive in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last pause command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is 'PS' (as an immediate command,
    i.e. '!PS').

    See Also
    --------
    unpause : Unpause the drive.

    """
    # Success is simply the absence of a command error in the
    # drive's response.
    response = self.driver.send_command('PS', timeout=1.0,
                                        immediate=True,
                                        max_retries=max_retries)
    return not self.driver.command_error(response)
def unpause(self, max_retries=0):
    """ Unpauses the drive.

    Resumes execution of commands; anything queued while the drive
    was paused is then executed.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to unpause the drive in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last unpause command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is '!C'.

    See Also
    --------
    pause : Pause the drive.

    """
    # Success is simply the absence of a command error in the
    # drive's response.
    response = self.driver.send_command('C', timeout=1.0,
                                        immediate=True,
                                        max_retries=max_retries)
    return not self.driver.command_error(response)
def stop(self, max_retries=0):
    """ Stops motion.

    The drive stops the motor.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to stop the drive in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last stop command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is '!S1'.

    """
    # Success is simply the absence of a command error in the
    # drive's response.
    response = self.driver.send_command('S1', timeout=1.0,
                                        immediate=True,
                                        max_retries=max_retries)
    return not self.driver.command_error(response)
def kill(self, max_retries=0):
    """ Kills the drive.

    The drive stops the motor and any running program. Whether the
    motor also de-energizes depends on the state of
    ``denergize_on_kill``.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to kill the drive in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last kill command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is '!K'.

    See Also
    --------
    denergize_on_kill : Controls whether the motor de-energizes
        after the drive is killed or not.

    """
    # Success is simply the absence of a command error in the
    # drive's response.
    response = self.driver.send_command('K', timeout=1.0,
                                        immediate=True,
                                        max_retries=max_retries)
    return not self.driver.command_error(response)
def reset(self, max_retries=0):
    """ Resets the drive.

    Resets the drive, which is equivalent to power cycling it.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to reset the drive in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last reset command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is '!RESET'. The long (10 s)
    timeout allows for the drive's restart time.

    """
    # Success is simply the absence of a command error in the
    # drive's response.
    response = self.driver.send_command('RESET', timeout=10.0,
                                        immediate=True,
                                        max_retries=max_retries)
    return not self.driver.command_error(response)
def get_program(self, n, timeout=2.0, max_retries=2):
    """ Get a program from the drive.

    Gets program `n` from the drive and returns its commands.

    Parameters
    ----------
    n : int
        Which program to get.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.

    Returns
    -------
    commands : list of str
        ``list`` of ``str`` commands making up the program. The
        trailing 'END' is removed. Empty if there was an error.

    Notes
    -----
    The command sent to the drive is '!TPROG PROGn'.

    See Also
    --------
    set_program_profile : Sets a program or profile.
    run_program_profile : Runs a program or profile.

    """
    reply = self.driver.send_command(
        'TPROG PROG' + str(int(n)),
        timeout=timeout, immediate=True, max_retries=max_retries)
    # A command error or an empty response body means the program
    # could not be read; report that as an empty command list.
    if self.driver.command_error(reply) or not reply[4]:
        return []
    lines = reply[4]
    # Drop the trailing '*END' marker (first occurrence) and strip
    # the leading '*' the drive prefixes onto each echoed line.
    if '*END' in lines:
        lines.remove('*END')
    return [line[1:] for line in lines]
def set_program_profile(self, n, commands,
                        program_or_profile='program',
                        timeout=1.0, max_retries=0):
    """ Sets a program/profile on the drive.

    Sets program or profile `n` on the drive to the sequence of
    commands in `commands`. If the existing program is identical, it
    is not overwritten (this check is not possible for a profile).
    Returns whether the program or profile was successfully set or
    not (an identical existing program counts as success).

    Parameters
    ----------
    n : int
        Which program/profile to set.
    commands : list or tuple of strings
        ``list`` or ``tuple`` of commands to send to the drive. Each
        command must be a string.
    program_or_profile : {'program', 'profile'}, optional
        Whether to write a program or a profile. Anything other than
        these two values implies the default.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.

    Returns
    -------
    success : bool
        Whether the program or profile was successfully set or not
        (an identical program already existing on the drive is
        considered a success).

    Notes
    -----
    `commands` gets wrapped between ['DEL PROGn', 'DEF PROGn'] and
    'END' or the equivalent profile ones.

    See Also
    --------
    get_program : Gets a program.
    run_program_profile : Runs a program or profile.

    """
    is_profile = (program_or_profile == 'profile')
    slot = ('PROF' if is_profile else 'PROG') + str(int(n))
    # For programs, read back what is currently stored so that an
    # identical program is not needlessly rewritten. Profiles cannot
    # be read back, so None stands in as a placeholder.
    if is_profile:
        existing = None
    else:
        existing = self.get_program(n, timeout=timeout,
                                    max_retries=max_retries + 2)
    cleaned = utilities.strip_commands(commands)
    # Identical program already on the drive: nothing to do.
    if existing is not None and existing == cleaned:
        return True
    # End Of Response strings per command: plain '\n' for the DEL
    # and the final END, '\n- ' for the DEF line and every command
    # of the body.
    eor = ['\n'] + (['\n- '] * (1 + len(cleaned))) + ['\n']
    # Delete any existing definition, open a new one, stream the
    # commands, and close it with 'END'.
    responses = self.driver.send_commands(
        ['DEL ' + slot, 'DEF ' + slot] + cleaned + ['END'],
        timeout=timeout, max_retries=max_retries, eor=eor)
    # Success is the final command (the END) coming back clean.
    if not self.driver.command_error(responses[-1]):
        return True
    # On failure, terminate the half-written definition and delete
    # it so the drive is not left in definition mode.
    self.driver.send_commands(['END', 'DEL ' + slot],
                              timeout=timeout,
                              max_retries=max_retries + 2)
    return False
def run_program_profile(self, n, program_or_profile='program',
                        timeout=10.0):
    """ Runs a program/profile on the drive.

    Runs program or profile `n` on the drive, grabs its output, and
    processes the output. The response from the drive is broken down
    into the echoed command (drive echoes it back), any error
    returned by the drive (leading '*' is stripped), and the
    different lines of the response; which are all returned.

    It is **VERY IMPORTANT** that `timeout` is long enough for the
    program to run if all the output from the drive is to be
    collected.

    Parameters
    ----------
    n : int
        Which program/profile to run.
    program_or_profile : {'program', 'profile'}, optional
        Whether to run a program or a profile. Anything other than
        these two values implies the default.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response for running a program (set to 1.0 for a profile
        regardless of what is given). A negative value or ``None``
        indicates that an infinite timeout should be used.

    Returns
    -------
    output : list
        A 5-element ``list``. The elements, in order, are the
        sanitized command (``str``), the full response (``str``),
        the echoed command (``str``), any error response (``None``
        if none, or the ``str`` of the error), and the lines of the
        response that are not the echo or error line (``list`` of
        ``str`` with newlines stripped).

    Notes
    -----
    Sends 'RUN PROGn' (program) or 'PRUN PROFn' (profile) as the
    command to the drive. For a profile, the only output is that
    command echoed back. For a program, it will echo back each
    command in the program (preceded by an '*' and followed by a
    line feed as opposed to a carriage return).

    See Also
    --------
    get_program : Gets a program.
    set_program_profile : Sets a program or profile.

    """
    if program_or_profile == 'profile':
        # Profiles only echo the command back, so a short fixed
        # timeout is sufficient.
        return self.driver.send_command('PRUN PROF' + str(int(n)),
                                        timeout=1.0, immediate=True)
    # Programs echo every contained command and finish with '*END',
    # so wait (up to `timeout`) for that terminator.
    return self.driver.send_command('RUN PROG' + str(int(n)),
                                    timeout=timeout, immediate=True,
                                    eor='*END\n')
@property
def energized(self):
    """Whether the motor is energized (``bool``).

    Reading queries the drive; assigning sends an immediate command
    to energize (``True``) or de-energize (``False``) the motor.

    Notes
    -----
    Backed by the drive's 'DRIVE' parameter.

    """
    return self._get_parameter('DRIVE', bool)

@energized.setter
def energized(self, value):
    self._set_parameter('DRIVE', value, bool)
@property
def denergize_on_kill(self):
    """Whether a kill also de-energizes the motor (``bool``).

    When ``True``, the motor de-energizes whenever the drive is
    given the kill signal. Assigning sends an immediate command to
    change the setting.

    Notes
    -----
    Backed by the drive's 'KDRIVE' parameter.

    See Also
    --------
    energized : Get/set the motor energized state.
    kill : Kill the drive.

    """
    return self._get_parameter('KDRIVE', bool)

@denergize_on_kill.setter
def denergize_on_kill(self, value):
    self._set_parameter('KDRIVE', value, bool)
@property
def encoder_resolution(self):
    """Encoder/resolver resolution (``int``).

    In counts/rev for a servo or counts/pitch for a linear motor.
    Assigning sends an immediate command to change the resolution
    and then resets the drive so the new value takes effect.

    Notes
    -----
    Backed by the drive's 'ERES' parameter.

    """
    return self._get_parameter('ERES', int)

@encoder_resolution.setter
def encoder_resolution(self, value):
    self._set_parameter('ERES', value, int)
    # A reset (equivalent to a power cycle) is required for the new
    # resolution to take effect.
    self.reset()
@property
def electrical_pitch(self):
    """The motor's electrical pitch in mm (``float``).

    The spacing between two magnets (one full magnetic cycle) on a
    linear motor. Velocities and accelerations are in units of
    pitches/s and pitches/s^2, so this value matters. Assigning
    sends an immediate command to change the pitch and then resets
    the drive so the new value takes effect.

    Notes
    -----
    Backed by the drive's 'DMEPIT' parameter.

    """
    return self._get_parameter('DMEPIT', float)

@electrical_pitch.setter
def electrical_pitch(self, value):
    self._set_parameter('DMEPIT', value, float)
    # A reset (equivalent to a power cycle) is required for the new
    # pitch to take effect.
    self.reset()
@property
def max_velocity(self):
    """The motor's velocity limit in motor units (``float``).

    Assigning sends an immediate command to change the limit.

    Notes
    -----
    Backed by the drive's 'DMVLIM' parameter.

    """
    return self._get_parameter('DMVLIM', float)

@max_velocity.setter
def max_velocity(self, value):
    self._set_parameter('DMVLIM', value, float)
@property
def motion_commanded(self):
    """Whether motion is currently commanded (``bool``).

    Read-only. Derived from the first bit of the drive's 'TAS'
    (axis status) response; ``False`` is also returned when the
    status query itself fails.

    """
    rsp = self.driver.send_command('TAS', immediate=True)
    # A command error, a response body that is not exactly one line,
    # or an echo that does not start with '*TAS' all mean the status
    # could not be read.
    if self.driver.command_error(rsp) or len(rsp[4]) != 1 \
            or rsp[4][0][0:4] != '*TAS':
        return False
    # The character right after '*TAS' is the "motion commanded"
    # status bit.
    return (rsp[4][0][4] == '1')
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/__init__.py | GeminiG6.pause | python | def pause(self, max_retries=0):
return (not self.driver.command_error(
self.driver.send_command('PS', timeout=1.0,
immediate=True, max_retries=max_retries))) | Pauses the drive (execution of commands).
Causes the drive to pause execution of commands till it is
unpaused. Commands will be queued until it is unpaused. Motion
is not stopped.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to pause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last pause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!PS'.
See Also
--------
unpause : Unpause the drive. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L280-L311 | null | class GeminiG6(object):
""" Controller for a Parker Motion Gemini GV-6 or GT-6.
An object to connect to and control a Parker Motion Gemini GV-6 or
GT-6 servo/stepper motor drive already connected to with a
particular `driver`.
Parameters
----------
driver : driver
Connected instance of a class in ``drivers``. Use ``get_driver``
to load one. Is stored in the attribute ``driver``.
Raises
------
GeminiError
If the attached device is not a Gemini GV-6 or GT-6.
Attributes
----------
driver : driver
Driver for communicating to the drive.
energized : bool
denergize_on_kill : bool
encoder_resolution : int
electrical_pitch : float
max_velocity : float
motion_commanded : bool
See Also
--------
get_driver
"""
def __init__(self, driver):
#: Driver for communicating to the drive.
#:
#: driver
#:
#: A class from ``GeminiMotorDriver.drivers``. Can be loaded
#: using ``get_driver``.
#:
#: See Also
#: --------
#: get_driver
self.driver = driver
# Make sure that it is indeed a GV/T6, and throw an exception
# otherwise. It should respond to the 'TREV' command with 'TREV'
# echoed and '*TREV-GV6-L3E_D1.50_F1.00' where everything after
# the 'GV6' (possibly replaced with a 'GT6') part is model
# dependent.
response = self.driver.send_command('TREV', timeout=1.0,
immediate=True)
if re.search('^!TREV\r\\*TREV-G[VT]{1}6', response[1]) is None:
raise GeminiError('Not a valid Gemini GV-6 or GT-6 device.')
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError('Couldn''t retrieve parameter '
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str)
def _set_parameter(self, name, value, tp, timeout=1.0,
                   max_retries=2):
    """ Sets the specified drive parameter.

    Sets a parameter on the drive. Only supports ``bool``, ``int``,
    and ``float`` parameters.

    Parameters
    ----------
    name : str
        Name of the parameter to set. It is always the command to
        set it when followed by the value.
    value : bool, int, or float
        Value to set the parameter to.
    tp : type {bool, int, float}
        The type of the parameter.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.

    Returns
    -------
    success : bool
        Whether the last attempt to set the parameter was successful
        (``True``) or not (``False`` meaning it had an error).

    See Also
    --------
    _get_parameter : Get a parameter.

    """
    # Unsupported types cannot be set; fail soft with False rather
    # than raising.
    if tp not in (bool, int, float):
        return False
    # Render the value the way the drive expects it: booleans as
    # '1'/'0' (hence the int() hop), ints and floats via str() after
    # coercion to the declared type.
    if tp == bool:
        text = str(int(bool(value)))
    elif tp == int:
        text = str(int(value))
    else:
        text = str(float(value))
    # The set command is simply the parameter name immediately
    # followed by the value text, sent as an immediate command.
    reply = self.driver.send_command(name + text, timeout=timeout,
                                     immediate=True,
                                     max_retries=max_retries)
    # Success is the absence of a command error in the response.
    return not self.driver.command_error(reply)
def unpause(self, max_retries=0):
""" Unpauses the drive.
Unpauses the drive. Commands queued while it is paused will then
be executed.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to unpause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last unpause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
pause : Pause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('C',
timeout=1.0, immediate=True, max_retries=max_retries)))
def stop(self, max_retries=0):
""" Stops motion.
The drive stops the motor.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last stop command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!S1'.
"""
return (not self.driver.command_error(
self.driver.send_command('S1',
timeout=1.0, immediate=True, max_retries=max_retries)))
def kill(self, max_retries=0):
""" Kills the drive.
The drive stops the motor and any running program. The motor
will de-energize depending on the state of
``denergize_on_kill``.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last kill command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!K'.
See Also
--------
denergize_on_kill : Controls whether the motor de-energizes
after the drive is killed or not.
"""
return (not self.driver.command_error(
self.driver.send_command('K',
timeout=1.0, immediate=True, max_retries=max_retries)))
def reset(self, max_retries=0):
""" Resets the drive.
Resets the drive, which is equivalent to a power cycling.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to reset the drive in the
case of errors.
Returns
-------
success : bool
Whether the last reset command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!RESET'.
"""
return (not self.driver.command_error(
self.driver.send_command('RESET',
timeout=10.0, immediate=True, max_retries=max_retries)))
def get_program(self, n, timeout=2.0, max_retries=2):
""" Get a program from the drive.
Gets program 'n' from the drive and returns its commands.
Parameters
----------
n : int
Which program to get.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the program. The
trailing 'END' is removed. Empty if there was an error.
Notes
-----
The command sent to the drive is '!TPROG PROGn'.
See Also
--------
set_program_profile : Sets a program or profile.
run_program_profile : Runs a program or profile.
"""
# Send the 'TPROG PROGn' command to read the program.
response = self.driver.send_command( \
'TPROG PROG' + str(int(n)), timeout=timeout, \
immediate=True, max_retries=max_retries)
# If there was an error, then return empty. Otherwise, return
# the response lines but strip the leading '*' first and the
# 'END' at the end of the list.
if self.driver.command_error(response) \
or len(response[4]) == 0:
return []
else:
if '*END' in response[4]:
response[4].remove('*END')
return [line[1:] for line in response[4]]
def set_program_profile(self, n, commands,
program_or_profile='program',
timeout=1.0, max_retries=0):
""" Sets a program/profile on the drive.
Sets program or profile 'n' on the drive to the sequence of
commands in 'commands'. If the existing program is identical, it
is not overwritten (can't check this for a profile). Returns
whether the program or profile was successfully set or not (if
the existing one is identical, it is considered a success).
Parameters
----------
n : int
Which program to set.
commands : list or tuple of strings
``list`` or ``tuple`` of commands to send to the drive. Each
command must be a string.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the program or profile was successfully set or not
(an identical program already existing on the drive is
considered a success).
Notes
-----
'commands' gets wrapped between ['DEL PROGn', 'DEF PROGn'] and
'END' or the equivalent profile ones.
See Also
--------
get_program : Gets a program.
run_program_profile : Runs a program or profile.
"""
# Grab the n'th program on the drive and strip commands. If we
# are doing a profile, None will be used as a placeholder.
if program_or_profile != 'profile':
current_program = self.get_program(n, timeout=timeout, \
max_retries=max_retries+2)
else:
current_program = None
stripped_commands = utilities.strip_commands(commands)
# If the two are identical and we are doing a program, then
# nothing needs to be done and the program is already set
# (return True). Otherwise, it needs to be overwritten. If there
# were no errors on the last command, then it was written
# successfully. Otherwise, the program or profile needs to be
# terminated and then deleted.
if current_program is not None \
and current_program == stripped_commands:
return True
else:
# Construct the End Of Responses for each command that will
# be sent. They are '\n' for deletion and ending, but are
# '\n- ' for the rest.
eor = ['\n'] + (['\n- '] * (1 + len(stripped_commands))) \
+ ['\n']
# The commands consist of a header that tells which program
# or profile to set, the stripped commands, followed by an
# 'END'.
if program_or_profile != 'profile':
header = ['DEL PROG'+str(int(n)),
'DEF PROG'+str(int(n))]
else:
header = ['DEL PROF'+str(int(n)),
'DEF PROF'+str(int(n))]
responses = self.driver.send_commands(\
header + stripped_commands + ['END'], \
timeout=timeout, max_retries=max_retries, eor=eor)
# Check to see if it was set successfully. If it was (the
# last command had no errors), return True. Otherwise, the
# program or profile needs to be ended and deleted before
# returning False.
if not self.driver.command_error(responses[-1]):
return True
else:
if program_or_profile != 'profile':
cmds = ['END', 'DEL PROG'+str(int(n))]
else:
cmds = ['END', 'DEL PROF'+str(int(n))]
self.driver.send_commands(cmds, timeout=timeout,
max_retries=max_retries+2)
return False
def run_program_profile(self, n, program_or_profile='program',
timeout=10.0):
""" Runs a program/profile on the drive.
Runs program or profile 'n' on the drive, grabs its output, and
processes the output. The response from the drive is broken down
into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response; which are all returned.
It is **VERY IMPORTANT** that 'timeout' is long enough for the
program to run if all the output from the drive is to be
collected.
Parameters
----------
n : int
Which program to get.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response for running a program (set to 1.0 for a profile
regardless of what is given). A negative value or ``None``
indicates that the an infinite timeout should be used.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
Notes
-----
Sends 'RUN PROGn' (program) or 'PRUN PROFn' (profile) as the
command to the drive. For a profile, the only output is that
command echoed back. For a program, it will echo back each
command in the program (preceeded by an '*' and followed by a
line feed as opposed to a carriage return).
See Also
--------
get_program : Gets a program.
set_program_profile : Sets a program or profile.
"""
if program_or_profile != 'profile':
return self.driver.send_command('RUN PROG' + str(int(n)), \
timeout=timeout, immediate=True, eor='*END\n')
else:
return self.driver.send_command( \
'PRUN PROF' + str(int(n)), timeout=1.0, immediate=True)
@property
def energized(self):
""" Energized state of the motor.
``bool`` with energized being ``True``.
Setting it sends an immediate command to the drive to energize
the motor.
Notes
-----
This uses the 'DRIVE' command.
"""
return self._get_parameter('DRIVE', bool)
@energized.setter
def energized(self, value):
self._set_parameter('DRIVE', value, bool)
@property
def denergize_on_kill(self):
""" De-energize motor when the drive is killed.
``bool`` with ``True`` meaning that whenever the drive is given
the kill signal, the motor will de-energize.
Setting it sends an immediate command to the drive to set it.
Notes
-----
This uses the 'KDRIVE' command.
See Also
--------
energized : Get/set the motor energized state.
kill : Kill the drive.
"""
return self._get_parameter('KDRIVE', bool)
@denergize_on_kill.setter
def denergize_on_kill(self, value):
self._set_parameter('KDRIVE', value, bool)
@property
def encoder_resolution(self):
""" Encoder/Resolver resolution.
``int`` with units counts/rev (servo) or counts/pitch (linear)
Setting it sends an immediate command to the drive to change the
encoder/resolver resolution.
Notes
-----
This uses the 'ERES' command.
"""
return self._get_parameter('ERES', int)
@encoder_resolution.setter
def encoder_resolution(self, value):
self._set_parameter('ERES', value, int)
self.reset()
@property
def electrical_pitch(self):
""" The motor's electrical pitch.
float with units of mm
It gives the spacing between two magnets (full magnetic cycle)
on a linear motor. Velocities and accelerations are in units of
pitches/s and pitches/s^2, so it is important.
Setting it sends an immediate command to the drive to change the
electrical pitch.
Notes
-----
This uses the 'DMEPIT' command.
"""
return self._get_parameter('DMEPIT', float)
@electrical_pitch.setter
def electrical_pitch(self, value):
self._set_parameter('DMEPIT', value, float)
self.reset()
@property
def max_velocity(self):
""" The motor's velocity limit.
``float`` in motor units
Notes
-----
This uses the 'DMVLIM' command.
"""
return self._get_parameter('DMVLIM', float)
@max_velocity.setter
def max_velocity(self, value):
self._set_parameter('DMVLIM', value, float)
@property
def motion_commanded(self):
""" Whether motion is commanded or not.
``bool``
Can't be set.
Notes
-----
It is the value of the first bit of the 'TAS' command.
"""
rsp = self.driver.send_command('TAS', immediate=True)
if self.driver.command_error(rsp) or len(rsp[4]) != 1 \
or rsp[4][0][0:4] != '*TAS':
return False
else:
return (rsp[4][0][4] == '1')
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/__init__.py | GeminiG6.get_program | python | def get_program(self, n, timeout=2.0, max_retries=2):
# Send the 'TPROG PROGn' command to read the program.
response = self.driver.send_command( \
'TPROG PROG' + str(int(n)), timeout=timeout, \
immediate=True, max_retries=max_retries)
# If there was an error, then return empty. Otherwise, return
# the response lines but strip the leading '*' first and the
# 'END' at the end of the list.
if self.driver.command_error(response) \
or len(response[4]) == 0:
return []
else:
if '*END' in response[4]:
response[4].remove('*END')
return [line[1:] for line in response[4]] | Get a program from the drive.
Gets program 'n' from the drive and returns its commands.
Parameters
----------
n : int
Which program to get.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the program. The
trailing 'END' is removed. Empty if there was an error.
Notes
-----
The command sent to the drive is '!TPROG PROGn'.
See Also
--------
set_program_profile : Sets a program or profile.
run_program_profile : Runs a program or profile. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L433-L480 | null | class GeminiG6(object):
""" Controller for a Parker Motion Gemini GV-6 or GT-6.
An object to connect to and control a Parker Motion Gemini GV-6 or
GT-6 servo/stepper motor drive already connected to with a
particular `driver`.
Parameters
----------
driver : driver
Connected instance of a class in ``drivers``. Use ``get_driver``
to load one. Is stored in the attribute ``driver``.
Raises
------
GeminiError
If the attached device is not a Gemini GV-6 or GT-6.
Attributes
----------
driver : driver
Driver for communicating to the drive.
energized : bool
denergize_on_kill : bool
encoder_resolution : int
electrical_pitch : float
max_velocity : float
motion_commanded : bool
See Also
--------
get_driver
"""
def __init__(self, driver):
#: Driver for communicating to the drive.
#:
#: driver
#:
#: A class from ``GeminiMotorDriver.drivers``. Can be loaded
#: using ``get_driver``.
#:
#: See Also
#: --------
#: get_driver
self.driver = driver
# Make sure that it is indeed a GV/T6, and throw an exception
# otherwise. It should respond to the 'TREV' command with 'TREV'
# echoed and '*TREV-GV6-L3E_D1.50_F1.00' where everything after
# the 'GV6' (possibly replaced with a 'GT6') part is model
# dependent.
response = self.driver.send_command('TREV', timeout=1.0,
immediate=True)
if re.search('^!TREV\r\\*TREV-G[VT]{1}6', response[1]) is None:
raise GeminiError('Not a valid Gemini GV-6 or GT-6 device.')
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError('Couldn''t retrieve parameter '
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str)
def _set_parameter(self, name, value, tp, timeout=1.0,
max_retries=2):
""" Sets the specified drive parameter.
Sets a parameter on the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to set. It is always the command to
set it when followed by the value.
value : bool, int, or float
Value to set the parameter to.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the last attempt to set the parameter was successful
(``True``) or not (``False`` meaning it had an error).
See Also
--------
_get_parameter : Get a parameter.
"""
# Return False if tp isn't one of the valid types.
if tp not in (bool, int, float):
return False
# Convert value to the string that the drive will expect. value
# must first be converted to the proper type before getting
# converted to str in the usual fasion. As bools need to be a
# '1' or a '0', it must be converted to int before going through
# str.
if tp == bool:
value_str = str(int(bool(value)))
elif tp == int:
value_str = str(int(value))
elif tp == float:
value_str = str(float(value))
# Immediately set the named parameter of the drive. The command
# is just the parameter name followed by the value string.
response = self.driver.send_command(name+value_str, \
timeout=timeout, immediate=True, max_retries=max_retries)
# Return whether the setting was successful or not.
return not self.driver.command_error(response)
def pause(self, max_retries=0):
""" Pauses the drive (execution of commands).
Causes the drive to pause execution of commands till it is
unpaused. Commands will be queued until it is unpaused. Motion
is not stopped.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to pause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last pause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
unpause : Unpause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('PS', timeout=1.0,
immediate=True, max_retries=max_retries)))
def unpause(self, max_retries=0):
""" Unpauses the drive.
Unpauses the drive. Commands queued while it is paused will then
be executed.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to unpause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last unpause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
pause : Pause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('C',
timeout=1.0, immediate=True, max_retries=max_retries)))
def stop(self, max_retries=0):
""" Stops motion.
The drive stops the motor.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last stop command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!S1'.
"""
return (not self.driver.command_error(
self.driver.send_command('S1',
timeout=1.0, immediate=True, max_retries=max_retries)))
def kill(self, max_retries=0):
""" Kills the drive.
The drive stops the motor and any running program. The motor
will de-energize depending on the state of
``denergize_on_kill``.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last kill command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!K'.
See Also
--------
denergize_on_kill : Controls whether the motor de-energizes
after the drive is killed or not.
"""
return (not self.driver.command_error(
self.driver.send_command('K',
timeout=1.0, immediate=True, max_retries=max_retries)))
def reset(self, max_retries=0):
""" Resets the drive.
Resets the drive, which is equivalent to a power cycling.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to reset the drive in the
case of errors.
Returns
-------
success : bool
Whether the last reset command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!RESET'.
"""
return (not self.driver.command_error(
self.driver.send_command('RESET',
timeout=10.0, immediate=True, max_retries=max_retries)))
def set_program_profile(self, n, commands,
program_or_profile='program',
timeout=1.0, max_retries=0):
""" Sets a program/profile on the drive.
Sets program or profile 'n' on the drive to the sequence of
commands in 'commands'. If the existing program is identical, it
is not overwritten (can't check this for a profile). Returns
whether the program or profile was successfully set or not (if
the existing one is identical, it is considered a success).
Parameters
----------
n : int
Which program to set.
commands : list or tuple of strings
``list`` or ``tuple`` of commands to send to the drive. Each
command must be a string.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the program or profile was successfully set or not
(an identical program already existing on the drive is
considered a success).
Notes
-----
'commands' gets wrapped between ['DEL PROGn', 'DEF PROGn'] and
'END' or the equivalent profile ones.
See Also
--------
get_program : Gets a program.
run_program_profile : Runs a program or profile.
"""
# Grab the n'th program on the drive and strip commands. If we
# are doing a profile, None will be used as a placeholder.
if program_or_profile != 'profile':
current_program = self.get_program(n, timeout=timeout, \
max_retries=max_retries+2)
else:
current_program = None
stripped_commands = utilities.strip_commands(commands)
# If the two are identical and we are doing a program, then
# nothing needs to be done and the program is already set
# (return True). Otherwise, it needs to be overwritten. If there
# were no errors on the last command, then it was written
# successfully. Otherwise, the program or profile needs to be
# terminated and then deleted.
if current_program is not None \
and current_program == stripped_commands:
return True
else:
# Construct the End Of Responses for each command that will
# be sent. They are '\n' for deletion and ending, but are
# '\n- ' for the rest.
eor = ['\n'] + (['\n- '] * (1 + len(stripped_commands))) \
+ ['\n']
# The commands consist of a header that tells which program
# or profile to set, the stripped commands, followed by an
# 'END'.
if program_or_profile != 'profile':
header = ['DEL PROG'+str(int(n)),
'DEF PROG'+str(int(n))]
else:
header = ['DEL PROF'+str(int(n)),
'DEF PROF'+str(int(n))]
responses = self.driver.send_commands(\
header + stripped_commands + ['END'], \
timeout=timeout, max_retries=max_retries, eor=eor)
# Check to see if it was set successfully. If it was (the
# last command had no errors), return True. Otherwise, the
# program or profile needs to be ended and deleted before
# returning False.
if not self.driver.command_error(responses[-1]):
return True
else:
if program_or_profile != 'profile':
cmds = ['END', 'DEL PROG'+str(int(n))]
else:
cmds = ['END', 'DEL PROF'+str(int(n))]
self.driver.send_commands(cmds, timeout=timeout,
max_retries=max_retries+2)
return False
def run_program_profile(self, n, program_or_profile='program',
timeout=10.0):
""" Runs a program/profile on the drive.
Runs program or profile 'n' on the drive, grabs its output, and
processes the output. The response from the drive is broken down
into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response; which are all returned.
It is **VERY IMPORTANT** that 'timeout' is long enough for the
program to run if all the output from the drive is to be
collected.
Parameters
----------
n : int
Which program to get.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response for running a program (set to 1.0 for a profile
regardless of what is given). A negative value or ``None``
indicates that the an infinite timeout should be used.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
Notes
-----
Sends 'RUN PROGn' (program) or 'PRUN PROFn' (profile) as the
command to the drive. For a profile, the only output is that
command echoed back. For a program, it will echo back each
command in the program (preceeded by an '*' and followed by a
line feed as opposed to a carriage return).
See Also
--------
get_program : Gets a program.
set_program_profile : Sets a program or profile.
"""
if program_or_profile != 'profile':
return self.driver.send_command('RUN PROG' + str(int(n)), \
timeout=timeout, immediate=True, eor='*END\n')
else:
return self.driver.send_command( \
'PRUN PROF' + str(int(n)), timeout=1.0, immediate=True)
@property
def energized(self):
""" Energized state of the motor.
``bool`` with energized being ``True``.
Setting it sends an immediate command to the drive to energize
the motor.
Notes
-----
This uses the 'DRIVE' command.
"""
return self._get_parameter('DRIVE', bool)
@energized.setter
def energized(self, value):
self._set_parameter('DRIVE', value, bool)
@property
def denergize_on_kill(self):
""" De-energize motor when the drive is killed.
``bool`` with ``True`` meaning that whenever the drive is given
the kill signal, the motor will de-energize.
Setting it sends an immediate command to the drive to set it.
Notes
-----
This uses the 'KDRIVE' command.
See Also
--------
energized : Get/set the motor energized state.
kill : Kill the drive.
"""
return self._get_parameter('KDRIVE', bool)
@denergize_on_kill.setter
def denergize_on_kill(self, value):
self._set_parameter('KDRIVE', value, bool)
@property
def encoder_resolution(self):
""" Encoder/Resolver resolution.
``int`` with units counts/rev (servo) or counts/pitch (linear)
Setting it sends an immediate command to the drive to change the
encoder/resolver resolution.
Notes
-----
This uses the 'ERES' command.
"""
return self._get_parameter('ERES', int)
@encoder_resolution.setter
def encoder_resolution(self, value):
self._set_parameter('ERES', value, int)
self.reset()
@property
def electrical_pitch(self):
""" The motor's electrical pitch.
float with units of mm
It gives the spacing between two magnets (full magnetic cycle)
on a linear motor. Velocities and accelerations are in units of
pitches/s and pitches/s^2, so it is important.
Setting it sends an immediate command to the drive to change the
electrical pitch.
Notes
-----
This uses the 'DMEPIT' command.
"""
return self._get_parameter('DMEPIT', float)
@electrical_pitch.setter
def electrical_pitch(self, value):
self._set_parameter('DMEPIT', value, float)
self.reset()
@property
def max_velocity(self):
""" The motor's velocity limit.
``float`` in motor units
Notes
-----
This uses the 'DMVLIM' command.
"""
return self._get_parameter('DMVLIM', float)
@max_velocity.setter
def max_velocity(self, value):
self._set_parameter('DMVLIM', value, float)
@property
def motion_commanded(self):
""" Whether motion is commanded or not.
``bool``
Can't be set.
Notes
-----
It is the value of the first bit of the 'TAS' command.
"""
rsp = self.driver.send_command('TAS', immediate=True)
if self.driver.command_error(rsp) or len(rsp[4]) != 1 \
or rsp[4][0][0:4] != '*TAS':
return False
else:
return (rsp[4][0][4] == '1')
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/__init__.py | GeminiG6.set_program_profile | python | def set_program_profile(self, n, commands,
program_or_profile='program',
timeout=1.0, max_retries=0):
# Grab the n'th program on the drive and strip commands. If we
# are doing a profile, None will be used as a placeholder.
if program_or_profile != 'profile':
current_program = self.get_program(n, timeout=timeout, \
max_retries=max_retries+2)
else:
current_program = None
stripped_commands = utilities.strip_commands(commands)
# If the two are identical and we are doing a program, then
# nothing needs to be done and the program is already set
# (return True). Otherwise, it needs to be overwritten. If there
# were no errors on the last command, then it was written
# successfully. Otherwise, the program or profile needs to be
# terminated and then deleted.
if current_program is not None \
and current_program == stripped_commands:
return True
else:
# Construct the End Of Responses for each command that will
# be sent. They are '\n' for deletion and ending, but are
# '\n- ' for the rest.
eor = ['\n'] + (['\n- '] * (1 + len(stripped_commands))) \
+ ['\n']
# The commands consist of a header that tells which program
# or profile to set, the stripped commands, followed by an
# 'END'.
if program_or_profile != 'profile':
header = ['DEL PROG'+str(int(n)),
'DEF PROG'+str(int(n))]
else:
header = ['DEL PROF'+str(int(n)),
'DEF PROF'+str(int(n))]
responses = self.driver.send_commands(\
header + stripped_commands + ['END'], \
timeout=timeout, max_retries=max_retries, eor=eor)
# Check to see if it was set successfully. If it was (the
# last command had no errors), return True. Otherwise, the
# program or profile needs to be ended and deleted before
# returning False.
if not self.driver.command_error(responses[-1]):
return True
else:
if program_or_profile != 'profile':
cmds = ['END', 'DEL PROG'+str(int(n))]
else:
cmds = ['END', 'DEL PROF'+str(int(n))]
self.driver.send_commands(cmds, timeout=timeout,
max_retries=max_retries+2)
return False | Sets a program/profile on the drive.
Sets program or profile 'n' on the drive to the sequence of
commands in 'commands'. If the existing program is identical, it
is not overwritten (can't check this for a profile). Returns
whether the program or profile was successfully set or not (if
the existing one is identical, it is considered a success).
Parameters
----------
n : int
Which program to set.
commands : list or tuple of strings
``list`` or ``tuple`` of commands to send to the drive. Each
command must be a string.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the program or profile was successfully set or not
(an identical program already existing on the drive is
considered a success).
Notes
-----
'commands' gets wrapped between ['DEL PROGn', 'DEF PROGn'] and
'END' or the equivalent profile ones.
See Also
--------
get_program : Gets a program.
run_program_profile : Runs a program or profile. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L482-L581 | [
"def strip_commands(commands):\n \"\"\" Strips a sequence of commands.\n\n Strips down the sequence of commands by removing comments and\n surrounding whitespace around each individual command and then\n removing blank commands.\n\n Parameters\n ----------\n commands : iterable of strings\n Iterable of commands to strip.\n\n Returns\n -------\n stripped_commands : list of str\n The stripped commands with blank ones removed.\n\n \"\"\"\n # Go through each command one by one, stripping it and adding it to\n # a growing list if it is not blank. Each command needs to be\n # converted to an str if it is a bytes.\n stripped_commands = []\n for v in commands:\n if isinstance(v, bytes):\n v = v.decode(errors='replace')\n v = v.split(';')[0].strip()\n if len(v) != 0:\n stripped_commands.append(v)\n return stripped_commands\n",
"def get_program(self, n, timeout=2.0, max_retries=2):\n \"\"\" Get a program from the drive.\n\n Gets program 'n' from the drive and returns its commands.\n\n Parameters\n ----------\n n : int\n Which program to get.\n timeout : number, optional\n Optional timeout in seconds to use when reading the\n response. A negative value or ``None`` indicates that the\n an infinite timeout should be used.\n max_retries : int, optional\n Maximum number of retries to do per command in the case of\n errors.\n\n Returns\n -------\n commands : list of str\n ``list`` of ``str`` commands making up the program. The\n trailing 'END' is removed. Empty if there was an error.\n\n Notes\n -----\n The command sent to the drive is '!TPROG PROGn'.\n\n See Also\n --------\n set_program_profile : Sets a program or profile.\n run_program_profile : Runs a program or profile.\n\n \"\"\"\n # Send the 'TPROG PROGn' command to read the program.\n response = self.driver.send_command( \\\n 'TPROG PROG' + str(int(n)), timeout=timeout, \\\n immediate=True, max_retries=max_retries)\n\n # If there was an error, then return empty. Otherwise, return\n # the response lines but strip the leading '*' first and the\n # 'END' at the end of the list.\n if self.driver.command_error(response) \\\n or len(response[4]) == 0:\n return []\n else:\n if '*END' in response[4]:\n response[4].remove('*END')\n return [line[1:] for line in response[4]]\n"
] | class GeminiG6(object):
""" Controller for a Parker Motion Gemini GV-6 or GT-6.
An object to connect to and control a Parker Motion Gemini GV-6 or
GT-6 servo/stepper motor drive already connected to with a
particular `driver`.
Parameters
----------
driver : driver
Connected instance of a class in ``drivers``. Use ``get_driver``
to load one. Is stored in the attribute ``driver``.
Raises
------
GeminiError
If the attached device is not a Gemini GV-6 or GT-6.
Attributes
----------
driver : driver
Driver for communicating to the drive.
energized : bool
denergize_on_kill : bool
encoder_resolution : int
electrical_pitch : float
max_velocity : float
motion_commanded : bool
See Also
--------
get_driver
"""
def __init__(self, driver):
#: Driver for communicating to the drive.
#:
#: driver
#:
#: A class from ``GeminiMotorDriver.drivers``. Can be loaded
#: using ``get_driver``.
#:
#: See Also
#: --------
#: get_driver
self.driver = driver
# Make sure that it is indeed a GV/T6, and throw an exception
# otherwise. It should respond to the 'TREV' command with 'TREV'
# echoed and '*TREV-GV6-L3E_D1.50_F1.00' where everything after
# the 'GV6' (possibly replaced with a 'GT6') part is model
# dependent.
response = self.driver.send_command('TREV', timeout=1.0,
immediate=True)
if re.search('^!TREV\r\\*TREV-G[VT]{1}6', response[1]) is None:
raise GeminiError('Not a valid Gemini GV-6 or GT-6 device.')
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError('Couldn''t retrieve parameter '
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str)
def _set_parameter(self, name, value, tp, timeout=1.0,
max_retries=2):
""" Sets the specified drive parameter.
Sets a parameter on the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to set. It is always the command to
set it when followed by the value.
value : bool, int, or float
Value to set the parameter to.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the last attempt to set the parameter was successful
(``True``) or not (``False`` meaning it had an error).
See Also
--------
_get_parameter : Get a parameter.
"""
# Return False if tp isn't one of the valid types.
if tp not in (bool, int, float):
return False
# Convert value to the string that the drive will expect. value
# must first be converted to the proper type before getting
# converted to str in the usual fasion. As bools need to be a
# '1' or a '0', it must be converted to int before going through
# str.
if tp == bool:
value_str = str(int(bool(value)))
elif tp == int:
value_str = str(int(value))
elif tp == float:
value_str = str(float(value))
# Immediately set the named parameter of the drive. The command
# is just the parameter name followed by the value string.
response = self.driver.send_command(name+value_str, \
timeout=timeout, immediate=True, max_retries=max_retries)
# Return whether the setting was successful or not.
return not self.driver.command_error(response)
def pause(self, max_retries=0):
""" Pauses the drive (execution of commands).
Causes the drive to pause execution of commands till it is
unpaused. Commands will be queued until it is unpaused. Motion
is not stopped.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to pause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last pause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
unpause : Unpause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('PS', timeout=1.0,
immediate=True, max_retries=max_retries)))
def unpause(self, max_retries=0):
""" Unpauses the drive.
Unpauses the drive. Commands queued while it is paused will then
be executed.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to unpause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last unpause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
pause : Pause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('C',
timeout=1.0, immediate=True, max_retries=max_retries)))
def stop(self, max_retries=0):
""" Stops motion.
The drive stops the motor.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last stop command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!S1'.
"""
return (not self.driver.command_error(
self.driver.send_command('S1',
timeout=1.0, immediate=True, max_retries=max_retries)))
def kill(self, max_retries=0):
""" Kills the drive.
The drive stops the motor and any running program. The motor
will de-energize depending on the state of
``denergize_on_kill``.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last kill command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!K'.
See Also
--------
denergize_on_kill : Controls whether the motor de-energizes
after the drive is killed or not.
"""
return (not self.driver.command_error(
self.driver.send_command('K',
timeout=1.0, immediate=True, max_retries=max_retries)))
def reset(self, max_retries=0):
    """ Resets the drive.

    Resets the drive, which is equivalent to a power cycling.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to reset the drive in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last reset command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is '!RESET'.

    """
    # A reset takes much longer than other commands, hence the 10 s
    # timeout instead of the usual 1 s.
    rsp = self.driver.send_command('RESET', timeout=10.0,
                                   immediate=True,
                                   max_retries=max_retries)
    return not self.driver.command_error(rsp)
def get_program(self, n, timeout=2.0, max_retries=2):
    """ Get a program from the drive.

    Gets program 'n' from the drive and returns its commands.

    Parameters
    ----------
    n : int
        Which program to get.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.

    Returns
    -------
    commands : list of str
        ``list`` of ``str`` commands making up the program. The
        trailing 'END' is removed. Empty if there was an error.

    Notes
    -----
    The command sent to the drive is '!TPROG PROGn'.

    See Also
    --------
    set_program_profile : Sets a program or profile.
    run_program_profile : Runs a program or profile.

    """
    # Send the 'TPROG PROGn' command to read the program.
    response = self.driver.send_command(
        'TPROG PROG' + str(int(n)), timeout=timeout,
        immediate=True, max_retries=max_retries)
    # If there was an error, then return empty.
    if self.driver.command_error(response) \
            or len(response[4]) == 0:
        return []
    # Work on a copy of the response lines so that the caller's
    # response structure is not mutated (previously '*END' was
    # removed from response[4] in place).
    lines = list(response[4])
    if '*END' in lines:
        lines.remove('*END')
    # Each line of the program is echoed back with a leading '*',
    # which is stripped off here.
    return [line[1:] for line in lines]
def run_program_profile(self, n, program_or_profile='program',
                        timeout=10.0):
    """ Runs a program/profile on the drive.

    Runs program or profile 'n' on the drive, grabs its output, and
    processes the output. The response from the drive is broken down
    into the echoed command (drive echoes it back), any error
    returned by the drive (leading '*' is stripped), and the
    different lines of the response; which are all returned.

    It is **VERY IMPORTANT** that 'timeout' is long enough for the
    program to run if all the output from the drive is to be
    collected.

    Parameters
    ----------
    n : int
        Which program or profile to run.
    program_or_profile : {'program', 'profile'}, optional
        Whether to run a program or a profile. Anything other than
        these two values implies the default.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response for running a program (set to 1.0 for a profile
        regardless of what is given). A negative value or ``None``
        indicates that an infinite timeout should be used.

    Returns
    -------
    output : list
        A 5-element ``list``. The elements, in order, are the
        sanitized command (``str``), the full response (``str``),
        the echoed command (``str``), any error response (``None``
        if none, or the ``str`` of the error), and the lines of the
        response that are not the echo or error line (``list`` of
        ``str`` with newlines stripped).

    Notes
    -----
    Sends 'RUN PROGn' (program) or 'PRUN PROFn' (profile) as the
    command to the drive. For a profile, the only output is that
    command echoed back. For a program, each command in the program
    is echoed back (preceded by an '*' and followed by a line feed
    as opposed to a carriage return).

    See Also
    --------
    get_program : Gets a program.
    set_program_profile : Sets a program or profile.

    """
    # Profiles produce only an echo, so a short fixed timeout is
    # enough; programs stream their whole output and need the
    # caller-supplied timeout plus an explicit end-of-response.
    if program_or_profile == 'profile':
        return self.driver.send_command(
            'PRUN PROF' + str(int(n)), timeout=1.0, immediate=True)
    return self.driver.send_command('RUN PROG' + str(int(n)),
                                    timeout=timeout, immediate=True,
                                    eor='*END\n')
@property
def energized(self):
    """ Energized state of the motor.

    ``bool`` with energized being ``True``.

    Setting it sends an immediate command to the drive to energize
    the motor.

    Notes
    -----
    This uses the 'DRIVE' command ('DRIVE1' energizes, 'DRIVE0'
    de-energizes).

    """
    return self._get_parameter('DRIVE', bool)

@energized.setter
def energized(self, value):
    # Booleans are sent as 'DRIVE1'/'DRIVE0' by _set_parameter.
    self._set_parameter('DRIVE', value, bool)

@property
def denergize_on_kill(self):
    """ De-energize motor when the drive is killed.

    ``bool`` with ``True`` meaning that whenever the drive is given
    the kill signal, the motor will de-energize.

    Setting it sends an immediate command to the drive to set it.

    Notes
    -----
    This uses the 'KDRIVE' command.

    See Also
    --------
    energized : Get/set the motor energized state.
    kill : Kill the drive.

    """
    return self._get_parameter('KDRIVE', bool)

@denergize_on_kill.setter
def denergize_on_kill(self, value):
    # Sent as 'KDRIVE1'/'KDRIVE0'.
    self._set_parameter('KDRIVE', value, bool)
@property
def encoder_resolution(self):
    """ Encoder/Resolver resolution.

    ``int`` with units counts/rev (servo) or counts/pitch (linear).

    Setting it sends an immediate command to the drive to change the
    encoder/resolver resolution, then resets the drive (equivalent
    to a power cycle), so motion stops and queued state is lost.

    Notes
    -----
    This uses the 'ERES' command.

    """
    return self._get_parameter('ERES', int)

@encoder_resolution.setter
def encoder_resolution(self, value):
    self._set_parameter('ERES', value, int)
    # Reset (power cycle) the drive after changing the resolution —
    # presumably required for the new value to take effect; confirm
    # against the drive manual.
    self.reset()

@property
def electrical_pitch(self):
    """ The motor's electrical pitch.

    float with units of mm.

    It gives the spacing between two magnets (full magnetic cycle)
    on a linear motor. Velocities and accelerations are in units of
    pitches/s and pitches/s^2, so it is important.

    Setting it sends an immediate command to the drive to change the
    electrical pitch, then resets the drive (equivalent to a power
    cycle).

    Notes
    -----
    This uses the 'DMEPIT' command.

    """
    return self._get_parameter('DMEPIT', float)

@electrical_pitch.setter
def electrical_pitch(self, value):
    self._set_parameter('DMEPIT', value, float)
    # Reset (power cycle) the drive after changing the pitch —
    # presumably required for the new value to take effect; confirm
    # against the drive manual.
    self.reset()
@property
def max_velocity(self):
    """ The motor's velocity limit.

    ``float`` in motor units.

    Setting it sends an immediate command to the drive; unlike the
    resolution/pitch setters, no drive reset is performed.

    Notes
    -----
    This uses the 'DMVLIM' command.

    """
    return self._get_parameter('DMVLIM', float)

@max_velocity.setter
def max_velocity(self, value):
    self._set_parameter('DMVLIM', value, float)
@property
def motion_commanded(self):
    """ Whether motion is commanded or not.

    ``bool``

    Can't be set.

    Notes
    -----
    It is the value of the first bit of the 'TAS' command.

    """
    rsp = self.driver.send_command('TAS', immediate=True)
    # A valid response is a single line of the form '*TAS<bits>'.
    if self.driver.command_error(rsp) or len(rsp[4]) != 1:
        return False
    line = rsp[4][0]
    # Guard the length explicitly: a truncated response of exactly
    # '*TAS' previously passed the prefix check and then raised
    # IndexError on line[4]. Now it just reads as "no motion".
    if not line.startswith('*TAS') or len(line) < 5:
        return False
    # The first status bit indicates commanded motion.
    return line[4] == '1'
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/__init__.py | GeminiG6.run_program_profile | python | def run_program_profile(self, n, program_or_profile='program',
timeout=10.0):
if program_or_profile != 'profile':
return self.driver.send_command('RUN PROG' + str(int(n)), \
timeout=timeout, immediate=True, eor='*END\n')
else:
return self.driver.send_command( \
'PRUN PROF' + str(int(n)), timeout=1.0, immediate=True) | Runs a program/profile on the drive.
Runs program or profile 'n' on the drive, grabs its output, and
processes the output. The response from the drive is broken down
into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response; which are all returned.
It is **VERY IMPORTANT** that 'timeout' is long enough for the
program to run if all the output from the drive is to be
collected.
Parameters
----------
n : int
Which program to get.
program_or_profile : {'program', 'profile'}, optional
Whether to read a program or a profile. Anything other than
these two values implies the default.
timeout : number, optional
Optional timeout in seconds to use when reading the
response for running a program (set to 1.0 for a profile
regardless of what is given). A negative value or ``None``
indicates that the an infinite timeout should be used.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
Notes
-----
Sends 'RUN PROGn' (program) or 'PRUN PROFn' (profile) as the
command to the drive. For a profile, the only output is that
command echoed back. For a program, it will echo back each
command in the program (preceeded by an '*' and followed by a
line feed as opposed to a carriage return).
See Also
--------
get_program : Gets a program.
set_program_profile : Sets a program or profile. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L583-L639 | null | class GeminiG6(object):
""" Controller for a Parker Motion Gemini GV-6 or GT-6.
An object to connect to and control a Parker Motion Gemini GV-6 or
GT-6 servo/stepper motor drive already connected to with a
particular `driver`.
Parameters
----------
driver : driver
Connected instance of a class in ``drivers``. Use ``get_driver``
to load one. Is stored in the attribute ``driver``.
Raises
------
GeminiError
If the attached device is not a Gemini GV-6 or GT-6.
Attributes
----------
driver : driver
Driver for communicating to the drive.
energized : bool
denergize_on_kill : bool
encoder_resolution : int
electrical_pitch : float
max_velocity : float
motion_commanded : bool
See Also
--------
get_driver
"""
def __init__(self, driver):
    #: Driver for communicating to the drive.
    #:
    #: driver
    #:
    #: A class from ``GeminiMotorDriver.drivers``. Can be loaded
    #: using ``get_driver``.
    #:
    #: See Also
    #: --------
    #: get_driver
    self.driver = driver
    # Verify that the attached device really is a Gemini GV-6 or
    # GT-6. The drive echoes 'TREV' back and replies with a model
    # string such as '*TREV-GV6-L3E_D1.50_F1.00', where everything
    # after the 'GV6'/'GT6' part is model dependent.
    rsp = self.driver.send_command('TREV', timeout=1.0,
                                   immediate=True)
    if re.search('^!TREV\r\\*TREV-G[VT]{1}6', rsp[1]) is None:
        raise GeminiError('Not a valid Gemini GV-6 or GT-6 device.')
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError('Couldn''t retrieve parameter '
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str)
def _set_parameter(self, name, value, tp, timeout=1.0,
max_retries=2):
""" Sets the specified drive parameter.
Sets a parameter on the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to set. It is always the command to
set it when followed by the value.
value : bool, int, or float
Value to set the parameter to.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the last attempt to set the parameter was successful
(``True``) or not (``False`` meaning it had an error).
See Also
--------
_get_parameter : Get a parameter.
"""
# Return False if tp isn't one of the valid types.
if tp not in (bool, int, float):
return False
# Convert value to the string that the drive will expect. value
# must first be converted to the proper type before getting
# converted to str in the usual fasion. As bools need to be a
# '1' or a '0', it must be converted to int before going through
# str.
if tp == bool:
value_str = str(int(bool(value)))
elif tp == int:
value_str = str(int(value))
elif tp == float:
value_str = str(float(value))
# Immediately set the named parameter of the drive. The command
# is just the parameter name followed by the value string.
response = self.driver.send_command(name+value_str, \
timeout=timeout, immediate=True, max_retries=max_retries)
# Return whether the setting was successful or not.
return not self.driver.command_error(response)
def pause(self, max_retries=0):
    """ Pauses the drive (execution of commands).

    Causes the drive to pause execution of commands till it is
    unpaused. Commands will be queued until it is unpaused. Motion
    is not stopped.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to pause the drive in the
        case of errors.

    Returns
    -------
    success : bool
        Whether the last pause command (last try or retry) was
        successful (``True``) or not (``False`` meaning it had an
        error).

    Notes
    -----
    The command sent to the drive is '!PS' (the previous docstring
    incorrectly said '!C', which is the unpause command).

    See Also
    --------
    unpause : Unpause the drive.

    """
    return (not self.driver.command_error(
            self.driver.send_command('PS', timeout=1.0,
            immediate=True, max_retries=max_retries)))
def unpause(self, max_retries=0):
""" Unpauses the drive.
Unpauses the drive. Commands queued while it is paused will then
be executed.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to unpause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last unpause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
pause : Pause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('C',
timeout=1.0, immediate=True, max_retries=max_retries)))
def stop(self, max_retries=0):
""" Stops motion.
The drive stops the motor.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last stop command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!S1'.
"""
return (not self.driver.command_error(
self.driver.send_command('S1',
timeout=1.0, immediate=True, max_retries=max_retries)))
def kill(self, max_retries=0):
""" Kills the drive.
The drive stops the motor and any running program. The motor
will de-energize depending on the state of
``denergize_on_kill``.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last kill command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!K'.
See Also
--------
denergize_on_kill : Controls whether the motor de-energizes
after the drive is killed or not.
"""
return (not self.driver.command_error(
self.driver.send_command('K',
timeout=1.0, immediate=True, max_retries=max_retries)))
def reset(self, max_retries=0):
""" Resets the drive.
Resets the drive, which is equivalent to a power cycling.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to reset the drive in the
case of errors.
Returns
-------
success : bool
Whether the last reset command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!RESET'.
"""
return (not self.driver.command_error(
self.driver.send_command('RESET',
timeout=10.0, immediate=True, max_retries=max_retries)))
def get_program(self, n, timeout=2.0, max_retries=2):
""" Get a program from the drive.
Gets program 'n' from the drive and returns its commands.
Parameters
----------
n : int
Which program to get.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the program. The
trailing 'END' is removed. Empty if there was an error.
Notes
-----
The command sent to the drive is '!TPROG PROGn'.
See Also
--------
set_program_profile : Sets a program or profile.
run_program_profile : Runs a program or profile.
"""
# Send the 'TPROG PROGn' command to read the program.
response = self.driver.send_command( \
'TPROG PROG' + str(int(n)), timeout=timeout, \
immediate=True, max_retries=max_retries)
# If there was an error, then return empty. Otherwise, return
# the response lines but strip the leading '*' first and the
# 'END' at the end of the list.
if self.driver.command_error(response) \
or len(response[4]) == 0:
return []
else:
if '*END' in response[4]:
response[4].remove('*END')
return [line[1:] for line in response[4]]
def set_program_profile(self, n, commands,
                        program_or_profile='program',
                        timeout=1.0, max_retries=0):
    """ Sets a program/profile on the drive.

    Sets program or profile 'n' on the drive to the sequence of
    commands in 'commands'. If the existing program is identical, it
    is not overwritten (can't check this for a profile). Returns
    whether the program or profile was successfully set or not (if
    the existing one is identical, it is considered a success).

    Parameters
    ----------
    n : int
        Which program to set.
    commands : list or tuple of strings
        ``list`` or ``tuple`` of commands to send to the drive. Each
        command must be a string.
    program_or_profile : {'program', 'profile'}, optional
        Whether to set a program or a profile. Anything other than
        these two values implies the default.
    timeout : number, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.

    Returns
    -------
    success : bool
        Whether the program or profile was successfully set or not
        (an identical program already existing on the drive is
        considered a success).

    Notes
    -----
    'commands' gets wrapped between ['DEL PROGn', 'DEF PROGn'] and
    'END' or the equivalent profile ones.

    See Also
    --------
    get_program : Gets a program.
    run_program_profile : Runs a program or profile.

    """
    # Grab the n'th program on the drive and strip commands. If we
    # are doing a profile, None will be used as a placeholder
    # (profiles cannot be read back for comparison).
    if program_or_profile != 'profile':
        current_program = self.get_program(n, timeout=timeout, \
            max_retries=max_retries+2)
    else:
        current_program = None
    stripped_commands = utilities.strip_commands(commands)
    # If the two are identical and we are doing a program, then
    # nothing needs to be done and the program is already set
    # (return True). Otherwise, it needs to be overwritten. If there
    # were no errors on the last command, then it was written
    # successfully. Otherwise, the program or profile needs to be
    # terminated and then deleted.
    if current_program is not None \
            and current_program == stripped_commands:
        return True
    else:
        # Construct the End Of Responses for each command that will
        # be sent. They are '\n' for deletion and ending, but are
        # '\n- ' for the rest (the drive prompts '- ' while inside a
        # program/profile definition).
        eor = ['\n'] + (['\n- '] * (1 + len(stripped_commands))) \
            + ['\n']
        # The commands consist of a header that tells which program
        # or profile to set, the stripped commands, followed by an
        # 'END'.
        if program_or_profile != 'profile':
            header = ['DEL PROG'+str(int(n)),
                      'DEF PROG'+str(int(n))]
        else:
            header = ['DEL PROF'+str(int(n)),
                      'DEF PROF'+str(int(n))]
        responses = self.driver.send_commands(\
            header + stripped_commands + ['END'], \
            timeout=timeout, max_retries=max_retries, eor=eor)
        # Check to see if it was set successfully. If it was (the
        # last command had no errors), return True. Otherwise, the
        # program or profile needs to be ended and deleted before
        # returning False, so a half-written definition is not left
        # on the drive.
        if not self.driver.command_error(responses[-1]):
            return True
        else:
            if program_or_profile != 'profile':
                cmds = ['END', 'DEL PROG'+str(int(n))]
            else:
                cmds = ['END', 'DEL PROF'+str(int(n))]
            self.driver.send_commands(cmds, timeout=timeout,
                                      max_retries=max_retries+2)
            return False
@property
def energized(self):
""" Energized state of the motor.
``bool`` with energized being ``True``.
Setting it sends an immediate command to the drive to energize
the motor.
Notes
-----
This uses the 'DRIVE' command.
"""
return self._get_parameter('DRIVE', bool)
@energized.setter
def energized(self, value):
self._set_parameter('DRIVE', value, bool)
@property
def denergize_on_kill(self):
""" De-energize motor when the drive is killed.
``bool`` with ``True`` meaning that whenever the drive is given
the kill signal, the motor will de-energize.
Setting it sends an immediate command to the drive to set it.
Notes
-----
This uses the 'KDRIVE' command.
See Also
--------
energized : Get/set the motor energized state.
kill : Kill the drive.
"""
return self._get_parameter('KDRIVE', bool)
@denergize_on_kill.setter
def denergize_on_kill(self, value):
self._set_parameter('KDRIVE', value, bool)
@property
def encoder_resolution(self):
""" Encoder/Resolver resolution.
``int`` with units counts/rev (servo) or counts/pitch (linear)
Setting it sends an immediate command to the drive to change the
encoder/resolver resolution.
Notes
-----
This uses the 'ERES' command.
"""
return self._get_parameter('ERES', int)
@encoder_resolution.setter
def encoder_resolution(self, value):
self._set_parameter('ERES', value, int)
self.reset()
@property
def electrical_pitch(self):
""" The motor's electrical pitch.
float with units of mm
It gives the spacing between two magnets (full magnetic cycle)
on a linear motor. Velocities and accelerations are in units of
pitches/s and pitches/s^2, so it is important.
Setting it sends an immediate command to the drive to change the
electrical pitch.
Notes
-----
This uses the 'DMEPIT' command.
"""
return self._get_parameter('DMEPIT', float)
@electrical_pitch.setter
def electrical_pitch(self, value):
self._set_parameter('DMEPIT', value, float)
self.reset()
@property
def max_velocity(self):
""" The motor's velocity limit.
``float`` in motor units
Notes
-----
This uses the 'DMVLIM' command.
"""
return self._get_parameter('DMVLIM', float)
@max_velocity.setter
def max_velocity(self, value):
self._set_parameter('DMVLIM', value, float)
@property
def motion_commanded(self):
""" Whether motion is commanded or not.
``bool``
Can't be set.
Notes
-----
It is the value of the first bit of the 'TAS' command.
"""
rsp = self.driver.send_command('TAS', immediate=True)
if self.driver.command_error(rsp) or len(rsp[4]) != 1 \
or rsp[4][0][0:4] != '*TAS':
return False
else:
return (rsp[4][0][4] == '1')
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/__init__.py | GeminiG6.motion_commanded | python | def motion_commanded(self):
rsp = self.driver.send_command('TAS', immediate=True)
if self.driver.command_error(rsp) or len(rsp[4]) != 1 \
or rsp[4][0][0:4] != '*TAS':
return False
else:
return (rsp[4][0][4] == '1') | Whether motion is commanded or not.
``bool``
Can't be set.
Notes
-----
It is the value of the first bit of the 'TAS' command. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L750-L767 | null | class GeminiG6(object):
""" Controller for a Parker Motion Gemini GV-6 or GT-6.
An object to connect to and control a Parker Motion Gemini GV-6 or
GT-6 servo/stepper motor drive already connected to with a
particular `driver`.
Parameters
----------
driver : driver
Connected instance of a class in ``drivers``. Use ``get_driver``
to load one. Is stored in the attribute ``driver``.
Raises
------
GeminiError
If the attached device is not a Gemini GV-6 or GT-6.
Attributes
----------
driver : driver
Driver for communicating to the drive.
energized : bool
denergize_on_kill : bool
encoder_resolution : int
electrical_pitch : float
max_velocity : float
motion_commanded : bool
See Also
--------
get_driver
"""
def __init__(self, driver):
#: Driver for communicating to the drive.
#:
#: driver
#:
#: A class from ``GeminiMotorDriver.drivers``. Can be loaded
#: using ``get_driver``.
#:
#: See Also
#: --------
#: get_driver
self.driver = driver
# Make sure that it is indeed a GV/T6, and throw an exception
# otherwise. It should respond to the 'TREV' command with 'TREV'
# echoed and '*TREV-GV6-L3E_D1.50_F1.00' where everything after
# the 'GV6' (possibly replaced with a 'GT6') part is model
# dependent.
response = self.driver.send_command('TREV', timeout=1.0,
immediate=True)
if re.search('^!TREV\r\\*TREV-G[VT]{1}6', response[1]) is None:
raise GeminiError('Not a valid Gemini GV-6 or GT-6 device.')
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError('Couldn''t retrieve parameter '
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str)
def _set_parameter(self, name, value, tp, timeout=1.0,
max_retries=2):
""" Sets the specified drive parameter.
Sets a parameter on the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to set. It is always the command to
set it when followed by the value.
value : bool, int, or float
Value to set the parameter to.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the last attempt to set the parameter was successful
(``True``) or not (``False`` meaning it had an error).
See Also
--------
_get_parameter : Get a parameter.
"""
# Return False if tp isn't one of the valid types.
if tp not in (bool, int, float):
return False
# Convert value to the string that the drive will expect. value
# must first be converted to the proper type before getting
# converted to str in the usual fasion. As bools need to be a
# '1' or a '0', it must be converted to int before going through
# str.
if tp == bool:
value_str = str(int(bool(value)))
elif tp == int:
value_str = str(int(value))
elif tp == float:
value_str = str(float(value))
# Immediately set the named parameter of the drive. The command
# is just the parameter name followed by the value string.
response = self.driver.send_command(name+value_str, \
timeout=timeout, immediate=True, max_retries=max_retries)
# Return whether the setting was successful or not.
return not self.driver.command_error(response)
def pause(self, max_retries=0):
""" Pauses the drive (execution of commands).
Causes the drive to pause execution of commands till it is
unpaused. Commands will be queued until it is unpaused. Motion
is not stopped.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to pause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last pause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
unpause : Unpause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('PS', timeout=1.0,
immediate=True, max_retries=max_retries)))
def unpause(self, max_retries=0):
""" Unpauses the drive.
Unpauses the drive. Commands queued while it is paused will then
be executed.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to unpause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last unpause command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!C'.
See Also
--------
pause : Pause the drive.
"""
return (not self.driver.command_error(
self.driver.send_command('C',
timeout=1.0, immediate=True, max_retries=max_retries)))
def stop(self, max_retries=0):
""" Stops motion.
The drive stops the motor.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to kill the drive in the
case of errors.
Returns
-------
success : bool
Whether the last stop command (last try or retry) was
successful (``True``) or not (``False`` meaning it had an
error).
Notes
-----
The command sent to the drive is '!S1'.
"""
return (not self.driver.command_error(
self.driver.send_command('S1',
timeout=1.0, immediate=True, max_retries=max_retries)))
def kill(self, max_retries=0):
    """ Kills the drive.

    The drive stops the motor and any running program. Whether the
    motor de-energizes afterwards depends on the state of
    ``denergize_on_kill``.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to kill the drive in the
        case of errors.

    Returns
    -------
    success : bool
        ``True`` if the last kill command (last try or retry)
        completed without a drive error, ``False`` otherwise.

    Notes
    -----
    The command sent to the drive is '!K'.

    See Also
    --------
    denergize_on_kill : Controls whether the motor de-energizes
        after the drive is killed or not.

    """
    # Issue the immediate kill command and report success as the
    # absence of a command error.
    reply = self.driver.send_command('K', timeout=1.0,
                                     immediate=True,
                                     max_retries=max_retries)
    return not self.driver.command_error(reply)
def reset(self, max_retries=0):
    """ Resets the drive.

    Resetting is equivalent to power cycling the drive.

    Parameters
    ----------
    max_retries : int, optional
        Maximum number of retries to do to reset the drive in the
        case of errors.

    Returns
    -------
    success : bool
        ``True`` if the last reset command (last try or retry)
        completed without a drive error, ``False`` otherwise.

    Notes
    -----
    The command sent to the drive is '!RESET'. A longer timeout is
    used because the drive takes a while to come back up.

    """
    # Issue the immediate reset command and report success as the
    # absence of a command error.
    reply = self.driver.send_command('RESET', timeout=10.0,
                                     immediate=True,
                                     max_retries=max_retries)
    return not self.driver.command_error(reply)
def get_program(self, n, timeout=2.0, max_retries=2):
    """ Reads back a program from the drive.

    Gets program 'n' from the drive and returns the commands it is
    made of.

    Parameters
    ----------
    n : int
        Which program to get.
    timeout : number, optional
        Timeout in seconds to use when reading the response. A
        negative value or ``None`` indicates an infinite timeout.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.

    Returns
    -------
    commands : list of str
        ``list`` of ``str`` commands making up the program, with the
        trailing 'END' removed. Empty if there was an error.

    Notes
    -----
    The command sent to the drive is '!TPROG PROGn'.

    See Also
    --------
    set_program_profile : Sets a program or profile.
    run_program_profile : Runs a program or profile.

    """
    # Ask the drive to transfer program n back to us.
    reply = self.driver.send_command('TPROG PROG' + str(int(n)),
                                     timeout=timeout, immediate=True,
                                     max_retries=max_retries)
    # An error or an empty response body means there is nothing to
    # return.
    if self.driver.command_error(reply) or not reply[4]:
        return []
    # Drop the '*END' terminator if present, then strip the leading
    # '*' from each remaining line.
    body = reply[4]
    if '*END' in body:
        body.remove('*END')
    return [line[1:] for line in body]
def set_program_profile(self, n, commands,
                        program_or_profile='program',
                        timeout=1.0, max_retries=0):
    """ Writes a program/profile to the drive.

    Writes the sequence of commands in `commands` as program or
    profile 'n' on the drive. For a program, an identical existing
    program is not overwritten (this check cannot be done for a
    profile) and counts as a success.

    Parameters
    ----------
    n : int
        Which program/profile slot to set.
    commands : list or tuple of strings
        Commands to send to the drive. Each command must be a string.
    program_or_profile : {'program', 'profile'}, optional
        Whether to write a program or a profile. Anything other than
        these two values implies the default.
    timeout : number, optional
        Timeout in seconds to use when reading each response. A
        negative value or ``None`` indicates an infinite timeout.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.

    Returns
    -------
    success : bool
        Whether the program or profile was successfully set (an
        identical program already on the drive is considered a
        success).

    Notes
    -----
    `commands` gets wrapped between ['DEL PROGn', 'DEF PROGn'] and
    'END' or the equivalent profile ones.

    See Also
    --------
    get_program : Gets a program.
    run_program_profile : Runs a program or profile.

    """
    is_program = (program_or_profile != 'profile')
    # For a program, read back what is currently stored so that an
    # identical program is not needlessly rewritten. Profiles cannot
    # be read back, so None serves as a placeholder.
    if is_program:
        existing = self.get_program(n, timeout=timeout,
                                    max_retries=max_retries+2)
    else:
        existing = None
    cleaned = utilities.strip_commands(commands)
    # An identical program already on the drive means there is
    # nothing to do.
    if existing is not None and existing == cleaned:
        return True
    # End Of Response strings for each command to be sent: '\n' for
    # the deletion and the final 'END', '\n- ' for everything in
    # between (while the definition is open).
    eors = ['\n'] + ['\n- '] * (1 + len(cleaned)) + ['\n']
    # The command list is a delete/define header, the stripped
    # commands, and a terminating 'END'.
    kind = 'PROG' if is_program else 'PROF'
    prefix = ['DEL ' + kind + str(int(n)),
              'DEF ' + kind + str(int(n))]
    replies = self.driver.send_commands(prefix + cleaned + ['END'],
                                        timeout=timeout,
                                        max_retries=max_retries,
                                        eor=eors)
    # Success if the final command ('END') produced no error.
    if not self.driver.command_error(replies[-1]):
        return True
    # Otherwise, terminate and delete the partial definition before
    # reporting failure.
    self.driver.send_commands(['END', 'DEL ' + kind + str(int(n))],
                              timeout=timeout,
                              max_retries=max_retries+2)
    return False
def run_program_profile(self, n, program_or_profile='program',
                        timeout=10.0):
    """ Runs a program/profile on the drive.

    Runs program or profile 'n' on the drive, grabs its output, and
    returns the processed response (echoed command, any error with
    the leading '*' stripped, and the remaining response lines).

    It is **VERY IMPORTANT** that `timeout` is long enough for a
    program to run if all of its output is to be collected.

    Parameters
    ----------
    n : int
        Which program/profile to run.
    program_or_profile : {'program', 'profile'}, optional
        Whether to run a program or a profile. Anything other than
        these two values implies the default.
    timeout : number, optional
        Timeout in seconds to use when reading the response for
        running a program (a profile always uses 1.0 regardless of
        what is given). A negative value or ``None`` indicates an
        infinite timeout.

    Returns
    -------
    output : list
        A 5-element ``list``: the sanitized command (``str``), the
        full response (``str``), the echoed command (``str``), any
        error response (``None`` if none, or the ``str`` of the
        error), and the lines of the response that are not the echo
        or error line (``list`` of ``str`` with newlines stripped).

    Notes
    -----
    Sends 'RUN PROGn' (program) or 'PRUN PROFn' (profile) as the
    command to the drive. For a profile, the only output is that
    command echoed back. For a program, each command in the program
    is echoed back (preceded by an '*' and followed by a line feed
    as opposed to a carriage return).

    See Also
    --------
    get_program : Gets a program.
    set_program_profile : Sets a program or profile.

    """
    if program_or_profile == 'profile':
        # A profile only echoes the command back, so a short fixed
        # timeout suffices.
        return self.driver.send_command('PRUN PROF' + str(int(n)),
                                        timeout=1.0, immediate=True)
    # A program streams its commands back, terminated by '*END'.
    return self.driver.send_command('RUN PROG' + str(int(n)),
                                    timeout=timeout, immediate=True,
                                    eor='*END\n')
@property
def energized(self):
    """ Energized state of the motor.

    ``bool`` with energized being ``True``. Setting it sends an
    immediate command to the drive to (de)energize the motor.

    Notes
    -----
    This uses the 'DRIVE' command.

    """
    return self._get_parameter('DRIVE', bool)

@energized.setter
def energized(self, state):
    # Energize/de-energize the motor via the 'DRIVE' parameter.
    self._set_parameter('DRIVE', state, bool)
@property
def denergize_on_kill(self):
    """ De-energize motor when the drive is killed.

    ``bool`` with ``True`` meaning that whenever the drive is given
    the kill signal, the motor will de-energize. Setting it sends an
    immediate command to the drive.

    Notes
    -----
    This uses the 'KDRIVE' command.

    See Also
    --------
    energized : Get/set the motor energized state.
    kill : Kill the drive.

    """
    return self._get_parameter('KDRIVE', bool)

@denergize_on_kill.setter
def denergize_on_kill(self, state):
    # Configure kill behavior via the 'KDRIVE' parameter.
    self._set_parameter('KDRIVE', state, bool)
@property
def encoder_resolution(self):
    """ Encoder/Resolver resolution.

    ``int`` with units counts/rev (servo) or counts/pitch (linear).
    Setting it sends an immediate command to the drive to change the
    resolution and then resets the drive.

    Notes
    -----
    This uses the 'ERES' command.

    """
    return self._get_parameter('ERES', int)

@encoder_resolution.setter
def encoder_resolution(self, counts):
    # The new resolution only takes full effect after a drive reset.
    self._set_parameter('ERES', counts, int)
    self.reset()
@property
def electrical_pitch(self):
    """ The motor's electrical pitch.

    ``float`` with units of mm. Gives the spacing between two magnets
    (full magnetic cycle) on a linear motor. Velocities and
    accelerations are in units of pitches/s and pitches/s^2, so it is
    important. Setting it sends an immediate command to the drive to
    change the pitch and then resets the drive.

    Notes
    -----
    This uses the 'DMEPIT' command.

    """
    return self._get_parameter('DMEPIT', float)

@electrical_pitch.setter
def electrical_pitch(self, pitch):
    # The new pitch only takes full effect after a drive reset.
    self._set_parameter('DMEPIT', pitch, float)
    self.reset()
@property
def max_velocity(self):
    """ The motor's velocity limit.

    ``float`` in motor units. Setting it sends an immediate command
    to the drive.

    Notes
    -----
    This uses the 'DMVLIM' command.

    """
    return self._get_parameter('DMVLIM', float)

@max_velocity.setter
def max_velocity(self, limit):
    # Update the drive's velocity limit via the 'DMVLIM' parameter.
    self._set_parameter('DMVLIM', limit, float)
@property
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/compilers/move_sequence.py | compile_sequence | python | def compile_sequence(cycles, program_or_profile='program',
unit_converter=None):
# If needed, cycles needs to be converted to motor units.
if unit_converter is None:
cv_cycles = cycles
else:
cv_cycles = convert_sequence_to_motor_units(cycles, \
unit_converter=unit_converter)
# Initially, we have no commands in our command list.
commands = []
# The A, AD, D, and V parameters of the previous motion should be
# kept track of because if they don't change from one motion to the
# next, the commands to set them don't need to be included. They
# will be started blank since there are no previous motions yet.
previous_motion = {'A': [], 'AD': [], 'D': [], 'V': []}
# Construct each cycle one by one.
for cycle in cv_cycles:
# If more than one iteration is being done, a loop needs to be
# setup. It will be either 'L' or 'PLOOP' with the number of
# iterations attached if it is a program or a profile
# respectively. Since it will be tough to keep track of what
# motion changed from the end of a loop to the beginning of it,
# it is easier to just forget all previous motion values and set
# them all at the beginning of the loop (clear previous_motion).
iterations = int(cycle['iterations'])
if iterations > 1:
previous_motion = {'A': [], 'AD': [], 'D': [], 'V': []}
if program_or_profile != 'profile':
commands.append('L' + str(iterations))
else:
commands.append('PLOOP' + str(iterations))
# Construct each individual move in the cycle.
for i in range(0, len(cycle['moves'])):
# Grab the motion indicated by the current move.
new_motion = cycle['moves'][i]
# If we are doing a profile, AD must be set explicitly
# to A if it is 0.
if program_or_profile == 'profile' \
and new_motion['AD'] == 0.0:
new_motion['AD'] = new_motion['A']
# Set A, AD, and V if they have changed.
for k in ('A', 'AD', 'V'):
if previous_motion[k] != new_motion[k]:
# Grab it and round it to 4 places after the decimal
# point because that is the most that is
# supported. Then, if it is an integer value,
# convert it to an integer because that is what the
# drive will send back if requested (makes
# comparisons easier). Then add the command.
val = round(float(new_motion[k]), 4)
if val == int(val):
val = int(val)
commands.append(k + str(val))
# If the sign of D has flipped, we just need to issue a 'D~'
# command. If the value has changed in another way, it needs
# to be reset.
if previous_motion['D'] != new_motion['D']:
if previous_motion['D'] == -new_motion['D']:
commands.append('D~')
else:
commands.append('D'
+ str(int(new_motion['D'])))
# Grab the amount of time that should be waited after the
# move is done.
wait_time = cycle['wait_times'][i]
# Give the motion command (GO or GOBUF), tell the drive to
# wait till the motor has stopped (a WAIT command if it is a
# program and a VF0 command if it is a profile), and make it
# wait the period of time wait_time (T and GOWHEN commands).
if program_or_profile != 'profile':
commands.append('GO1')
commands.append('WAIT(AS.1=b0)')
if wait_time != 0:
# The wait time needs to be rounded to 3 places
# after the decimal. If it is an integer, it should
# be converted to an int so that the drive will send
# back what we send (makes compairisons easier).
wait_time = round(float(wait_time), 3)
if wait_time == int(wait_time):
wait_time = int(wait_time)
commands.append('T' + str(wait_time))
else:
commands.append('VF0')
commands.append('GOBUF1')
if wait_time != 0:
commands.append('GOWHEN(T='
+ str(int(1000*wait_time))
+ ')')
# Before going onto the next move, previous_motion needs to
# be set to the one just done.
previous_motion = new_motion
# Done with all the moves of the cycle. If we are looping, the
# loop end needs to be put in.
if iterations > 1:
if program_or_profile != 'profile':
commands.append('LN')
else:
commands.append('PLN')
# Done constructing the command list.
return commands | Makes the command list for a move sequence.
Constructs the list of commands to execute the given sequence of
motion. Program/command line commands or profile commands can be
generated depending on the value of `program_or_profile` so that the
commands can be used to construct a program or profile later. Types
of motion supported (see Notes for how to specify) are moves from
one position to another (the motion will always come to a stop
before doing the next motion), waiting a given interval of time till
starting the next move, and looping over a sequence of moves.
Parameters
----------
cycles : iterable of dicts
The iterable of cycles of motion to do one after another. See
Notes for format.
program_or_profile : {'program', 'profile'}, optional
Whether program or profile motion commands should be used.
Anything other than these two values implies the default.
unit_converter : UnitConverter, optional
``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
the units in `cycles` to motor units. ``None`` indicates that
they are already in motor units.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the move sequence.
Notes
-----
`cycles` is an iterable of individual cycles of motion. Each cycle
is a ``dict`` that represents a sequence of moves that could
possibly be looped over. The field ``'iterations'`` gives how many
times the sequence of moves should be done (a value > 1 implies a
loop). Then the field ``'moves'`` is an iterable of the individual
moves. Each individual move is a ``dict`` with the acceleration
(``'A'``), deceleration (``'AD'`` with 0 meaning the value of the
acceleration is used), velocity (``'V'``), and the distance/position
(``'D'``). Back in the cycle, the field ``'wait_times'`` is an
iterable of numbers giving the time in seconds to wait after each
move before going onto the next.
See Also
--------
get_sequence_time
convert_sequence_to_motor_units
GeminiMotorDrive.utilities.UnitConverter
Examples
--------
Simple program style two motions with a pause in between.
>>> from GeminiMotorDrive.compilers.move_sequence import *
>>> cycles = [{'iterations':1, 'wait_times':[1, 0],
... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},
... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]
>>> compile_sequence(cycles)
['A100',
'AD0',
'V100',
'D-1000',
'GO1',
'WAIT(AS.1=b0)',
'T1',
'A90',
'GO1',
'WAIT(AS.1=b0)']
The same motion but in profile style commands
>>> from GeminiMotorDrive.compilers.move_sequence import *
>>> cycles = [{'iterations':1, 'wait_times':[1, 0],
... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},
... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]
>>> compile_sequence(cycles, program_or_profile='profile')
['A100',
'AD100',
'V100',
'D-1000',
'VF0',
'GOBUF1',
'GOWHEN(T=1000)',
'A90',
'AD90',
'VF0',
'GOBUF1']
Another motion with a back and forth loop (100 iterations) in the
middle, done in program style commands.
>>> from GeminiMotorDrive.compilers.move_sequence import *
>>> cycles = [{'iterations':1, 'wait_times':[1],
... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100}]},
... {'iterations':100, 'wait_times':[0, 0],
... 'moves':[{'A':50, 'AD':40, 'D':-1000, 'V':30},
... {'A':50, 'AD':40, 'D':1000, 'V':30}]},
... {'iterations':1, 'wait_times':[0],
... 'moves':[{'A':100, 'AD':0, 'D':1000, 'V':100}]}]
>>> compile_sequence(cycles)
['A100',
'AD0',
'V100',
'D-1000',
'GO1',
'WAIT(AS.1=b0)',
'T1',
'L100',
'A50',
'AD40',
'V30',
'D-1000',
'GO1',
'WAIT(AS.1=b0)',
'D~',
'GO1',
'WAIT(AS.1=b0)',
'LN',
'A100',
'AD0',
'V100',
'GO1',
'WAIT(AS.1=b0)'] | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/compilers/move_sequence.py#L28-L265 | [
"def convert_sequence_to_motor_units(cycles, unit_converter):\n \"\"\" Converts a move sequence to motor units.\n\n Converts a move sequence to motor units using the provied converter.\n\n Parameters\n ----------\n cycles : iterable of dicts\n The iterable of cycles of motion to do one after another. See\n ``compile_sequence`` for format.\n unit_converter : UnitConverter, optional\n ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert\n the units in `cycles` to motor units.\n\n Returns\n -------\n motor_cycles : list of dicts\n A deep copy of `cycles` with all units converted to motor units.\n\n See Also\n --------\n compile_sequence\n GeminiMotorDrive.utilities.UnitConverter\n\n \"\"\"\n # Make a deep copy of cycles so that the conversions don't damage\n # the original one.\n cv_cycles = copy.deepcopy(cycles)\n\n # Go through each cycle and do the conversions.\n for cycle in cv_cycles:\n # Go through each of the moves and do the conversions.\n for move in cycle['moves']:\n move['A'] = unit_converter.to_motor_velocity_acceleration( \\\n move['A'])\n move['AD'] = \\\n unit_converter.to_motor_velocity_acceleration( \\\n move['AD'])\n move['V'] = unit_converter.to_motor_velocity_acceleration( \\\n move['V'])\n move['D'] = int(unit_converter.to_motor_distance(move['D']))\n\n # Now return the converted move sequence.\n return cv_cycles\n"
] | # Copyright 2014-2016 Freja Nordsiek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for working with move sequences and compiling them to movement
commands, programs, and profiles for a Gemini drive.
Move sequences are described in ``compile_sequence`` along with a few
examples.
"""
import math
import copy
def get_sequence_time(cycles, unit_converter=None, eres=None):
    """ Calculates the time a move sequence will take to complete.

    Supports moves from one position to another (the motion always
    comes to a stop before the next motion), waiting a given interval
    of time before starting the next move, and looping over a
    sequence of moves.

    Parameters
    ----------
    cycles : list of dicts
        The ``list`` of cycles of motion to do one after another. See
        ``compile_sequence`` for the format.
    unit_converter : UnitConverter, optional
        ``GeminiMotorDrive.utilities.UnitConverter`` to use to
        convert the units in `cycles` to motor units. ``None``
        indicates that they are already in motor units.
    eres : int
        Encoder resolution. Only relevant if `unit_converter` is
        ``None``.

    Returns
    -------
    time : float
        Time the move sequence will take in seconds.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter
    move_time

    """
    # With a unit converter the numbers behave like motor units with
    # an encoder resolution of one.
    if unit_converter is not None:
        eres = 1
    # Accumulate the duration of every wait and every move, each
    # multiplied by how many times its cycle iterates.
    total = 0.0
    for cyc in cycles:
        reps = cyc['iterations']
        total += reps * sum(cyc['wait_times'])
        for mv in cyc['moves']:
            total += reps * move_time(mv, eres=eres)
    return total
def move_time(move, eres):
    """ Calculates the time it takes to do a move.

    Assumes the motor decelerates to a stop at the end of the move as
    opposed to continuing at velocity. Everything is in motor units:
    encoder counts for distance, pitches/s for velocity, and
    pitches/s^2 for acceleration.

    Parameters
    ----------
    move : dict
        Contains the move parameters in its fields: acceleration
        ('A'), deceleration ('AD' with 0 meaning the value of the
        acceleration is used), velocity ('V'), and the
        distance/position ('D').
    eres : int
        Encoder resolution.

    Returns
    -------
    time : float
        Time the move will take in seconds.

    See Also
    --------
    compile_sequence
    get_sequence_time

    """
    # Work with magnitudes for simplicity. A zero 'AD' means "same as
    # 'A'", and the distance is converted from counts into the same
    # units as the rest by dividing by the encoder resolution.
    accel = abs(move['A'])
    decel = abs(move['AD'])
    if decel == 0.0:
        decel = accel
    vel = abs(move['V'])
    dist = abs(move['D']) / eres
    # Time to accelerate from rest up to vel and to decelerate back
    # down to rest, and the distances covered while doing so.
    ramp_times = [vel / accel, vel / decel]
    ramp_dists = [0.5 * accel * (ramp_times[0]**2),
                  0.5 * decel * (ramp_times[1]**2)]
    if sum(ramp_dists) <= dist:
        # Trapezoidal profile: both ramps plus a constant-velocity
        # cruise over whatever distance remains.
        return sum(ramp_times) + (dist - sum(ramp_dists)) / vel
    # Triangular profile: vel is never reached. The ramps must meet
    # at equal speed, i.e.
    #   (1) t = t_1 + t_2
    #   (2) accel*t_1 = decel*t_2
    #   (3) dist = 0.5*accel*(t_1**2) + 0.5*decel*(t_2**2)
    # Substituting (2) into (1) and (3) and solving for t yields
    #   t = sqrt(2*dist*(1 + accel/decel) / accel)
    return math.sqrt(2*dist * (1 + (accel / decel)) / accel)
def convert_sequence_to_motor_units(cycles, unit_converter):
    """ Converts a move sequence to motor units.

    Converts a move sequence to motor units using the provided
    converter.

    Parameters
    ----------
    cycles : iterable of dicts
        The iterable of cycles of motion to do one after another. See
        ``compile_sequence`` for the format.
    unit_converter : UnitConverter
        ``GeminiMotorDrive.utilities.UnitConverter`` to use to
        convert the units in `cycles` to motor units.

    Returns
    -------
    motor_cycles : list of dicts
        A deep copy of `cycles` with all units converted to motor
        units.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter

    """
    # Work on a deep copy so the caller's sequence is left untouched.
    converted = copy.deepcopy(cycles)
    to_va = unit_converter.to_motor_velocity_acceleration
    for cyc in converted:
        for mv in cyc['moves']:
            # Accelerations and velocities share one conversion;
            # distances get their own and are truncated to whole
            # encoder counts.
            mv['A'] = to_va(mv['A'])
            mv['AD'] = to_va(mv['AD'])
            mv['V'] = to_va(mv['V'])
            mv['D'] = int(unit_converter.to_motor_distance(mv['D']))
    return converted
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/compilers/move_sequence.py | get_sequence_time | python | def get_sequence_time(cycles, unit_converter=None, eres=None):
# If we are doing unit conversion, then that is equivalent to motor
# units but with eres equal to one.
if unit_converter is not None:
eres = 1
# Starting with 0 time, steadily add the time of each movement.
tme = 0.0
# Go through each cycle and collect times.
for cycle in cycles:
# Add all the wait times.
tme += cycle['iterations']*sum(cycle['wait_times'])
# Add the time for each individual move.
for move in cycle['moves']:
tme += cycle['iterations'] \
* move_time(move, eres=eres)
# Done.
return tme | Calculates the time the move sequence will take to complete.
Calculates the amount of time it will take to complete the given
move sequence. Types of motion supported are moves from one position
to another (the motion will always come to a stop before doing the
next motion), waiting a given interval of time till starting the
next move, and looping over a sequence of moves.
Parameters
----------
cycles : list of dicts
The ``list`` of cycles of motion to do one after another. See
``compile_sequence`` for format.
unit_converter : UnitConverter, optional
``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
the units in `cycles` to motor units. ``None`` indicates that
they are already in motor units.
eres : int
Encoder resolution. Only relevant if `unit_converter` is
``None``.
Returns
-------
time : float
Time the move sequence will take in seconds.
See Also
--------
compile_sequence
GeminiMotorDrive.utilities.UnitConverter
move_time | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/compilers/move_sequence.py#L268-L318 | [
"def move_time(move, eres):\n \"\"\" Calculates the time it takes to do a move.\n\n Calculates how long it will take to complete a move of the motor. It\n is assumed that the motor will decerate to a stop for the end of the\n move as opposed to keep moving at velocity.\n\n Everything is in motor units which are encoder counts for distance,\n pitches/s for velocity, and pitches/s^2 for acceleration.\n\n Parameters\n ----------\n move : dict\n Contains the move parameters in its fields: acceleration ('A'),\n deceleration ('AD' with 0 meaning the value of the acceleration\n is used), velocity ('V'), and the distance/position ('D').\n eres : int\n Encoder resolution.\n\n Returns\n -------\n time : float\n Time the move will take in seconds.\n\n See Also\n --------\n compile_sequence\n get_sequence_time\n\n \"\"\"\n # Grab the move parameters. If the deceleration is given as zero,\n # that means it has the same value as the acceleration. Distance is\n # converted to the same units as the others by dividing by the\n # encoder resolution. The absolute value of everything is taken for\n # simplicity.\n A = abs(move['A'])\n AD = abs(move['AD'])\n if AD == 0.0:\n AD = A\n V = abs(move['V'])\n D = abs(move['D'])/eres\n\n # Calculate the times it would take to accelerate from stop to V and\n # decelerate to stop at rates A and AD respectively.\n accel_times = [V/A, V/AD]\n\n # Calculate the distances that would be moved in those times.\n dists = [0.5*A*(accel_times[0]**2), 0.5*AD*(accel_times[1]**2)]\n\n # If the sum of those dists is greater than D, then the velocity V\n # is never reached. 
The way the time is calculated depends on which\n # case it is.\n if sum(dists) <= D:\n # The time is just the sum of the acceleration times plus the\n # remaining distance divided by V.\n return (sum(accel_times) + (D-sum(dists))/V)\n else:\n # We need to find the time it takes for the acceleration path\n # and deceleration path to meet and have the same speeds.\n #\n # (1) t = t_1 + t_2\n # (2) A*t_1 = AD*t_2\n # (3) D = 0.5*A*(t_1**2) + 0.5*AD*(t_2**2)\n #\n # Re-writing t_2 in terms of t_1 using (2)\n # (4) t_2 = (A / AD) * t_1\n #\n # Putting that into (1) and (3)\n # (4) t = (1 + (A / AD)) * t_1\n # (5) D = 0.5*A*(1 + (A / AD)) * (t_1**2)\n #\n # Solving (5) for t_1,\n # (6) t_1 = sqrt( 2*D / (A * (1 + (A / AD))))\n #\n # Putting that into (4),\n # t = sqrt(2*D*(1 + (A / AD)) / A)\n return math.sqrt(2*D * (1 + (A / AD)) / A)\n"
] | # Copyright 2014-2016 Freja Nordsiek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for working with move sequences and compiling them to movement
commands, programs, and profiles for a Gemini drive.
Move sequences are described in ``compile_sequence`` along with a few
examples.
"""
import math
import copy
def _drive_number(value, places):
    # Round to the given number of decimal places (the drive's
    # precision limit for that parameter) and convert integral values
    # to int, since that is the form the drive echoes back (makes
    # later comparisons easier).
    value = round(float(value), places)
    if value == int(value):
        value = int(value)
    return value


def compile_sequence(cycles, program_or_profile='program',
                     unit_converter=None):
    """ Makes the command list for a move sequence.

    Constructs the list of commands to execute the given sequence of
    motion. Program/command line commands or profile commands can be
    generated depending on the value of `program_or_profile` so that the
    commands can be used to construct a program or profile later. Types
    of motion supported (see Notes for how to specify) are moves from
    one position to another (the motion will always come to a stop
    before doing the next motion), waiting a given interval of time till
    starting the next move, and looping over a sequence of moves.

    Parameters
    ----------
    cycles : iterable of dicts
        The iterable of cycles of motion to do one after another. See
        Notes for format. It is not modified.
    program_or_profile : {'program', 'profile'}, optional
        Whether program or profile motion commands should be used.
        Anything other than these two values implies the default.
    unit_converter : UnitConverter, optional
        ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
        the units in `cycles` to motor units. ``None`` indicates that
        they are already in motor units.

    Returns
    -------
    commands : list of str
        ``list`` of ``str`` commands making up the move sequence.

    Notes
    -----
    `cycles` is an iterable of individual cycles of motion. Each cycle
    is a ``dict`` that represents a sequence of moves that could
    possibly be looped over. The field ``'iterations'`` gives how many
    times the sequence of moves should be done (a value > 1 implies a
    loop). Then the field ``'moves'`` is an iterable of the individual
    moves. Each individual move is a ``dict`` with the acceleration
    (``'A'``), deceleration (``'AD'`` with 0 meaning the value of the
    acceleration is used), velocity (``'V'``), and the distance/position
    (``'D'``). Back in the cycle, the field ``'wait_times'`` is an
    iterable of numbers giving the time in seconds to wait after each
    move before going onto the next.

    See Also
    --------
    get_sequence_time
    convert_sequence_to_motor_units
    GeminiMotorDrive.utilities.UnitConverter

    Examples
    --------
    Simple program style two motions with a pause in between.

    >>> cycles = [{'iterations':1, 'wait_times':[1, 0],
    ...           'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},
    ...           {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]
    >>> compile_sequence(cycles)
    ['A100',
     'AD0',
     'V100',
     'D-1000',
     'GO1',
     'WAIT(AS.1=b0)',
     'T1',
     'A90',
     'GO1',
     'WAIT(AS.1=b0)']

    The same motion but in profile style commands

    >>> compile_sequence(cycles, program_or_profile='profile')
    ['A100',
     'AD100',
     'V100',
     'D-1000',
     'VF0',
     'GOBUF1',
     'GOWHEN(T=1000)',
     'A90',
     'AD90',
     'VF0',
     'GOBUF1']

    """
    # If needed, cycles needs to be converted to motor units.
    if unit_converter is None:
        cv_cycles = cycles
    else:
        cv_cycles = convert_sequence_to_motor_units(
            cycles, unit_converter=unit_converter)

    is_profile = (program_or_profile == 'profile')

    # Initially, we have no commands in our command list.
    commands = []

    # The A, AD, D, and V parameters of the previous motion are kept
    # track of because if they don't change from one motion to the
    # next, the commands to set them don't need to be included. They
    # start blank since there are no previous motions yet.
    blank_motion = {'A': [], 'AD': [], 'D': [], 'V': []}
    previous_motion = blank_motion

    # Construct each cycle one by one.
    for cycle in cv_cycles:
        # If more than one iteration is being done, a loop needs to be
        # set up ('L' for a program, 'PLOOP' for a profile, with the
        # iteration count attached). Since it is hard to track what
        # motion changed from the end of a loop back to its beginning,
        # previous_motion is cleared so every parameter gets set
        # explicitly at the top of the loop body.
        iterations = int(cycle['iterations'])
        if iterations > 1:
            previous_motion = blank_motion
            commands.append(('PLOOP' if is_profile else 'L')
                            + str(iterations))

        # Construct each individual move in the cycle.
        for i, move in enumerate(cycle['moves']):
            # Work on a copy so the AD fixup below cannot mutate the
            # caller's move dicts (bug fix: the original modified
            # `cycles` in place when no unit converter was given).
            new_motion = dict(move)

            # For a profile, AD must be set explicitly to A if it is 0.
            if is_profile and new_motion['AD'] == 0.0:
                new_motion['AD'] = new_motion['A']

            # Set A, AD, and V if they have changed. 4 decimal places
            # is the most precision the drive supports for them.
            for k in ('A', 'AD', 'V'):
                if previous_motion[k] != new_motion[k]:
                    commands.append(k + str(_drive_number(new_motion[k],
                                                          4)))

            # If only the sign of D has flipped, a 'D~' command
            # suffices; any other change needs the value reset.
            if previous_motion['D'] != new_motion['D']:
                if previous_motion['D'] == -new_motion['D']:
                    commands.append('D~')
                else:
                    commands.append('D' + str(int(new_motion['D'])))

            # Time to wait after this move is done.
            wait_time = cycle['wait_times'][i]

            # Give the motion command (GO or GOBUF), tell the drive to
            # wait till the motor has stopped (WAIT for a program, VF0
            # for a profile), and wait the period wait_time (T and
            # GOWHEN commands respectively).
            if not is_profile:
                commands.append('GO1')
                commands.append('WAIT(AS.1=b0)')
                if wait_time != 0:
                    # 3 decimal places is the drive's precision for T.
                    commands.append('T' + str(_drive_number(wait_time,
                                                            3)))
            else:
                commands.append('VF0')
                commands.append('GOBUF1')
                if wait_time != 0:
                    commands.append('GOWHEN(T='
                                    + str(int(1000 * wait_time))
                                    + ')')

            # The motion just done becomes the reference for the next.
            previous_motion = new_motion

        # Done with all the moves of the cycle. If we are looping, the
        # loop end needs to be put in.
        if iterations > 1:
            commands.append('PLN' if is_profile else 'LN')

    # Done constructing the command list.
    return commands
def move_time(move, eres):
    """Return the duration, in seconds, of a single move.

    The motor is assumed to decelerate to a stop at the end of the
    move rather than continuing on at velocity. All quantities are in
    motor units: encoder counts for distance, pitches/s for velocity,
    and pitches/s^2 for acceleration.

    Parameters
    ----------
    move : dict
        Move parameters in its fields: acceleration ('A'),
        deceleration ('AD' with 0 meaning the value of the
        acceleration is used), velocity ('V'), and the
        distance/position ('D').
    eres : int
        Encoder resolution.

    Returns
    -------
    time : float
        Time the move will take in seconds.

    See Also
    --------
    compile_sequence
    get_sequence_time

    """
    # Work with magnitudes only for simplicity. An AD of zero means
    # "same as A", and dividing D by the encoder resolution puts the
    # distance in the same units as the velocity and accelerations.
    accel = abs(move['A'])
    decel = abs(move['AD'])
    if not decel:
        decel = accel
    speed = abs(move['V'])
    dist = abs(move['D']) / eres

    # Time to ramp from rest up to full speed, and from full speed
    # back down to rest.
    t_up = speed / accel
    t_down = speed / decel

    # Distance covered during each of those ramps.
    d_up = 0.5 * accel * (t_up ** 2)
    d_down = 0.5 * decel * (t_down ** 2)

    if d_up + d_down <= dist:
        # Trapezoidal profile: both ramps complete and the remaining
        # distance is covered at full speed.
        return (t_up + t_down) + (dist - (d_up + d_down)) / speed
    else:
        # Triangular profile: full speed is never reached. From
        #   (1) t = t_1 + t_2
        #   (2) accel*t_1 = decel*t_2
        #   (3) dist = 0.5*accel*t_1**2 + 0.5*decel*t_2**2
        # substituting (2) into (1) and (3) and solving for the total
        # time gives
        #   t = sqrt(2*dist*(1 + accel/decel) / accel)
        return math.sqrt(2*dist * (1 + (accel / decel)) / accel)
def convert_sequence_to_motor_units(cycles, unit_converter):
    """Return a copy of a move sequence expressed in motor units.

    Parameters
    ----------
    cycles : iterable of dicts
        The iterable of cycles of motion to do one after another. See
        ``compile_sequence`` for the format.
    unit_converter : UnitConverter
        ``GeminiMotorDrive.utilities.UnitConverter`` used to convert
        the units in `cycles` to motor units.

    Returns
    -------
    motor_cycles : list of dicts
        A deep copy of `cycles` with all units converted to motor
        units.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter

    """
    # Deep copy first so the in-place conversions below cannot damage
    # the caller's sequence.
    converted = copy.deepcopy(cycles)
    to_va = unit_converter.to_motor_velocity_acceleration
    for cycle in converted:
        for mv in cycle['moves']:
            # Accelerations and velocity share one conversion;
            # distance has its own and is truncated to whole encoder
            # counts.
            mv['A'] = to_va(mv['A'])
            mv['AD'] = to_va(mv['AD'])
            mv['V'] = to_va(mv['V'])
            mv['D'] = int(unit_converter.to_motor_distance(mv['D']))
    return converted
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/compilers/move_sequence.py | move_time | python | def move_time(move, eres):
# Grab the move parameters. If the deceleration is given as zero,
# that means it has the same value as the acceleration. Distance is
# converted to the same units as the others by dividing by the
# encoder resolution. The absolute value of everything is taken for
# simplicity.
A = abs(move['A'])
AD = abs(move['AD'])
if AD == 0.0:
AD = A
V = abs(move['V'])
D = abs(move['D'])/eres
# Calculate the times it would take to accelerate from stop to V and
# decelerate to stop at rates A and AD respectively.
accel_times = [V/A, V/AD]
# Calculate the distances that would be moved in those times.
dists = [0.5*A*(accel_times[0]**2), 0.5*AD*(accel_times[1]**2)]
# If the sum of those dists is greater than D, then the velocity V
# is never reached. The way the time is calculated depends on which
# case it is.
if sum(dists) <= D:
# The time is just the sum of the acceleration times plus the
# remaining distance divided by V.
return (sum(accel_times) + (D-sum(dists))/V)
else:
# We need to find the time it takes for the acceleration path
# and deceleration path to meet and have the same speeds.
#
# (1) t = t_1 + t_2
# (2) A*t_1 = AD*t_2
# (3) D = 0.5*A*(t_1**2) + 0.5*AD*(t_2**2)
#
# Re-writing t_2 in terms of t_1 using (2)
# (4) t_2 = (A / AD) * t_1
#
# Putting that into (1) and (3)
# (4) t = (1 + (A / AD)) * t_1
# (5) D = 0.5*A*(1 + (A / AD)) * (t_1**2)
#
# Solving (5) for t_1,
# (6) t_1 = sqrt( 2*D / (A * (1 + (A / AD))))
#
# Putting that into (4),
# t = sqrt(2*D*(1 + (A / AD)) / A)
return math.sqrt(2*D * (1 + (A / AD)) / A) | Calculates the time it takes to do a move.
Calculates how long it will take to complete a move of the motor. It
is assumed that the motor will decerate to a stop for the end of the
move as opposed to keep moving at velocity.
Everything is in motor units which are encoder counts for distance,
pitches/s for velocity, and pitches/s^2 for acceleration.
Parameters
----------
move : dict
Contains the move parameters in its fields: acceleration ('A'),
deceleration ('AD' with 0 meaning the value of the acceleration
is used), velocity ('V'), and the distance/position ('D').
eres : int
Encoder resolution.
Returns
-------
time : float
Time the move will take in seconds.
See Also
--------
compile_sequence
get_sequence_time | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/compilers/move_sequence.py#L321-L397 | null | # Copyright 2014-2016 Freja Nordsiek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for working with move sequences and compiling them to movement
commands, programs, and profiles for a Gemini drive.
Move sequences care described in ``compile_sequence`` along with a few
examples.
"""
import math
import copy
def _drive_number(value, places):
    # Round to the given number of decimal places (the drive's
    # precision limit for that parameter) and convert integral values
    # to int, since that is the form the drive echoes back (makes
    # later comparisons easier).
    value = round(float(value), places)
    if value == int(value):
        value = int(value)
    return value


def compile_sequence(cycles, program_or_profile='program',
                     unit_converter=None):
    """ Makes the command list for a move sequence.

    Constructs the list of commands to execute the given sequence of
    motion. Program/command line commands or profile commands can be
    generated depending on the value of `program_or_profile` so that the
    commands can be used to construct a program or profile later. Types
    of motion supported (see Notes for how to specify) are moves from
    one position to another (the motion will always come to a stop
    before doing the next motion), waiting a given interval of time till
    starting the next move, and looping over a sequence of moves.

    Parameters
    ----------
    cycles : iterable of dicts
        The iterable of cycles of motion to do one after another. See
        Notes for format. It is not modified.
    program_or_profile : {'program', 'profile'}, optional
        Whether program or profile motion commands should be used.
        Anything other than these two values implies the default.
    unit_converter : UnitConverter, optional
        ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
        the units in `cycles` to motor units. ``None`` indicates that
        they are already in motor units.

    Returns
    -------
    commands : list of str
        ``list`` of ``str`` commands making up the move sequence.

    Notes
    -----
    `cycles` is an iterable of individual cycles of motion. Each cycle
    is a ``dict`` that represents a sequence of moves that could
    possibly be looped over. The field ``'iterations'`` gives how many
    times the sequence of moves should be done (a value > 1 implies a
    loop). Then the field ``'moves'`` is an iterable of the individual
    moves. Each individual move is a ``dict`` with the acceleration
    (``'A'``), deceleration (``'AD'`` with 0 meaning the value of the
    acceleration is used), velocity (``'V'``), and the distance/position
    (``'D'``). Back in the cycle, the field ``'wait_times'`` is an
    iterable of numbers giving the time in seconds to wait after each
    move before going onto the next.

    See Also
    --------
    get_sequence_time
    convert_sequence_to_motor_units
    GeminiMotorDrive.utilities.UnitConverter

    Examples
    --------
    Simple program style two motions with a pause in between.

    >>> cycles = [{'iterations':1, 'wait_times':[1, 0],
    ...           'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},
    ...           {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]
    >>> compile_sequence(cycles)
    ['A100',
     'AD0',
     'V100',
     'D-1000',
     'GO1',
     'WAIT(AS.1=b0)',
     'T1',
     'A90',
     'GO1',
     'WAIT(AS.1=b0)']

    The same motion but in profile style commands

    >>> compile_sequence(cycles, program_or_profile='profile')
    ['A100',
     'AD100',
     'V100',
     'D-1000',
     'VF0',
     'GOBUF1',
     'GOWHEN(T=1000)',
     'A90',
     'AD90',
     'VF0',
     'GOBUF1']

    """
    # If needed, cycles needs to be converted to motor units.
    if unit_converter is None:
        cv_cycles = cycles
    else:
        cv_cycles = convert_sequence_to_motor_units(
            cycles, unit_converter=unit_converter)

    is_profile = (program_or_profile == 'profile')

    # Initially, we have no commands in our command list.
    commands = []

    # The A, AD, D, and V parameters of the previous motion are kept
    # track of because if they don't change from one motion to the
    # next, the commands to set them don't need to be included. They
    # start blank since there are no previous motions yet.
    blank_motion = {'A': [], 'AD': [], 'D': [], 'V': []}
    previous_motion = blank_motion

    # Construct each cycle one by one.
    for cycle in cv_cycles:
        # If more than one iteration is being done, a loop needs to be
        # set up ('L' for a program, 'PLOOP' for a profile, with the
        # iteration count attached). Since it is hard to track what
        # motion changed from the end of a loop back to its beginning,
        # previous_motion is cleared so every parameter gets set
        # explicitly at the top of the loop body.
        iterations = int(cycle['iterations'])
        if iterations > 1:
            previous_motion = blank_motion
            commands.append(('PLOOP' if is_profile else 'L')
                            + str(iterations))

        # Construct each individual move in the cycle.
        for i, move in enumerate(cycle['moves']):
            # Work on a copy so the AD fixup below cannot mutate the
            # caller's move dicts (bug fix: the original modified
            # `cycles` in place when no unit converter was given).
            new_motion = dict(move)

            # For a profile, AD must be set explicitly to A if it is 0.
            if is_profile and new_motion['AD'] == 0.0:
                new_motion['AD'] = new_motion['A']

            # Set A, AD, and V if they have changed. 4 decimal places
            # is the most precision the drive supports for them.
            for k in ('A', 'AD', 'V'):
                if previous_motion[k] != new_motion[k]:
                    commands.append(k + str(_drive_number(new_motion[k],
                                                          4)))

            # If only the sign of D has flipped, a 'D~' command
            # suffices; any other change needs the value reset.
            if previous_motion['D'] != new_motion['D']:
                if previous_motion['D'] == -new_motion['D']:
                    commands.append('D~')
                else:
                    commands.append('D' + str(int(new_motion['D'])))

            # Time to wait after this move is done.
            wait_time = cycle['wait_times'][i]

            # Give the motion command (GO or GOBUF), tell the drive to
            # wait till the motor has stopped (WAIT for a program, VF0
            # for a profile), and wait the period wait_time (T and
            # GOWHEN commands respectively).
            if not is_profile:
                commands.append('GO1')
                commands.append('WAIT(AS.1=b0)')
                if wait_time != 0:
                    # 3 decimal places is the drive's precision for T.
                    commands.append('T' + str(_drive_number(wait_time,
                                                            3)))
            else:
                commands.append('VF0')
                commands.append('GOBUF1')
                if wait_time != 0:
                    commands.append('GOWHEN(T='
                                    + str(int(1000 * wait_time))
                                    + ')')

            # The motion just done becomes the reference for the next.
            previous_motion = new_motion

        # Done with all the moves of the cycle. If we are looping, the
        # loop end needs to be put in.
        if iterations > 1:
            commands.append('PLN' if is_profile else 'LN')

    # Done constructing the command list.
    return commands
def get_sequence_time(cycles, unit_converter=None, eres=None):
    """Return how long a move sequence will take to complete.

    Supported motion types are point-to-point moves (the motor always
    comes to a stop between moves), timed waits between moves, and
    loops over a sequence of moves.

    Parameters
    ----------
    cycles : list of dicts
        The ``list`` of cycles of motion to do one after another. See
        ``compile_sequence`` for the format.
    unit_converter : UnitConverter, optional
        ``GeminiMotorDrive.utilities.UnitConverter`` used to convert
        the units in `cycles` to motor units. ``None`` indicates that
        they are already in motor units.
    eres : int
        Encoder resolution. Only relevant if `unit_converter` is
        ``None``.

    Returns
    -------
    time : float
        Time the move sequence will take in seconds.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter
    move_time

    """
    # With a unit converter everything is in consistent units already,
    # which is equivalent to motor units with an eres of one.
    if unit_converter is not None:
        eres = 1

    # Accumulate the contribution of every cycle.
    total = 0.0
    for cycle in cycles:
        reps = cycle['iterations']
        # Every iteration incurs all of the wait times.
        total += reps * sum(cycle['wait_times'])
        # And every iteration incurs the time of each individual move.
        for mv in cycle['moves']:
            total += reps * move_time(mv, eres=eres)
    return total
def convert_sequence_to_motor_units(cycles, unit_converter):
    """Return a copy of a move sequence expressed in motor units.

    Parameters
    ----------
    cycles : iterable of dicts
        The iterable of cycles of motion to do one after another. See
        ``compile_sequence`` for the format.
    unit_converter : UnitConverter
        ``GeminiMotorDrive.utilities.UnitConverter`` used to convert
        the units in `cycles` to motor units.

    Returns
    -------
    motor_cycles : list of dicts
        A deep copy of `cycles` with all units converted to motor
        units.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter

    """
    # Deep copy first so the in-place conversions below cannot damage
    # the caller's sequence.
    converted = copy.deepcopy(cycles)
    to_va = unit_converter.to_motor_velocity_acceleration
    for cycle in converted:
        for mv in cycle['moves']:
            # Accelerations and velocity share one conversion;
            # distance has its own and is truncated to whole encoder
            # counts.
            mv['A'] = to_va(mv['A'])
            mv['AD'] = to_va(mv['AD'])
            mv['V'] = to_va(mv['V'])
            mv['D'] = int(unit_converter.to_motor_distance(mv['D']))
    return converted
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/compilers/move_sequence.py | convert_sequence_to_motor_units | python | def convert_sequence_to_motor_units(cycles, unit_converter):
# Make a deep copy of cycles so that the conversions don't damage
# the original one.
cv_cycles = copy.deepcopy(cycles)
# Go through each cycle and do the conversions.
for cycle in cv_cycles:
# Go through each of the moves and do the conversions.
for move in cycle['moves']:
move['A'] = unit_converter.to_motor_velocity_acceleration( \
move['A'])
move['AD'] = \
unit_converter.to_motor_velocity_acceleration( \
move['AD'])
move['V'] = unit_converter.to_motor_velocity_acceleration( \
move['V'])
move['D'] = int(unit_converter.to_motor_distance(move['D']))
# Now return the converted move sequence.
return cv_cycles | Converts a move sequence to motor units.
Converts a move sequence to motor units using the provied converter.
Parameters
----------
cycles : iterable of dicts
The iterable of cycles of motion to do one after another. See
``compile_sequence`` for format.
unit_converter : UnitConverter, optional
``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
the units in `cycles` to motor units.
Returns
-------
motor_cycles : list of dicts
A deep copy of `cycles` with all units converted to motor units.
See Also
--------
compile_sequence
GeminiMotorDrive.utilities.UnitConverter | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/compilers/move_sequence.py#L399-L442 | null | # Copyright 2014-2016 Freja Nordsiek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for working with move sequences and compiling them to movement
commands, programs, and profiles for a Gemini drive.
Move sequences care described in ``compile_sequence`` along with a few
examples.
"""
import math
import copy
def _drive_number(value, places):
    # Round to the given number of decimal places (the drive's
    # precision limit for that parameter) and convert integral values
    # to int, since that is the form the drive echoes back (makes
    # later comparisons easier).
    value = round(float(value), places)
    if value == int(value):
        value = int(value)
    return value


def compile_sequence(cycles, program_or_profile='program',
                     unit_converter=None):
    """ Makes the command list for a move sequence.

    Constructs the list of commands to execute the given sequence of
    motion. Program/command line commands or profile commands can be
    generated depending on the value of `program_or_profile` so that the
    commands can be used to construct a program or profile later. Types
    of motion supported (see Notes for how to specify) are moves from
    one position to another (the motion will always come to a stop
    before doing the next motion), waiting a given interval of time till
    starting the next move, and looping over a sequence of moves.

    Parameters
    ----------
    cycles : iterable of dicts
        The iterable of cycles of motion to do one after another. See
        Notes for format. It is not modified.
    program_or_profile : {'program', 'profile'}, optional
        Whether program or profile motion commands should be used.
        Anything other than these two values implies the default.
    unit_converter : UnitConverter, optional
        ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
        the units in `cycles` to motor units. ``None`` indicates that
        they are already in motor units.

    Returns
    -------
    commands : list of str
        ``list`` of ``str`` commands making up the move sequence.

    Notes
    -----
    `cycles` is an iterable of individual cycles of motion. Each cycle
    is a ``dict`` that represents a sequence of moves that could
    possibly be looped over. The field ``'iterations'`` gives how many
    times the sequence of moves should be done (a value > 1 implies a
    loop). Then the field ``'moves'`` is an iterable of the individual
    moves. Each individual move is a ``dict`` with the acceleration
    (``'A'``), deceleration (``'AD'`` with 0 meaning the value of the
    acceleration is used), velocity (``'V'``), and the distance/position
    (``'D'``). Back in the cycle, the field ``'wait_times'`` is an
    iterable of numbers giving the time in seconds to wait after each
    move before going onto the next.

    See Also
    --------
    get_sequence_time
    convert_sequence_to_motor_units
    GeminiMotorDrive.utilities.UnitConverter

    Examples
    --------
    Simple program style two motions with a pause in between.

    >>> cycles = [{'iterations':1, 'wait_times':[1, 0],
    ...           'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},
    ...           {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]
    >>> compile_sequence(cycles)
    ['A100',
     'AD0',
     'V100',
     'D-1000',
     'GO1',
     'WAIT(AS.1=b0)',
     'T1',
     'A90',
     'GO1',
     'WAIT(AS.1=b0)']

    The same motion but in profile style commands

    >>> compile_sequence(cycles, program_or_profile='profile')
    ['A100',
     'AD100',
     'V100',
     'D-1000',
     'VF0',
     'GOBUF1',
     'GOWHEN(T=1000)',
     'A90',
     'AD90',
     'VF0',
     'GOBUF1']

    """
    # If needed, cycles needs to be converted to motor units.
    if unit_converter is None:
        cv_cycles = cycles
    else:
        cv_cycles = convert_sequence_to_motor_units(
            cycles, unit_converter=unit_converter)

    is_profile = (program_or_profile == 'profile')

    # Initially, we have no commands in our command list.
    commands = []

    # The A, AD, D, and V parameters of the previous motion are kept
    # track of because if they don't change from one motion to the
    # next, the commands to set them don't need to be included. They
    # start blank since there are no previous motions yet.
    blank_motion = {'A': [], 'AD': [], 'D': [], 'V': []}
    previous_motion = blank_motion

    # Construct each cycle one by one.
    for cycle in cv_cycles:
        # If more than one iteration is being done, a loop needs to be
        # set up ('L' for a program, 'PLOOP' for a profile, with the
        # iteration count attached). Since it is hard to track what
        # motion changed from the end of a loop back to its beginning,
        # previous_motion is cleared so every parameter gets set
        # explicitly at the top of the loop body.
        iterations = int(cycle['iterations'])
        if iterations > 1:
            previous_motion = blank_motion
            commands.append(('PLOOP' if is_profile else 'L')
                            + str(iterations))

        # Construct each individual move in the cycle.
        for i, move in enumerate(cycle['moves']):
            # Work on a copy so the AD fixup below cannot mutate the
            # caller's move dicts (bug fix: the original modified
            # `cycles` in place when no unit converter was given).
            new_motion = dict(move)

            # For a profile, AD must be set explicitly to A if it is 0.
            if is_profile and new_motion['AD'] == 0.0:
                new_motion['AD'] = new_motion['A']

            # Set A, AD, and V if they have changed. 4 decimal places
            # is the most precision the drive supports for them.
            for k in ('A', 'AD', 'V'):
                if previous_motion[k] != new_motion[k]:
                    commands.append(k + str(_drive_number(new_motion[k],
                                                          4)))

            # If only the sign of D has flipped, a 'D~' command
            # suffices; any other change needs the value reset.
            if previous_motion['D'] != new_motion['D']:
                if previous_motion['D'] == -new_motion['D']:
                    commands.append('D~')
                else:
                    commands.append('D' + str(int(new_motion['D'])))

            # Time to wait after this move is done.
            wait_time = cycle['wait_times'][i]

            # Give the motion command (GO or GOBUF), tell the drive to
            # wait till the motor has stopped (WAIT for a program, VF0
            # for a profile), and wait the period wait_time (T and
            # GOWHEN commands respectively).
            if not is_profile:
                commands.append('GO1')
                commands.append('WAIT(AS.1=b0)')
                if wait_time != 0:
                    # 3 decimal places is the drive's precision for T.
                    commands.append('T' + str(_drive_number(wait_time,
                                                            3)))
            else:
                commands.append('VF0')
                commands.append('GOBUF1')
                if wait_time != 0:
                    commands.append('GOWHEN(T='
                                    + str(int(1000 * wait_time))
                                    + ')')

            # The motion just done becomes the reference for the next.
            previous_motion = new_motion

        # Done with all the moves of the cycle. If we are looping, the
        # loop end needs to be put in.
        if iterations > 1:
            commands.append('PLN' if is_profile else 'LN')

    # Done constructing the command list.
    return commands
def get_sequence_time(cycles, unit_converter=None, eres=None):
    """ Calculate the total time a move sequence will take.

    Computes how long it takes to complete the given move sequence.
    Supported motion types are point-to-point moves (the motor always
    stops before the next motion), timed waits before starting the
    next move, and looping over a sub-sequence of moves.

    Parameters
    ----------
    cycles : list of dicts
        The ``list`` of cycles of motion to do one after another. See
        ``compile_sequence`` for the format.
    unit_converter : UnitConverter, optional
        ``GeminiMotorDrive.utilities.UnitConverter`` used to convert
        the units in `cycles` to motor units. ``None`` means they are
        already in motor units.
    eres : int
        Encoder resolution. Only relevant if `unit_converter` is
        ``None``.

    Returns
    -------
    time : float
        Time the move sequence will take in seconds.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter
    move_time
    """
    # Performing unit conversion is equivalent to working in motor
    # units with an encoder resolution of one.
    if unit_converter is not None:
        eres = 1
    # Accumulate the duration cycle by cycle.
    total = 0.0
    for cycle in cycles:
        reps = cycle['iterations']
        # All the waiting periods, repeated once per iteration.
        total += reps * sum(cycle['wait_times'])
        # The duration of each individual move, repeated once per
        # iteration of the cycle.
        for mv in cycle['moves']:
            total += reps * move_time(mv, eres=eres)
    return total
def move_time(move, eres):
    """ Calculate how long a single move will take.

    Computes the duration of one motor move. The motor is assumed to
    decelerate to a full stop at the end of the move rather than
    continuing at velocity.

    Everything is in motor units, which are encoder counts for
    distance, pitches/s for velocity, and pitches/s^2 for
    acceleration.

    Parameters
    ----------
    move : dict
        Contains the move parameters in its fields: acceleration
        ('A'), deceleration ('AD' with 0 meaning the value of the
        acceleration is used), velocity ('V'), and the
        distance/position ('D').
    eres : int
        Encoder resolution.

    Returns
    -------
    time : float
        Time the move will take in seconds.

    See Also
    --------
    compile_sequence
    get_sequence_time
    """
    # Work with absolute values throughout for simplicity. A
    # deceleration of zero means "same as the acceleration". The
    # distance is divided by the encoder resolution so that it is in
    # the same units as the velocity and accelerations.
    accel = abs(move['A'])
    decel = abs(move['AD'])
    if decel == 0.0:
        decel = accel
    speed = abs(move['V'])
    dist = abs(move['D'])/eres
    # Times needed to ramp from rest up to full speed and back down
    # to rest at the given rates.
    accel_times = [speed/accel, speed/decel]
    # Distances covered during those two ramps.
    dists = [0.5*accel*(accel_times[0]**2),
             0.5*decel*(accel_times[1]**2)]
    # If the ramps alone cover more than dist, full speed is never
    # reached (triangular profile); otherwise there is a cruise phase
    # at full speed in between (trapezoidal profile).
    if sum(dists) <= dist:
        # Ramp times plus the remaining distance traversed at full
        # speed.
        return (sum(accel_times) + (dist - sum(dists))/speed)
    else:
        # Find the time where the acceleration and deceleration paths
        # meet with matching speeds:
        #
        #   (1) t = t_1 + t_2
        #   (2) accel*t_1 = decel*t_2
        #   (3) dist = 0.5*accel*(t_1**2) + 0.5*decel*(t_2**2)
        #
        # From (2), t_2 = (accel/decel)*t_1. Substituting into (1)
        # and (3) and solving for t_1 gives
        #
        #   t_1 = sqrt(2*dist / (accel * (1 + accel/decel)))
        #
        # and therefore
        #
        #   t = sqrt(2*dist*(1 + accel/decel) / accel)
        return math.sqrt(2*dist * (1 + (accel / decel)) / accel)
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/utilities.py | strip_commands | python | def strip_commands(commands):
# Go through each command one by one, stripping it and adding it to
# a growing list if it is not blank. Each command needs to be
# converted to an str if it is a bytes.
stripped_commands = []
for v in commands:
if isinstance(v, bytes):
v = v.decode(errors='replace')
v = v.split(';')[0].strip()
if len(v) != 0:
stripped_commands.append(v)
return stripped_commands | Strips a sequence of commands.
Strips down the sequence of commands by removing comments and
surrounding whitespace around each individual command and then
removing blank commands.
Parameters
----------
commands : iterable of strings
Iterable of commands to strip.
Returns
-------
stripped_commands : list of str
The stripped commands with blank ones removed. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/utilities.py#L22-L50 | null | # Copyright 2014-2016 Freja Nordsiek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module for a utility function and class.
"""
import collections
class UnitConverter(object):
""" Converter between physical units and motor units.
Class to convert between the desired physical unit of length and
motor units; which are encoder counts for distance, motor pitches
per second for velocity, and motor pitches per second squared for
acceleration.
Parameters
----------
dmepit : float
Electrical pitch of the motor.
eres : int
Encoder resolution.
unit_in_meters : float, optional
The length in meters of the desired physical unit of length for
this convert to convert between. The default, ``1.0``,
corresponds to converting between meters and motor units.
"""
def __init__(self, dmepit, eres, unit_in_meters=1.0):
# Construct the multipliers to convert distances and
# velocities/accelerations to motor units.
self._distance_to_motor = 1e3*unit_in_meters*eres/dmepit
self._va_to_motor = 1e3*unit_in_meters/dmepit
def to_motor_distance(self, distance):
""" Convert distance/s to motor units.
Converts distance/s to units of motor encoder counts, which is
what the drive requires when given move instructions.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s.
"""
if isinstance(distance, collections.Iterable):
return [(x * self._distance_to_motor) for x in distance]
else:
return distance * self._distance_to_motor
def to_motor_velocity_acceleration(self, va):
""" Convert velocities/accelerations to motor units.
Converts velocity/ies and/or acceleration/s to units of motor
pitch per second (or second squared), which is what the drive
requires when given move instructions.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations.
"""
if isinstance(va, collections.Iterable):
return [(x * self._va_to_motor) for x in va]
else:
return va * self._va_to_motor
def to_unit_distance(self, distance):
""" Convert distance/s to units of UnitConverter.
Converts distance/s from motor encoder counts to that of this
UnitConverter.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s.
"""
if isinstance(distance, collections.Iterable):
return [(x / self._distance_to_motor) for x in distance]
else:
return distance / self._distance_to_motor
def to_unit_velocity_acceleration(self, va):
""" Convert velocities/accelerations to units of UnitConverter.
Converts velocity/ies and/or acceleration/s from units of motor
pitch per second (or second squared) to that of this
UnitConverter.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations.
"""
if isinstance(va, collections.Iterable):
return [(x / self._va_to_motor) for x in va]
else:
return va / self._va_to_motor
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/utilities.py | UnitConverter.to_motor_distance | python | def to_motor_distance(self, distance):
if isinstance(distance, collections.Iterable):
return [(x * self._distance_to_motor) for x in distance]
else:
return distance * self._distance_to_motor | Convert distance/s to motor units.
Converts distance/s to units of motor encoder counts, which is
what the drive requires when given move instructions.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/utilities.py#L79-L99 | null | class UnitConverter(object):
""" Converter between physical units and motor units.
Class to convert between the desired physical unit of length and
motor units; which are encoder counts for distance, motor pitches
per second for velocity, and motor pitches per second squared for
acceleration.
Parameters
----------
dmepit : float
Electrical pitch of the motor.
eres : int
Encoder resolution.
unit_in_meters : float, optional
The length in meters of the desired physical unit of length for
this convert to convert between. The default, ``1.0``,
corresponds to converting between meters and motor units.
"""
def __init__(self, dmepit, eres, unit_in_meters=1.0):
# Construct the multipliers to convert distances and
# velocities/accelerations to motor units.
self._distance_to_motor = 1e3*unit_in_meters*eres/dmepit
self._va_to_motor = 1e3*unit_in_meters/dmepit
def to_motor_velocity_acceleration(self, va):
""" Convert velocities/accelerations to motor units.
Converts velocity/ies and/or acceleration/s to units of motor
pitch per second (or second squared), which is what the drive
requires when given move instructions.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations.
"""
if isinstance(va, collections.Iterable):
return [(x * self._va_to_motor) for x in va]
else:
return va * self._va_to_motor
def to_unit_distance(self, distance):
""" Convert distance/s to units of UnitConverter.
Converts distance/s from motor encoder counts to that of this
UnitConverter.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s.
"""
if isinstance(distance, collections.Iterable):
return [(x / self._distance_to_motor) for x in distance]
else:
return distance / self._distance_to_motor
def to_unit_velocity_acceleration(self, va):
""" Convert velocities/accelerations to units of UnitConverter.
Converts velocity/ies and/or acceleration/s from units of motor
pitch per second (or second squared) to that of this
UnitConverter.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations.
"""
if isinstance(va, collections.Iterable):
return [(x / self._va_to_motor) for x in va]
else:
return va / self._va_to_motor
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/utilities.py | UnitConverter.to_motor_velocity_acceleration | python | def to_motor_velocity_acceleration(self, va):
if isinstance(va, collections.Iterable):
return [(x * self._va_to_motor) for x in va]
else:
return va * self._va_to_motor | Convert velocities/accelerations to motor units.
Converts velocity/ies and/or acceleration/s to units of motor
pitch per second (or second squared), which is what the drive
requires when given move instructions.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/utilities.py#L101-L122 | null | class UnitConverter(object):
""" Converter between physical units and motor units.
Class to convert between the desired physical unit of length and
motor units; which are encoder counts for distance, motor pitches
per second for velocity, and motor pitches per second squared for
acceleration.
Parameters
----------
dmepit : float
Electrical pitch of the motor.
eres : int
Encoder resolution.
unit_in_meters : float, optional
The length in meters of the desired physical unit of length for
this convert to convert between. The default, ``1.0``,
corresponds to converting between meters and motor units.
"""
def __init__(self, dmepit, eres, unit_in_meters=1.0):
# Construct the multipliers to convert distances and
# velocities/accelerations to motor units.
self._distance_to_motor = 1e3*unit_in_meters*eres/dmepit
self._va_to_motor = 1e3*unit_in_meters/dmepit
def to_motor_distance(self, distance):
""" Convert distance/s to motor units.
Converts distance/s to units of motor encoder counts, which is
what the drive requires when given move instructions.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s.
"""
if isinstance(distance, collections.Iterable):
return [(x * self._distance_to_motor) for x in distance]
else:
return distance * self._distance_to_motor
def to_unit_distance(self, distance):
""" Convert distance/s to units of UnitConverter.
Converts distance/s from motor encoder counts to that of this
UnitConverter.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s.
"""
if isinstance(distance, collections.Iterable):
return [(x / self._distance_to_motor) for x in distance]
else:
return distance / self._distance_to_motor
def to_unit_velocity_acceleration(self, va):
""" Convert velocities/accelerations to units of UnitConverter.
Converts velocity/ies and/or acceleration/s from units of motor
pitch per second (or second squared) to that of this
UnitConverter.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations.
"""
if isinstance(va, collections.Iterable):
return [(x / self._va_to_motor) for x in va]
else:
return va / self._va_to_motor
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/utilities.py | UnitConverter.to_unit_distance | python | def to_unit_distance(self, distance):
if isinstance(distance, collections.Iterable):
return [(x / self._distance_to_motor) for x in distance]
else:
return distance / self._distance_to_motor | Convert distance/s to units of UnitConverter.
Converts distance/s from motor encoder counts to that of this
UnitConverter.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/utilities.py#L124-L144 | null | class UnitConverter(object):
""" Converter between physical units and motor units.
Class to convert between the desired physical unit of length and
motor units; which are encoder counts for distance, motor pitches
per second for velocity, and motor pitches per second squared for
acceleration.
Parameters
----------
dmepit : float
Electrical pitch of the motor.
eres : int
Encoder resolution.
unit_in_meters : float, optional
The length in meters of the desired physical unit of length for
this convert to convert between. The default, ``1.0``,
corresponds to converting between meters and motor units.
"""
def __init__(self, dmepit, eres, unit_in_meters=1.0):
# Construct the multipliers to convert distances and
# velocities/accelerations to motor units.
self._distance_to_motor = 1e3*unit_in_meters*eres/dmepit
self._va_to_motor = 1e3*unit_in_meters/dmepit
def to_motor_distance(self, distance):
""" Convert distance/s to motor units.
Converts distance/s to units of motor encoder counts, which is
what the drive requires when given move instructions.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s.
"""
if isinstance(distance, collections.Iterable):
return [(x * self._distance_to_motor) for x in distance]
else:
return distance * self._distance_to_motor
def to_motor_velocity_acceleration(self, va):
""" Convert velocities/accelerations to motor units.
Converts velocity/ies and/or acceleration/s to units of motor
pitch per second (or second squared), which is what the drive
requires when given move instructions.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations.
"""
if isinstance(va, collections.Iterable):
return [(x * self._va_to_motor) for x in va]
else:
return va * self._va_to_motor
def to_unit_velocity_acceleration(self, va):
""" Convert velocities/accelerations to units of UnitConverter.
Converts velocity/ies and/or acceleration/s from units of motor
pitch per second (or second squared) to that of this
UnitConverter.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations.
"""
if isinstance(va, collections.Iterable):
return [(x / self._va_to_motor) for x in va]
else:
return va / self._va_to_motor
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/utilities.py | UnitConverter.to_unit_velocity_acceleration | python | def to_unit_velocity_acceleration(self, va):
if isinstance(va, collections.Iterable):
return [(x / self._va_to_motor) for x in va]
else:
return va / self._va_to_motor | Convert velocities/accelerations to units of UnitConverter.
Converts velocity/ies and/or acceleration/s from units of motor
pitch per second (or second squared) to that of this
UnitConverter.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/utilities.py#L146-L167 | null | class UnitConverter(object):
""" Converter between physical units and motor units.
Class to convert between the desired physical unit of length and
motor units; which are encoder counts for distance, motor pitches
per second for velocity, and motor pitches per second squared for
acceleration.
Parameters
----------
dmepit : float
Electrical pitch of the motor.
eres : int
Encoder resolution.
unit_in_meters : float, optional
The length in meters of the desired physical unit of length for
this convert to convert between. The default, ``1.0``,
corresponds to converting between meters and motor units.
"""
def __init__(self, dmepit, eres, unit_in_meters=1.0):
# Construct the multipliers to convert distances and
# velocities/accelerations to motor units.
self._distance_to_motor = 1e3*unit_in_meters*eres/dmepit
self._va_to_motor = 1e3*unit_in_meters/dmepit
def to_motor_distance(self, distance):
""" Convert distance/s to motor units.
Converts distance/s to units of motor encoder counts, which is
what the drive requires when given move instructions.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s.
"""
if isinstance(distance, collections.Iterable):
return [(x * self._distance_to_motor) for x in distance]
else:
return distance * self._distance_to_motor
def to_motor_velocity_acceleration(self, va):
""" Convert velocities/accelerations to motor units.
Converts velocity/ies and/or acceleration/s to units of motor
pitch per second (or second squared), which is what the drive
requires when given move instructions.
Parameters
----------
va : int, float, or iterable of ints and floats
The velocities/accelerations to convert.
Returns
-------
converted_va : float or list of floats
The converted velocities/accelerations.
"""
if isinstance(va, collections.Iterable):
return [(x * self._va_to_motor) for x in va]
else:
return va * self._va_to_motor
def to_unit_distance(self, distance):
""" Convert distance/s to units of UnitConverter.
Converts distance/s from motor encoder counts to that of this
UnitConverter.
Parameters
----------
distance : int, float, or iterable of ints and floats
The distance/s to convert.
Returns
-------
converted_distance : float or list of floats
The converted distance/s.
"""
if isinstance(distance, collections.Iterable):
return [(x / self._distance_to_motor) for x in distance]
else:
return distance / self._distance_to_motor
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232._send_command | python | def _send_command(self, command, immediate=False, timeout=1.0,
check_echo=None):
# Use the default echo checking if None was given.
if check_echo is None:
check_echo = self._check_echo
# Convert to bytes and then strip comments, whitespace, and
# newlines.
if sys.hexversion >= 0x03000000:
c = bytes(command, encoding='ASCII')
else:
c = command
c = c.split(b';')[0].strip()
# If the command is supposed to be immediate, insure that it
# starts with an '!'.
if immediate and not c.startswith(b'!'):
c = b'!' + c
# Read out any junk on the serial port before we start.
self._ser.read(self._ser.inWaiting())
# The command needs to be written a character at a time with
# pauses between them to make sure nothing gets lost or
# corrupted. This is a simple loop if we are not checking the
# echo. If we are, it is more complicated.
if not check_echo:
for i in range(0, len(c)):
self._ser.write(bytes([c[i]]))
time.sleep(0.01)
else:
# Infinite timeouts need to be converted to None. Finite
# ones need to be checked to make sure they are not too big,
# which is threading.TIMEOUT_MAX on Python 3.x and not
# specified on Python 2.x (lets use a week).
if timeout is None or timeout <= 0:
timeout = None
else:
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. Then, the
# timer is started.
tm = threading.Timer(timeout, lambda : None)
tm.start()
# Each character needs to be written one by one while the
# echo is collected. If any mistakes occur, they need to be
# corrected with backspaces b'\x08'. The echo starts out
# empty. We go until either the echo is identical to the
# command or the timeout is exceeded.
echo = b''
while c != echo and tm.is_alive():
# If there are no mistakes, then echo will be the
# beginning of c meaning the next character can be
# written. Otherwise, there is a mistake and a backspace
# needs to be written.
if c.startswith(echo):
self._ser.write(bytes([c[len(echo)]]))
else:
self._ser.write(b'\x08')
# Pause for a bit to make sure nothing gets lost. Then
# read the drive's output add it to the echo.
time.sleep(0.01)
echo += self._ser.read(self._ser.inWaiting())
# All backspaces in echo need to be processed. Each
# backspace deletes itself and the character before it
# (if any).
while b'\x08' in echo:
index = echo.index(b'\x08')
if index == 0:
echo = echo[1:]
else:
echo = echo[0:(index-1)] + echo[(index+1):]
# Turn off the timer in the case that it is still running
# (command completely written before timeout).
tm.cancel()
# Write the carriage return to enter the command and then return
# the sanitized command.
self._ser.write(b'\r')
if sys.hexversion >= 0x03000000:
return c.decode(errors='replace')
else:
return c | Send a single command to the drive after sanitizing it.
Takes a single given `command`, sanitizes it (strips out
comments, extra whitespace, and newlines), sends the command to
the drive, and returns the sanitized command. The validity of
the command is **NOT** checked.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : number, optional
Optional timeout in seconds to use to get the command right
when we are doing echo checking. A negative value or
``None`` indicates that the an infinite timeout should be
used.
check_echo : bool or None, optional
Whether the echoing of the command as it is being written to
the drive should be used to correct mistakes in what the
drive is seeing, or whether the default set when the
instance of this class was created should be used
(``None``).
Returns
-------
sanitized_command : str
The sanitized command that was sent to the drive. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L127-L251 | null | class ASCII_RS232(object):
""" ASCII RS232 comm. driver for a Parker Motion Gemini drive.
Communications driver to talk to a Parker Motion Gemini drive in
ASCII mode over RS232.
Parameters
----------
port : serial port string
The serial port (RS232) that the Gemini drive is connected to.
check_echo : bool, optional
Whether the echoing of the commands as they are being written
to the drive should be used to correct mistakes in what the
drive is seeing or not as the default.
writeTimout : float, optional
The write timeout for the RS232 port. See ``serial.Serial``.
interCharTimeout : float or None, optional
The inter-character timeout for writing on the RS232 port.
``None`` disables. See ``serial.Serial``.
Raises
------
serial.SerialException
If `port` does not correspond to an available RS232 port or
can't be opened.
Notes
-----
The ASCII communications settings of the Gemini drive are changed
while this object is connected and are returned to the default
values when this object is deleted. Thus, the values of the
communications settings before this object is created are lost.
See Also
--------
serial.Serial
"""
def __init__(self, port, check_echo=True, writeTimeout=1.0,
interCharTimeout=0.002):
# Set private variable holding the echo parameters.
self._check_echo = check_echo
# Initialize the serial port to connect to the Gemini drive. The
# only timeout being explicitly set right now is the write
# timeout. Read timeouts are handled in a more manual fasion.
self._ser = serial.Serial(port, baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=None,
writeTimeout=writeTimeout,
interCharTimeout=interCharTimeout,
xonxoff=True, rtscts=False,
dsrdtr=False)
# It is convenient to have a text wrapper around the serial
# port for reading and writing.
self._sio = io.TextIOWrapper(io.BufferedRWPair(self._ser,
self._ser, 1), newline='\n',
encoding='ASCII')
# Change the communications parameters so that commands are
# echoed, on error level 4, no characters are used to preceed
# each response, carriage returns are used for newlines in
# responses, responses are terminated by a '\n', and there are
# no prompts (there are separate prompts depending on whether
# the previous command had an error or not). The echo command is
# the one command that echo checking cannot be done on since
# echo may not be enabled yet.
self._send_command('ECHO1', check_echo=False, immediate=True)
self._send_command('ERRLVL4', immediate=True)
self._send_command('BOT0,0,0', immediate=True)
self._send_command('EOT10,0,0', immediate=True)
self._send_command('EOL13,0,0', immediate=True)
self._send_command('ERRBAD0,0,0,0', immediate=True)
self._send_command('ERROK0,0,0,0', immediate=True)
# Wait a little while for the commands to be processed and then
# discard all the responses.
time.sleep(2)
self._ser.read(self._ser.inWaiting())
def __del__(self):
""" Returns all communications settings to their defaults.
"""
# Return all communicatsions parameters to their default values
# (from the manual).
self._send_command('ECHO1', immediate=True)
self._send_command('ERRLVL4', immediate=True)
self._send_command('BOT0,0,0', immediate=True)
self._send_command('EOT13,0,0', immediate=True)
self._send_command('EOL13,10,0', immediate=True)
self._send_command('ERRBAD13,10,63,32', immediate=True)
self._send_command('ERROK13,10,62,32', immediate=True)
# Wait a little while for the commands to be processed and then
# discard all the responses.
time.sleep(2)
self._ser.read(self._ser.inWaiting())
def _get_response(self, timeout=1.0, eor=('\n', '\n- ')):
""" Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved.
"""
# If no timeout is given or it is invalid and we are using '\n'
# as the eor, use the wrapper to read a line with an infinite
# timeout. Otherwise, the reading and timeout must be
# implemented manually.
if (timeout is None or timeout < 0) and eor == '\n':
return self._sio.readline()
else:
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. They need to
# be checked to make sure they are not too big, which is
# threading.TIMEOUT_MAX on Python 3.x and not specified on
# Python 2.x (lets use a week). Then, the timer is started.
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
tm = threading.Timer(timeout, lambda : None)
tm.start()
# eor needs to be converted to bytes. If it is just an str,
# it needs to be wrapped in a tuple.
if isinstance(eor, str):
eor = tuple([eor])
if sys.hexversion >= 0x03000000:
eor = [s.encode(encoding='ASCII') for s in eor]
# Read from the serial port into buf until the EOR is found
# or the timer has stopped. A small pause is done each time
# so that this thread doesn't hog the CPU.
buf = b''
while not any([(x in buf) for x in eor]) and tm.is_alive():
time.sleep(0.001)
buf += self._ser.read(self._ser.inWaiting())
# Just in case the timer has not stopped (EOR was found),
# stop it.
tm.cancel()
# Remove anything after the EOR if there is one. First, a
# set of matches (index, eor_str) for each string in eor
# needs to be constructed. Sorting the matches by their
# index puts all the ones that were not found (index of -1)
# at the front. Then a list of bools that are True for each
# index that isn't -1 is made, converted to a bytes (True
# goes to b'\x01' and False goes to b'\x00'), and then the
# index of the first True value found. If it is not -1, then
# there was a successful match and all the characters are
# dropped after that eor_str.
matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
buf = buf[:(matches[index][0] + len(matches[index][1]))]
# Convert to an str before returning.
if sys.hexversion >= 0x03000000:
return buf.decode(errors='replace')
else:
return buf
def _process_response(self, response):
""" Processes a response from the drive.
Processes the response returned from the drive. It is broken
down into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response.
Parameters
----------
response : str
The response returned by the drive.
Returns
-------
processed_response : list
A 4-element ``list``. The elements, in order, are `response`
(``str``), the echoed command (``str``), any error response
(``None`` if none, or the ``str`` of the error), and the
lines of the response that are not the echo or error line
(``list`` of ``str`` with newlines stripped).
"""
# Strip the trailing newline and split the response into lines
# by carriage returns.
rsp_lines = response.rstrip('\r\n').split('\r')
# If we have at least one line, the first one is the echoed
# command. If available, it needs to be grabbed and that line
# removed from rsp_lines since it is just the echoing, not the
# actual response to the command. None will be used to denote a
# non-existent echo.
if len(rsp_lines) > 0:
echoed_command = rsp_lines[0]
del rsp_lines[0]
else:
echoed_command = None
# If the next line is one of the different possible error
# strings, then there was an error that must be grabbed (leading
# '*' is stripped). If there was an error, remove that line from
# the response. None will be used to denote the lack of an error.
if len(rsp_lines) > 0 and \
rsp_lines[0] in ('*INVALID_ADDRESS', '*INVALID_DATA', \
'*INVALID_DATA_HIGH', '*INVALID_DATA_LOW', \
'*UNDEFINED_LABEL'):
err = rsp_lines[0][1:]
del rsp_lines[0]
else:
err = None
return [response, echoed_command, err, rsp_lines]
def command_error(self, response):
    """Check a processed response for any sign of a failed command.

    A command is considered to have failed either when the drive
    returned an error, or when the command the drive echoed back
    differs from the one that was sent (an error in transmission).
    An echo that is the command preceded by '- ' (program
    definition mode) still counts as a match.

    Parameters
    ----------
    response : processed response (list)
        The processed response ``list`` for the command that was
        executed.

    Returns
    -------
    error : bool
        ``True`` if there was an error and ``False`` otherwise.
    """
    command = response[0]
    echo = response[2]
    error = response[3]
    # The echo must match the command exactly (possibly with the
    # '- ' program-definition prefix) and no error may be present.
    echo_ok = echo in (command, '- ' + command)
    return (not echo_ok) or (error is not None)
def send_command(self, command, immediate=False, timeout=1.0,
                 max_retries=0, eor=('\n', '\n- ')):
    """ Sends a single command to the drive and returns output.

    Takes a single given `command`, sanitizes it (comments, extra
    whitespace, and newline characters are stripped), sends it to
    the drive, reads the response, and returns the processed
    response. If `immediate` is set, the command is made to be an
    immediate command. Note, the command is **NOT** checked for
    validity. If the drive returns an error, the command is
    re-executed up to `max_retries` more times. The response from
    the final execution is processed and returned, broken down into
    the echoed command (drive echoes it back), any error returned
    by the drive (leading '*' is stripped), and the different lines
    of the response.

    Parameters
    ----------
    command : str
        The command to send to the Gemini drive.
    immediate : bool, optional
        Whether to make it so the command is executed immediately
        or not.
    timeout : float or None, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.
    eor : str or iterable of str, optional
        ``str`` or an iterable of ``str`` that denote the allowed
        End Of Response. For most commands, it should be
        ``('\\n', '\\n- ')``, but for running a program, it should
        be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.

    Returns
    -------
    output : list
        A 5-element ``list``. The elements, in order, are the
        sanitized command (``str``), the full response (``str``),
        the echoed command (``str``), any error response (``None``
        if none, or the ``str`` of the error), and the lines of the
        response that are not the echo or error line (``list`` of
        ``str`` with newlines stripped).

    See Also
    --------
    send_commands : Send multiple commands.
    """
    # Execute the command till it either doesn't have an error or
    # the maximum number of retries is exceeded.
    for i in range(max_retries + 1):
        # Send the command and stuff the sanitized version in a
        # list. Then process the response and add it to the list.
        response = [self._send_command(command,
                                       immediate=immediate)]
        output = self._get_response(timeout=timeout, eor=eor)
        # If echo checking was done, the echo was already grabbed,
        # is identical to the command, and needs to be placed back
        # in front of the output so that it can be processed
        # properly.
        if self._check_echo:
            output = response[0] + output
        response.extend(self._process_response(output))
        # We are done if there is no error.
        if not self.command_error(response):
            break
        # Put in a slight pause between retries so the drive has a
        # bit of breathing time. Skipped after the final attempt:
        # the original slept unconditionally, needlessly delaying
        # the return by 0.25 s when all retries were exhausted.
        if i < max_retries:
            time.sleep(0.25)
    return response
def send_commands(self, commands, timeout=1.0,
                  max_retries=1, eor=('\n', '\n- ')):
    """ Send a sequence of commands to the drive and collect output.

    Takes a sequence of many commands and executes them one by one
    till either all are executed or one runs out of retries
    (`max_retries`). Retries are optionally performed if a command's
    response indicates that there was an error. Remaining commands
    are not executed. The processed output of the final execution
    (last try or retry) of each command that was actually executed
    is returned.

    This function basically feeds commands one by one to
    ``send_command`` and collates the outputs.

    Parameters
    ----------
    commands : iterable of str
        Iterable of commands to send to the drive. Each command
        must be an ``str``.
    timeout : float or None, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.
    eor : str or iterable of str, optional
        End Of Response. An EOR is either a ``str`` or an iterable
        of ``str`` that denote the possible endings of a response.
        `eor` can be a single EOR, in which case it is used for all
        commands, or it can be a ``list`` of EOR to use for each
        individual command. For most commands, it should be
        ``('\\n', '\\n- ')``, but for running a program, it should
        be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.

    Returns
    -------
    outputs : list of lists
        ``list`` composed of the processed responses of each
        command in the order that they were done up to and
        including the last command executed. See ``send_command``
        for the format of processed responses.

    See Also
    --------
    send_command : Send a single command.
    """
    # Materialize commands first: the docstring promises any
    # iterable, but replication of eor and the end-of-sequence
    # check below both need len(), which a generator lacks.
    commands = list(commands)
    # If eor is not a list, make a list of it replicated enough for
    # every command.
    if not isinstance(eor, list):
        eor = [eor]*len(commands)
    # Do every command one by one, collecting the responses and
    # stuffing them in a list. Commands that failed are retried,
    # and we stop if the last retry is exhausted.
    responses = []
    for i, command in enumerate(commands):
        rsp = self.send_command(command, timeout=timeout,
                                max_retries=max_retries,
                                eor=eor[i])
        responses.append(rsp)
        if self.command_error(rsp):
            break
        # Put in a slight pause between commands so the drive has a
        # bit of breathing time. Skipped after the last command:
        # the original slept unconditionally, needlessly delaying
        # the return by 0.25 s.
        if i < len(commands) - 1:
            time.sleep(0.25)
    return responses
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232._get_response | python | def _get_response(self, timeout=1.0, eor=('\n', '\n- ')):
# If no timeout is given or it is invalid and we are using '\n'
# as the eor, use the wrapper to read a line with an infinite
# timeout. Otherwise, the reading and timeout must be
# implemented manually.
if (timeout is None or timeout < 0) and eor == '\n':
return self._sio.readline()
else:
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. They need to
# be checked to make sure they are not too big, which is
# threading.TIMEOUT_MAX on Python 3.x and not specified on
# Python 2.x (lets use a week). Then, the timer is started.
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
tm = threading.Timer(timeout, lambda : None)
tm.start()
# eor needs to be converted to bytes. If it is just an str,
# it needs to be wrapped in a tuple.
if isinstance(eor, str):
eor = tuple([eor])
if sys.hexversion >= 0x03000000:
eor = [s.encode(encoding='ASCII') for s in eor]
# Read from the serial port into buf until the EOR is found
# or the timer has stopped. A small pause is done each time
# so that this thread doesn't hog the CPU.
buf = b''
while not any([(x in buf) for x in eor]) and tm.is_alive():
time.sleep(0.001)
buf += self._ser.read(self._ser.inWaiting())
# Just in case the timer has not stopped (EOR was found),
# stop it.
tm.cancel()
# Remove anything after the EOR if there is one. First, a
# set of matches (index, eor_str) for each string in eor
# needs to be constructed. Sorting the matches by their
# index puts all the ones that were not found (index of -1)
# at the front. Then a list of bools that are True for each
# index that isn't -1 is made, converted to a bytes (True
# goes to b'\x01' and False goes to b'\x00'), and then the
# index of the first True value found. If it is not -1, then
# there was a successful match and all the characters are
# dropped after that eor_str.
matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
buf = buf[:(matches[index][0] + len(matches[index][1]))]
# Convert to an str before returning.
if sys.hexversion >= 0x03000000:
return buf.decode(errors='replace')
else:
return buf | Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved. | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L253-L338 | null | class ASCII_RS232(object):
""" ASCII RS232 comm. driver for a Parker Motion Gemini drive.
Communications driver to talk to a Parker Motion Gemini drive in
ASCII mode over RS232.
Parameters
----------
port : serial port string
The serial port (RS232) that the Gemini drive is connected to.
check_echo : bool, optional
Whether the echoing of the commands as they are being written
to the drive should be used to correct mistakes in what the
drive is seeing or not as the default.
writeTimout : float, optional
The write timeout for the RS232 port. See ``serial.Serial``.
interCharTimeout : float or None, optional
The inter-character timeout for writing on the RS232 port.
``None`` disables. See ``serial.Serial``.
Raises
------
serial.SerialException
If `port` does not correspond to an available RS232 port or
can't be opened.
Notes
-----
The ASCII communications settings of the Gemini drive are changed
while this object is connected and are returned to the default
values when this object is deleted. Thus, the values of the
communications settings before this object is created are lost.
See Also
--------
serial.Serial
"""
def __init__(self, port, check_echo=True, writeTimeout=1.0,
interCharTimeout=0.002):
# Set private variable holding the echo parameters.
self._check_echo = check_echo
# Initialize the serial port to connect to the Gemini drive. The
# only timeout being explicitly set right now is the write
# timeout. Read timeouts are handled in a more manual fasion.
self._ser = serial.Serial(port, baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=None,
writeTimeout=writeTimeout,
interCharTimeout=interCharTimeout,
xonxoff=True, rtscts=False,
dsrdtr=False)
# It is convenient to have a text wrapper around the serial
# port for reading and writing.
self._sio = io.TextIOWrapper(io.BufferedRWPair(self._ser,
self._ser, 1), newline='\n',
encoding='ASCII')
# Change the communications parameters so that commands are
# echoed, on error level 4, no characters are used to preceed
# each response, carriage returns are used for newlines in
# responses, responses are terminated by a '\n', and there are
# no prompts (there are separate prompts depending on whether
# the previous command had an error or not). The echo command is
# the one command that echo checking cannot be done on since
# echo may not be enabled yet.
self._send_command('ECHO1', check_echo=False, immediate=True)
self._send_command('ERRLVL4', immediate=True)
self._send_command('BOT0,0,0', immediate=True)
self._send_command('EOT10,0,0', immediate=True)
self._send_command('EOL13,0,0', immediate=True)
self._send_command('ERRBAD0,0,0,0', immediate=True)
self._send_command('ERROK0,0,0,0', immediate=True)
# Wait a little while for the commands to be processed and then
# discard all the responses.
time.sleep(2)
self._ser.read(self._ser.inWaiting())
def __del__(self):
""" Returns all communications settings to their defaults.
"""
# Return all communicatsions parameters to their default values
# (from the manual).
self._send_command('ECHO1', immediate=True)
self._send_command('ERRLVL4', immediate=True)
self._send_command('BOT0,0,0', immediate=True)
self._send_command('EOT13,0,0', immediate=True)
self._send_command('EOL13,10,0', immediate=True)
self._send_command('ERRBAD13,10,63,32', immediate=True)
self._send_command('ERROK13,10,62,32', immediate=True)
# Wait a little while for the commands to be processed and then
# discard all the responses.
time.sleep(2)
self._ser.read(self._ser.inWaiting())
def _send_command(self, command, immediate=False, timeout=1.0,
check_echo=None):
""" Send a single command to the drive after sanitizing it.
Takes a single given `command`, sanitizes it (strips out
comments, extra whitespace, and newlines), sends the command to
the drive, and returns the sanitized command. The validity of
the command is **NOT** checked.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : number, optional
Optional timeout in seconds to use to get the command right
when we are doing echo checking. A negative value or
``None`` indicates that the an infinite timeout should be
used.
check_echo : bool or None, optional
Whether the echoing of the command as it is being written to
the drive should be used to correct mistakes in what the
drive is seeing, or whether the default set when the
instance of this class was created should be used
(``None``).
Returns
-------
sanitized_command : str
The sanitized command that was sent to the drive.
"""
# Use the default echo checking if None was given.
if check_echo is None:
check_echo = self._check_echo
# Convert to bytes and then strip comments, whitespace, and
# newlines.
if sys.hexversion >= 0x03000000:
c = bytes(command, encoding='ASCII')
else:
c = command
c = c.split(b';')[0].strip()
# If the command is supposed to be immediate, insure that it
# starts with an '!'.
if immediate and not c.startswith(b'!'):
c = b'!' + c
# Read out any junk on the serial port before we start.
self._ser.read(self._ser.inWaiting())
# The command needs to be written a character at a time with
# pauses between them to make sure nothing gets lost or
# corrupted. This is a simple loop if we are not checking the
# echo. If we are, it is more complicated.
if not check_echo:
for i in range(0, len(c)):
self._ser.write(bytes([c[i]]))
time.sleep(0.01)
else:
# Infinite timeouts need to be converted to None. Finite
# ones need to be checked to make sure they are not too big,
# which is threading.TIMEOUT_MAX on Python 3.x and not
# specified on Python 2.x (lets use a week).
if timeout is None or timeout <= 0:
timeout = None
else:
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. Then, the
# timer is started.
tm = threading.Timer(timeout, lambda : None)
tm.start()
# Each character needs to be written one by one while the
# echo is collected. If any mistakes occur, they need to be
# corrected with backspaces b'\x08'. The echo starts out
# empty. We go until either the echo is identical to the
# command or the timeout is exceeded.
echo = b''
while c != echo and tm.is_alive():
# If there are no mistakes, then echo will be the
# beginning of c meaning the next character can be
# written. Otherwise, there is a mistake and a backspace
# needs to be written.
if c.startswith(echo):
self._ser.write(bytes([c[len(echo)]]))
else:
self._ser.write(b'\x08')
# Pause for a bit to make sure nothing gets lost. Then
# read the drive's output add it to the echo.
time.sleep(0.01)
echo += self._ser.read(self._ser.inWaiting())
# All backspaces in echo need to be processed. Each
# backspace deletes itself and the character before it
# (if any).
while b'\x08' in echo:
index = echo.index(b'\x08')
if index == 0:
echo = echo[1:]
else:
echo = echo[0:(index-1)] + echo[(index+1):]
# Turn off the timer in the case that it is still running
# (command completely written before timeout).
tm.cancel()
# Write the carriage return to enter the command and then return
# the sanitized command.
self._ser.write(b'\r')
if sys.hexversion >= 0x03000000:
return c.decode(errors='replace')
else:
return c
def _process_response(self, response):
""" Processes a response from the drive.
Processes the response returned from the drive. It is broken
down into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response.
Parameters
----------
response : str
The response returned by the drive.
Returns
-------
processed_response : list
A 4-element ``list``. The elements, in order, are `response`
(``str``), the echoed command (``str``), any error response
(``None`` if none, or the ``str`` of the error), and the
lines of the response that are not the echo or error line
(``list`` of ``str`` with newlines stripped).
"""
# Strip the trailing newline and split the response into lines
# by carriage returns.
rsp_lines = response.rstrip('\r\n').split('\r')
# If we have at least one line, the first one is the echoed
# command. If available, it needs to be grabbed and that line
# removed from rsp_lines since it is just the echoing, not the
# actual response to the command. None will be used to denote a
# non-existent echo.
if len(rsp_lines) > 0:
echoed_command = rsp_lines[0]
del rsp_lines[0]
else:
echoed_command = None
# If the next line is one of the different possible error
# strings, then there was an error that must be grabbed (leading
# '*' is stripped). If there was an error, remove that line from
# the response. None will be used to denote the lack of an error.
if len(rsp_lines) > 0 and \
rsp_lines[0] in ('*INVALID_ADDRESS', '*INVALID_DATA', \
'*INVALID_DATA_HIGH', '*INVALID_DATA_LOW', \
'*UNDEFINED_LABEL'):
err = rsp_lines[0][1:]
del rsp_lines[0]
else:
err = None
return [response, echoed_command, err, rsp_lines]
def command_error(self, response):
""" Checks whether a command produced an error.
Checks whether a command produced an error based on its
processed response. The two types of errors are an error
returned by the drive and the command that the drive received
being different than the one that was sent (error in
transmission).
Parameters
----------
response : processed response (list)
The processed response ``list`` for the command that was
executed.
Returns
-------
error : bool
``True`` if there was an error and ``False`` otherwise.
"""
# The command should be echoed back accurately (might be
# preceeded by a '- ' if it is part of a program definition) and
# no errors should be returned, if it has no errors.
return (response[2] not in [response[0], '- ' + response[0]]
or response[3] is not None)
def send_command(self, command, immediate=False, timeout=1.0,
max_retries=0, eor=('\n', '\n- ')):
""" Sends a single command to the drive and returns output.
Takes a single given `command`, sanitizes it, sends it to the
drive, reads the response, and returns the processed response.
The command is first sanitized by removing comments, extra
whitespace, and newline characters. If `immediate` is set, the
command is made to be an immediate command. Note, the command is
**NOT** checked for validity. If the drive returns an error, the
command is re-executed up to `max_tries` more times. The
response from the final execution is processed and returned. The
response from the drive is broken down into the echoed command
(drive echoes it back), any error returned by the drive (leading
'*' is stripped), and the different lines of the response; which
are all returned.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
``str`` or an iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
See Also
--------
send_commands : Send multiple commands.
Examples
--------
Simple command energizing the motor with no response and no
errors.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=False, timeout=1.0)
['DRIVE1', 'DRIVE1\\r\\r\\n', 'DRIVE1', None, []]
Same command but made immediate.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=True, timeout=1.0)
['!DRIVE1', '!DRIVE1\\r\\r\\n', '!DRIVE1', None, []]
Same command with a typo.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIV1', immediate=False, timeout=1.0)
['DRIV1', 'DRIV1\\r*UNDEFINED_LABEL\\r\\r\\n', 'DRIV1',
'UNDEFINED_LABEL', []]
Simple command asking whether the motor is energized or not.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE', immediate=False, timeout=1.0)
['DRIVE', 'DRIVE\\r*DRIVE1\\r\\r\\n', 'DRIVE', None,
['*DRIVE1']]
"""
# Execute the command till it either doesn't have an error or
# the maximum number of retries is exceeded.
for i in range(0, max_retries+1):
# Send the command and stuff the sanitized version in a
# list. Then process the response and add it to the list.
response = [self._send_command(command,
immediate=immediate)]
output = self._get_response(timeout=timeout, eor=eor)
# If echo checking was done, the echo was already grabbed,
# is identical to the command, and needs to be placed back
# in front of the output so that it can be processed
# properly.
if self._check_echo:
output = response[0] + output
response.extend(self._process_response(output))
# We are done if there is no error.
if not self.command_error(response):
break
# Put in a slight pause so the drive has a bit of breathing
# time between retries.
time.sleep(0.25)
return response
def send_commands(self, commands, timeout=1.0,
max_retries=1, eor=('\n', '\n- ')):
""" Send a sequence of commands to the drive and collect output.
Takes a sequence of many commands and executes them one by one
till either all are executed or one runs out of retries
(`max_retries`). Retries are optionally performed if a command's
response indicates that there was an error. Remaining commands
are not executed. The processed output of the final execution
(last try or retry) of each command that was actually executed
is returned.
This function basically feeds commands one by one to
``send_command`` and collates the outputs.
Parameters
----------
commands : iterable of str
Iterable of commands to send to the drive. Each command must
be an ``str``.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
End Of Response. An EOR is either a ``str`` or an iterable
of ``str`` that denote the possible endings of a response.
'eor' can be a single EOR, in which case it is used for all
commands, or it can be an iterable of EOR to use for each
individual command. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
outputs : list of lists
``list`` composed of the processed responses of each command
in the order that they were done up to and including the
last command executed. See ``send_command`` for the format
of processed responses.
See Also
--------
send_command : Send a single command.
Examples
--------
A sequence of commands to energize the motor, move it a bit away
from the starting position, and then do 4 forward/reverse
cycles, and de-energize the motor. **DO NOT** try these specific
movement distances without checking that the motion won't damage
something (very motor and application specific).
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ra = ASCII_RS232('/dev/ttyS1')
>>> ra.send_commands(['DRIVE1', 'D-10000', 'GO']
... + ['D-10000','GO','D10000','GO']*4
... + [ 'DRIVE0'])
[['DRIVE1', 'DRIVE1\\r', 'DRIVE1', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['DRIVE0', 'DRIVE0\\r', 'DRIVE0', None, []]]
"""
# If eor is not a list, make a list of it replicated enough for
# every command.
if not isinstance(eor, list):
eor = [eor]*len(commands)
# Do every command one by one, collecting the responses and
# stuffing them in a list. Commands that failed are retried, and
# we stop if the last retry is exhausted.
responses = []
for i, command in enumerate(commands):
rsp = self.send_command(command, timeout=timeout,
max_retries=max_retries,
eor=eor[i])
responses.append(rsp)
if self.command_error(rsp):
break
# Put in a slight pause so the drive has a bit of breathing
# time between commands.
time.sleep(0.25)
return responses
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232._process_response | python | def _process_response(self, response):
# Strip the trailing newline and split the response into lines
# by carriage returns.
rsp_lines = response.rstrip('\r\n').split('\r')
# If we have at least one line, the first one is the echoed
# command. If available, it needs to be grabbed and that line
# removed from rsp_lines since it is just the echoing, not the
# actual response to the command. None will be used to denote a
# non-existent echo.
if len(rsp_lines) > 0:
echoed_command = rsp_lines[0]
del rsp_lines[0]
else:
echoed_command = None
# If the next line is one of the different possible error
# strings, then there was an error that must be grabbed (leading
# '*' is stripped). If there was an error, remove that line from
# the response. None will be used to denote the lack of an error.
if len(rsp_lines) > 0 and \
rsp_lines[0] in ('*INVALID_ADDRESS', '*INVALID_DATA', \
'*INVALID_DATA_HIGH', '*INVALID_DATA_LOW', \
'*UNDEFINED_LABEL'):
err = rsp_lines[0][1:]
del rsp_lines[0]
else:
err = None
return [response, echoed_command, err, rsp_lines] | Processes a response from the drive.
Processes the response returned from the drive. It is broken
down into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response.
Parameters
----------
response : str
The response returned by the drive.
Returns
-------
processed_response : list
A 4-element ``list``. The elements, in order, are `response`
(``str``), the echoed command (``str``), any error response
(``None`` if none, or the ``str`` of the error), and the
lines of the response that are not the echo or error line
(``list`` of ``str`` with newlines stripped). | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L340-L391 | null | class ASCII_RS232(object):
""" ASCII RS232 comm. driver for a Parker Motion Gemini drive.
Communications driver to talk to a Parker Motion Gemini drive in
ASCII mode over RS232.
Parameters
----------
port : serial port string
The serial port (RS232) that the Gemini drive is connected to.
check_echo : bool, optional
Whether the echoing of the commands as they are being written
to the drive should be used to correct mistakes in what the
drive is seeing or not as the default.
writeTimout : float, optional
The write timeout for the RS232 port. See ``serial.Serial``.
interCharTimeout : float or None, optional
The inter-character timeout for writing on the RS232 port.
``None`` disables. See ``serial.Serial``.
Raises
------
serial.SerialException
If `port` does not correspond to an available RS232 port or
can't be opened.
Notes
-----
The ASCII communications settings of the Gemini drive are changed
while this object is connected and are returned to the default
values when this object is deleted. Thus, the values of the
communications settings before this object is created are lost.
See Also
--------
serial.Serial
"""
def __init__(self, port, check_echo=True, writeTimeout=1.0,
             interCharTimeout=0.002):
    """ Opens the serial port and configures the drive's ASCII mode.

    See the class docstring for parameter descriptions. The
    drive's communications settings are altered here; they are
    only restored to their factory defaults by ``__del__``.
    """
    # Set private variable holding the echo parameters.
    self._check_echo = check_echo
    # Initialize the serial port to connect to the Gemini drive. The
    # only timeout being explicitly set right now is the write
    # timeout. Read timeouts are handled in a more manual fashion
    # (see _get_response).
    self._ser = serial.Serial(port, baudrate=9600,
                              bytesize=serial.EIGHTBITS,
                              parity=serial.PARITY_NONE,
                              stopbits=serial.STOPBITS_ONE,
                              timeout=None,
                              writeTimeout=writeTimeout,
                              interCharTimeout=interCharTimeout,
                              xonxoff=True, rtscts=False,
                              dsrdtr=False)
    # It is convenient to have a text wrapper around the serial
    # port for reading and writing.
    self._sio = io.TextIOWrapper(io.BufferedRWPair(self._ser,
                                 self._ser, 1), newline='\n',
                                 encoding='ASCII')
    # Change the communications parameters so that commands are
    # echoed, on error level 4, no characters are used to precede
    # each response, carriage returns are used for newlines in
    # responses, responses are terminated by a '\n', and there are
    # no prompts (there are separate prompts depending on whether
    # the previous command had an error or not). The echo command is
    # the one command that echo checking cannot be done on since
    # echo may not be enabled yet.
    self._send_command('ECHO1', check_echo=False, immediate=True)
    self._send_command('ERRLVL4', immediate=True)
    self._send_command('BOT0,0,0', immediate=True)
    self._send_command('EOT10,0,0', immediate=True)
    self._send_command('EOL13,0,0', immediate=True)
    self._send_command('ERRBAD0,0,0,0', immediate=True)
    self._send_command('ERROK0,0,0,0', immediate=True)
    # Wait a little while for the commands to be processed and then
    # discard all the responses.
    time.sleep(2)
    self._ser.read(self._ser.inWaiting())
def __del__(self):
    """ Returns all communications settings to their defaults.
    """
    # Restore every ASCII communications parameter to its factory
    # default value (values taken from the drive manual), in the
    # same order __init__ set them.
    factory_defaults = ('ECHO1',
                        'ERRLVL4',
                        'BOT0,0,0',
                        'EOT13,0,0',
                        'EOL13,10,0',
                        'ERRBAD13,10,63,32',
                        'ERROK13,10,62,32')
    for cmd in factory_defaults:
        self._send_command(cmd, immediate=True)
    # Give the drive a moment to process the commands, then throw
    # away whatever responses have accumulated.
    time.sleep(2)
    self._ser.read(self._ser.inWaiting())
def _send_command(self, command, immediate=False, timeout=1.0,
                  check_echo=None):
    """ Send a single command to the drive after sanitizing it.

    Takes a single given `command`, sanitizes it (strips out
    comments, extra whitespace, and newlines), sends the command to
    the drive, and returns the sanitized command. The validity of
    the command is **NOT** checked.

    Parameters
    ----------
    command : str
        The command to send to the Gemini drive.
    immediate : bool, optional
        Whether to make it so the command is executed immediately or
        not.
    timeout : number or None, optional
        Optional timeout in seconds to use to get the command right
        when we are doing echo checking. A non-positive value or
        ``None`` indicates that an infinite timeout should be used.
    check_echo : bool or None, optional
        Whether the echoing of the command as it is being written to
        the drive should be used to correct mistakes in what the
        drive is seeing, or whether the default set when the
        instance of this class was created should be used
        (``None``).

    Returns
    -------
    sanitized_command : str
        The sanitized command that was sent to the drive.

    """
    # Use the default echo checking if None was given.
    if check_echo is None:
        check_echo = self._check_echo
    # Convert to bytes and then strip comments, whitespace, and
    # newlines.
    if sys.hexversion >= 0x03000000:
        c = bytes(command, encoding='ASCII')
    else:
        c = command
    c = c.split(b';')[0].strip()
    # If the command is supposed to be immediate, ensure that it
    # starts with an '!'.
    if immediate and not c.startswith(b'!'):
        c = b'!' + c
    # Read out (and discard) any junk on the serial port before we
    # start.
    self._ser.read(self._ser.inWaiting())
    # The command needs to be written a character at a time with
    # pauses between them to make sure nothing gets lost or
    # corrupted. This is a simple loop if we are not checking the
    # echo. If we are, it is more complicated.
    if not check_echo:
        # NOTE(review): bytes([c[i]]) relies on Python 3 indexing
        # semantics (c[i] is an int); the Python 2 branch above
        # leaves c as str, where this expression differs -- confirm
        # whether Python 2 support is still required here.
        for i in range(0, len(c)):
            self._ser.write(bytes([c[i]]))
            time.sleep(0.01)
    else:
        # Infinite timeouts need to be converted to None. Finite
        # ones need to be checked to make sure they are not too big,
        # which is threading.TIMEOUT_MAX on Python 3.x and not
        # specified on Python 2.x (lets use a week).
        if timeout is None or timeout <= 0:
            timeout = None
        else:
            if sys.hexversion >= 0x03000000:
                maxtimeout = threading.TIMEOUT_MAX
            else:
                maxtimeout = 7*24*3600
            timeout = min(timeout, maxtimeout)
        # A timer will be made that takes timeout to finish. Then,
        # it is a matter of checking whether it is alive or not to
        # know whether the timeout was exceeded or not. Then, the
        # timer is started.
        tm = threading.Timer(timeout, lambda : None)
        tm.start()
        # Each character needs to be written one by one while the
        # echo is collected. If any mistakes occur, they need to be
        # corrected with backspaces b'\x08'. The echo starts out
        # empty. We go until either the echo is identical to the
        # command or the timeout is exceeded.
        echo = b''
        while c != echo and tm.is_alive():
            # If there are no mistakes, then echo will be the
            # beginning of c meaning the next character can be
            # written. Otherwise, there is a mistake and a backspace
            # needs to be written.
            if c.startswith(echo):
                self._ser.write(bytes([c[len(echo)]]))
            else:
                self._ser.write(b'\x08')
            # Pause for a bit to make sure nothing gets lost. Then
            # read the drive's output and add it to the echo.
            time.sleep(0.01)
            echo += self._ser.read(self._ser.inWaiting())
            # All backspaces in echo need to be processed. Each
            # backspace deletes itself and the character before it
            # (if any).
            while b'\x08' in echo:
                index = echo.index(b'\x08')
                if index == 0:
                    echo = echo[1:]
                else:
                    echo = echo[0:(index-1)] + echo[(index+1):]
        # Turn off the timer in the case that it is still running
        # (command completely written before timeout).
        tm.cancel()
    # Write the carriage return to enter the command and then return
    # the sanitized command.
    self._ser.write(b'\r')
    if sys.hexversion >= 0x03000000:
        return c.decode(errors='replace')
    else:
        return c
def _get_response(self, timeout=1.0, eor=('\n', '\n- ')):
""" Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved.
"""
# If no timeout is given or it is invalid and we are using '\n'
# as the eor, use the wrapper to read a line with an infinite
# timeout. Otherwise, the reading and timeout must be
# implemented manually.
if (timeout is None or timeout < 0) and eor == '\n':
return self._sio.readline()
else:
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. They need to
# be checked to make sure they are not too big, which is
# threading.TIMEOUT_MAX on Python 3.x and not specified on
# Python 2.x (lets use a week). Then, the timer is started.
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
tm = threading.Timer(timeout, lambda : None)
tm.start()
# eor needs to be converted to bytes. If it is just an str,
# it needs to be wrapped in a tuple.
if isinstance(eor, str):
eor = tuple([eor])
if sys.hexversion >= 0x03000000:
eor = [s.encode(encoding='ASCII') for s in eor]
# Read from the serial port into buf until the EOR is found
# or the timer has stopped. A small pause is done each time
# so that this thread doesn't hog the CPU.
buf = b''
while not any([(x in buf) for x in eor]) and tm.is_alive():
time.sleep(0.001)
buf += self._ser.read(self._ser.inWaiting())
# Just in case the timer has not stopped (EOR was found),
# stop it.
tm.cancel()
# Remove anything after the EOR if there is one. First, a
# set of matches (index, eor_str) for each string in eor
# needs to be constructed. Sorting the matches by their
# index puts all the ones that were not found (index of -1)
# at the front. Then a list of bools that are True for each
# index that isn't -1 is made, converted to a bytes (True
# goes to b'\x01' and False goes to b'\x00'), and then the
# index of the first True value found. If it is not -1, then
# there was a successful match and all the characters are
# dropped after that eor_str.
matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
buf = buf[:(matches[index][0] + len(matches[index][1]))]
# Convert to an str before returning.
if sys.hexversion >= 0x03000000:
return buf.decode(errors='replace')
else:
return buf
def command_error(self, response):
    """ Checks whether a command produced an error.

    Checks whether a command produced an error based on its
    processed response. The two failure modes are an error string
    returned by the drive and the drive echoing back a command
    different from the one that was sent (error in transmission).

    Parameters
    ----------
    response : processed response (list)
        The processed response ``list`` for the command that was
        executed.

    Returns
    -------
    error : bool
        ``True`` if there was an error and ``False`` otherwise.

    """
    sent = response[0]
    echoed = response[2]
    err = response[3]
    # An error-free command is echoed back verbatim (possibly
    # prefixed with '- ' inside a program definition) and carries
    # no error string.
    echo_ok = echoed in (sent, '- ' + sent)
    return (not echo_ok) or (err is not None)
def send_command(self, command, immediate=False, timeout=1.0,
                 max_retries=0, eor=('\n', '\n- ')):
    """ Sends a single command to the drive and returns output.

    Takes a single given `command`, sanitizes it, sends it to the
    drive, reads the response, and returns the processed response.
    The command is first sanitized by removing comments, extra
    whitespace, and newline characters. If `immediate` is set, the
    command is made to be an immediate command. Note, the command is
    **NOT** checked for validity. If the drive returns an error, the
    command is re-executed up to `max_retries` more times. The
    response from the final execution is processed and returned.

    Parameters
    ----------
    command : str
        The command to send to the Gemini drive.
    immediate : bool, optional
        Whether to make it so the command is executed immediately or
        not.
    timeout : float or None, optional
        Optional timeout in seconds to use when reading the
        response. A negative value or ``None`` indicates that an
        infinite timeout should be used.
    max_retries : int, optional
        Maximum number of retries to do per command in the case of
        errors.
    eor : str or iterable of str, optional
        ``str`` or an iterable of ``str`` that denote the allowed
        End Of Response. For most commands, it should be
        ``('\\n', '\\n- ')``, but for running a program, it should
        be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.

    Returns
    -------
    output : list
        A 5-element ``list``. The elements, in order, are the
        sanitized command (``str``), the full response (``str``),
        the echoed command (``str``), any error response (``None``
        if none, or the ``str`` of the error), and the lines of the
        response that are not the echo or error line (``list`` of
        ``str`` with newlines stripped).

    See Also
    --------
    send_commands : Send multiple commands.

    Examples
    --------
    Simple command energizing the motor with no response and no
    errors.

    >>> from GeminiMotorDrive.drivers import ASCII_RS232
    >>> ar = ASCII_RS232('/dev/ttyS1')
    >>> ar.send_command('DRIVE1', immediate=False, timeout=1.0)
    ['DRIVE1', 'DRIVE1\\r\\r\\n', 'DRIVE1', None, []]

    Same command with a typo.

    >>> ar.send_command('DRIV1', immediate=False, timeout=1.0)
    ['DRIV1', 'DRIV1\\r*UNDEFINED_LABEL\\r\\r\\n', 'DRIV1',
        'UNDEFINED_LABEL', []]

    """
    # Execute the command till it either doesn't have an error or
    # the maximum number of retries is exceeded.
    for attempt in range(max_retries + 1):
        # Send the command and stuff the sanitized version in a
        # list. Then process the response and add it to the list.
        response = [self._send_command(command,
                                       immediate=immediate)]
        output = self._get_response(timeout=timeout, eor=eor)
        # If echo checking was done, the echo was already grabbed,
        # is identical to the command, and needs to be placed back
        # in front of the output so that it can be processed
        # properly.
        if self._check_echo:
            output = response[0] + output
        response.extend(self._process_response(output))
        # We are done if there is no error.
        if not self.command_error(response):
            break
        # Put in a slight pause so the drive has a bit of breathing
        # time between retries -- but not after the final attempt,
        # where it would only delay the return.
        if attempt < max_retries:
            time.sleep(0.25)
    return response
def send_commands(self, commands, timeout=1.0,
max_retries=1, eor=('\n', '\n- ')):
""" Send a sequence of commands to the drive and collect output.
Takes a sequence of many commands and executes them one by one
till either all are executed or one runs out of retries
(`max_retries`). Retries are optionally performed if a command's
repsonse indicates that there was an error. Remaining commands
are not executed. The processed output of the final execution
(last try or retry) of each command that was actually executed
is returned.
This function basically feeds commands one by one to
``send_command`` and collates the outputs.
Parameters
----------
commands : iterable of str
Iterable of commands to send to the drive. Each command must
be an ``str``.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
End Of Resonse. An EOR is either a ``str`` or an iterable
of ``str`` that denote the possible endings of a response.
'eor' can be a single EOR, in which case it is used for all
commands, or it can be an iterable of EOR to use for each
individual command. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
outputs : list of lists
``list`` composed of the processed responses of each command
in the order that they were done up to and including the
last command executed. See ``send_command`` for the format
of processed responses.
See Also
--------
send_command : Send a single command.
Examples
--------
A sequence of commands to energize the motor, move it a bit away
from the starting position, and then do 4 forward/reverse
cycles, and de-energize the motor. **DO NOT** try these specific
movement distances without checking that the motion won't damage
something (very motor and application specific).
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ra = ASCII_RS232('/dev/ttyS1')
>>> ra.send_commands(['DRIVE1', 'D-10000', 'GO']
... + ['D-10000','GO','D10000','GO']*4
... + [ 'DRIVE0'])
[['DRIVE1', 'DRIVE1\\r', 'DRIVE1', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['DRIVE0', 'DRIVE0\\r', 'DRIVE0', None, []]]
"""
# If eor is not a list, make a list of it replicated enough for
# every command.
if not isinstance(eor, list):
eor = [eor]*len(commands)
# Do every command one by one, collecting the responses and
# stuffing them in a list. Commands that failed are retried, and
# we stop if the last retry is exhausted.
responses = []
for i, command in enumerate(commands):
rsp = self.send_command(command, timeout=timeout,
max_retries=max_retries,
eor=eor[i])
responses.append(rsp)
if self.command_error(rsp):
break
# Put in a slight pause so the drive has a bit of breathing
# time between commands.
time.sleep(0.25)
return responses
|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232.send_command | python | def send_command(self, command, immediate=False, timeout=1.0,
max_retries=0, eor=('\n', '\n- ')):
# Execute the command till it either doesn't have an error or
# the maximum number of retries is exceeded.
for i in range(0, max_retries+1):
# Send the command and stuff the sanitized version in a
# list. Then process the response and add it to the list.
response = [self._send_command(command,
immediate=immediate)]
output = self._get_response(timeout=timeout, eor=eor)
# If echo checking was done, the echo was already grabbed,
# is identical to the command, and needs to be placed back
# in front of the output so that it can be processed
# properly.
if self._check_echo:
output = response[0] + output
response.extend(self._process_response(output))
# We are done if there is no error.
if not self.command_error(response):
break
# Put in a slight pause so the drive has a bit of breathing
# time between retries.
time.sleep(0.25)
return response | Sends a single command to the drive and returns output.
Takes a single given `command`, sanitizes it, sends it to the
drive, reads the response, and returns the processed response.
The command is first sanitized by removing comments, extra
whitespace, and newline characters. If `immediate` is set, the
command is made to be an immediate command. Note, the command is
**NOT** checked for validity. If the drive returns an error, the
command is re-executed up to `max_retries` more times. The
response from the final execution is processed and returned. The
response from the drive is broken down into the echoed command
(drive echoes it back), any error returned by the drive (leading
'*' is stripped), and the different lines of the response; which
are all returned.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
``str`` or an iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
See Also
--------
send_commands : Send multiple commands.
Examples
--------
Simple command energizing the motor with no response and no
errors.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=False, timeout=1.0)
['DRIVE1', 'DRIVE1\\r\\r\\n', 'DRIVE1', None, []]
Same command but made immediate.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=True, timeout=1.0)
['!DRIVE1', '!DRIVE1\\r\\r\\n', '!DRIVE1', None, []]
Same command with a typo.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIV1', immediate=False, timeout=1.0)
['DRIV1', 'DRIV1\\r*UNDEFINED_LABEL\\r\\r\\n', 'DRIV1',
'UNDEFINED_LABEL', []]
Simple command asking whether the motor is energized or not.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE', immediate=False, timeout=1.0)
['DRIVE', 'DRIVE\\r*DRIVE1\\r\\r\\n', 'DRIVE', None,
['*DRIVE1']] | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L420-L527 | [
"def _send_command(self, command, immediate=False, timeout=1.0,\n check_echo=None):\n \"\"\" Send a single command to the drive after sanitizing it.\n\n Takes a single given `command`, sanitizes it (strips out\n comments, extra whitespace, and newlines), sends the command to\n the drive, and returns the sanitized command. The validity of\n the command is **NOT** checked.\n\n Parameters\n ----------\n command : str\n The command to send to the Gemini drive.\n immediate : bool, optional\n Whether to make it so the command is executed immediately or\n not.\n timeout : number, optional\n Optional timeout in seconds to use to get the command right\n when we are doing echo checking. A negative value or\n ``None`` indicates that the an infinite timeout should be\n used.\n check_echo : bool or None, optional\n Whether the echoing of the command as it is being written to\n the drive should be used to correct mistakes in what the\n drive is seeing, or whether the default set when the\n instance of this class was created should be used\n (``None``).\n\n Returns\n -------\n sanitized_command : str\n The sanitized command that was sent to the drive.\n\n \"\"\"\n # Use the default echo checking if None was given.\n if check_echo is None:\n check_echo = self._check_echo\n\n # Convert to bytes and then strip comments, whitespace, and\n # newlines.\n if sys.hexversion >= 0x03000000:\n c = bytes(command, encoding='ASCII')\n else:\n c = command\n c = c.split(b';')[0].strip()\n\n # If the command is supposed to be immediate, insure that it\n # starts with an '!'.\n if immediate and not c.startswith(b'!'):\n c = b'!' + c\n\n # Read out any junk on the serial port before we start.\n self._ser.read(self._ser.inWaiting())\n\n # The command needs to be written a character at a time with\n # pauses between them to make sure nothing gets lost or\n # corrupted. This is a simple loop if we are not checking the\n # echo. 
If we are, it is more complicated.\n if not check_echo:\n for i in range(0, len(c)):\n self._ser.write(bytes([c[i]]))\n time.sleep(0.01)\n else:\n # Infinite timeouts need to be converted to None. Finite\n # ones need to be checked to make sure they are not too big,\n # which is threading.TIMEOUT_MAX on Python 3.x and not\n # specified on Python 2.x (lets use a week).\n if timeout is None or timeout <= 0:\n timeout = None\n else:\n if sys.hexversion >= 0x03000000:\n maxtimeout = threading.TIMEOUT_MAX\n else:\n maxtimeout = 7*24*3600\n timeout = min(timeout, maxtimeout)\n\n # A timer will be made that takes timeout to finish. Then,\n # it is a matter of checking whether it is alive or not to\n # know whether the timeout was exceeded or not. Then, the\n # timer is started.\n tm = threading.Timer(timeout, lambda : None)\n tm.start()\n\n # Each character needs to be written one by one while the\n # echo is collected. If any mistakes occur, they need to be\n # corrected with backspaces b'\\x08'. The echo starts out\n # empty. We go until either the echo is identical to the\n # command or the timeout is exceeded.\n echo = b''\n while c != echo and tm.is_alive():\n # If there are no mistakes, then echo will be the\n # beginning of c meaning the next character can be\n # written. Otherwise, there is a mistake and a backspace\n # needs to be written.\n if c.startswith(echo):\n self._ser.write(bytes([c[len(echo)]]))\n else:\n self._ser.write(b'\\x08')\n\n # Pause for a bit to make sure nothing gets lost. Then\n # read the drive's output add it to the echo.\n time.sleep(0.01)\n echo += self._ser.read(self._ser.inWaiting())\n\n # All backspaces in echo need to be processed. 
Each\n # backspace deletes itself and the character before it\n # (if any).\n while b'\\x08' in echo:\n index = echo.index(b'\\x08')\n if index == 0:\n echo = echo[1:]\n else:\n echo = echo[0:(index-1)] + echo[(index+1):]\n\n # Turn off the timer in the case that it is still running\n # (command completely written before timeout).\n tm.cancel()\n\n # Write the carriage return to enter the command and then return\n # the sanitized command.\n self._ser.write(b'\\r')\n if sys.hexversion >= 0x03000000:\n return c.decode(errors='replace')\n else:\n return c\n",
"def _get_response(self, timeout=1.0, eor=('\\n', '\\n- ')):\n \"\"\" Reads a response from the drive.\n\n Reads the response returned by the drive with an optional\n timeout. All carriage returns and linefeeds are kept.\n\n Parameters\n ----------\n timeout : number, optional\n Optional timeout in seconds to use when reading the\n response. A negative value or ``None`` indicates that the\n an infinite timeout should be used.\n eor : str or iterable of str, optional\n ``str`` or iterable of ``str`` that denote the allowed\n End Of Response. For most commands, it should be\n ``('\\\\n', '\\\\n- ')``, but for running a program, it should\n be ``'*END\\\\n'``. The default is ``('\\\\n', '\\\\n- ')``.\n\n Returns\n -------\n response : str\n The response obtained from the drive. Carriage returns and\n linefeeds are preserved.\n\n \"\"\"\n # If no timeout is given or it is invalid and we are using '\\n'\n # as the eor, use the wrapper to read a line with an infinite\n # timeout. Otherwise, the reading and timeout must be\n # implemented manually.\n if (timeout is None or timeout < 0) and eor == '\\n':\n return self._sio.readline()\n else:\n # A timer will be made that takes timeout to finish. Then,\n # it is a matter of checking whether it is alive or not to\n # know whether the timeout was exceeded or not. They need to\n # be checked to make sure they are not too big, which is\n # threading.TIMEOUT_MAX on Python 3.x and not specified on\n # Python 2.x (lets use a week). Then, the timer is started.\n if sys.hexversion >= 0x03000000:\n maxtimeout = threading.TIMEOUT_MAX\n else:\n maxtimeout = 7*24*3600\n timeout = min(timeout, maxtimeout)\n tm = threading.Timer(timeout, lambda : None)\n tm.start()\n\n # eor needs to be converted to bytes. 
If it is just an str,\n # it needs to be wrapped in a tuple.\n if isinstance(eor, str):\n eor = tuple([eor])\n if sys.hexversion >= 0x03000000:\n eor = [s.encode(encoding='ASCII') for s in eor]\n\n # Read from the serial port into buf until the EOR is found\n # or the timer has stopped. A small pause is done each time\n # so that this thread doesn't hog the CPU.\n buf = b''\n while not any([(x in buf) for x in eor]) and tm.is_alive():\n time.sleep(0.001)\n buf += self._ser.read(self._ser.inWaiting())\n\n # Just in case the timer has not stopped (EOR was found),\n # stop it.\n tm.cancel()\n\n # Remove anything after the EOR if there is one. First, a\n # set of matches (index, eor_str) for each string in eor\n # needs to be constructed. Sorting the matches by their\n # index puts all the ones that were not found (index of -1)\n # at the front. Then a list of bools that are True for each\n # index that isn't -1 is made, converted to a bytes (True\n # goes to b'\\x01' and False goes to b'\\x00'), and then the\n # index of the first True value found. If it is not -1, then\n # there was a successful match and all the characters are\n # dropped after that eor_str.\n matches = [(buf.find(x), x) for x in eor]\n matches.sort(key=lambda x: x[0])\n index = bytes([x[0] != -1 for x in matches]).find(b'\\x01')\n if index != -1:\n buf = buf[:(matches[index][0] + len(matches[index][1]))]\n\n # Convert to an str before returning.\n if sys.hexversion >= 0x03000000:\n return buf.decode(errors='replace')\n else:\n return buf\n",
"def _process_response(self, response):\n \"\"\" Processes a response from the drive.\n\n Processes the response returned from the drive. It is broken\n down into the echoed command (drive echoes it back), any error\n returned by the drive (leading '*' is stripped), and the\n different lines of the response.\n\n Parameters\n ----------\n response : str\n The response returned by the drive.\n\n Returns\n -------\n processed_response : list\n A 4-element ``list``. The elements, in order, are `response`\n (``str``), the echoed command (``str``), any error response\n (``None`` if none, or the ``str`` of the error), and the\n lines of the response that are not the echo or error line\n (``list`` of ``str`` with newlines stripped).\n\n \"\"\"\n # Strip the trailing newline and split the response into lines\n # by carriage returns.\n rsp_lines = response.rstrip('\\r\\n').split('\\r')\n\n # If we have at least one line, the first one is the echoed\n # command. If available, it needs to be grabbed and that line\n # removed from rsp_lines since it is just the echoing, not the\n # actual response to the command. None will be used to denote a\n # non-existent echo.\n if len(rsp_lines) > 0:\n echoed_command = rsp_lines[0]\n del rsp_lines[0]\n else:\n echoed_command = None\n\n # If the next line is one of the different possible error\n # strings, then there was an error that must be grabbed (leading\n # '*' is stripped). If there was an error, remove that line from\n # the response. None will be used to denote the lack of an error.\n if len(rsp_lines) > 0 and \\\n rsp_lines[0] in ('*INVALID_ADDRESS', '*INVALID_DATA', \\\n '*INVALID_DATA_HIGH', '*INVALID_DATA_LOW', \\\n '*UNDEFINED_LABEL'):\n err = rsp_lines[0][1:]\n del rsp_lines[0]\n else:\n err = None\n\n return [response, echoed_command, err, rsp_lines]\n",
"def command_error(self, response):\n \"\"\" Checks whether a command produced an error.\n\n Checks whether a command procuded an error based on its\n processed response. The two types of errors are an error\n returned by the drive and the command that the drive received\n being different than the one that was sent (error in\n transmission).\n\n Parameters\n ----------\n response : processed response (list)\n The processed response ``list`` for the command that was\n executed.\n\n Returns\n -------\n error : bool\n ``True`` if there was an error and ``False`` otherwise.\n\n \"\"\"\n # The command should be echoed back accurately (might be\n # preceeded by a '- ' if it is part of a program definition) and\n # no errors should be returned, if it has no errors.\n return (response[2] not in [response[0], '- ' + response[0]]\n or response[3] is not None)\n"
] | class ASCII_RS232(object):
""" ASCII RS232 comm. driver for a Parker Motion Gemini drive.
Communications driver to talk to a Parker Motion Gemini drive in
ASCII mode over RS232.
Parameters
----------
port : serial port string
The serial port (RS232) that the Gemini drive is connected to.
check_echo : bool, optional
Whether the echoing of the commands as they are being written
to the drive should be used to correct mistakes in what the
drive is seeing or not as the default.
writeTimeout : float, optional
The write timeout for the RS232 port. See ``serial.Serial``.
interCharTimeout : float or None, optional
The inter-character timeout for writing on the RS232 port.
``None`` disables. See ``serial.Serial``.
Raises
------
serial.SerialException
If `port` does not correspond to an available RS232 port or
can't be opened.
Notes
-----
The ASCII communications settings of the Gemini drive are changed
while this object is connected and are returned to the default
values when this object is deleted. Thus, the values of the
communications settings before this object is created are lost.
See Also
--------
serial.Serial
"""
def __init__(self, port, check_echo=True, writeTimeout=1.0,
             interCharTimeout=0.002):
    """ Opens the serial port and configures the drive's ASCII mode.

    Parameters
    ----------
    port : str
        The serial port (RS232) that the Gemini drive is connected
        to.
    check_echo : bool, optional
        Default for whether command echoes should be used to
        correct transmission mistakes when sending commands.
    writeTimeout : float, optional
        The write timeout for the RS232 port. See ``serial.Serial``.
    interCharTimeout : float or None, optional
        The inter-character timeout for writing on the RS232 port.
        ``None`` disables. See ``serial.Serial``.

    Raises
    ------
    serial.SerialException
        If `port` does not correspond to an available RS232 port or
        can't be opened.

    """
    # Remember the default echo-checking behavior for later commands.
    self._check_echo = check_echo
    # Open the connection to the drive. Only the write timeout is set
    # explicitly here; read timeouts are handled manually elsewhere.
    self._ser = serial.Serial(port, baudrate=9600,
                              bytesize=serial.EIGHTBITS,
                              parity=serial.PARITY_NONE,
                              stopbits=serial.STOPBITS_ONE,
                              timeout=None,
                              writeTimeout=writeTimeout,
                              interCharTimeout=interCharTimeout,
                              xonxoff=True, rtscts=False,
                              dsrdtr=False)
    # A text wrapper around the serial port is convenient for reading
    # and writing line-oriented data.
    self._sio = io.TextIOWrapper(io.BufferedRWPair(self._ser,
                                 self._ser, 1), newline='\n',
                                 encoding='ASCII')
    # Configure the drive's ASCII communications: echo commands on,
    # error level 4, no response prefix characters, carriage returns
    # inside responses, '\n' terminated responses, and no error/ok
    # prompts. ECHO1 is the one command that cannot be echo-checked,
    # since echoing may not be enabled yet.
    self._send_command('ECHO1', check_echo=False, immediate=True)
    for cmd in ('ERRLVL4', 'BOT0,0,0', 'EOT10,0,0', 'EOL13,0,0',
                'ERRBAD0,0,0,0', 'ERROK0,0,0,0'):
        self._send_command(cmd, immediate=True)
    # Give the drive a moment to process everything, then throw away
    # all the responses.
    time.sleep(2)
    self._ser.read(self._ser.inWaiting())
def __del__(self):
    """ Returns all communications settings to their defaults.

    """
    # Restore the factory-default communications parameters (values
    # taken from the drive manual).
    defaults = ('ECHO1', 'ERRLVL4', 'BOT0,0,0', 'EOT13,0,0',
                'EOL13,10,0', 'ERRBAD13,10,63,32', 'ERROK13,10,62,32')
    for cmd in defaults:
        self._send_command(cmd, immediate=True)
    # Give the drive a moment to process everything, then throw away
    # all the responses.
    time.sleep(2)
    self._ser.read(self._ser.inWaiting())
def _send_command(self, command, immediate=False, timeout=1.0,
check_echo=None):
""" Send a single command to the drive after sanitizing it.
Takes a single given `command`, sanitizes it (strips out
comments, extra whitespace, and newlines), sends the command to
the drive, and returns the sanitized command. The validity of
the command is **NOT** checked.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : number, optional
Optional timeout in seconds to use to get the command right
when we are doing echo checking. A negative value or
``None`` indicates that the an infinite timeout should be
used.
check_echo : bool or None, optional
Whether the echoing of the command as it is being written to
the drive should be used to correct mistakes in what the
drive is seeing, or whether the default set when the
instance of this class was created should be used
(``None``).
Returns
-------
sanitized_command : str
The sanitized command that was sent to the drive.
"""
# Use the default echo checking if None was given.
if check_echo is None:
check_echo = self._check_echo
# Convert to bytes and then strip comments, whitespace, and
# newlines.
if sys.hexversion >= 0x03000000:
c = bytes(command, encoding='ASCII')
else:
c = command
c = c.split(b';')[0].strip()
# If the command is supposed to be immediate, insure that it
# starts with an '!'.
if immediate and not c.startswith(b'!'):
c = b'!' + c
# Read out any junk on the serial port before we start.
self._ser.read(self._ser.inWaiting())
# The command needs to be written a character at a time with
# pauses between them to make sure nothing gets lost or
# corrupted. This is a simple loop if we are not checking the
# echo. If we are, it is more complicated.
if not check_echo:
for i in range(0, len(c)):
self._ser.write(bytes([c[i]]))
time.sleep(0.01)
else:
# Infinite timeouts need to be converted to None. Finite
# ones need to be checked to make sure they are not too big,
# which is threading.TIMEOUT_MAX on Python 3.x and not
# specified on Python 2.x (lets use a week).
if timeout is None or timeout <= 0:
timeout = None
else:
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. Then, the
# timer is started.
tm = threading.Timer(timeout, lambda : None)
tm.start()
# Each character needs to be written one by one while the
# echo is collected. If any mistakes occur, they need to be
# corrected with backspaces b'\x08'. The echo starts out
# empty. We go until either the echo is identical to the
# command or the timeout is exceeded.
echo = b''
while c != echo and tm.is_alive():
# If there are no mistakes, then echo will be the
# beginning of c meaning the next character can be
# written. Otherwise, there is a mistake and a backspace
# needs to be written.
if c.startswith(echo):
self._ser.write(bytes([c[len(echo)]]))
else:
self._ser.write(b'\x08')
# Pause for a bit to make sure nothing gets lost. Then
# read the drive's output add it to the echo.
time.sleep(0.01)
echo += self._ser.read(self._ser.inWaiting())
# All backspaces in echo need to be processed. Each
# backspace deletes itself and the character before it
# (if any).
while b'\x08' in echo:
index = echo.index(b'\x08')
if index == 0:
echo = echo[1:]
else:
echo = echo[0:(index-1)] + echo[(index+1):]
# Turn off the timer in the case that it is still running
# (command completely written before timeout).
tm.cancel()
# Write the carriage return to enter the command and then return
# the sanitized command.
self._ser.write(b'\r')
if sys.hexversion >= 0x03000000:
return c.decode(errors='replace')
else:
return c
def _get_response(self, timeout=1.0, eor=('\n', '\n- ')):
""" Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved.
"""
# If no timeout is given or it is invalid and we are using '\n'
# as the eor, use the wrapper to read a line with an infinite
# timeout. Otherwise, the reading and timeout must be
# implemented manually.
if (timeout is None or timeout < 0) and eor == '\n':
return self._sio.readline()
else:
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. They need to
# be checked to make sure they are not too big, which is
# threading.TIMEOUT_MAX on Python 3.x and not specified on
# Python 2.x (lets use a week). Then, the timer is started.
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
tm = threading.Timer(timeout, lambda : None)
tm.start()
# eor needs to be converted to bytes. If it is just an str,
# it needs to be wrapped in a tuple.
if isinstance(eor, str):
eor = tuple([eor])
if sys.hexversion >= 0x03000000:
eor = [s.encode(encoding='ASCII') for s in eor]
# Read from the serial port into buf until the EOR is found
# or the timer has stopped. A small pause is done each time
# so that this thread doesn't hog the CPU.
buf = b''
while not any([(x in buf) for x in eor]) and tm.is_alive():
time.sleep(0.001)
buf += self._ser.read(self._ser.inWaiting())
# Just in case the timer has not stopped (EOR was found),
# stop it.
tm.cancel()
# Remove anything after the EOR if there is one. First, a
# set of matches (index, eor_str) for each string in eor
# needs to be constructed. Sorting the matches by their
# index puts all the ones that were not found (index of -1)
# at the front. Then a list of bools that are True for each
# index that isn't -1 is made, converted to a bytes (True
# goes to b'\x01' and False goes to b'\x00'), and then the
# index of the first True value found. If it is not -1, then
# there was a successful match and all the characters are
# dropped after that eor_str.
matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
buf = buf[:(matches[index][0] + len(matches[index][1]))]
# Convert to an str before returning.
if sys.hexversion >= 0x03000000:
return buf.decode(errors='replace')
else:
return buf
def _process_response(self, response):
""" Processes a response from the drive.
Processes the response returned from the drive. It is broken
down into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response.
Parameters
----------
response : str
The response returned by the drive.
Returns
-------
processed_response : list
A 4-element ``list``. The elements, in order, are `response`
(``str``), the echoed command (``str``), any error response
(``None`` if none, or the ``str`` of the error), and the
lines of the response that are not the echo or error line
(``list`` of ``str`` with newlines stripped).
"""
# Strip the trailing newline and split the response into lines
# by carriage returns.
rsp_lines = response.rstrip('\r\n').split('\r')
# If we have at least one line, the first one is the echoed
# command. If available, it needs to be grabbed and that line
# removed from rsp_lines since it is just the echoing, not the
# actual response to the command. None will be used to denote a
# non-existent echo.
if len(rsp_lines) > 0:
echoed_command = rsp_lines[0]
del rsp_lines[0]
else:
echoed_command = None
# If the next line is one of the different possible error
# strings, then there was an error that must be grabbed (leading
# '*' is stripped). If there was an error, remove that line from
# the response. None will be used to denote the lack of an error.
if len(rsp_lines) > 0 and \
rsp_lines[0] in ('*INVALID_ADDRESS', '*INVALID_DATA', \
'*INVALID_DATA_HIGH', '*INVALID_DATA_LOW', \
'*UNDEFINED_LABEL'):
err = rsp_lines[0][1:]
del rsp_lines[0]
else:
err = None
return [response, echoed_command, err, rsp_lines]
def command_error(self, response):
""" Checks whether a command produced an error.
Checks whether a command procuded an error based on its
processed response. The two types of errors are an error
returned by the drive and the command that the drive received
being different than the one that was sent (error in
transmission).
Parameters
----------
response : processed response (list)
The processed response ``list`` for the command that was
executed.
Returns
-------
error : bool
``True`` if there was an error and ``False`` otherwise.
"""
# The command should be echoed back accurately (might be
# preceeded by a '- ' if it is part of a program definition) and
# no errors should be returned, if it has no errors.
return (response[2] not in [response[0], '- ' + response[0]]
or response[3] is not None)
def send_commands(self, commands, timeout=1.0,
max_retries=1, eor=('\n', '\n- ')):
""" Send a sequence of commands to the drive and collect output.
Takes a sequence of many commands and executes them one by one
till either all are executed or one runs out of retries
(`max_retries`). Retries are optionally performed if a command's
repsonse indicates that there was an error. Remaining commands
are not executed. The processed output of the final execution
(last try or retry) of each command that was actually executed
is returned.
This function basically feeds commands one by one to
``send_command`` and collates the outputs.
Parameters
----------
commands : iterable of str
Iterable of commands to send to the drive. Each command must
be an ``str``.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
End Of Resonse. An EOR is either a ``str`` or an iterable
of ``str`` that denote the possible endings of a response.
'eor' can be a single EOR, in which case it is used for all
commands, or it can be an iterable of EOR to use for each
individual command. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
outputs : list of lists
``list`` composed of the processed responses of each command
in the order that they were done up to and including the
last command executed. See ``send_command`` for the format
of processed responses.
See Also
--------
send_command : Send a single command.
Examples
--------
A sequence of commands to energize the motor, move it a bit away
from the starting position, and then do 4 forward/reverse
cycles, and de-energize the motor. **DO NOT** try these specific
movement distances without checking that the motion won't damage
something (very motor and application specific).
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ra = ASCII_RS232('/dev/ttyS1')
>>> ra.send_commands(['DRIVE1', 'D-10000', 'GO']
... + ['D-10000','GO','D10000','GO']*4
... + [ 'DRIVE0'])
[['DRIVE1', 'DRIVE1\\r', 'DRIVE1', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['DRIVE0', 'DRIVE0\\r', 'DRIVE0', None, []]]
"""
# If eor is not a list, make a list of it replicated enough for
# every command.
if not isinstance(eor, list):
eor = [eor]*len(commands)
# Do every command one by one, collecting the responses and
# stuffing them in a list. Commands that failed are retried, and
# we stop if the last retry is exhausted.
responses = []
for i, command in enumerate(commands):
rsp = self.send_command(command, timeout=timeout,
max_retries=max_retries,
eor=eor[i])
responses.append(rsp)
if self.command_error(rsp):
break
# Put in a slight pause so the drive has a bit of breathing
# time between commands.
time.sleep(0.25)
return responses
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.