repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
kejbaly2/metrique
metrique/utils.py
clear_stale_pids
def clear_stale_pids(pids, pid_dir='/tmp', prefix='', multi=False):
    '''
    Check for and remove any pid files whose pid has no corresponding
    running process (judged by the presence of a /proc/<pid> entry).

    :param pids: single pid (int/float) or list of pids to check
    :param pid_dir: directory where the pid files live
    :param prefix: optional pid file basename prefix; trailing '.'s stripped
    :param multi: pid file names embed the pid ('<prefix><pid>.pid')
    :returns: the subset of ``pids`` that are still running
    '''
    if isinstance(pids, (int, float, long)):
        pids = [pids]
    # normalize to a list of unicode pid strings for the comparisons below
    pids = str2list(pids, map_=unicode)
    # /proc has one directory per running pid -- linux-only approach
    procs = map(unicode, os.listdir('/proc'))
    running = [pid for pid in pids if pid in procs]
    logger.warn(
        "Found %s pids running: %s" % (len(running), running))
    # NOTE: rstrip('.') removes ALL trailing dots, not a single separator
    prefix = prefix.rstrip('.') if prefix else None
    for pid in pids:
        if prefix:
            _prefix = prefix
        else:
            _prefix = unicode(pid)
        # remove non-running procs
        if pid in running:
            continue
        if multi:
            # NOTE(review): without an explicit prefix, _prefix is the pid
            # itself, so this builds '<pid><pid>.pid' -- confirm intended
            pid_file = '%s%s.pid' % (_prefix, pid)
        else:
            pid_file = '%s.pid' % (_prefix)
        path = os.path.join(pid_dir, pid_file)
        if os.path.exists(path):
            logger.debug("Removing pidfile: %s" % path)
            try:
                remove_file(path)
            except OSError as e:
                # best-effort cleanup; report and keep going
                logger.warn(e)
    return running
python
def clear_stale_pids(pids, pid_dir='/tmp', prefix='', multi=False): 'check for and remove any pids which have no corresponding process' if isinstance(pids, (int, float, long)): pids = [pids] pids = str2list(pids, map_=unicode) procs = map(unicode, os.listdir('/proc')) running = [pid for pid in pids if pid in procs] logger.warn( "Found %s pids running: %s" % (len(running), running)) prefix = prefix.rstrip('.') if prefix else None for pid in pids: if prefix: _prefix = prefix else: _prefix = unicode(pid) # remove non-running procs if pid in running: continue if multi: pid_file = '%s%s.pid' % (_prefix, pid) else: pid_file = '%s.pid' % (_prefix) path = os.path.join(pid_dir, pid_file) if os.path.exists(path): logger.debug("Removing pidfile: %s" % path) try: remove_file(path) except OSError as e: logger.warn(e) return running
[ "def", "clear_stale_pids", "(", "pids", ",", "pid_dir", "=", "'/tmp'", ",", "prefix", "=", "''", ",", "multi", "=", "False", ")", ":", "if", "isinstance", "(", "pids", ",", "(", "int", ",", "float", ",", "long", ")", ")", ":", "pids", "=", "[", "...
check for and remove any pids which have no corresponding process
[ "check", "for", "and", "remove", "any", "pids", "which", "have", "no", "corresponding", "process" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L197-L227
kejbaly2/metrique
metrique/utils.py
daemonize
def daemonize(pid_file=None, cwd=None):
    """
    Detach a process from the controlling terminal and run it in the
    background as a daemon.

    Modified version of:
    code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/

    author = "Chad J. Schroeder"
    copyright = "Copyright (C) 2005 Chad J. Schroeder"

    :param pid_file: path to write the daemon's pid to
                     (defaults to '<pid>.pid' relative to cwd)
    :param cwd: working directory for the daemon (defaults to '/')
    """
    cwd = cwd or '/'
    try:
        pid = os.fork()
    except OSError as e:
        raise Exception("%s [%d]" % (e.strerror, e.errno))
    if (pid == 0):
        # The first child: become session leader, detach from the
        # controlling terminal.
        os.setsid()
        try:
            # Fork a second child so the daemon can never reacquire
            # a controlling terminal.
            pid = os.fork()
        except OSError as e:
            raise Exception("%s [%d]" % (e.strerror, e.errno))
        if (pid == 0):
            # The second child: this is the daemon process.
            os.chdir(cwd)
            os.umask(0)
        else:
            # Exit parent (the first child) of the second child.
            os._exit(0)
    else:
        # Exit parent of the first child.
        os._exit(0)
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = 1024
    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:
            # ERROR, fd wasn't open to begin with (ignored)
            pass
    # fd 0 is the lowest free descriptor now, so this becomes stdin.
    os.open('/dev/null', os.O_RDWR)  # standard input (0)
    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)  # standard output (1)
    os.dup2(0, 2)  # standard error (2)
    # only evaluated in the daemonized child, so getpid() is the daemon pid
    pid_file = pid_file or '%s.pid' % os.getpid()
    write_file(pid_file, os.getpid())
    return 0
python
def daemonize(pid_file=None, cwd=None): """ Detach a process from the controlling terminal and run it in the background as a daemon. Modified version of: code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/ author = "Chad J. Schroeder" copyright = "Copyright (C) 2005 Chad J. Schroeder" """ cwd = cwd or '/' try: pid = os.fork() except OSError as e: raise Exception("%s [%d]" % (e.strerror, e.errno)) if (pid == 0): # The first child. os.setsid() try: pid = os.fork() # Fork a second child. except OSError as e: raise Exception("%s [%d]" % (e.strerror, e.errno)) if (pid == 0): # The second child. os.chdir(cwd) os.umask(0) else: os._exit(0) # Exit parent (the first child) of the second child. else: os._exit(0) # Exit parent of the first child. maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if (maxfd == resource.RLIM_INFINITY): maxfd = 1024 # Iterate through and close all file descriptors. for fd in range(0, maxfd): try: os.close(fd) except OSError: # ERROR, fd wasn't open to begin with (ignored) pass os.open('/dev/null', os.O_RDWR) # standard input (0) # Duplicate standard input to standard output and standard error. os.dup2(0, 1) # standard output (1) os.dup2(0, 2) # standard error (2) pid_file = pid_file or '%s.pid' % os.getpid() write_file(pid_file, os.getpid()) return 0
[ "def", "daemonize", "(", "pid_file", "=", "None", ",", "cwd", "=", "None", ")", ":", "cwd", "=", "cwd", "or", "'/'", "try", ":", "pid", "=", "os", ".", "fork", "(", ")", "except", "OSError", "as", "e", ":", "raise", "Exception", "(", "\"%s [%d]\"",...
Detach a process from the controlling terminal and run it in the background as a daemon. Modified version of: code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/ author = "Chad J. Schroeder" copyright = "Copyright (C) 2005 Chad J. Schroeder"
[ "Detach", "a", "process", "from", "the", "controlling", "terminal", "and", "run", "it", "in", "the", "background", "as", "a", "daemon", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L270-L320
kejbaly2/metrique
metrique/utils.py
cube_pkg_mod_cls
def cube_pkg_mod_cls(cube):
    '''
    Split a cube slug into its (package, module, class) import triple.

    Converts 'pkg_mod' -> pkg, mod, Cls

    eg: tw_tweet -> tw, tweet, Tweet

    Assumes `Metrique Cube Naming Convention` is used

    :param cube:
        cube name to use when searching for cube pkg.mod.class to load
    '''
    parts = cube.split('_')
    package = parts[0]
    remainder = parts[1:]
    module = '_'.join(remainder)
    # CamelCase the class name from the module words
    class_name = ''
    for word in remainder:
        class_name += word[0].upper() + word[1:]
    return package, module, class_name
python
def cube_pkg_mod_cls(cube): ''' Used to dynamically importing cube classes based on string slug name. Converts 'pkg_mod' -> pkg, mod, Cls eg: tw_tweet -> tw, tweet, Tweet Assumes `Metrique Cube Naming Convention` is used :param cube: cube name to use when searching for cube pkg.mod.class to load ''' _cube = cube.split('_') pkg = _cube[0] mod = '_'.join(_cube[1:]) _cls = ''.join([s[0].upper() + s[1:] for s in _cube[1:]]) return pkg, mod, _cls
[ "def", "cube_pkg_mod_cls", "(", "cube", ")", ":", "_cube", "=", "cube", ".", "split", "(", "'_'", ")", "pkg", "=", "_cube", "[", "0", "]", "mod", "=", "'_'", ".", "join", "(", "_cube", "[", "1", ":", "]", ")", "_cls", "=", "''", ".", "join", ...
Used to dynamically importing cube classes based on string slug name. Converts 'pkg_mod' -> pkg, mod, Cls eg: tw_tweet -> tw, tweet, Tweet Assumes `Metrique Cube Naming Convention` is used :param cube: cube name to use when searching for cube pkg.mod.class to load
[ "Used", "to", "dynamically", "importing", "cube", "classes", "based", "on", "string", "slug", "name", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L327-L344
kejbaly2/metrique
metrique/utils.py
debug_setup
def debug_setup(logger=None, level=None, log2file=None, log_file=None,
                log_format=None, log_dir=None, log2stdout=None,
                truncate=False):
    '''
    Local object instance logger setup.

    Verbosity levels are determined as such::

        if level in [-1, False]:
            logger.setLevel(logging.WARN)
        elif level in [0, None]:
            logger.setLevel(logging.INFO)
        elif level in [True, 1, 2]:
            logger.setLevel(logging.DEBUG)

    If (level == 2) `logging.DEBUG` will be set even for
    the "root logger".

    Configuration options available for customized logger behaivor:
        * debug (bool)
        * log2stdout (bool)
        * log2file (bool)
        * log_file (path)
    '''
    log2stdout = False if log2stdout is None else log2stdout
    _log_format = "%(levelname)s.%(name)s.%(process)s:%(asctime)s:%(message)s"
    log_format = log_format or _log_format
    # a string format is compiled to a Formatter; a Formatter passes through
    if isinstance(log_format, basestring):
        log_format = logging.Formatter(log_format, "%Y%m%dT%H%M%S")
    # file logging is the default unless explicitly disabled
    log2file = True if log2file is None else log2file
    logger = logger or 'metrique'
    if isinstance(logger, basestring):
        logger = logging.getLogger(logger)
    else:
        # already a logger instance; the `or` fallback is unreachable
        # here since a falsy logger was replaced by 'metrique' above
        logger = logger or logging.getLogger(logger)
    logger.propagate = 0
    # drop any previously attached handlers so repeated setup calls
    # don't duplicate output
    logger.handlers = []
    if log2file:
        log_dir = log_dir or LOGS_DIR
        log_file = log_file or 'metrique'
        log_file = os.path.join(log_dir, log_file)
        if truncate:
            # clear the existing data before writing (truncate)
            open(log_file, 'w+').close()
        hdlr = logging.FileHandler(log_file)
        hdlr.setFormatter(log_format)
        logger.addHandler(hdlr)
    else:
        # no file handler at all -> force stdout so output isn't lost
        log2stdout = True
    if log2stdout:
        hdlr = logging.StreamHandler()
        hdlr.setFormatter(log_format)
        logger.addHandler(hdlr)
    logger = _debug_set_level(logger, level)
    return logger
python
def debug_setup(logger=None, level=None, log2file=None, log_file=None, log_format=None, log_dir=None, log2stdout=None, truncate=False): ''' Local object instance logger setup. Verbosity levels are determined as such:: if level in [-1, False]: logger.setLevel(logging.WARN) elif level in [0, None]: logger.setLevel(logging.INFO) elif level in [True, 1, 2]: logger.setLevel(logging.DEBUG) If (level == 2) `logging.DEBUG` will be set even for the "root logger". Configuration options available for customized logger behaivor: * debug (bool) * log2stdout (bool) * log2file (bool) * log_file (path) ''' log2stdout = False if log2stdout is None else log2stdout _log_format = "%(levelname)s.%(name)s.%(process)s:%(asctime)s:%(message)s" log_format = log_format or _log_format if isinstance(log_format, basestring): log_format = logging.Formatter(log_format, "%Y%m%dT%H%M%S") log2file = True if log2file is None else log2file logger = logger or 'metrique' if isinstance(logger, basestring): logger = logging.getLogger(logger) else: logger = logger or logging.getLogger(logger) logger.propagate = 0 logger.handlers = [] if log2file: log_dir = log_dir or LOGS_DIR log_file = log_file or 'metrique' log_file = os.path.join(log_dir, log_file) if truncate: # clear the existing data before writing (truncate) open(log_file, 'w+').close() hdlr = logging.FileHandler(log_file) hdlr.setFormatter(log_format) logger.addHandler(hdlr) else: log2stdout = True if log2stdout: hdlr = logging.StreamHandler() hdlr.setFormatter(log_format) logger.addHandler(hdlr) logger = _debug_set_level(logger, level) return logger
[ "def", "debug_setup", "(", "logger", "=", "None", ",", "level", "=", "None", ",", "log2file", "=", "None", ",", "log_file", "=", "None", ",", "log_format", "=", "None", ",", "log_dir", "=", "None", ",", "log2stdout", "=", "None", ",", "truncate", "=", ...
Local object instance logger setup. Verbosity levels are determined as such:: if level in [-1, False]: logger.setLevel(logging.WARN) elif level in [0, None]: logger.setLevel(logging.INFO) elif level in [True, 1, 2]: logger.setLevel(logging.DEBUG) If (level == 2) `logging.DEBUG` will be set even for the "root logger". Configuration options available for customized logger behaivor: * debug (bool) * log2stdout (bool) * log2file (bool) * log_file (path)
[ "Local", "object", "instance", "logger", "setup", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L374-L429
kejbaly2/metrique
metrique/utils.py
dt2ts
def dt2ts(dt, drop_micro=False):
    '''
    Convert datetime (or datetime-like) objects to epoch seconds (float).

    Accepts None/empty (returns None), numeric timestamps (passed through
    as float), strings (tried as a number first, then parsed as a
    datetime) and datetime/date instances (converted via UTC timetuple).

    :param dt: value to convert
    :param drop_micro: truncate sub-second precision from the result
    '''
    is_true(HAS_DATEUTIL, "`pip install python_dateutil` required")
    if is_empty(dt, except_=False):
        ts = None
    elif isinstance(dt, (int, long, float)):
        # its a ts already
        ts = float(dt)
    elif isinstance(dt, basestring):
        # convert to datetime first
        try:
            # numeric strings are treated as timestamps directly
            parsed_dt = float(dt)
        except (TypeError, ValueError):
            parsed_dt = dt_parse(dt)
        # recurse with the parsed value (float or datetime)
        ts = dt2ts(parsed_dt)
    else:
        assert isinstance(dt, (datetime, date))
        # keep micros; see: http://stackoverflow.com/questions/7031031
        ts = ((
            timegm(dt.timetuple()) * 1000.0) +
            (dt.microsecond / 1000.0)) / 1000.0
    if ts is None:
        pass
    elif drop_micro:
        # truncate (not round) to whole seconds
        ts = float(int(ts))
    else:
        ts = float(ts)
    return ts
python
def dt2ts(dt, drop_micro=False): ''' convert datetime objects to timestamp seconds (float) ''' is_true(HAS_DATEUTIL, "`pip install python_dateutil` required") if is_empty(dt, except_=False): ts = None elif isinstance(dt, (int, long, float)): # its a ts already ts = float(dt) elif isinstance(dt, basestring): # convert to datetime first try: parsed_dt = float(dt) except (TypeError, ValueError): parsed_dt = dt_parse(dt) ts = dt2ts(parsed_dt) else: assert isinstance(dt, (datetime, date)) # keep micros; see: http://stackoverflow.com/questions/7031031 ts = (( timegm(dt.timetuple()) * 1000.0) + (dt.microsecond / 1000.0)) / 1000.0 if ts is None: pass elif drop_micro: ts = float(int(ts)) else: ts = float(ts) return ts
[ "def", "dt2ts", "(", "dt", ",", "drop_micro", "=", "False", ")", ":", "is_true", "(", "HAS_DATEUTIL", ",", "\"`pip install python_dateutil` required\"", ")", "if", "is_empty", "(", "dt", ",", "except_", "=", "False", ")", ":", "ts", "=", "None", "elif", "i...
convert datetime objects to timestamp seconds (float)
[ "convert", "datetime", "objects", "to", "timestamp", "seconds", "(", "float", ")" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L432-L457
kejbaly2/metrique
metrique/utils.py
get_cube
def get_cube(cube, init=False, pkgs=None, cube_paths=None, config=None,
             backends=None, **kwargs):
    '''
    Dynamically locate and load a metrique cube

    :param cube: name of the cube class to import from given module
    :param init: flag to request initialized instance or uninitialized class
    :param config: config dict to pass on initialization (implies init=True)
    :param pkgs: list of package names to search for the cubes in
    :param cube_paths: additional paths to search for modules in (sys.path)
    :param kwargs: additional kwargs to pass to cube during initialization
    :raises RuntimeError: if the cube can't be found in any package/path
    '''
    pkgs = pkgs or ['cubes']
    pkgs = [pkgs] if isinstance(pkgs, basestring) else pkgs
    # search in the given path too, if provided
    cube_paths = cube_paths or []
    if isinstance(cube_paths, basestring):
        cube_paths = [cube_paths]
    cube_paths = [os.path.expanduser(path) for path in cube_paths]
    # append paths which don't already exist in sys.path to sys.path
    # (plain loop instead of a side-effect list comprehension)
    for path in cube_paths:
        if path not in sys.path:
            sys.path.append(path)
    pkgs = pkgs + DEFAULT_PKGS
    # BUGFIX: keep the last ImportError in a name that outlives the
    # except block; `except ImportError as err` unbinds `err` at the end
    # of the handler in Python 3, so referencing it in the for/else
    # clause would raise NameError instead of reporting the real error.
    last_err = None
    _cube = None
    for pkg in pkgs:
        try:
            _cube = _load_cube_pkg(pkg, cube)
        except ImportError as e:
            last_err = e
            _cube = None
        if _cube:
            break
    else:
        # exhausted all packages without a hit; surface the last failure
        logger.error(last_err)
        raise RuntimeError('"%s" not found! %s; %s \n%s)' % (
            cube, pkgs, cube_paths, sys.path))
    if init:
        _cube = _cube(config=config, **kwargs)
    return _cube
python
def get_cube(cube, init=False, pkgs=None, cube_paths=None, config=None, backends=None, **kwargs): ''' Dynamically locate and load a metrique cube :param cube: name of the cube class to import from given module :param init: flag to request initialized instance or uninitialized class :param config: config dict to pass on initialization (implies init=True) :param pkgs: list of package names to search for the cubes in :param cube_path: additional paths to search for modules in (sys.path) :param kwargs: additional kwargs to pass to cube during initialization ''' pkgs = pkgs or ['cubes'] pkgs = [pkgs] if isinstance(pkgs, basestring) else pkgs # search in the given path too, if provided cube_paths = cube_paths or [] cube_paths_is_basestring = isinstance(cube_paths, basestring) cube_paths = [cube_paths] if cube_paths_is_basestring else cube_paths cube_paths = [os.path.expanduser(path) for path in cube_paths] # append paths which don't already exist in sys.path to sys.path [sys.path.append(path) for path in cube_paths if path not in sys.path] pkgs = pkgs + DEFAULT_PKGS err = False for pkg in pkgs: try: _cube = _load_cube_pkg(pkg, cube) except ImportError as err: _cube = None if _cube: break else: logger.error(err) raise RuntimeError('"%s" not found! %s; %s \n%s)' % ( cube, pkgs, cube_paths, sys.path)) if init: _cube = _cube(config=config, **kwargs) return _cube
[ "def", "get_cube", "(", "cube", ",", "init", "=", "False", ",", "pkgs", "=", "None", ",", "cube_paths", "=", "None", ",", "config", "=", "None", ",", "backends", "=", "None", ",", "*", "*", "kwargs", ")", ":", "pkgs", "=", "pkgs", "or", "[", "'cu...
Dynamically locate and load a metrique cube :param cube: name of the cube class to import from given module :param init: flag to request initialized instance or uninitialized class :param config: config dict to pass on initialization (implies init=True) :param pkgs: list of package names to search for the cubes in :param cube_path: additional paths to search for modules in (sys.path) :param kwargs: additional kwargs to pass to cube during initialization
[ "Dynamically", "locate", "and", "load", "a", "metrique", "cube" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L481-L520
kejbaly2/metrique
metrique/utils.py
get_timezone_converter
def get_timezone_converter(from_timezone, to_tz=None, tz_aware=False):
    '''
    Build and return a callable that converts datetime objects from
    the named source timezone to utc.

    Returns ``None`` when no source timezone name is given.

    :param from_timezone: timezone name as string
    :param to_tz: optional target timezone, forwarded to the converter
    :param tz_aware: forwarded to the converter; presumably controls
                     whether results keep tzinfo -- confirm downstream
    '''
    if not from_timezone:
        # nothing to convert from; the caller gets no converter at all
        return None
    # hard requirements for any timezone math below
    is_true(HAS_DATEUTIL, "`pip install python_dateutil` required")
    is_true(HAS_PYTZ, "`pip install pytz` required")
    source_tz = pytz.timezone(from_timezone)
    converter = partial(_get_timezone_converter, from_tz=source_tz,
                        to_tz=to_tz, tz_aware=tz_aware)
    return converter
python
def get_timezone_converter(from_timezone, to_tz=None, tz_aware=False): ''' return a function that converts a given datetime object from a timezone to utc :param from_timezone: timezone name as string ''' if not from_timezone: return None is_true(HAS_DATEUTIL, "`pip install python_dateutil` required") is_true(HAS_PYTZ, "`pip install pytz` required") from_tz = pytz.timezone(from_timezone) return partial(_get_timezone_converter, from_tz=from_tz, to_tz=to_tz, tz_aware=tz_aware)
[ "def", "get_timezone_converter", "(", "from_timezone", ",", "to_tz", "=", "None", ",", "tz_aware", "=", "False", ")", ":", "if", "not", "from_timezone", ":", "return", "None", "is_true", "(", "HAS_DATEUTIL", ",", "\"`pip install python_dateutil` required\"", ")", ...
return a function that converts a given datetime object from a timezone to utc :param from_timezone: timezone name as string
[ "return", "a", "function", "that", "converts", "a", "given", "datetime", "object", "from", "a", "timezone", "to", "utc" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L570-L583
kejbaly2/metrique
metrique/utils.py
git_clone
def git_clone(uri, pull=True, reflect=False, cache_dir=None, chdir=True):
    '''
    Given a git repo, clone (cache) it locally.

    :param uri: git repo uri
    :param pull: whether to pull after cloning (or loading cache)
    :param reflect: return a dulwich Repo object instead of the repo path
    :param cache_dir: base directory for locally cached repos
    :param chdir: change the current working directory to the checkout
    '''
    cache_dir = cache_dir or CACHE_DIR
    # make the uri safe for filesystems
    repo_path = os.path.expanduser(os.path.join(cache_dir, safestr(uri)))
    if not os.path.exists(repo_path):
        from_cache = False
        logger.info(
            'Locally caching git repo [%s] to [%s]' % (uri, repo_path))
        cmd = 'git clone %s %s' % (uri, repo_path)
        sys_call(cmd)
    else:
        from_cache = True
        logger.info(
            'GIT repo loaded from local cache [%s])' % (repo_path))
    # BUGFIX: pull when the repo came from the local cache, since only a
    # cached checkout can be stale; a freshly cloned repo is already up
    # to date. The original `not from_cache` test pulled exactly when it
    # was useless and never refreshed the cache. Also rely on sys_call's
    # cwd= instead of an unconditional os.chdir that ignored `chdir`.
    if pull and from_cache:
        cmd = 'git pull'
        sys_call(cmd, cwd=repo_path)
    if chdir:
        os.chdir(repo_path)
    if reflect:
        if not HAS_DULWICH:
            raise RuntimeError("`pip install dulwich` required!")
        return Repo(repo_path)
    else:
        return repo_path
python
def git_clone(uri, pull=True, reflect=False, cache_dir=None, chdir=True): ''' Given a git repo, clone (cache) it locally. :param uri: git repo uri :param pull: whether to pull after cloning (or loading cache) ''' cache_dir = cache_dir or CACHE_DIR # make the uri safe for filesystems repo_path = os.path.expanduser(os.path.join(cache_dir, safestr(uri))) if not os.path.exists(repo_path): from_cache = False logger.info( 'Locally caching git repo [%s] to [%s]' % (uri, repo_path)) cmd = 'git clone %s %s' % (uri, repo_path) sys_call(cmd) else: from_cache = True logger.info( 'GIT repo loaded from local cache [%s])' % (repo_path)) if pull and not from_cache: os.chdir(repo_path) cmd = 'git pull' sys_call(cmd, cwd=repo_path) if chdir: os.chdir(repo_path) if reflect: if not HAS_DULWICH: raise RuntimeError("`pip install dulwich` required!") return Repo(repo_path) else: return repo_path
[ "def", "git_clone", "(", "uri", ",", "pull", "=", "True", ",", "reflect", "=", "False", ",", "cache_dir", "=", "None", ",", "chdir", "=", "True", ")", ":", "cache_dir", "=", "cache_dir", "or", "CACHE_DIR", "# make the uri safe for filesystems", "repo_path", ...
Given a git repo, clone (cache) it locally. :param uri: git repo uri :param pull: whether to pull after cloning (or loading cache)
[ "Given", "a", "git", "repo", "clone", "(", "cache", ")", "it", "locally", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L586-L617
kejbaly2/metrique
metrique/utils.py
is_empty
def is_empty(value, msg=None, except_=None, inc_zeros=True):
    '''
    is defined, but null or empty like value

    :param value: value to test
    :param msg: message forwarded to is_true when except_ is set
    :param except_: raise (via is_true) instead of returning the result
    :param inc_zeros: whether members of ZEROS count as empty
    '''
    if hasattr(value, 'empty'):
        # dataframes must check for .empty
        # since they don't define truth value attr
        # take the negative, since below we're
        # checking for cases where value 'is_null'
        value = not bool(value.empty)
    elif inc_zeros and value in ZEROS:
        # also consider 0, 0.0, 0L as 'empty'
        # will check for the negative below
        # NOTE(review): setting value = True makes `not value` False
        # below, i.e. ZEROS members are reported as NOT empty -- which
        # contradicts the comment above. Suspect `value = False` (or a
        # `not inc_zeros` condition) was intended; confirm against
        # callers/tests before changing.
        value = True
    else:
        pass
    _is_null = is_null(value, except_=False)
    result = bool(_is_null or not value)
    if except_:
        return is_true(result, msg=msg, except_=except_)
    else:
        return bool(result)
python
def is_empty(value, msg=None, except_=None, inc_zeros=True): ''' is defined, but null or empty like value ''' if hasattr(value, 'empty'): # dataframes must check for .empty # since they don't define truth value attr # take the negative, since below we're # checking for cases where value 'is_null' value = not bool(value.empty) elif inc_zeros and value in ZEROS: # also consider 0, 0.0, 0L as 'empty' # will check for the negative below value = True else: pass _is_null = is_null(value, except_=False) result = bool(_is_null or not value) if except_: return is_true(result, msg=msg, except_=except_) else: return bool(result)
[ "def", "is_empty", "(", "value", ",", "msg", "=", "None", ",", "except_", "=", "None", ",", "inc_zeros", "=", "True", ")", ":", "if", "hasattr", "(", "value", ",", "'empty'", ")", ":", "# dataframes must check for .empty", "# since they don't define truth value ...
is defined, but null or empty like value
[ "is", "defined", "but", "null", "or", "empty", "like", "value" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L637-L658
kejbaly2/metrique
metrique/utils.py
is_null
def is_null(value, msg=None, except_=None):
    '''
    Report whether *value* is undefined: None, NaN (x != x) or a
    pandas NaT (detected via its repr).

    :param value: value to test
    :param msg: message forwarded to is_true when except_ is set
    :param except_: raise (via is_true) instead of returning the result
    '''
    # dataframes, even if empty, are never treated as null
    if hasattr(value, 'empty'):
        value = False
    null_like = (value is None
                 or value != value
                 or repr(value) == 'NaT')
    result = bool(null_like)
    if except_:
        return is_true(result, msg=msg, except_=except_)
    return bool(result)
python
def is_null(value, msg=None, except_=None): ''' ie, "is not defined" ''' # dataframes, even if empty, are not considered null value = False if hasattr(value, 'empty') else value result = bool( value is None or value != value or repr(value) == 'NaT') if except_: return is_true(result, msg=msg, except_=except_) else: return bool(result)
[ "def", "is_null", "(", "value", ",", "msg", "=", "None", ",", "except_", "=", "None", ")", ":", "# dataframes, even if empty, are not considered null", "value", "=", "False", "if", "hasattr", "(", "value", ",", "'empty'", ")", "else", "value", "result", "=", ...
ie, "is not defined"
[ "ie", "is", "not", "defined" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L669-L682
kejbaly2/metrique
metrique/utils.py
json_encode_default
def json_encode_default(obj):
    '''
    JSON ``default`` hook: datetime/date values become epoch
    timestamps; anything else is delegated to the shared json encoder.

    :param obj: value to (possibly) convert
    '''
    is_date_like = isinstance(obj, (datetime, date))
    result = dt2ts(obj) if is_date_like else json_encoder.default(obj)
    return to_encoding(result)
python
def json_encode_default(obj): ''' Convert datetime.datetime to timestamp :param obj: value to (possibly) convert ''' if isinstance(obj, (datetime, date)): result = dt2ts(obj) else: result = json_encoder.default(obj) return to_encoding(result)
[ "def", "json_encode_default", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "datetime", ",", "date", ")", ")", ":", "result", "=", "dt2ts", "(", "obj", ")", "else", ":", "result", "=", "json_encoder", ".", "default", "(", "obj", ")"...
Convert datetime.datetime to timestamp :param obj: value to (possibly) convert
[ "Convert", "datetime", ".", "datetime", "to", "timestamp" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L706-L716
kejbaly2/metrique
metrique/utils.py
jsonhash
def jsonhash(obj, root=True, exclude=None, hash_func=_jsonhash_sha1):
    '''
    Calculate the object's hash based on all field values.

    Mappings are normalized to sorted (key, hash) tuples and lists to
    tuples of member hashes, recursively, so structurally equal
    mappings hash identically regardless of key order.

    :param obj: object to hash
    :param root: internal recursion flag; True only for the outer call
    :param exclude: iterable of top-level keys to ignore (root only)
    :param hash_func: callable producing the final digest
    '''
    if isinstance(obj, Mapping):
        # assumption: using in against set() is faster than in against list()
        if root and exclude:
            obj = {k: v for k, v in obj.iteritems() if k not in exclude}
        # frozenset's don't guarantee order; use sorted tuples
        # which means different python interpreters can return
        # back frozensets with different hash values even when
        # the content of the object is exactly the same
        result = sorted(
            (k, jsonhash(v, False)) for k, v in obj.iteritems())
    elif isinstance(obj, list):
        # FIXME: should lists be sorted for consistent hashes?
        # when the object is the same, just different list order?
        result = tuple(jsonhash(e, False) for e in obj)
    else:
        # scalars (and anything unrecognized) pass through unchanged
        result = obj
    if root:
        # only the outermost call applies the digest function
        result = unicode(hash_func(result))
    return result
python
def jsonhash(obj, root=True, exclude=None, hash_func=_jsonhash_sha1): ''' calculate the objects hash based on all field values ''' if isinstance(obj, Mapping): # assumption: using in against set() is faster than in against list() if root and exclude: obj = {k: v for k, v in obj.iteritems() if k not in exclude} # frozenset's don't guarantee order; use sorted tuples # which means different python interpreters can return # back frozensets with different hash values even when # the content of the object is exactly the same result = sorted( (k, jsonhash(v, False)) for k, v in obj.iteritems()) elif isinstance(obj, list): # FIXME: should lists be sorted for consistent hashes? # when the object is the same, just different list order? result = tuple(jsonhash(e, False) for e in obj) else: result = obj if root: result = unicode(hash_func(result)) return result
[ "def", "jsonhash", "(", "obj", ",", "root", "=", "True", ",", "exclude", "=", "None", ",", "hash_func", "=", "_jsonhash_sha1", ")", ":", "if", "isinstance", "(", "obj", ",", "Mapping", ")", ":", "# assumption: using in against set() is faster than in against list(...
calculate the objects hash based on all field values
[ "calculate", "the", "objects", "hash", "based", "on", "all", "field", "values" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L723-L745
kejbaly2/metrique
metrique/utils.py
load
def load(path, filetype=None, as_df=False, retries=None, _oid=None,
         quiet=False, **kwargs):
    '''Load multiple files from various file types automatically.

    Supports glob paths, eg::

        path = 'data/*.csv'

    Filetypes are autodetected by common extension strings.

    Currently supports loadings from:
        * csv (pd.read_csv)
        * json (pd.read_json)

    :param path: glob/url/path to load, or a raw pd.DataFrame
    :param filetype: override filetype autodetection
    :param as_df: return a pd.DataFrame instead of a list of objects
    :param retries: retry count forwarded to urlretrieve for http(s) paths
    :param _oid: _oid generator forwarded to set_oid_func
    :param quiet: don't raise when nothing was extracted
    :param kwargs: additional filetype loader method kwargs
    '''
    is_true(HAS_PANDAS, "`pip install pandas` required")
    set_oid = set_oid_func(_oid)
    # kwargs are for passing ftype load options (csv.delimiter, etc)
    # expect the use of globs; eg, file* might result in fileN (file1,
    # file2, file3), etc
    if not isinstance(path, basestring):
        # assume we're getting a raw dataframe
        objects = path
        if not isinstance(objects, pd.DataFrame):
            raise ValueError("loading raw values must be DataFrames")
    elif re.match('https?://', path):
        # remote source: download to a temp file, load, then clean up
        logger.debug('Saving %s to tmp file' % path)
        _path = urlretrieve(path, retries)
        logger.debug('%s saved to tmp file: %s' % (path, _path))
        try:
            objects = load_file(_path, filetype, **kwargs)
        finally:
            remove_file(_path)
    else:
        path = re.sub('^file://', '', path)
        path = os.path.expanduser(path)
        # assume relative to cwd if not already absolute path
        path = path if os.path.isabs(path) else pjoin(os.getcwd(), path)
        files = sorted(glob.glob(os.path.expanduser(path)))
        if not files:
            raise IOError("failed to load: %s" % path)
        # build up a single dataframe by concatting
        # all globbed files together
        objects = []
        [objects.extend(load_file(ds, filetype, **kwargs)) for ds in files]
    if is_empty(objects, except_=False) and not quiet:
        raise RuntimeError("no objects extracted!")
    else:
        logger.debug("Data loaded successfully from %s" % path)
    if set_oid:
        # set _oids, if we have a _oid generator func defined
        objects = [set_oid(o) for o in objects]
    if as_df:
        return pd.DataFrame(objects)
    else:
        return objects
python
def load(path, filetype=None, as_df=False, retries=None, _oid=None, quiet=False, **kwargs): '''Load multiple files from various file types automatically. Supports glob paths, eg:: path = 'data/*.csv' Filetypes are autodetected by common extension strings. Currently supports loadings from: * csv (pd.read_csv) * json (pd.read_json) :param path: path to config json file :param filetype: override filetype autodetection :param kwargs: additional filetype loader method kwargs ''' is_true(HAS_PANDAS, "`pip install pandas` required") set_oid = set_oid_func(_oid) # kwargs are for passing ftype load options (csv.delimiter, etc) # expect the use of globs; eg, file* might result in fileN (file1, # file2, file3), etc if not isinstance(path, basestring): # assume we're getting a raw dataframe objects = path if not isinstance(objects, pd.DataFrame): raise ValueError("loading raw values must be DataFrames") elif re.match('https?://', path): logger.debug('Saving %s to tmp file' % path) _path = urlretrieve(path, retries) logger.debug('%s saved to tmp file: %s' % (path, _path)) try: objects = load_file(_path, filetype, **kwargs) finally: remove_file(_path) else: path = re.sub('^file://', '', path) path = os.path.expanduser(path) # assume relative to cwd if not already absolute path path = path if os.path.isabs(path) else pjoin(os.getcwd(), path) files = sorted(glob.glob(os.path.expanduser(path))) if not files: raise IOError("failed to load: %s" % path) # buid up a single dataframe by concatting # all globbed files together objects = [] [objects.extend(load_file(ds, filetype, **kwargs)) for ds in files] if is_empty(objects, except_=False) and not quiet: raise RuntimeError("no objects extracted!") else: logger.debug("Data loaded successfully from %s" % path) if set_oid: # set _oids, if we have a _oid generator func defined objects = [set_oid(o) for o in objects] if as_df: return pd.DataFrame(objects) else: return objects
[ "def", "load", "(", "path", ",", "filetype", "=", "None", ",", "as_df", "=", "False", ",", "retries", "=", "None", ",", "_oid", "=", "None", ",", "quiet", "=", "False", ",", "*", "*", "kwargs", ")", ":", "is_true", "(", "HAS_PANDAS", ",", "\"`pip i...
Load multiple files from various file types automatically. Supports glob paths, eg:: path = 'data/*.csv' Filetypes are autodetected by common extension strings. Currently supports loadings from: * csv (pd.read_csv) * json (pd.read_json) :param path: path to config json file :param filetype: override filetype autodetection :param kwargs: additional filetype loader method kwargs
[ "Load", "multiple", "files", "from", "various", "file", "types", "automatically", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L762-L825
kejbaly2/metrique
metrique/utils.py
_load_cube_pkg
def _load_cube_pkg(pkg, cube): ''' NOTE: all items in fromlist must be strings ''' try: # First, assume the cube module is available # with the name exactly as written fromlist = map(str, [cube]) mcubes = __import__(pkg, fromlist=fromlist) return getattr(mcubes, cube) except AttributeError: # if that fails, try to guess the cube module # based on cube 'standard naming convention' # ie, group_cube -> from group.cube import CubeClass _pkg, _mod, _cls = cube_pkg_mod_cls(cube) fromlist = map(str, [_cls]) mcubes = __import__('%s.%s.%s' % (pkg, _pkg, _mod), fromlist=fromlist) return getattr(mcubes, _cls)
python
def _load_cube_pkg(pkg, cube): ''' NOTE: all items in fromlist must be strings ''' try: # First, assume the cube module is available # with the name exactly as written fromlist = map(str, [cube]) mcubes = __import__(pkg, fromlist=fromlist) return getattr(mcubes, cube) except AttributeError: # if that fails, try to guess the cube module # based on cube 'standard naming convention' # ie, group_cube -> from group.cube import CubeClass _pkg, _mod, _cls = cube_pkg_mod_cls(cube) fromlist = map(str, [_cls]) mcubes = __import__('%s.%s.%s' % (pkg, _pkg, _mod), fromlist=fromlist) return getattr(mcubes, _cls)
[ "def", "_load_cube_pkg", "(", "pkg", ",", "cube", ")", ":", "try", ":", "# First, assume the cube module is available", "# with the name exactly as written", "fromlist", "=", "map", "(", "str", ",", "[", "cube", "]", ")", "mcubes", "=", "__import__", "(", "pkg", ...
NOTE: all items in fromlist must be strings
[ "NOTE", ":", "all", "items", "in", "fromlist", "must", "be", "strings" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L844-L862
kejbaly2/metrique
metrique/utils.py
read_file
def read_file(rel_path, paths=None, raw=False, as_list=False, as_iter=False, *args, **kwargs): ''' find a file that lives somewhere within a set of paths and return its contents. Default paths include 'static_dir' ''' if not rel_path: raise ValueError("rel_path can not be null!") paths = str2list(paths) # try looking the file up in a directory called static relative # to SRC_DIR, eg assuming metrique git repo is in ~/metrique # we'd look in ~/metrique/static paths.extend([STATIC_DIR, os.path.join(SRC_DIR, 'static')]) paths = [os.path.expanduser(p) for p in set(paths)] for path in paths: path = os.path.join(path, rel_path) logger.debug("trying to read: %s " % path) if os.path.exists(path): break else: raise IOError("path %s does not exist!" % rel_path) args = args if args else ['rU'] fd = open(path, *args, **kwargs) if raw: return fd if as_iter: return read_in_chunks(fd) else: fd_lines = fd.readlines() if as_list: return fd_lines else: return ''.join(fd_lines)
python
def read_file(rel_path, paths=None, raw=False, as_list=False, as_iter=False, *args, **kwargs): ''' find a file that lives somewhere within a set of paths and return its contents. Default paths include 'static_dir' ''' if not rel_path: raise ValueError("rel_path can not be null!") paths = str2list(paths) # try looking the file up in a directory called static relative # to SRC_DIR, eg assuming metrique git repo is in ~/metrique # we'd look in ~/metrique/static paths.extend([STATIC_DIR, os.path.join(SRC_DIR, 'static')]) paths = [os.path.expanduser(p) for p in set(paths)] for path in paths: path = os.path.join(path, rel_path) logger.debug("trying to read: %s " % path) if os.path.exists(path): break else: raise IOError("path %s does not exist!" % rel_path) args = args if args else ['rU'] fd = open(path, *args, **kwargs) if raw: return fd if as_iter: return read_in_chunks(fd) else: fd_lines = fd.readlines() if as_list: return fd_lines else: return ''.join(fd_lines)
[ "def", "read_file", "(", "rel_path", ",", "paths", "=", "None", ",", "raw", "=", "False", ",", "as_list", "=", "False", ",", "as_iter", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "rel_path", ":", "raise", "Value...
find a file that lives somewhere within a set of paths and return its contents. Default paths include 'static_dir'
[ "find", "a", "file", "that", "lives", "somewhere", "within", "a", "set", "of", "paths", "and", "return", "its", "contents", ".", "Default", "paths", "include", "static_dir" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L1003-L1037
kejbaly2/metrique
metrique/utils.py
rupdate
def rupdate(source, target): ''' recursively update nested dictionaries see: http://stackoverflow.com/a/3233356/1289080 ''' for k, v in target.iteritems(): if isinstance(v, Mapping): r = rupdate(source.get(k, {}), v) source[k] = r else: source[k] = target[k] return source
python
def rupdate(source, target): ''' recursively update nested dictionaries see: http://stackoverflow.com/a/3233356/1289080 ''' for k, v in target.iteritems(): if isinstance(v, Mapping): r = rupdate(source.get(k, {}), v) source[k] = r else: source[k] = target[k] return source
[ "def", "rupdate", "(", "source", ",", "target", ")", ":", "for", "k", ",", "v", "in", "target", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "v", ",", "Mapping", ")", ":", "r", "=", "rupdate", "(", "source", ".", "get", "(", "k", "...
recursively update nested dictionaries see: http://stackoverflow.com/a/3233356/1289080
[ "recursively", "update", "nested", "dictionaries", "see", ":", "http", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "3233356", "/", "1289080" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L1094-L1104
kejbaly2/metrique
metrique/utils.py
safestr
def safestr(str_): ''' get back an alphanumeric only version of source ''' str_ = str_ or "" return "".join(x for x in str_ if x.isalnum())
python
def safestr(str_): ''' get back an alphanumeric only version of source ''' str_ = str_ or "" return "".join(x for x in str_ if x.isalnum())
[ "def", "safestr", "(", "str_", ")", ":", "str_", "=", "str_", "or", "\"\"", "return", "\"\"", ".", "join", "(", "x", "for", "x", "in", "str_", "if", "x", ".", "isalnum", "(", ")", ")" ]
get back an alphanumeric only version of source
[ "get", "back", "an", "alphanumeric", "only", "version", "of", "source" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L1107-L1110
kejbaly2/metrique
metrique/utils.py
ts2dt
def ts2dt(ts, milli=False, tz_aware=False): ''' convert timestamp int's (seconds) to datetime objects ''' # anything already a datetime will still be returned # tz_aware, if set to true is_true(HAS_DATEUTIL, "`pip install python_dateutil` required") if isinstance(ts, (datetime, date)): pass elif is_empty(ts, except_=False): return None # its not a timestamp elif isinstance(ts, (int, float, long)) and ts < 0: return None elif isinstance(ts, basestring): try: ts = float(ts) except (TypeError, ValueError): # maybe we have a date like string already? try: ts = dt_parse(ts) except Exception: raise TypeError( "unable to derive datetime from timestamp string: %s" % ts) elif milli: ts = float(ts) / 1000. # convert milli to seconds else: ts = float(ts) # already in seconds return _get_datetime(ts, tz_aware)
python
def ts2dt(ts, milli=False, tz_aware=False): ''' convert timestamp int's (seconds) to datetime objects ''' # anything already a datetime will still be returned # tz_aware, if set to true is_true(HAS_DATEUTIL, "`pip install python_dateutil` required") if isinstance(ts, (datetime, date)): pass elif is_empty(ts, except_=False): return None # its not a timestamp elif isinstance(ts, (int, float, long)) and ts < 0: return None elif isinstance(ts, basestring): try: ts = float(ts) except (TypeError, ValueError): # maybe we have a date like string already? try: ts = dt_parse(ts) except Exception: raise TypeError( "unable to derive datetime from timestamp string: %s" % ts) elif milli: ts = float(ts) / 1000. # convert milli to seconds else: ts = float(ts) # already in seconds return _get_datetime(ts, tz_aware)
[ "def", "ts2dt", "(", "ts", ",", "milli", "=", "False", ",", "tz_aware", "=", "False", ")", ":", "# anything already a datetime will still be returned", "# tz_aware, if set to true", "is_true", "(", "HAS_DATEUTIL", ",", "\"`pip install python_dateutil` required\"", ")", "i...
convert timestamp int's (seconds) to datetime objects
[ "convert", "timestamp", "int", "s", "(", "seconds", ")", "to", "datetime", "objects" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L1269-L1294
kejbaly2/metrique
metrique/utils.py
urlretrieve
def urlretrieve(uri, saveas=None, retries=3, cache_dir=None): '''urllib.urlretrieve wrapper''' retries = int(retries) if retries else 3 # FIXME: make random filename (saveas) in cache_dir... # cache_dir = cache_dir or CACHE_DIR while retries: try: _path, headers = urllib.urlretrieve(uri, saveas) except Exception as e: retries -= 1 logger.warn( 'Failed getting uri "%s": %s (retry:%s in 1s)' % ( uri, e, retries)) time.sleep(.2) continue else: break else: raise RuntimeError("Failed to retrieve uri: %s" % uri) return _path
python
def urlretrieve(uri, saveas=None, retries=3, cache_dir=None): '''urllib.urlretrieve wrapper''' retries = int(retries) if retries else 3 # FIXME: make random filename (saveas) in cache_dir... # cache_dir = cache_dir or CACHE_DIR while retries: try: _path, headers = urllib.urlretrieve(uri, saveas) except Exception as e: retries -= 1 logger.warn( 'Failed getting uri "%s": %s (retry:%s in 1s)' % ( uri, e, retries)) time.sleep(.2) continue else: break else: raise RuntimeError("Failed to retrieve uri: %s" % uri) return _path
[ "def", "urlretrieve", "(", "uri", ",", "saveas", "=", "None", ",", "retries", "=", "3", ",", "cache_dir", "=", "None", ")", ":", "retries", "=", "int", "(", "retries", ")", "if", "retries", "else", "3", "# FIXME: make random filename (saveas) in cache_dir...",...
urllib.urlretrieve wrapper
[ "urllib", ".", "urlretrieve", "wrapper" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L1340-L1359
bitlabstudio/django-document-library
document_library/south_migrations/0019_set_persistent_categories_to_published.py
Migration.forwards
def forwards(self, orm): "Write your forwards methods here." for category in orm['document_library.DocumentCategory'].objects.all(): category.is_published = True category.save()
python
def forwards(self, orm): "Write your forwards methods here." for category in orm['document_library.DocumentCategory'].objects.all(): category.is_published = True category.save()
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "for", "category", "in", "orm", "[", "'document_library.DocumentCategory'", "]", ".", "objects", ".", "all", "(", ")", ":", "category", ".", "is_published", "=", "True", "category", ".", "save", "(", ...
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
train
https://github.com/bitlabstudio/django-document-library/blob/508737277455f182e81780cfca8d8eceb989a45b/document_library/south_migrations/0019_set_persistent_categories_to_published.py#L24-L28
michaelpb/omnic
omnic/web/shortcuts.py
reverse_media_url
def reverse_media_url(target_type, url_string, *args, **kwargs): ''' Given a target type and an resource URL, generates a valid URL to this via ''' args_str = '<%s>' % '><'.join(args) kwargs_str = '<%s>' % '><'.join('%s:%s' % pair for pair in kwargs.items()) url_str = ''.join([url_string, args_str, kwargs_str]) normalized_url = str(ResourceURL(url_str)) query_tuples = [] if singletons.settings.SECURITY and 'Sha1' in singletons.settings.SECURITY: secret = singletons.settings.HMAC_SECRET digest = get_hmac_sha1_digest(secret, normalized_url, target_type) query_tuples.append(('digest', digest)) # Add in URL as last querystring argument query_tuples.append(('url', normalized_url)) querystring = urlencode(query_tuples) scheme = singletons.settings.EXTERNAL_SCHEME host = singletons.settings.EXTERNAL_HOST port = singletons.settings.EXTERNAL_PORT if not host: host = singletons.settings.HOST if not port: port = singletons.settings.PORT port_suffix = ':%s' % port if port != 80 else '' typestring_normalized = str(TypeString(target_type)) return '%s://%s%s/media/%s/?%s' % ( scheme, host, port_suffix, typestring_normalized, querystring, )
python
def reverse_media_url(target_type, url_string, *args, **kwargs): ''' Given a target type and an resource URL, generates a valid URL to this via ''' args_str = '<%s>' % '><'.join(args) kwargs_str = '<%s>' % '><'.join('%s:%s' % pair for pair in kwargs.items()) url_str = ''.join([url_string, args_str, kwargs_str]) normalized_url = str(ResourceURL(url_str)) query_tuples = [] if singletons.settings.SECURITY and 'Sha1' in singletons.settings.SECURITY: secret = singletons.settings.HMAC_SECRET digest = get_hmac_sha1_digest(secret, normalized_url, target_type) query_tuples.append(('digest', digest)) # Add in URL as last querystring argument query_tuples.append(('url', normalized_url)) querystring = urlencode(query_tuples) scheme = singletons.settings.EXTERNAL_SCHEME host = singletons.settings.EXTERNAL_HOST port = singletons.settings.EXTERNAL_PORT if not host: host = singletons.settings.HOST if not port: port = singletons.settings.PORT port_suffix = ':%s' % port if port != 80 else '' typestring_normalized = str(TypeString(target_type)) return '%s://%s%s/media/%s/?%s' % ( scheme, host, port_suffix, typestring_normalized, querystring, )
[ "def", "reverse_media_url", "(", "target_type", ",", "url_string", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args_str", "=", "'<%s>'", "%", "'><'", ".", "join", "(", "args", ")", "kwargs_str", "=", "'<%s>'", "%", "'><'", ".", "join", "(", ...
Given a target type and an resource URL, generates a valid URL to this via
[ "Given", "a", "target", "type", "and", "an", "resource", "URL", "generates", "a", "valid", "URL", "to", "this", "via" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/web/shortcuts.py#L9-L46
JNRowe/pyisbn
pyisbn/__init__.py
_isbn_cleanse
def _isbn_cleanse(isbn, checksum=True): """Check ISBN is a string, and passes basic sanity checks. Args: isbn (str): SBN, ISBN-10 or ISBN-13 checksum (bool): ``True`` if ``isbn`` includes checksum character Returns: ``str``: ISBN with hyphenation removed, including when called with a SBN Raises: TypeError: ``isbn`` is not a ``str`` type IsbnError: Incorrect length for ``isbn`` IsbnError: Incorrect SBN or ISBN formatting """ if not isinstance(isbn, string_types): raise TypeError('ISBN must be a string, received %r' % isbn) if PY2 and isinstance(isbn, str): # pragma: Python 2 isbn = unicode(isbn) uni_input = False else: # pragma: Python 3 uni_input = True for dash in DASHES: isbn = isbn.replace(dash, unicode()) if checksum: if not isbn[:-1].isdigit(): raise IsbnError('non-digit parts') if len(isbn) == 9: isbn = '0' + isbn if len(isbn) == 10: if not (isbn[-1].isdigit() or isbn[-1] in 'Xx'): raise IsbnError('non-digit or X checksum') elif len(isbn) == 13: if not isbn[-1].isdigit(): raise IsbnError('non-digit checksum') if not isbn.startswith(('978', '979')): raise IsbnError('invalid Bookland region') else: raise IsbnError('ISBN must be either 10 or 13 characters long') else: if len(isbn) == 8: isbn = '0' + isbn elif len(isbn) == 12 and not isbn[:3].startswith(('978', '979')): raise IsbnError('invalid Bookland region') if not isbn.isdigit(): raise IsbnError('non-digit parts') if not len(isbn) in (9, 12): raise IsbnError('ISBN must be either 9 or 12 characters long ' 'without checksum') if PY2 and not uni_input: # pragma: Python 2 # Sadly, type ping-pong is required to maintain backwards compatibility # with previous pyisbn releases for Python 2 users. return str(isbn) else: # pragma: Python 3 return isbn
python
def _isbn_cleanse(isbn, checksum=True): """Check ISBN is a string, and passes basic sanity checks. Args: isbn (str): SBN, ISBN-10 or ISBN-13 checksum (bool): ``True`` if ``isbn`` includes checksum character Returns: ``str``: ISBN with hyphenation removed, including when called with a SBN Raises: TypeError: ``isbn`` is not a ``str`` type IsbnError: Incorrect length for ``isbn`` IsbnError: Incorrect SBN or ISBN formatting """ if not isinstance(isbn, string_types): raise TypeError('ISBN must be a string, received %r' % isbn) if PY2 and isinstance(isbn, str): # pragma: Python 2 isbn = unicode(isbn) uni_input = False else: # pragma: Python 3 uni_input = True for dash in DASHES: isbn = isbn.replace(dash, unicode()) if checksum: if not isbn[:-1].isdigit(): raise IsbnError('non-digit parts') if len(isbn) == 9: isbn = '0' + isbn if len(isbn) == 10: if not (isbn[-1].isdigit() or isbn[-1] in 'Xx'): raise IsbnError('non-digit or X checksum') elif len(isbn) == 13: if not isbn[-1].isdigit(): raise IsbnError('non-digit checksum') if not isbn.startswith(('978', '979')): raise IsbnError('invalid Bookland region') else: raise IsbnError('ISBN must be either 10 or 13 characters long') else: if len(isbn) == 8: isbn = '0' + isbn elif len(isbn) == 12 and not isbn[:3].startswith(('978', '979')): raise IsbnError('invalid Bookland region') if not isbn.isdigit(): raise IsbnError('non-digit parts') if not len(isbn) in (9, 12): raise IsbnError('ISBN must be either 9 or 12 characters long ' 'without checksum') if PY2 and not uni_input: # pragma: Python 2 # Sadly, type ping-pong is required to maintain backwards compatibility # with previous pyisbn releases for Python 2 users. return str(isbn) else: # pragma: Python 3 return isbn
[ "def", "_isbn_cleanse", "(", "isbn", ",", "checksum", "=", "True", ")", ":", "if", "not", "isinstance", "(", "isbn", ",", "string_types", ")", ":", "raise", "TypeError", "(", "'ISBN must be a string, received %r'", "%", "isbn", ")", "if", "PY2", "and", "isin...
Check ISBN is a string, and passes basic sanity checks. Args: isbn (str): SBN, ISBN-10 or ISBN-13 checksum (bool): ``True`` if ``isbn`` includes checksum character Returns: ``str``: ISBN with hyphenation removed, including when called with a SBN Raises: TypeError: ``isbn`` is not a ``str`` type IsbnError: Incorrect length for ``isbn`` IsbnError: Incorrect SBN or ISBN formatting
[ "Check", "ISBN", "is", "a", "string", "and", "passes", "basic", "sanity", "checks", "." ]
train
https://github.com/JNRowe/pyisbn/blob/653cb1798d4f231d552991a1011d6aba1c4de396/pyisbn/__init__.py#L385-L444
JNRowe/pyisbn
pyisbn/__init__.py
calculate_checksum
def calculate_checksum(isbn): """Calculate ISBN checksum. Args: isbn (str): SBN, ISBN-10 or ISBN-13 Returns: ``str``: Checksum for given ISBN or SBN """ isbn = [int(i) for i in _isbn_cleanse(isbn, checksum=False)] if len(isbn) == 9: products = [x * y for x, y in zip(isbn, range(1, 10))] check = sum(products) % 11 if check == 10: check = 'X' else: # As soon as Python 2.4 support is dumped # [(isbn[i] if i % 2 == 0 else isbn[i] * 3) for i in range(12)] products = [] for i in range(12): if i % 2 == 0: products.append(isbn[i]) else: products.append(isbn[i] * 3) check = 10 - sum(products) % 10 if check == 10: check = 0 return str(check)
python
def calculate_checksum(isbn): """Calculate ISBN checksum. Args: isbn (str): SBN, ISBN-10 or ISBN-13 Returns: ``str``: Checksum for given ISBN or SBN """ isbn = [int(i) for i in _isbn_cleanse(isbn, checksum=False)] if len(isbn) == 9: products = [x * y for x, y in zip(isbn, range(1, 10))] check = sum(products) % 11 if check == 10: check = 'X' else: # As soon as Python 2.4 support is dumped # [(isbn[i] if i % 2 == 0 else isbn[i] * 3) for i in range(12)] products = [] for i in range(12): if i % 2 == 0: products.append(isbn[i]) else: products.append(isbn[i] * 3) check = 10 - sum(products) % 10 if check == 10: check = 0 return str(check)
[ "def", "calculate_checksum", "(", "isbn", ")", ":", "isbn", "=", "[", "int", "(", "i", ")", "for", "i", "in", "_isbn_cleanse", "(", "isbn", ",", "checksum", "=", "False", ")", "]", "if", "len", "(", "isbn", ")", "==", "9", ":", "products", "=", "...
Calculate ISBN checksum. Args: isbn (str): SBN, ISBN-10 or ISBN-13 Returns: ``str``: Checksum for given ISBN or SBN
[ "Calculate", "ISBN", "checksum", "." ]
train
https://github.com/JNRowe/pyisbn/blob/653cb1798d4f231d552991a1011d6aba1c4de396/pyisbn/__init__.py#L447-L475
JNRowe/pyisbn
pyisbn/__init__.py
convert
def convert(isbn, code='978'): """Convert ISBNs between ISBN-10 and ISBN-13. Note: No attempt to hyphenate converted ISBNs is made, because the specification requires that *any* hyphenation must be correct but allows ISBNs without hyphenation. Args: isbn (str): SBN, ISBN-10 or ISBN-13 code (str): EAN Bookland code Returns: ``str``: Converted ISBN-10 or ISBN-13 Raise: IsbnError: When ISBN-13 isn't convertible to an ISBN-10 """ isbn = _isbn_cleanse(isbn) if len(isbn) == 10: isbn = code + isbn[:-1] return isbn + calculate_checksum(isbn) else: if isbn.startswith('978'): return isbn[3:-1] + calculate_checksum(isbn[3:-1]) else: raise IsbnError('Only ISBN-13s with 978 Bookland code can be ' 'converted to ISBN-10.')
python
def convert(isbn, code='978'): """Convert ISBNs between ISBN-10 and ISBN-13. Note: No attempt to hyphenate converted ISBNs is made, because the specification requires that *any* hyphenation must be correct but allows ISBNs without hyphenation. Args: isbn (str): SBN, ISBN-10 or ISBN-13 code (str): EAN Bookland code Returns: ``str``: Converted ISBN-10 or ISBN-13 Raise: IsbnError: When ISBN-13 isn't convertible to an ISBN-10 """ isbn = _isbn_cleanse(isbn) if len(isbn) == 10: isbn = code + isbn[:-1] return isbn + calculate_checksum(isbn) else: if isbn.startswith('978'): return isbn[3:-1] + calculate_checksum(isbn[3:-1]) else: raise IsbnError('Only ISBN-13s with 978 Bookland code can be ' 'converted to ISBN-10.')
[ "def", "convert", "(", "isbn", ",", "code", "=", "'978'", ")", ":", "isbn", "=", "_isbn_cleanse", "(", "isbn", ")", "if", "len", "(", "isbn", ")", "==", "10", ":", "isbn", "=", "code", "+", "isbn", "[", ":", "-", "1", "]", "return", "isbn", "+"...
Convert ISBNs between ISBN-10 and ISBN-13. Note: No attempt to hyphenate converted ISBNs is made, because the specification requires that *any* hyphenation must be correct but allows ISBNs without hyphenation. Args: isbn (str): SBN, ISBN-10 or ISBN-13 code (str): EAN Bookland code Returns: ``str``: Converted ISBN-10 or ISBN-13 Raise: IsbnError: When ISBN-13 isn't convertible to an ISBN-10
[ "Convert", "ISBNs", "between", "ISBN", "-", "10", "and", "ISBN", "-", "13", "." ]
train
https://github.com/JNRowe/pyisbn/blob/653cb1798d4f231d552991a1011d6aba1c4de396/pyisbn/__init__.py#L478-L506
JNRowe/pyisbn
pyisbn/__init__.py
Isbn.calculate_checksum
def calculate_checksum(self): """Calculate ISBN checksum. Returns: ``str``: ISBN checksum value """ if len(self.isbn) in (9, 12): return calculate_checksum(self.isbn) else: return calculate_checksum(self.isbn[:-1])
python
def calculate_checksum(self): """Calculate ISBN checksum. Returns: ``str``: ISBN checksum value """ if len(self.isbn) in (9, 12): return calculate_checksum(self.isbn) else: return calculate_checksum(self.isbn[:-1])
[ "def", "calculate_checksum", "(", "self", ")", ":", "if", "len", "(", "self", ".", "isbn", ")", "in", "(", "9", ",", "12", ")", ":", "return", "calculate_checksum", "(", "self", ".", "isbn", ")", "else", ":", "return", "calculate_checksum", "(", "self"...
Calculate ISBN checksum. Returns: ``str``: ISBN checksum value
[ "Calculate", "ISBN", "checksum", "." ]
train
https://github.com/JNRowe/pyisbn/blob/653cb1798d4f231d552991a1011d6aba1c4de396/pyisbn/__init__.py#L173-L183
JNRowe/pyisbn
pyisbn/__init__.py
Isbn.to_url
def to_url(self, site='amazon', country='us'): """Generate a link to an online book site. Args: site (str): Site to create link to country (str): Country specific version of ``site`` Returns: ``str``: URL on ``site`` for book Raises: SiteError: Unknown site value CountryError: Unknown country value """ try: try: url, tlds = URL_MAP[site] except ValueError: tlds = None url = URL_MAP[site] except KeyError: raise SiteError(site) inject = {'isbn': self._isbn} if tlds: if country not in tlds: raise CountryError(country) tld = tlds[country] if not tld: tld = country inject['tld'] = tld return url % inject
python
def to_url(self, site='amazon', country='us'): """Generate a link to an online book site. Args: site (str): Site to create link to country (str): Country specific version of ``site`` Returns: ``str``: URL on ``site`` for book Raises: SiteError: Unknown site value CountryError: Unknown country value """ try: try: url, tlds = URL_MAP[site] except ValueError: tlds = None url = URL_MAP[site] except KeyError: raise SiteError(site) inject = {'isbn': self._isbn} if tlds: if country not in tlds: raise CountryError(country) tld = tlds[country] if not tld: tld = country inject['tld'] = tld return url % inject
[ "def", "to_url", "(", "self", ",", "site", "=", "'amazon'", ",", "country", "=", "'us'", ")", ":", "try", ":", "try", ":", "url", ",", "tlds", "=", "URL_MAP", "[", "site", "]", "except", "ValueError", ":", "tlds", "=", "None", "url", "=", "URL_MAP"...
Generate a link to an online book site. Args: site (str): Site to create link to country (str): Country specific version of ``site`` Returns: ``str``: URL on ``site`` for book Raises: SiteError: Unknown site value CountryError: Unknown country value
[ "Generate", "a", "link", "to", "an", "online", "book", "site", "." ]
train
https://github.com/JNRowe/pyisbn/blob/653cb1798d4f231d552991a1011d6aba1c4de396/pyisbn/__init__.py#L206-L237
halfak/deltas
deltas/tokenizers/tokenizer.py
RegexTokenizer._tokenize
def _tokenize(self, text, token_class=None): """ Tokenizes a text :Returns: A `list` of tokens """ token_class = token_class or Token tokens = {} for i, match in enumerate(self.regex.finditer(text)): value = match.group(0) try: token = tokens[value] except KeyError: type = match.lastgroup token = token_class(value, type=type) tokens[value] = token yield token
python
def _tokenize(self, text, token_class=None): """ Tokenizes a text :Returns: A `list` of tokens """ token_class = token_class or Token tokens = {} for i, match in enumerate(self.regex.finditer(text)): value = match.group(0) try: token = tokens[value] except KeyError: type = match.lastgroup token = token_class(value, type=type) tokens[value] = token yield token
[ "def", "_tokenize", "(", "self", ",", "text", ",", "token_class", "=", "None", ")", ":", "token_class", "=", "token_class", "or", "Token", "tokens", "=", "{", "}", "for", "i", ",", "match", "in", "enumerate", "(", "self", ".", "regex", ".", "finditer",...
Tokenizes a text :Returns: A `list` of tokens
[ "Tokenizes", "a", "text" ]
train
https://github.com/halfak/deltas/blob/4173f4215b93426a877f4bb4a7a3547834e60ac3/deltas/tokenizers/tokenizer.py#L40-L60
knorby/facterpy
facter/__init__.py
_parse_cli_facter_results
def _parse_cli_facter_results(facter_results): '''Parse key value pairs printed with "=>" separators. YAML is preferred output scheme for facter. >>> list(_parse_cli_facter_results("""foo => bar ... baz => 1 ... foo_bar => True""")) [('foo', 'bar'), ('baz', '1'), ('foo_bar', 'True')] >>> list(_parse_cli_facter_results("""foo => bar ... babababababababab ... baz => 2""")) [('foo', 'bar\nbabababababababab'), ('baz', '2')] >>> list(_parse_cli_facter_results("""3434""")) Traceback (most recent call last): ... ValueError: parse error Uses a generator interface: >>> _parse_cli_facter_results("foo => bar").next() ('foo', 'bar') ''' last_key, last_value = None, [] for line in filter(None, facter_results.splitlines()): res = line.split(six.u(" => "), 1) if len(res)==1: if not last_key: raise ValueError("parse error") else: last_value.append(res[0]) else: if last_key: yield last_key, os.linesep.join(last_value) last_key, last_value = res[0], [res[1]] else: if last_key: yield last_key, os.linesep.join(last_value)
python
def _parse_cli_facter_results(facter_results): '''Parse key value pairs printed with "=>" separators. YAML is preferred output scheme for facter. >>> list(_parse_cli_facter_results("""foo => bar ... baz => 1 ... foo_bar => True""")) [('foo', 'bar'), ('baz', '1'), ('foo_bar', 'True')] >>> list(_parse_cli_facter_results("""foo => bar ... babababababababab ... baz => 2""")) [('foo', 'bar\nbabababababababab'), ('baz', '2')] >>> list(_parse_cli_facter_results("""3434""")) Traceback (most recent call last): ... ValueError: parse error Uses a generator interface: >>> _parse_cli_facter_results("foo => bar").next() ('foo', 'bar') ''' last_key, last_value = None, [] for line in filter(None, facter_results.splitlines()): res = line.split(six.u(" => "), 1) if len(res)==1: if not last_key: raise ValueError("parse error") else: last_value.append(res[0]) else: if last_key: yield last_key, os.linesep.join(last_value) last_key, last_value = res[0], [res[1]] else: if last_key: yield last_key, os.linesep.join(last_value)
[ "def", "_parse_cli_facter_results", "(", "facter_results", ")", ":", "last_key", ",", "last_value", "=", "None", ",", "[", "]", "for", "line", "in", "filter", "(", "None", ",", "facter_results", ".", "splitlines", "(", ")", ")", ":", "res", "=", "line", ...
Parse key value pairs printed with "=>" separators. YAML is preferred output scheme for facter. >>> list(_parse_cli_facter_results("""foo => bar ... baz => 1 ... foo_bar => True""")) [('foo', 'bar'), ('baz', '1'), ('foo_bar', 'True')] >>> list(_parse_cli_facter_results("""foo => bar ... babababababababab ... baz => 2""")) [('foo', 'bar\nbabababababababab'), ('baz', '2')] >>> list(_parse_cli_facter_results("""3434""")) Traceback (most recent call last): ... ValueError: parse error Uses a generator interface: >>> _parse_cli_facter_results("foo => bar").next() ('foo', 'bar')
[ "Parse", "key", "value", "pairs", "printed", "with", "=", ">", "separators", ".", "YAML", "is", "preferred", "output", "scheme", "for", "facter", "." ]
train
https://github.com/knorby/facterpy/blob/4799b020cc8c1bf69b2a828b90d6e20862771a33/facter/__init__.py#L12-L48
knorby/facterpy
facter/__init__.py
Facter.run_facter
def run_facter(self, key=None): """Run the facter executable with an optional specfic fact. Output is parsed to yaml if available and selected. Puppet facts are always selected. Returns a dictionary if no key is given, and the value if a key is passed.""" args = [self.facter_path] #this seems to not cause problems, but leaving it separate args.append("--puppet") if self.external_dir is not None: args.append('--external-dir') args.append(self.external_dir) if self.uses_yaml: args.append("--yaml") if key is not None: args.append(key) proc = subprocess.Popen(args, stdout=subprocess.PIPE) results = proc.stdout.read() if self.uses_yaml: parsed_results = yaml.load(results) if key is not None: return parsed_results[key] else: return parsed_results results = results.decode() if key is not None: return results.strip() else: return dict(_parse_cli_facter_results(results))
python
def run_facter(self, key=None): """Run the facter executable with an optional specfic fact. Output is parsed to yaml if available and selected. Puppet facts are always selected. Returns a dictionary if no key is given, and the value if a key is passed.""" args = [self.facter_path] #this seems to not cause problems, but leaving it separate args.append("--puppet") if self.external_dir is not None: args.append('--external-dir') args.append(self.external_dir) if self.uses_yaml: args.append("--yaml") if key is not None: args.append(key) proc = subprocess.Popen(args, stdout=subprocess.PIPE) results = proc.stdout.read() if self.uses_yaml: parsed_results = yaml.load(results) if key is not None: return parsed_results[key] else: return parsed_results results = results.decode() if key is not None: return results.strip() else: return dict(_parse_cli_facter_results(results))
[ "def", "run_facter", "(", "self", ",", "key", "=", "None", ")", ":", "args", "=", "[", "self", ".", "facter_path", "]", "#this seems to not cause problems, but leaving it separate", "args", ".", "append", "(", "\"--puppet\"", ")", "if", "self", ".", "external_di...
Run the facter executable with an optional specfic fact. Output is parsed to yaml if available and selected. Puppet facts are always selected. Returns a dictionary if no key is given, and the value if a key is passed.
[ "Run", "the", "facter", "executable", "with", "an", "optional", "specfic", "fact", ".", "Output", "is", "parsed", "to", "yaml", "if", "available", "and", "selected", ".", "Puppet", "facts", "are", "always", "selected", ".", "Returns", "a", "dictionary", "if"...
train
https://github.com/knorby/facterpy/blob/4799b020cc8c1bf69b2a828b90d6e20862771a33/facter/__init__.py#L66-L94
knorby/facterpy
facter/__init__.py
Facter.has_cache
def has_cache(self): """Intended to be called before any call that might access the cache. If the cache is not selected, then returns False, otherwise the cache is build if needed and returns True.""" if not self.cache_enabled: return False if self._cache is None: self.build_cache() return True
python
def has_cache(self): """Intended to be called before any call that might access the cache. If the cache is not selected, then returns False, otherwise the cache is build if needed and returns True.""" if not self.cache_enabled: return False if self._cache is None: self.build_cache() return True
[ "def", "has_cache", "(", "self", ")", ":", "if", "not", "self", ".", "cache_enabled", ":", "return", "False", "if", "self", ".", "_cache", "is", "None", ":", "self", ".", "build_cache", "(", ")", "return", "True" ]
Intended to be called before any call that might access the cache. If the cache is not selected, then returns False, otherwise the cache is build if needed and returns True.
[ "Intended", "to", "be", "called", "before", "any", "call", "that", "might", "access", "the", "cache", ".", "If", "the", "cache", "is", "not", "selected", "then", "returns", "False", "otherwise", "the", "cache", "is", "build", "if", "needed", "and", "return...
train
https://github.com/knorby/facterpy/blob/4799b020cc8c1bf69b2a828b90d6e20862771a33/facter/__init__.py#L104-L112
knorby/facterpy
facter/__init__.py
Facter.lookup
def lookup(self, fact, cache=True): """Return the value of a given fact and raise a KeyError if it is not available. If `cache` is False, force the lookup of the fact.""" if (not cache) or (not self.has_cache()): val = self.run_facter(fact) if val is None or val == '': raise KeyError(fact) return val return self._cache[fact]
python
def lookup(self, fact, cache=True): """Return the value of a given fact and raise a KeyError if it is not available. If `cache` is False, force the lookup of the fact.""" if (not cache) or (not self.has_cache()): val = self.run_facter(fact) if val is None or val == '': raise KeyError(fact) return val return self._cache[fact]
[ "def", "lookup", "(", "self", ",", "fact", ",", "cache", "=", "True", ")", ":", "if", "(", "not", "cache", ")", "or", "(", "not", "self", ".", "has_cache", "(", ")", ")", ":", "val", "=", "self", ".", "run_facter", "(", "fact", ")", "if", "val"...
Return the value of a given fact and raise a KeyError if it is not available. If `cache` is False, force the lookup of the fact.
[ "Return", "the", "value", "of", "a", "given", "fact", "and", "raise", "a", "KeyError", "if", "it", "is", "not", "available", ".", "If", "cache", "is", "False", "force", "the", "lookup", "of", "the", "fact", "." ]
train
https://github.com/knorby/facterpy/blob/4799b020cc8c1bf69b2a828b90d6e20862771a33/facter/__init__.py#L114-L123
9b/frisbee
frisbee/__init__.py
Frisbee._reset
def _reset(self) -> None: """Reset some of the state in the class for multi-searches.""" self.project: str = namesgenerator.get_random_name() self._processed: List = list() self.results: List = list()
python
def _reset(self) -> None: """Reset some of the state in the class for multi-searches.""" self.project: str = namesgenerator.get_random_name() self._processed: List = list() self.results: List = list()
[ "def", "_reset", "(", "self", ")", "->", "None", ":", "self", ".", "project", ":", "str", "=", "namesgenerator", ".", "get_random_name", "(", ")", "self", ".", "_processed", ":", "List", "=", "list", "(", ")", "self", ".", "results", ":", "List", "="...
Reset some of the state in the class for multi-searches.
[ "Reset", "some", "of", "the", "state", "in", "the", "class", "for", "multi", "-", "searches", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L48-L52
9b/frisbee
frisbee/__init__.py
Frisbee._config_bootstrap
def _config_bootstrap(self) -> None: """Handle the basic setup of the tool prior to user control. Bootstrap will load all the available modules for searching and set them up for use by this main class. """ if self.output: self.folder: str = os.getcwd() + "/" + self.project os.mkdir(self.folder)
python
def _config_bootstrap(self) -> None: """Handle the basic setup of the tool prior to user control. Bootstrap will load all the available modules for searching and set them up for use by this main class. """ if self.output: self.folder: str = os.getcwd() + "/" + self.project os.mkdir(self.folder)
[ "def", "_config_bootstrap", "(", "self", ")", "->", "None", ":", "if", "self", ".", "output", ":", "self", ".", "folder", ":", "str", "=", "os", ".", "getcwd", "(", ")", "+", "\"/\"", "+", "self", ".", "project", "os", ".", "mkdir", "(", "self", ...
Handle the basic setup of the tool prior to user control. Bootstrap will load all the available modules for searching and set them up for use by this main class.
[ "Handle", "the", "basic", "setup", "of", "the", "tool", "prior", "to", "user", "control", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L54-L62
9b/frisbee
frisbee/__init__.py
Frisbee._dyn_loader
def _dyn_loader(self, module: str, kwargs: str): """Dynamically load a specific module instance.""" package_directory: str = os.path.dirname(os.path.abspath(__file__)) modules: str = package_directory + "/modules" module = module + ".py" if module not in os.listdir(modules): raise Exception("Module %s is not valid" % module) module_name: str = module[:-3] import_path: str = "%s.%s" % (self.MODULE_PATH, module_name) imported = import_module(import_path) obj = getattr(imported, 'Module') return obj(**kwargs)
python
def _dyn_loader(self, module: str, kwargs: str): """Dynamically load a specific module instance.""" package_directory: str = os.path.dirname(os.path.abspath(__file__)) modules: str = package_directory + "/modules" module = module + ".py" if module not in os.listdir(modules): raise Exception("Module %s is not valid" % module) module_name: str = module[:-3] import_path: str = "%s.%s" % (self.MODULE_PATH, module_name) imported = import_module(import_path) obj = getattr(imported, 'Module') return obj(**kwargs)
[ "def", "_dyn_loader", "(", "self", ",", "module", ":", "str", ",", "kwargs", ":", "str", ")", ":", "package_directory", ":", "str", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "modules", ...
Dynamically load a specific module instance.
[ "Dynamically", "load", "a", "specific", "module", "instance", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L64-L75
9b/frisbee
frisbee/__init__.py
Frisbee._job_handler
def _job_handler(self) -> bool: """Process the work items.""" while True: try: task = self._unfullfilled.get_nowait() except queue.Empty: break else: self._log.debug("Job: %s" % str(task)) engine = self._dyn_loader(task['engine'], task) task['start_time'] = now_time() results = engine.search() task['end_time'] = now_time() duration: str = str((task['end_time'] - task['start_time']).seconds) task['duration'] = duration task.update({'results': results}) self._fulfilled.put(task) return True
python
def _job_handler(self) -> bool: """Process the work items.""" while True: try: task = self._unfullfilled.get_nowait() except queue.Empty: break else: self._log.debug("Job: %s" % str(task)) engine = self._dyn_loader(task['engine'], task) task['start_time'] = now_time() results = engine.search() task['end_time'] = now_time() duration: str = str((task['end_time'] - task['start_time']).seconds) task['duration'] = duration task.update({'results': results}) self._fulfilled.put(task) return True
[ "def", "_job_handler", "(", "self", ")", "->", "bool", ":", "while", "True", ":", "try", ":", "task", "=", "self", ".", "_unfullfilled", ".", "get_nowait", "(", ")", "except", "queue", ".", "Empty", ":", "break", "else", ":", "self", ".", "_log", "."...
Process the work items.
[ "Process", "the", "work", "items", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L77-L94
9b/frisbee
frisbee/__init__.py
Frisbee._save
def _save(self) -> None: """Save output to a directory.""" self._log.info("Saving results to '%s'" % self.folder) path: str = self.folder + "/" for job in self.results: if job['domain'] in self.saved: continue job['start_time'] = str_datetime(job['start_time']) job['end_time'] = str_datetime(job['end_time']) jid: int = random.randint(100000, 999999) filename: str = "%s_%s_%d_job.json" % (self.project, job['domain'], jid) handle = open(path + filename, 'w') handle.write(json.dumps(job, indent=4)) handle.close() filename = "%s_%s_%d_emails.txt" % (self.project, job['domain'], jid) handle = open(path + filename, 'w') for email in job['results']['emails']: handle.write(email + "\n") handle.close() self.saved.append(job['domain'])
python
def _save(self) -> None: """Save output to a directory.""" self._log.info("Saving results to '%s'" % self.folder) path: str = self.folder + "/" for job in self.results: if job['domain'] in self.saved: continue job['start_time'] = str_datetime(job['start_time']) job['end_time'] = str_datetime(job['end_time']) jid: int = random.randint(100000, 999999) filename: str = "%s_%s_%d_job.json" % (self.project, job['domain'], jid) handle = open(path + filename, 'w') handle.write(json.dumps(job, indent=4)) handle.close() filename = "%s_%s_%d_emails.txt" % (self.project, job['domain'], jid) handle = open(path + filename, 'w') for email in job['results']['emails']: handle.write(email + "\n") handle.close() self.saved.append(job['domain'])
[ "def", "_save", "(", "self", ")", "->", "None", ":", "self", ".", "_log", ".", "info", "(", "\"Saving results to '%s'\"", "%", "self", ".", "folder", ")", "path", ":", "str", "=", "self", ".", "folder", "+", "\"/\"", "for", "job", "in", "self", ".", ...
Save output to a directory.
[ "Save", "output", "to", "a", "directory", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L96-L116
9b/frisbee
frisbee/__init__.py
Frisbee.search
def search(self, jobs: List[Dict[str, str]]) -> None: """Perform searches based on job orders.""" if not isinstance(jobs, list): raise Exception("Jobs must be of type list.") self._log.info("Project: %s" % self.project) self._log.info("Processing jobs: %d", len(jobs)) for _, job in enumerate(jobs): self._unfullfilled.put(job) for _ in range(self.PROCESSES): proc: Process = Process(target=self._job_handler) self._processes.append(proc) proc.start() for proc in self._processes: proc.join() while not self._fulfilled.empty(): output: Dict = self._fulfilled.get() output.update({'project': self.project}) self._processed.append(output['domain']) self.results.append(output) if output['greedy']: bonus_jobs: List = list() observed: List = list() for item in output['results']['emails']: found: str = item.split('@')[1] if found in self._processed or found in observed: continue observed.append(found) base: Dict = dict() base['limit'] = output['limit'] base['modifier'] = output['modifier'] base['engine'] = output['engine'] base['greedy'] = False base['domain'] = found bonus_jobs.append(base) if len(bonus_jobs) > 0: self.search(bonus_jobs) self._log.info("All jobs processed") if self.output: self._save()
python
def search(self, jobs: List[Dict[str, str]]) -> None: """Perform searches based on job orders.""" if not isinstance(jobs, list): raise Exception("Jobs must be of type list.") self._log.info("Project: %s" % self.project) self._log.info("Processing jobs: %d", len(jobs)) for _, job in enumerate(jobs): self._unfullfilled.put(job) for _ in range(self.PROCESSES): proc: Process = Process(target=self._job_handler) self._processes.append(proc) proc.start() for proc in self._processes: proc.join() while not self._fulfilled.empty(): output: Dict = self._fulfilled.get() output.update({'project': self.project}) self._processed.append(output['domain']) self.results.append(output) if output['greedy']: bonus_jobs: List = list() observed: List = list() for item in output['results']['emails']: found: str = item.split('@')[1] if found in self._processed or found in observed: continue observed.append(found) base: Dict = dict() base['limit'] = output['limit'] base['modifier'] = output['modifier'] base['engine'] = output['engine'] base['greedy'] = False base['domain'] = found bonus_jobs.append(base) if len(bonus_jobs) > 0: self.search(bonus_jobs) self._log.info("All jobs processed") if self.output: self._save()
[ "def", "search", "(", "self", ",", "jobs", ":", "List", "[", "Dict", "[", "str", ",", "str", "]", "]", ")", "->", "None", ":", "if", "not", "isinstance", "(", "jobs", ",", "list", ")", ":", "raise", "Exception", "(", "\"Jobs must be of type list.\"", ...
Perform searches based on job orders.
[ "Perform", "searches", "based", "on", "job", "orders", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L118-L162
rstoneback/pysatMagVect
pysatMagVect/_core.py
geocentric_to_ecef
def geocentric_to_ecef(latitude, longitude, altitude): """Convert geocentric coordinates into ECEF Parameters ---------- latitude : float or array_like Geocentric latitude (degrees) longitude : float or array_like Geocentric longitude (degrees) altitude : float or array_like Height (km) above presumed spherical Earth with radius 6371 km. Returns ------- x, y, z numpy arrays of x, y, z locations in km """ r = earth_geo_radius + altitude x = r * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude)) y = r * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude)) z = r * np.sin(np.deg2rad(latitude)) return x, y, z
python
def geocentric_to_ecef(latitude, longitude, altitude): """Convert geocentric coordinates into ECEF Parameters ---------- latitude : float or array_like Geocentric latitude (degrees) longitude : float or array_like Geocentric longitude (degrees) altitude : float or array_like Height (km) above presumed spherical Earth with radius 6371 km. Returns ------- x, y, z numpy arrays of x, y, z locations in km """ r = earth_geo_radius + altitude x = r * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude)) y = r * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude)) z = r * np.sin(np.deg2rad(latitude)) return x, y, z
[ "def", "geocentric_to_ecef", "(", "latitude", ",", "longitude", ",", "altitude", ")", ":", "r", "=", "earth_geo_radius", "+", "altitude", "x", "=", "r", "*", "np", ".", "cos", "(", "np", ".", "deg2rad", "(", "latitude", ")", ")", "*", "np", ".", "cos...
Convert geocentric coordinates into ECEF Parameters ---------- latitude : float or array_like Geocentric latitude (degrees) longitude : float or array_like Geocentric longitude (degrees) altitude : float or array_like Height (km) above presumed spherical Earth with radius 6371 km. Returns ------- x, y, z numpy arrays of x, y, z locations in km
[ "Convert", "geocentric", "coordinates", "into", "ECEF", "Parameters", "----------", "latitude", ":", "float", "or", "array_like", "Geocentric", "latitude", "(", "degrees", ")", "longitude", ":", "float", "or", "array_like", "Geocentric", "longitude", "(", "degrees",...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L24-L48
rstoneback/pysatMagVect
pysatMagVect/_core.py
ecef_to_geocentric
def ecef_to_geocentric(x, y, z, ref_height=None): """Convert ECEF into geocentric coordinates Parameters ---------- x : float or array_like ECEF-X in km y : float or array_like ECEF-Y in km z : float or array_like ECEF-Z in km ref_height : float or array_like Reference radius used for calculating height. Defaults to average radius of 6371 km Returns ------- latitude, longitude, altitude numpy arrays of locations in degrees, degrees, and km """ if ref_height is None: ref_height = earth_geo_radius r = np.sqrt(x ** 2 + y ** 2 + z ** 2) colatitude = np.rad2deg(np.arccos(z / r)) longitude = np.rad2deg(np.arctan2(y, x)) latitude = 90. - colatitude return latitude, longitude, r - ref_height
python
def ecef_to_geocentric(x, y, z, ref_height=None): """Convert ECEF into geocentric coordinates Parameters ---------- x : float or array_like ECEF-X in km y : float or array_like ECEF-Y in km z : float or array_like ECEF-Z in km ref_height : float or array_like Reference radius used for calculating height. Defaults to average radius of 6371 km Returns ------- latitude, longitude, altitude numpy arrays of locations in degrees, degrees, and km """ if ref_height is None: ref_height = earth_geo_radius r = np.sqrt(x ** 2 + y ** 2 + z ** 2) colatitude = np.rad2deg(np.arccos(z / r)) longitude = np.rad2deg(np.arctan2(y, x)) latitude = 90. - colatitude return latitude, longitude, r - ref_height
[ "def", "ecef_to_geocentric", "(", "x", ",", "y", ",", "z", ",", "ref_height", "=", "None", ")", ":", "if", "ref_height", "is", "None", ":", "ref_height", "=", "earth_geo_radius", "r", "=", "np", ".", "sqrt", "(", "x", "**", "2", "+", "y", "**", "2"...
Convert ECEF into geocentric coordinates Parameters ---------- x : float or array_like ECEF-X in km y : float or array_like ECEF-Y in km z : float or array_like ECEF-Z in km ref_height : float or array_like Reference radius used for calculating height. Defaults to average radius of 6371 km Returns ------- latitude, longitude, altitude numpy arrays of locations in degrees, degrees, and km
[ "Convert", "ECEF", "into", "geocentric", "coordinates", "Parameters", "----------", "x", ":", "float", "or", "array_like", "ECEF", "-", "X", "in", "km", "y", ":", "float", "or", "array_like", "ECEF", "-", "Y", "in", "km", "z", ":", "float", "or", "array_...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L51-L79
rstoneback/pysatMagVect
pysatMagVect/_core.py
geodetic_to_ecef
def geodetic_to_ecef(latitude, longitude, altitude): """Convert WGS84 geodetic coordinates into ECEF Parameters ---------- latitude : float or array_like Geodetic latitude (degrees) longitude : float or array_like Geodetic longitude (degrees) altitude : float or array_like Geodetic Height (km) above WGS84 reference ellipsoid. Returns ------- x, y, z numpy arrays of x, y, z locations in km """ ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2) r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(np.deg2rad(latitude)) ** 2) # colatitude = 90. - latitude x = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude)) y = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude)) z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(np.deg2rad(latitude)) return x, y, z
python
def geodetic_to_ecef(latitude, longitude, altitude): """Convert WGS84 geodetic coordinates into ECEF Parameters ---------- latitude : float or array_like Geodetic latitude (degrees) longitude : float or array_like Geodetic longitude (degrees) altitude : float or array_like Geodetic Height (km) above WGS84 reference ellipsoid. Returns ------- x, y, z numpy arrays of x, y, z locations in km """ ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2) r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(np.deg2rad(latitude)) ** 2) # colatitude = 90. - latitude x = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude)) y = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude)) z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(np.deg2rad(latitude)) return x, y, z
[ "def", "geodetic_to_ecef", "(", "latitude", ",", "longitude", ",", "altitude", ")", ":", "ellip", "=", "np", ".", "sqrt", "(", "1.", "-", "earth_b", "**", "2", "/", "earth_a", "**", "2", ")", "r_n", "=", "earth_a", "/", "np", ".", "sqrt", "(", "1."...
Convert WGS84 geodetic coordinates into ECEF Parameters ---------- latitude : float or array_like Geodetic latitude (degrees) longitude : float or array_like Geodetic longitude (degrees) altitude : float or array_like Geodetic Height (km) above WGS84 reference ellipsoid. Returns ------- x, y, z numpy arrays of x, y, z locations in km
[ "Convert", "WGS84", "geodetic", "coordinates", "into", "ECEF", "Parameters", "----------", "latitude", ":", "float", "or", "array_like", "Geodetic", "latitude", "(", "degrees", ")", "longitude", ":", "float", "or", "array_like", "Geodetic", "longitude", "(", "degr...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L82-L110
rstoneback/pysatMagVect
pysatMagVect/_core.py
ecef_to_geodetic
def ecef_to_geodetic(x, y, z, method=None): """Convert ECEF into Geodetic WGS84 coordinates Parameters ---------- x : float or array_like ECEF-X in km y : float or array_like ECEF-Y in km z : float or array_like ECEF-Z in km method : 'iterative' or 'closed' ('closed' is deafult) String selects method of conversion. Closed for mathematical solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1) or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf). Returns ------- latitude, longitude, altitude numpy arrays of locations in degrees, degrees, and km """ # quick notes on ECEF to Geodetic transformations # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html method = method or 'closed' # ellipticity of Earth ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2) # first eccentricity squared e2 = ellip ** 2 # 6.6943799901377997E-3 longitude = np.arctan2(y, x) # cylindrical radius p = np.sqrt(x ** 2 + y ** 2) # closed form solution # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1 if method == 'closed': e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2) theta = np.arctan2(z*earth_a, p*earth_b) latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3) r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2) h = p / np.cos(latitude) - r_n # another possibility # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf ## iterative method # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf if method == 'iterative': latitude = np.arctan2(p, z) r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2) for i in np.arange(6): # print latitude r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2) h = p / np.cos(latitude) - r_n latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h)))) # print h # final ellipsoidal height update h = p / np.cos(latitude) - r_n return np.rad2deg(latitude), np.rad2deg(longitude), h
python
def ecef_to_geodetic(x, y, z, method=None): """Convert ECEF into Geodetic WGS84 coordinates Parameters ---------- x : float or array_like ECEF-X in km y : float or array_like ECEF-Y in km z : float or array_like ECEF-Z in km method : 'iterative' or 'closed' ('closed' is deafult) String selects method of conversion. Closed for mathematical solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1) or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf). Returns ------- latitude, longitude, altitude numpy arrays of locations in degrees, degrees, and km """ # quick notes on ECEF to Geodetic transformations # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html method = method or 'closed' # ellipticity of Earth ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2) # first eccentricity squared e2 = ellip ** 2 # 6.6943799901377997E-3 longitude = np.arctan2(y, x) # cylindrical radius p = np.sqrt(x ** 2 + y ** 2) # closed form solution # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1 if method == 'closed': e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2) theta = np.arctan2(z*earth_a, p*earth_b) latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3) r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2) h = p / np.cos(latitude) - r_n # another possibility # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf ## iterative method # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf if method == 'iterative': latitude = np.arctan2(p, z) r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2) for i in np.arange(6): # print latitude r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2) h = p / np.cos(latitude) - r_n latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h)))) # print h # final ellipsoidal height update h = p / np.cos(latitude) - r_n return np.rad2deg(latitude), np.rad2deg(longitude), h
[ "def", "ecef_to_geodetic", "(", "x", ",", "y", ",", "z", ",", "method", "=", "None", ")", ":", "# quick notes on ECEF to Geodetic transformations ", "# http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html", "method", "=", "method", "or", "'closed'...
Convert ECEF into Geodetic WGS84 coordinates Parameters ---------- x : float or array_like ECEF-X in km y : float or array_like ECEF-Y in km z : float or array_like ECEF-Z in km method : 'iterative' or 'closed' ('closed' is deafult) String selects method of conversion. Closed for mathematical solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1) or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf). Returns ------- latitude, longitude, altitude numpy arrays of locations in degrees, degrees, and km
[ "Convert", "ECEF", "into", "Geodetic", "WGS84", "coordinates", "Parameters", "----------", "x", ":", "float", "or", "array_like", "ECEF", "-", "X", "in", "km", "y", ":", "float", "or", "array_like", "ECEF", "-", "Y", "in", "km", "z", ":", "float", "or", ...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L113-L176
rstoneback/pysatMagVect
pysatMagVect/_core.py
enu_to_ecef_vector
def enu_to_ecef_vector(east, north, up, glat, glong): """Converts vector from East, North, Up components to ECEF Position of vector in geospace may be specified in either geocentric or geodetic coordinates, with corresponding expression of the vector using radial or ellipsoidal unit vectors. Parameters ---------- east : float or array-like Eastward component of vector north : float or array-like Northward component of vector up : float or array-like Upward component of vector latitude : float or array_like Geodetic or geocentric latitude (degrees) longitude : float or array_like Geodetic or geocentric longitude (degrees) Returns ------- x, y, z Vector components along ECEF x, y, and z directions """ # convert lat and lon in degrees to radians rlat = np.radians(glat) rlon = np.radians(glong) x = -east*np.sin(rlon) - north*np.cos(rlon)*np.sin(rlat) + up*np.cos(rlon)*np.cos(rlat) y = east*np.cos(rlon) - north*np.sin(rlon)*np.sin(rlat) + up*np.sin(rlon)*np.cos(rlat) z = north*np.cos(rlat) + up*np.sin(rlat) return x, y, z
python
def enu_to_ecef_vector(east, north, up, glat, glong): """Converts vector from East, North, Up components to ECEF Position of vector in geospace may be specified in either geocentric or geodetic coordinates, with corresponding expression of the vector using radial or ellipsoidal unit vectors. Parameters ---------- east : float or array-like Eastward component of vector north : float or array-like Northward component of vector up : float or array-like Upward component of vector latitude : float or array_like Geodetic or geocentric latitude (degrees) longitude : float or array_like Geodetic or geocentric longitude (degrees) Returns ------- x, y, z Vector components along ECEF x, y, and z directions """ # convert lat and lon in degrees to radians rlat = np.radians(glat) rlon = np.radians(glong) x = -east*np.sin(rlon) - north*np.cos(rlon)*np.sin(rlat) + up*np.cos(rlon)*np.cos(rlat) y = east*np.cos(rlon) - north*np.sin(rlon)*np.sin(rlat) + up*np.sin(rlon)*np.cos(rlat) z = north*np.cos(rlat) + up*np.sin(rlat) return x, y, z
[ "def", "enu_to_ecef_vector", "(", "east", ",", "north", ",", "up", ",", "glat", ",", "glong", ")", ":", "# convert lat and lon in degrees to radians", "rlat", "=", "np", ".", "radians", "(", "glat", ")", "rlon", "=", "np", ".", "radians", "(", "glong", ")"...
Converts vector from East, North, Up components to ECEF Position of vector in geospace may be specified in either geocentric or geodetic coordinates, with corresponding expression of the vector using radial or ellipsoidal unit vectors. Parameters ---------- east : float or array-like Eastward component of vector north : float or array-like Northward component of vector up : float or array-like Upward component of vector latitude : float or array_like Geodetic or geocentric latitude (degrees) longitude : float or array_like Geodetic or geocentric longitude (degrees) Returns ------- x, y, z Vector components along ECEF x, y, and z directions
[ "Converts", "vector", "from", "East", "North", "Up", "components", "to", "ECEF", "Position", "of", "vector", "in", "geospace", "may", "be", "specified", "in", "either", "geocentric", "or", "geodetic", "coordinates", "with", "corresponding", "expression", "of", "...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L178-L213
rstoneback/pysatMagVect
pysatMagVect/_core.py
project_ecef_vector_onto_basis
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz): """Projects vector in ecef onto different basis, with components also expressed in ECEF Parameters ---------- x : float or array-like ECEF-X component of vector y : float or array-like ECEF-Y component of vector z : float or array-like ECEF-Z component of vector xx : float or array-like ECEF-X component of the x unit vector of new basis xy : float or array-like ECEF-Y component of the x unit vector of new basis xz : float or array-like ECEF-Z component of the x unit vector of new basis """ out_x = x*xx + y*xy + z*xz out_y = x*yx + y*yy + z*yz out_z = x*zx + y*zy + z*zz return out_x, out_y, out_z
python
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz): """Projects vector in ecef onto different basis, with components also expressed in ECEF Parameters ---------- x : float or array-like ECEF-X component of vector y : float or array-like ECEF-Y component of vector z : float or array-like ECEF-Z component of vector xx : float or array-like ECEF-X component of the x unit vector of new basis xy : float or array-like ECEF-Y component of the x unit vector of new basis xz : float or array-like ECEF-Z component of the x unit vector of new basis """ out_x = x*xx + y*xy + z*xz out_y = x*yx + y*yy + z*yz out_z = x*zx + y*zy + z*zz return out_x, out_y, out_z
[ "def", "project_ecef_vector_onto_basis", "(", "x", ",", "y", ",", "z", ",", "xx", ",", "xy", ",", "xz", ",", "yx", ",", "yy", ",", "yz", ",", "zx", ",", "zy", ",", "zz", ")", ":", "out_x", "=", "x", "*", "xx", "+", "y", "*", "xy", "+", "z",...
Projects vector in ecef onto different basis, with components also expressed in ECEF Parameters ---------- x : float or array-like ECEF-X component of vector y : float or array-like ECEF-Y component of vector z : float or array-like ECEF-Z component of vector xx : float or array-like ECEF-X component of the x unit vector of new basis xy : float or array-like ECEF-Y component of the x unit vector of new basis xz : float or array-like ECEF-Z component of the x unit vector of new basis
[ "Projects", "vector", "in", "ecef", "onto", "different", "basis", "with", "components", "also", "expressed", "in", "ECEF", "Parameters", "----------", "x", ":", "float", "or", "array", "-", "like", "ECEF", "-", "X", "component", "of", "vector", "y", ":", "...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L253-L277
rstoneback/pysatMagVect
pysatMagVect/_core.py
normalize_vector
def normalize_vector(x, y, z): """ Normalizes vector to produce a unit vector. Parameters ---------- x : float or array-like X component of vector y : float or array-like Y component of vector z : float or array-like Z component of vector Returns ------- x, y, z Unit vector x,y,z components """ mag = np.sqrt(x**2 + y**2 + z**2) x = x/mag y = y/mag z = z/mag return x, y, z
python
def normalize_vector(x, y, z): """ Normalizes vector to produce a unit vector. Parameters ---------- x : float or array-like X component of vector y : float or array-like Y component of vector z : float or array-like Z component of vector Returns ------- x, y, z Unit vector x,y,z components """ mag = np.sqrt(x**2 + y**2 + z**2) x = x/mag y = y/mag z = z/mag return x, y, z
[ "def", "normalize_vector", "(", "x", ",", "y", ",", "z", ")", ":", "mag", "=", "np", ".", "sqrt", "(", "x", "**", "2", "+", "y", "**", "2", "+", "z", "**", "2", ")", "x", "=", "x", "/", "mag", "y", "=", "y", "/", "mag", "z", "=", "z", ...
Normalizes vector to produce a unit vector. Parameters ---------- x : float or array-like X component of vector y : float or array-like Y component of vector z : float or array-like Z component of vector Returns ------- x, y, z Unit vector x,y,z components
[ "Normalizes", "vector", "to", "produce", "a", "unit", "vector", ".", "Parameters", "----------", "x", ":", "float", "or", "array", "-", "like", "X", "component", "of", "vector", "y", ":", "float", "or", "array", "-", "like", "Y", "component", "of", "vect...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L279-L303
rstoneback/pysatMagVect
pysatMagVect/_core.py
cross_product
def cross_product(x1, y1, z1, x2, y2, z2): """ Cross product of two vectors, v1 x v2 Parameters ---------- x1 : float or array-like X component of vector 1 y1 : float or array-like Y component of vector 1 z1 : float or array-like Z component of vector 1 x2 : float or array-like X component of vector 2 y2 : float or array-like Y component of vector 2 z2 : float or array-like Z component of vector 2 Returns ------- x, y, z Unit vector x,y,z components """ x = y1*z2 - y2*z1 y = z1*x2 - x1*z2 z = x1*y2 - y1*x2 return x, y, z
python
def cross_product(x1, y1, z1, x2, y2, z2): """ Cross product of two vectors, v1 x v2 Parameters ---------- x1 : float or array-like X component of vector 1 y1 : float or array-like Y component of vector 1 z1 : float or array-like Z component of vector 1 x2 : float or array-like X component of vector 2 y2 : float or array-like Y component of vector 2 z2 : float or array-like Z component of vector 2 Returns ------- x, y, z Unit vector x,y,z components """ x = y1*z2 - y2*z1 y = z1*x2 - x1*z2 z = x1*y2 - y1*x2 return x, y, z
[ "def", "cross_product", "(", "x1", ",", "y1", ",", "z1", ",", "x2", ",", "y2", ",", "z2", ")", ":", "x", "=", "y1", "*", "z2", "-", "y2", "*", "z1", "y", "=", "z1", "*", "x2", "-", "x1", "*", "z2", "z", "=", "x1", "*", "y2", "-", "y1", ...
Cross product of two vectors, v1 x v2 Parameters ---------- x1 : float or array-like X component of vector 1 y1 : float or array-like Y component of vector 1 z1 : float or array-like Z component of vector 1 x2 : float or array-like X component of vector 2 y2 : float or array-like Y component of vector 2 z2 : float or array-like Z component of vector 2 Returns ------- x, y, z Unit vector x,y,z components
[ "Cross", "product", "of", "two", "vectors", "v1", "x", "v2", "Parameters", "----------", "x1", ":", "float", "or", "array", "-", "like", "X", "component", "of", "vector", "1", "y1", ":", "float", "or", "array", "-", "like", "Y", "component", "of", "vec...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L305-L333
rstoneback/pysatMagVect
pysatMagVect/_core.py
field_line_trace
def field_line_trace(init, date, direction, height, steps=None, max_steps=1E4, step_size=10., recursive_loop_count=None, recurse=True): """Perform field line tracing using IGRF and scipy.integrate.odeint. Parameters ---------- init : array-like of floats Position to begin field line tracing from in ECEF (x,y,z) km date : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. direction : int 1 : field aligned, generally south to north. -1 : anti-field aligned, generally north to south. height : float Altitude to terminate trace, geodetic WGS84 (km) steps : array-like of ints or floats Number of steps along field line when field line trace positions should be reported. By default, each step is reported; steps=np.arange(max_steps). max_steps : float Maximum number of steps along field line that should be taken step_size : float Distance in km for each large integration step. Multiple substeps are taken as determined by scipy.integrate.odeint Returns ------- numpy array 2D array. [0,:] has the x,y,z location for initial point [:,0] is the x positions over the integration. Positions are reported in ECEF (km). """ if recursive_loop_count is None: recursive_loop_count = 0 # if steps is None: steps = np.arange(max_steps) if not isinstance(date, float): # recast from datetime to float, as required by IGRF12 code doy = (date - datetime.datetime(date.year,1,1)).days # number of days in year, works for leap years num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24. 
trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(), steps, args=(date, step_size, direction, height), full_output=False, printmessg=False, ixpr=False) #, # mxstep=500) # check that we reached final altitude check = trace_north[-1, :] x, y, z = ecef_to_geodetic(*check) if height == 0: check_height = 1. else: check_height = height # fortran integration gets close to target height if recurse & (z > check_height*1.000001): if (recursive_loop_count < 1000): # When we have not reached the reference height, call field_line_trace # again by taking check value as init - recursive call recursive_loop_count = recursive_loop_count + 1 trace_north1 = field_line_trace(check, date, direction, height, step_size=step_size, max_steps=max_steps, recursive_loop_count=recursive_loop_count, steps=steps) else: raise RuntimeError("After 1000 iterations couldn't reach target altitude") return np.vstack((trace_north, trace_north1)) else: # return results if we make it to the target altitude # filter points to terminate at point closest to target height # code below not correct, we want the first poiint that goes below target # height # code also introduces a variable length return, though I suppose # that already exists with the recursive functionality # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2]) # idx = np.argmin(np.abs(check_height - z)) return trace_north
python
def field_line_trace(init, date, direction, height, steps=None, max_steps=1E4, step_size=10., recursive_loop_count=None, recurse=True): """Perform field line tracing using IGRF and scipy.integrate.odeint. Parameters ---------- init : array-like of floats Position to begin field line tracing from in ECEF (x,y,z) km date : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. direction : int 1 : field aligned, generally south to north. -1 : anti-field aligned, generally north to south. height : float Altitude to terminate trace, geodetic WGS84 (km) steps : array-like of ints or floats Number of steps along field line when field line trace positions should be reported. By default, each step is reported; steps=np.arange(max_steps). max_steps : float Maximum number of steps along field line that should be taken step_size : float Distance in km for each large integration step. Multiple substeps are taken as determined by scipy.integrate.odeint Returns ------- numpy array 2D array. [0,:] has the x,y,z location for initial point [:,0] is the x positions over the integration. Positions are reported in ECEF (km). """ if recursive_loop_count is None: recursive_loop_count = 0 # if steps is None: steps = np.arange(max_steps) if not isinstance(date, float): # recast from datetime to float, as required by IGRF12 code doy = (date - datetime.datetime(date.year,1,1)).days # number of days in year, works for leap years num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24. 
trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(), steps, args=(date, step_size, direction, height), full_output=False, printmessg=False, ixpr=False) #, # mxstep=500) # check that we reached final altitude check = trace_north[-1, :] x, y, z = ecef_to_geodetic(*check) if height == 0: check_height = 1. else: check_height = height # fortran integration gets close to target height if recurse & (z > check_height*1.000001): if (recursive_loop_count < 1000): # When we have not reached the reference height, call field_line_trace # again by taking check value as init - recursive call recursive_loop_count = recursive_loop_count + 1 trace_north1 = field_line_trace(check, date, direction, height, step_size=step_size, max_steps=max_steps, recursive_loop_count=recursive_loop_count, steps=steps) else: raise RuntimeError("After 1000 iterations couldn't reach target altitude") return np.vstack((trace_north, trace_north1)) else: # return results if we make it to the target altitude # filter points to terminate at point closest to target height # code below not correct, we want the first poiint that goes below target # height # code also introduces a variable length return, though I suppose # that already exists with the recursive functionality # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2]) # idx = np.argmin(np.abs(check_height - z)) return trace_north
[ "def", "field_line_trace", "(", "init", ",", "date", ",", "direction", ",", "height", ",", "steps", "=", "None", ",", "max_steps", "=", "1E4", ",", "step_size", "=", "10.", ",", "recursive_loop_count", "=", "None", ",", "recurse", "=", "True", ")", ":", ...
Perform field line tracing using IGRF and scipy.integrate.odeint. Parameters ---------- init : array-like of floats Position to begin field line tracing from in ECEF (x,y,z) km date : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. direction : int 1 : field aligned, generally south to north. -1 : anti-field aligned, generally north to south. height : float Altitude to terminate trace, geodetic WGS84 (km) steps : array-like of ints or floats Number of steps along field line when field line trace positions should be reported. By default, each step is reported; steps=np.arange(max_steps). max_steps : float Maximum number of steps along field line that should be taken step_size : float Distance in km for each large integration step. Multiple substeps are taken as determined by scipy.integrate.odeint Returns ------- numpy array 2D array. [0,:] has the x,y,z location for initial point [:,0] is the x positions over the integration. Positions are reported in ECEF (km).
[ "Perform", "field", "line", "tracing", "using", "IGRF", "and", "scipy", ".", "integrate", ".", "odeint", ".", "Parameters", "----------", "init", ":", "array", "-", "like", "of", "floats", "Position", "to", "begin", "field", "line", "tracing", "from", "in", ...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L336-L423
rstoneback/pysatMagVect
pysatMagVect/_core.py
full_field_line
def full_field_line(init, date, height, step_size=100., max_steps=1000, steps=None, **kwargs): """Perform field line tracing using IGRF and scipy.integrate.odeint. Parameters ---------- init : array-like of floats Position to begin field line tracing from in ECEF (x,y,z) km date : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. height : float Altitude to terminate trace, geodetic WGS84 (km) max_steps : float Maximum number of steps along field line that should be taken step_size : float Distance in km for each large integration step. Multiple substeps are taken as determined by scipy.integrate.odeint steps : array-like of ints or floats Number of steps along field line when field line trace positions should be reported. By default, each step is reported; steps=np.arange(max_steps). Two traces are made, one north, the other south, thus the output array could have double max_steps, or more via recursion. Returns ------- numpy array 2D array. [0,:] has the x,y,z location for southern footpoint [:,0] is the x positions over the integration. Positions are reported in ECEF (km). """ if steps is None: steps = np.arange(max_steps) # trace north, then south, and combine trace_south = field_line_trace(init, date, -1., height, steps=steps, step_size=step_size, max_steps=max_steps, **kwargs) trace_north = field_line_trace(init, date, 1., height, steps=steps, step_size=step_size, max_steps=max_steps, **kwargs) # order of field points is generally along the field line, south to north # don't want to include the initial point twice trace = np.vstack((trace_south[::-1][:-1,:], trace_north)) return trace
python
def full_field_line(init, date, height, step_size=100., max_steps=1000, steps=None, **kwargs): """Perform field line tracing using IGRF and scipy.integrate.odeint. Parameters ---------- init : array-like of floats Position to begin field line tracing from in ECEF (x,y,z) km date : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. height : float Altitude to terminate trace, geodetic WGS84 (km) max_steps : float Maximum number of steps along field line that should be taken step_size : float Distance in km for each large integration step. Multiple substeps are taken as determined by scipy.integrate.odeint steps : array-like of ints or floats Number of steps along field line when field line trace positions should be reported. By default, each step is reported; steps=np.arange(max_steps). Two traces are made, one north, the other south, thus the output array could have double max_steps, or more via recursion. Returns ------- numpy array 2D array. [0,:] has the x,y,z location for southern footpoint [:,0] is the x positions over the integration. Positions are reported in ECEF (km). """ if steps is None: steps = np.arange(max_steps) # trace north, then south, and combine trace_south = field_line_trace(init, date, -1., height, steps=steps, step_size=step_size, max_steps=max_steps, **kwargs) trace_north = field_line_trace(init, date, 1., height, steps=steps, step_size=step_size, max_steps=max_steps, **kwargs) # order of field points is generally along the field line, south to north # don't want to include the initial point twice trace = np.vstack((trace_south[::-1][:-1,:], trace_north)) return trace
[ "def", "full_field_line", "(", "init", ",", "date", ",", "height", ",", "step_size", "=", "100.", ",", "max_steps", "=", "1000", ",", "steps", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "steps", "is", "None", ":", "steps", "=", "np", "."...
Perform field line tracing using IGRF and scipy.integrate.odeint. Parameters ---------- init : array-like of floats Position to begin field line tracing from in ECEF (x,y,z) km date : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. height : float Altitude to terminate trace, geodetic WGS84 (km) max_steps : float Maximum number of steps along field line that should be taken step_size : float Distance in km for each large integration step. Multiple substeps are taken as determined by scipy.integrate.odeint steps : array-like of ints or floats Number of steps along field line when field line trace positions should be reported. By default, each step is reported; steps=np.arange(max_steps). Two traces are made, one north, the other south, thus the output array could have double max_steps, or more via recursion. Returns ------- numpy array 2D array. [0,:] has the x,y,z location for southern footpoint [:,0] is the x positions over the integration. Positions are reported in ECEF (km).
[ "Perform", "field", "line", "tracing", "using", "IGRF", "and", "scipy", ".", "integrate", ".", "odeint", ".", "Parameters", "----------", "init", ":", "array", "-", "like", "of", "floats", "Position", "to", "begin", "field", "line", "tracing", "from", "in", ...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L426-L476
rstoneback/pysatMagVect
pysatMagVect/_core.py
calculate_mag_drift_unit_vectors_ecef
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes, steps=None, max_steps=1000, step_size=100., ref_height=120., filter_zonal=True): """Calculates unit vectors expressing the ion drift coordinate system organized by the geomagnetic field. Unit vectors are expressed in ECEF coordinates. Note ---- The zonal vector is calculated by field-line tracing from the input locations toward the footpoint locations at ref_height. The cross product of these two vectors is taken to define the plane of the magnetic field. This vector is not always orthogonal with the local field-aligned vector (IGRF), thus any component of the zonal vector with the field-aligned direction is removed (optional). The meridional unit vector is defined via the cross product of the zonal and field-aligned directions. Parameters ---------- latitude : array-like of floats (degrees) Latitude of location, degrees, WGS84 longitude : array-like of floats (degrees) Longitude of location, degrees, WGS84 altitude : array-like of floats (km) Altitude of location, height above surface, WGS84 datetimes : array-like of datetimes Time to calculate vectors max_steps : int Maximum number of steps allowed for field line tracing step_size : float Maximum step size (km) allowed when field line tracing ref_height : float Altitude used as cutoff for labeling a field line location a footpoint filter_zonal : bool If True, removes any field aligned component from the calculated zonal unit vector. Resulting coordinate system is not-orthogonal. Returns ------- zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z """ if steps is None: steps = np.arange(max_steps) # calculate satellite position in ECEF coordinates ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude) # also get position in geocentric coordinates geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z, ref_height=0.) 
# filter longitudes (could use pysat's function here) idx, = np.where(geo_long < 0) geo_long[idx] = geo_long[idx] + 360. # prepare output lists north_x = []; north_y = []; north_z = [] south_x = []; south_y = []; south_z = [] bn = []; be = []; bd = [] for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z, geo_alt, np.deg2rad(90. - geo_lat), np.deg2rad(geo_long), datetimes): init = np.array([x, y, z]) # date = inst.yr + inst.doy / 366. # trace = full_field_line(init, time, ref_height, step_size=step_size, # max_steps=max_steps, # steps=steps) trace_north = field_line_trace(init, time, 1., ref_height, steps=steps, step_size=step_size, max_steps=max_steps) trace_south = field_line_trace(init, time, -1., ref_height, steps=steps, step_size=step_size, max_steps=max_steps) # store final location, full trace goes south to north trace_north = trace_north[-1, :] trace_south = trace_south[-1, :] # magnetic field at spacecraft location, using geocentric inputs # to get magnetic field in geocentric output # recast from datetime to float, as required by IGRF12 code doy = (time - datetime.datetime(time.year,1,1)).days # number of days in year, works for leap years num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24. 
# get IGRF field components # tbn, tbe, tbd, tbmag are in nT tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong) # collect outputs south_x.append(trace_south[0]) south_y.append(trace_south[1]) south_z.append(trace_south[2]) north_x.append(trace_north[0]) north_y.append(trace_north[1]) north_z.append(trace_north[2]) bn.append(tbn); be.append(tbe); bd.append(tbd) north_x = np.array(north_x) north_y = np.array(north_y) north_z = np.array(north_z) south_x = np.array(south_x) south_y = np.array(south_y) south_z = np.array(south_z) bn = np.array(bn) be = np.array(be) bd = np.array(bd) # calculate vector from satellite to northern/southern footpoints north_x = north_x - ecef_x north_y = north_y - ecef_y north_z = north_z - ecef_z north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z) south_x = south_x - ecef_x south_y = south_y - ecef_y south_z = south_z - ecef_z south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z) # calculate magnetic unit vector bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long) bx, by, bz = normalize_vector(bx, by, bz) # take cross product of southward and northward vectors to get the zonal vector zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z, north_x, north_y, north_z) # getting zonal vector utilizing magnetic field vector instead zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z, bx, by, bz) # getting zonal vector utilizing magnetic field vector instead and southern point zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z, bx, by, bz) # normalize the vectors norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2) # calculate zonal vector zvx = zvx_foot / norm_foot zvy = zvy_foot / norm_foot zvz = zvz_foot / norm_foot # remove any field aligned component to the zonal vector dot_fa = zvx * bx + zvy * by + zvz * bz zvx -= dot_fa * bx zvy -= dot_fa * by zvz -= dot_fa * bz zvx, zvy, zvz = 
normalize_vector(zvx, zvy, zvz) # compute meridional vector # cross product of zonal and magnetic unit vector mx, my, mz = cross_product(zvx, zvy, zvz, bx, by, bz) # add unit vectors for magnetic drifts in ecef coordinates return zvx, zvy, zvz, bx, by, bz, mx, my, mz
python
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes, steps=None, max_steps=1000, step_size=100., ref_height=120., filter_zonal=True): """Calculates unit vectors expressing the ion drift coordinate system organized by the geomagnetic field. Unit vectors are expressed in ECEF coordinates. Note ---- The zonal vector is calculated by field-line tracing from the input locations toward the footpoint locations at ref_height. The cross product of these two vectors is taken to define the plane of the magnetic field. This vector is not always orthogonal with the local field-aligned vector (IGRF), thus any component of the zonal vector with the field-aligned direction is removed (optional). The meridional unit vector is defined via the cross product of the zonal and field-aligned directions. Parameters ---------- latitude : array-like of floats (degrees) Latitude of location, degrees, WGS84 longitude : array-like of floats (degrees) Longitude of location, degrees, WGS84 altitude : array-like of floats (km) Altitude of location, height above surface, WGS84 datetimes : array-like of datetimes Time to calculate vectors max_steps : int Maximum number of steps allowed for field line tracing step_size : float Maximum step size (km) allowed when field line tracing ref_height : float Altitude used as cutoff for labeling a field line location a footpoint filter_zonal : bool If True, removes any field aligned component from the calculated zonal unit vector. Resulting coordinate system is not-orthogonal. Returns ------- zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z """ if steps is None: steps = np.arange(max_steps) # calculate satellite position in ECEF coordinates ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude) # also get position in geocentric coordinates geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z, ref_height=0.) 
# filter longitudes (could use pysat's function here) idx, = np.where(geo_long < 0) geo_long[idx] = geo_long[idx] + 360. # prepare output lists north_x = []; north_y = []; north_z = [] south_x = []; south_y = []; south_z = [] bn = []; be = []; bd = [] for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z, geo_alt, np.deg2rad(90. - geo_lat), np.deg2rad(geo_long), datetimes): init = np.array([x, y, z]) # date = inst.yr + inst.doy / 366. # trace = full_field_line(init, time, ref_height, step_size=step_size, # max_steps=max_steps, # steps=steps) trace_north = field_line_trace(init, time, 1., ref_height, steps=steps, step_size=step_size, max_steps=max_steps) trace_south = field_line_trace(init, time, -1., ref_height, steps=steps, step_size=step_size, max_steps=max_steps) # store final location, full trace goes south to north trace_north = trace_north[-1, :] trace_south = trace_south[-1, :] # magnetic field at spacecraft location, using geocentric inputs # to get magnetic field in geocentric output # recast from datetime to float, as required by IGRF12 code doy = (time - datetime.datetime(time.year,1,1)).days # number of days in year, works for leap years num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24. 
# get IGRF field components # tbn, tbe, tbd, tbmag are in nT tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong) # collect outputs south_x.append(trace_south[0]) south_y.append(trace_south[1]) south_z.append(trace_south[2]) north_x.append(trace_north[0]) north_y.append(trace_north[1]) north_z.append(trace_north[2]) bn.append(tbn); be.append(tbe); bd.append(tbd) north_x = np.array(north_x) north_y = np.array(north_y) north_z = np.array(north_z) south_x = np.array(south_x) south_y = np.array(south_y) south_z = np.array(south_z) bn = np.array(bn) be = np.array(be) bd = np.array(bd) # calculate vector from satellite to northern/southern footpoints north_x = north_x - ecef_x north_y = north_y - ecef_y north_z = north_z - ecef_z north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z) south_x = south_x - ecef_x south_y = south_y - ecef_y south_z = south_z - ecef_z south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z) # calculate magnetic unit vector bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long) bx, by, bz = normalize_vector(bx, by, bz) # take cross product of southward and northward vectors to get the zonal vector zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z, north_x, north_y, north_z) # getting zonal vector utilizing magnetic field vector instead zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z, bx, by, bz) # getting zonal vector utilizing magnetic field vector instead and southern point zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z, bx, by, bz) # normalize the vectors norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2) # calculate zonal vector zvx = zvx_foot / norm_foot zvy = zvy_foot / norm_foot zvz = zvz_foot / norm_foot # remove any field aligned component to the zonal vector dot_fa = zvx * bx + zvy * by + zvz * bz zvx -= dot_fa * bx zvy -= dot_fa * by zvz -= dot_fa * bz zvx, zvy, zvz = 
normalize_vector(zvx, zvy, zvz) # compute meridional vector # cross product of zonal and magnetic unit vector mx, my, mz = cross_product(zvx, zvy, zvz, bx, by, bz) # add unit vectors for magnetic drifts in ecef coordinates return zvx, zvy, zvz, bx, by, bz, mx, my, mz
[ "def", "calculate_mag_drift_unit_vectors_ecef", "(", "latitude", ",", "longitude", ",", "altitude", ",", "datetimes", ",", "steps", "=", "None", ",", "max_steps", "=", "1000", ",", "step_size", "=", "100.", ",", "ref_height", "=", "120.", ",", "filter_zonal", ...
Calculates unit vectors expressing the ion drift coordinate system organized by the geomagnetic field. Unit vectors are expressed in ECEF coordinates. Note ---- The zonal vector is calculated by field-line tracing from the input locations toward the footpoint locations at ref_height. The cross product of these two vectors is taken to define the plane of the magnetic field. This vector is not always orthogonal with the local field-aligned vector (IGRF), thus any component of the zonal vector with the field-aligned direction is removed (optional). The meridional unit vector is defined via the cross product of the zonal and field-aligned directions. Parameters ---------- latitude : array-like of floats (degrees) Latitude of location, degrees, WGS84 longitude : array-like of floats (degrees) Longitude of location, degrees, WGS84 altitude : array-like of floats (km) Altitude of location, height above surface, WGS84 datetimes : array-like of datetimes Time to calculate vectors max_steps : int Maximum number of steps allowed for field line tracing step_size : float Maximum step size (km) allowed when field line tracing ref_height : float Altitude used as cutoff for labeling a field line location a footpoint filter_zonal : bool If True, removes any field aligned component from the calculated zonal unit vector. Resulting coordinate system is not-orthogonal. Returns ------- zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
[ "Calculates", "unit", "vectors", "expressing", "the", "ion", "drift", "coordinate", "system", "organized", "by", "the", "geomagnetic", "field", ".", "Unit", "vectors", "are", "expressed", "in", "ECEF", "coordinates", ".", "Note", "----", "The", "zonal", "vector"...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L479-L631
rstoneback/pysatMagVect
pysatMagVect/_core.py
step_until_intersect
def step_until_intersect(pos, field_line, sign, time, direction=None, step_size_goal=5., field_step_size=None): """Starting at pos, method steps along magnetic unit vector direction towards the supplied field line trace. Determines the distance of closest approach to field line. Routine is used when calculting the mapping of electric fields along magnetic field lines. Voltage remains constant along the field but the distance between field lines does not.This routine may be used to form the last leg when trying to trace out a closed field line loop. Routine will create a high resolution field line trace (.01 km step size) near the location of closest approach to better determine where the intersection occurs. Parameters ---------- pos : array-like X, Y, and Z ECEF locations to start from field_line : array-like (:,3) X, Y, and Z ECEF locations of field line trace, produced by the field_line_trace method. sign : int if 1, move along positive unit vector. Negwtive direction for -1. time : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. direction : string ('meridional', 'zonal', or 'aligned') Which unit vector direction to move slong when trying to intersect with supplied field line trace. See step_along_mag_unit_vector method for more. step_size_goal : float step size goal that method will try to match when stepping towards field line. Returns ------- (float, array, float) Total distance taken along vector direction; the position after taking the step [x, y, z] in ECEF; distance of closest approach from input pos towards the input field line trace. """ # work on a copy, probably not needed field_copy = field_line # set a high last minimum distance to ensure first loop does better than this last_min_dist = 2500000. # scalar is the distance along unit vector line that we are taking scalar = 0. 
# repeat boolean repeat=True # first run boolean first=True # factor is a divisor applied to the remaining distance between point and field line # I slowly take steps towards the field line and I don't want to overshoot # each time my minimum distance increases, I step back, increase factor, reducing # my next step size, then I try again factor = 1 while repeat: # take a total step along magnetic unit vector # try to take steps near user provided step_size_goal unit_steps = np.abs(scalar//step_size_goal) if unit_steps == 0: unit_steps = 1 # print (unit_steps, scalar/unit_steps) pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time, direction=direction, num_steps=unit_steps, step_size=np.abs(scalar)/unit_steps, scalar=sign) # find closest point along field line trace diff = field_copy - pos_step diff_mag = np.sqrt((diff ** 2).sum(axis=1)) min_idx = np.argmin(diff_mag) if first: # first time in while loop, create some information # make a high resolution field line trace around closest distance # want to take a field step size in each direction # maintain accuracy of high res trace below to be .01 km init = field_copy[min_idx,:] field_copy = full_field_line(init, time, 0., step_size=0.01, max_steps=int(field_step_size/.01), recurse=False) # difference with position diff = field_copy - pos_step diff_mag = np.sqrt((diff ** 2).sum(axis=1)) # find closest one min_idx = np.argmin(diff_mag) # # reduce number of elements we really need to check # field_copy = field_copy[min_idx-100:min_idx+100] # # difference with position # diff = field_copy - pos_step # diff_mag = np.sqrt((diff ** 2).sum(axis=1)) # # find closest one # min_idx = np.argmin(diff_mag) first = False # pull out distance of closest point min_dist = diff_mag[min_idx] # check how the solution is doing # if well, add more distance to the total step and recheck if closer # if worse, step back and try a smaller step if min_dist > last_min_dist: # last step we took made the solution worse if factor > 4: 
# we've tried enough, stop looping repeat = False # undo increment to last total distance scalar = scalar - last_min_dist/(2*factor) # calculate latest position pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time, direction=direction, num_steps=unit_steps, step_size=np.abs(scalar)/unit_steps, scalar=sign) else: # undo increment to last total distance scalar = scalar - last_min_dist/(2*factor) # increase the divisor used to reduce the distance # actually stepped per increment factor = factor + 1. # try a new increment to total distance scalar = scalar + last_min_dist/(2*factor) else: # we did better, move even closer, a fraction of remaining distance # increment scalar, but only by a fraction scalar = scalar + min_dist/(2*factor) # we have a new standard to judge against, set it last_min_dist = min_dist.copy() # return magnitude of step return scalar, pos_step, min_dist
python
def step_until_intersect(pos, field_line, sign, time, direction=None, step_size_goal=5., field_step_size=None): """Starting at pos, method steps along magnetic unit vector direction towards the supplied field line trace. Determines the distance of closest approach to field line. Routine is used when calculting the mapping of electric fields along magnetic field lines. Voltage remains constant along the field but the distance between field lines does not.This routine may be used to form the last leg when trying to trace out a closed field line loop. Routine will create a high resolution field line trace (.01 km step size) near the location of closest approach to better determine where the intersection occurs. Parameters ---------- pos : array-like X, Y, and Z ECEF locations to start from field_line : array-like (:,3) X, Y, and Z ECEF locations of field line trace, produced by the field_line_trace method. sign : int if 1, move along positive unit vector. Negwtive direction for -1. time : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. direction : string ('meridional', 'zonal', or 'aligned') Which unit vector direction to move slong when trying to intersect with supplied field line trace. See step_along_mag_unit_vector method for more. step_size_goal : float step size goal that method will try to match when stepping towards field line. Returns ------- (float, array, float) Total distance taken along vector direction; the position after taking the step [x, y, z] in ECEF; distance of closest approach from input pos towards the input field line trace. """ # work on a copy, probably not needed field_copy = field_line # set a high last minimum distance to ensure first loop does better than this last_min_dist = 2500000. # scalar is the distance along unit vector line that we are taking scalar = 0. 
# repeat boolean repeat=True # first run boolean first=True # factor is a divisor applied to the remaining distance between point and field line # I slowly take steps towards the field line and I don't want to overshoot # each time my minimum distance increases, I step back, increase factor, reducing # my next step size, then I try again factor = 1 while repeat: # take a total step along magnetic unit vector # try to take steps near user provided step_size_goal unit_steps = np.abs(scalar//step_size_goal) if unit_steps == 0: unit_steps = 1 # print (unit_steps, scalar/unit_steps) pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time, direction=direction, num_steps=unit_steps, step_size=np.abs(scalar)/unit_steps, scalar=sign) # find closest point along field line trace diff = field_copy - pos_step diff_mag = np.sqrt((diff ** 2).sum(axis=1)) min_idx = np.argmin(diff_mag) if first: # first time in while loop, create some information # make a high resolution field line trace around closest distance # want to take a field step size in each direction # maintain accuracy of high res trace below to be .01 km init = field_copy[min_idx,:] field_copy = full_field_line(init, time, 0., step_size=0.01, max_steps=int(field_step_size/.01), recurse=False) # difference with position diff = field_copy - pos_step diff_mag = np.sqrt((diff ** 2).sum(axis=1)) # find closest one min_idx = np.argmin(diff_mag) # # reduce number of elements we really need to check # field_copy = field_copy[min_idx-100:min_idx+100] # # difference with position # diff = field_copy - pos_step # diff_mag = np.sqrt((diff ** 2).sum(axis=1)) # # find closest one # min_idx = np.argmin(diff_mag) first = False # pull out distance of closest point min_dist = diff_mag[min_idx] # check how the solution is doing # if well, add more distance to the total step and recheck if closer # if worse, step back and try a smaller step if min_dist > last_min_dist: # last step we took made the solution worse if factor > 4: 
# we've tried enough, stop looping repeat = False # undo increment to last total distance scalar = scalar - last_min_dist/(2*factor) # calculate latest position pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time, direction=direction, num_steps=unit_steps, step_size=np.abs(scalar)/unit_steps, scalar=sign) else: # undo increment to last total distance scalar = scalar - last_min_dist/(2*factor) # increase the divisor used to reduce the distance # actually stepped per increment factor = factor + 1. # try a new increment to total distance scalar = scalar + last_min_dist/(2*factor) else: # we did better, move even closer, a fraction of remaining distance # increment scalar, but only by a fraction scalar = scalar + min_dist/(2*factor) # we have a new standard to judge against, set it last_min_dist = min_dist.copy() # return magnitude of step return scalar, pos_step, min_dist
[ "def", "step_until_intersect", "(", "pos", ",", "field_line", ",", "sign", ",", "time", ",", "direction", "=", "None", ",", "step_size_goal", "=", "5.", ",", "field_step_size", "=", "None", ")", ":", "# work on a copy, probably not needed", "field_copy", "=", "f...
Starting at pos, method steps along magnetic unit vector direction towards the supplied field line trace. Determines the distance of closest approach to field line. Routine is used when calculting the mapping of electric fields along magnetic field lines. Voltage remains constant along the field but the distance between field lines does not.This routine may be used to form the last leg when trying to trace out a closed field line loop. Routine will create a high resolution field line trace (.01 km step size) near the location of closest approach to better determine where the intersection occurs. Parameters ---------- pos : array-like X, Y, and Z ECEF locations to start from field_line : array-like (:,3) X, Y, and Z ECEF locations of field line trace, produced by the field_line_trace method. sign : int if 1, move along positive unit vector. Negwtive direction for -1. time : datetime or float Date to perform tracing on (year + day/365 + hours/24. + etc.) Accounts for leap year if datetime provided. direction : string ('meridional', 'zonal', or 'aligned') Which unit vector direction to move slong when trying to intersect with supplied field line trace. See step_along_mag_unit_vector method for more. step_size_goal : float step size goal that method will try to match when stepping towards field line. Returns ------- (float, array, float) Total distance taken along vector direction; the position after taking the step [x, y, z] in ECEF; distance of closest approach from input pos towards the input field line trace.
[ "Starting", "at", "pos", "method", "steps", "along", "magnetic", "unit", "vector", "direction", "towards", "the", "supplied", "field", "line", "trace", ".", "Determines", "the", "distance", "of", "closest", "approach", "to", "field", "line", ".", "Routine", "i...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L634-L769
rstoneback/pysatMagVect
pysatMagVect/_core.py
step_along_mag_unit_vector
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5., step_size=5., scalar=1): """ Move along 'lines' formed by following the magnetic unit vector directions. Moving along the field is effectively the same as a field line trace though extended movement along a field should use the specific field_line_trace method. Parameters ---------- x : ECEF-x (km) Location to step from in ECEF (km). Scalar input. y : ECEF-y (km) Location to step from in ECEF (km). Scalar input. z : ECEF-z (km) Location to step from in ECEF (km). Scalar input. date : list-like of datetimes Date and time for magnetic field direction : string String identifier for which unit vector directino to move along. Supported inputs, 'meridional', 'zonal', 'aligned' num_steps : int Number of steps to take along unit vector direction step_size = float Distance taken for each step (km) scalar : int Scalar modifier for step size distance. Input a -1 to move along negative unit vector direction. Returns ------- np.array [x, y, z] of ECEF location after taking num_steps along direction, each step_size long. """ # set parameters for the field line tracing routines field_step_size = 100. field_max_steps = 1000 field_steps = np.arange(field_max_steps) for i in np.arange(num_steps): # x, y, z in ECEF # convert to geodetic lat, lon, alt = ecef_to_geodetic(x, y, z) # get unit vector directions zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef( [lat], [lon], [alt], [date], steps=field_steps, max_steps=field_max_steps, step_size=field_step_size, ref_height=0.) # pull out the direction we need if direction == 'meridional': ux, uy, uz = mx, my, mz elif direction == 'zonal': ux, uy, uz = zvx, zvy, zvz elif direction == 'aligned': ux, uy, uz = bx, by, bz # take steps along direction x = x + step_size*ux[0]*scalar y = y + step_size*uy[0]*scalar z = z + step_size*uz[0]*scalar return np.array([x, y, z])
python
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5., step_size=5., scalar=1): """ Move along 'lines' formed by following the magnetic unit vector directions. Moving along the field is effectively the same as a field line trace though extended movement along a field should use the specific field_line_trace method. Parameters ---------- x : ECEF-x (km) Location to step from in ECEF (km). Scalar input. y : ECEF-y (km) Location to step from in ECEF (km). Scalar input. z : ECEF-z (km) Location to step from in ECEF (km). Scalar input. date : list-like of datetimes Date and time for magnetic field direction : string String identifier for which unit vector directino to move along. Supported inputs, 'meridional', 'zonal', 'aligned' num_steps : int Number of steps to take along unit vector direction step_size = float Distance taken for each step (km) scalar : int Scalar modifier for step size distance. Input a -1 to move along negative unit vector direction. Returns ------- np.array [x, y, z] of ECEF location after taking num_steps along direction, each step_size long. """ # set parameters for the field line tracing routines field_step_size = 100. field_max_steps = 1000 field_steps = np.arange(field_max_steps) for i in np.arange(num_steps): # x, y, z in ECEF # convert to geodetic lat, lon, alt = ecef_to_geodetic(x, y, z) # get unit vector directions zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef( [lat], [lon], [alt], [date], steps=field_steps, max_steps=field_max_steps, step_size=field_step_size, ref_height=0.) # pull out the direction we need if direction == 'meridional': ux, uy, uz = mx, my, mz elif direction == 'zonal': ux, uy, uz = zvx, zvy, zvz elif direction == 'aligned': ux, uy, uz = bx, by, bz # take steps along direction x = x + step_size*ux[0]*scalar y = y + step_size*uy[0]*scalar z = z + step_size*uz[0]*scalar return np.array([x, y, z])
[ "def", "step_along_mag_unit_vector", "(", "x", ",", "y", ",", "z", ",", "date", ",", "direction", "=", "None", ",", "num_steps", "=", "5.", ",", "step_size", "=", "5.", ",", "scalar", "=", "1", ")", ":", "# set parameters for the field line tracing routines", ...
Move along 'lines' formed by following the magnetic unit vector directions. Moving along the field is effectively the same as a field line trace though extended movement along a field should use the specific field_line_trace method. Parameters ---------- x : ECEF-x (km) Location to step from in ECEF (km). Scalar input. y : ECEF-y (km) Location to step from in ECEF (km). Scalar input. z : ECEF-z (km) Location to step from in ECEF (km). Scalar input. date : list-like of datetimes Date and time for magnetic field direction : string String identifier for which unit vector directino to move along. Supported inputs, 'meridional', 'zonal', 'aligned' num_steps : int Number of steps to take along unit vector direction step_size = float Distance taken for each step (km) scalar : int Scalar modifier for step size distance. Input a -1 to move along negative unit vector direction. Returns ------- np.array [x, y, z] of ECEF location after taking num_steps along direction, each step_size long.
[ "Move", "along", "lines", "formed", "by", "following", "the", "magnetic", "unit", "vector", "directions", "." ]
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L772-L841
rstoneback/pysatMagVect
pysatMagVect/_core.py
apex_location_info
def apex_location_info(glats, glons, alts, dates): """Determine apex location for the field line passing through input point. Employs a two stage method. A broad step (100 km) field line trace spanning Northern/Southern footpoints is used to find the location with the largest geodetic (WGS84) height. A higher resolution trace (.1 km) is then used to get a better fix on this location. Greatest geodetic height is once again selected. Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars Returns ------- (float, float, float, float, float, float) ECEF X (km), ECEF Y (km), ECEF Z (km), Geodetic Latitude (degrees), Geodetic Longitude (degrees), Geodetic Altitude (km) """ # use input location and convert to ECEF ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts) # prepare parameters for field line trace step_size = 100. 
max_steps = 1000 steps = np.arange(max_steps) # high resolution trace parameters fine_step_size = .01 fine_max_steps = int(step_size/fine_step_size)+10 fine_steps = np.arange(fine_max_steps) # prepare output out_x = [] out_y = [] out_z = [] for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, glats, glons, alts, dates): # to get the apex location we need to do a field line trace # then find the highest point trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0., steps=steps, step_size=step_size, max_steps=max_steps) # convert all locations to geodetic coordinates tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2]) # determine location that is highest with respect to the geodetic Earth max_idx = np.argmax(talt) # repeat using a high resolution trace one big step size each # direction around identified max # recurse False ensures only max_steps are taken trace = full_field_line(trace[max_idx,:], date, 0., steps=fine_steps, step_size=fine_step_size, max_steps=fine_max_steps, recurse=False) # convert all locations to geodetic coordinates tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2]) # determine location that is highest with respect to the geodetic Earth max_idx = np.argmax(talt) # collect outputs out_x.append(trace[max_idx,0]) out_y.append(trace[max_idx,1]) out_z.append(trace[max_idx,2]) out_x = np.array(out_x) out_y = np.array(out_y) out_z = np.array(out_z) glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z) return out_x, out_y, out_z, glat, glon, alt
python
def apex_location_info(glats, glons, alts, dates): """Determine apex location for the field line passing through input point. Employs a two stage method. A broad step (100 km) field line trace spanning Northern/Southern footpoints is used to find the location with the largest geodetic (WGS84) height. A higher resolution trace (.1 km) is then used to get a better fix on this location. Greatest geodetic height is once again selected. Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars Returns ------- (float, float, float, float, float, float) ECEF X (km), ECEF Y (km), ECEF Z (km), Geodetic Latitude (degrees), Geodetic Longitude (degrees), Geodetic Altitude (km) """ # use input location and convert to ECEF ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts) # prepare parameters for field line trace step_size = 100. 
max_steps = 1000 steps = np.arange(max_steps) # high resolution trace parameters fine_step_size = .01 fine_max_steps = int(step_size/fine_step_size)+10 fine_steps = np.arange(fine_max_steps) # prepare output out_x = [] out_y = [] out_z = [] for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, glats, glons, alts, dates): # to get the apex location we need to do a field line trace # then find the highest point trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0., steps=steps, step_size=step_size, max_steps=max_steps) # convert all locations to geodetic coordinates tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2]) # determine location that is highest with respect to the geodetic Earth max_idx = np.argmax(talt) # repeat using a high resolution trace one big step size each # direction around identified max # recurse False ensures only max_steps are taken trace = full_field_line(trace[max_idx,:], date, 0., steps=fine_steps, step_size=fine_step_size, max_steps=fine_max_steps, recurse=False) # convert all locations to geodetic coordinates tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2]) # determine location that is highest with respect to the geodetic Earth max_idx = np.argmax(talt) # collect outputs out_x.append(trace[max_idx,0]) out_y.append(trace[max_idx,1]) out_z.append(trace[max_idx,2]) out_x = np.array(out_x) out_y = np.array(out_y) out_z = np.array(out_z) glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z) return out_x, out_y, out_z, glat, glon, alt
[ "def", "apex_location_info", "(", "glats", ",", "glons", ",", "alts", ",", "dates", ")", ":", "# use input location and convert to ECEF", "ecef_xs", ",", "ecef_ys", ",", "ecef_zs", "=", "geodetic_to_ecef", "(", "glats", ",", "glons", ",", "alts", ")", "# prepare...
Determine apex location for the field line passing through input point. Employs a two stage method. A broad step (100 km) field line trace spanning Northern/Southern footpoints is used to find the location with the largest geodetic (WGS84) height. A higher resolution trace (.1 km) is then used to get a better fix on this location. Greatest geodetic height is once again selected. Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars Returns ------- (float, float, float, float, float, float) ECEF X (km), ECEF Y (km), ECEF Z (km), Geodetic Latitude (degrees), Geodetic Longitude (degrees), Geodetic Altitude (km)
[ "Determine", "apex", "location", "for", "the", "field", "line", "passing", "through", "input", "point", ".", "Employs", "a", "two", "stage", "method", ".", "A", "broad", "step", "(", "100", "km", ")", "field", "line", "trace", "spanning", "Northern", "/", ...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L844-L924
rstoneback/pysatMagVect
pysatMagVect/_core.py
closed_loop_edge_lengths_via_footpoint
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction, vector_direction, step_size=None, max_steps=None, edge_length=25., edge_steps=5): """ Forms closed loop integration along mag field, satrting at input points and goes through footpoint. At footpoint, steps along vector direction in both positive and negative directions, then traces back to opposite footpoint. Back at input location, steps toward those new field lines (edge_length) along vector direction until hitting distance of minimum approach. Loops don't always close. Returns total edge distance that goes through input location, along with the distances of closest approach. Note ---- vector direction refers to the magnetic unit vector direction Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars direction : string 'north' or 'south' for tracing through northern or southern footpoint locations vector_direction : string 'meridional' or 'zonal' unit vector directions step_size : float (km) Step size (km) used for field line integration max_steps : int Number of steps taken for field line integration edge_length : float (km) Half of total edge length (step) taken at footpoint location. edge_length step in both positive and negative directions. edge_steps : int Number of steps taken from footpoint towards new field line in a given direction (positive/negative) along unit vector Returns ------- np.array, np.array, np.array A closed loop field line path through input location and footpoint in northern/southern hemisphere and back is taken. The return edge length through input location is provided. The distances of closest approach for the positive step along vector direction, and the negative step are returned. 
""" if step_size is None: step_size = 100. if max_steps is None: max_steps = 1000 steps = np.arange(max_steps) if direction == 'south': direct = -1 elif direction == 'north': direct = 1 # use spacecraft location to get ECEF ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts) # prepare output full_local_step = [] min_distance_plus = [] min_distance_minus = [] for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, glats, glons, alts, dates): # going to try and form close loops via field line integration # start at location of interest, map down to northern or southern # footpoints then take symmetric steps along meridional and zonal # directions and trace back from location of interest, step along # field line directions until we intersect or hit the distance of # closest approach to the return field line with the known # distances of footpoint steps, and the closet approach distance # we can determine the scalar mapping of one location to another yr, doy = pysat.utils.getyrdoy(date) double_date = float(yr) + float(doy) / 366. 
# print (glat, glon, alt) # trace to footpoint, starting with input location sc_root = np.array([ecef_x, ecef_y, ecef_z]) trace = field_line_trace(sc_root, double_date, direct, 120., steps=steps, step_size=step_size, max_steps=max_steps) # pull out footpoint location ftpnt = trace[-1, :] ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt) # take step from footpoint along + vector direction plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2], date, direction=vector_direction, num_steps=edge_steps, step_size=edge_length/edge_steps) # trace this back to other footpoint other_plus = field_line_trace(plus_step, double_date, -direct, 0., steps=steps, step_size=step_size, max_steps=max_steps) # take half step from first footpoint along - vector direction minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2], date, direction=vector_direction, scalar=-1, num_steps=edge_steps, step_size=edge_length/edge_steps) # trace this back to other footpoint other_minus = field_line_trace(minus_step, double_date, -direct, 0., steps=steps, step_size=step_size, max_steps=max_steps) # need to determine where the intersection of field line coming back from # footpoint through postive vector direction step and back # in relation to the vector direction from the s/c location. pos_edge_length, _, mind_pos = step_until_intersect(sc_root, other_plus, 1, date, direction=vector_direction, field_step_size=step_size, step_size_goal=edge_length/edge_steps) # take half step from S/C along - vector direction minus_edge_length, _, mind_minus = step_until_intersect(sc_root, other_minus, -1, date, direction=vector_direction, field_step_size=step_size, step_size_goal=edge_length/edge_steps) # collect outputs full_local_step.append(pos_edge_length + minus_edge_length) min_distance_plus.append(mind_pos) min_distance_minus.append(mind_minus) return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
python
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction, vector_direction, step_size=None, max_steps=None, edge_length=25., edge_steps=5): """ Forms closed loop integration along mag field, satrting at input points and goes through footpoint. At footpoint, steps along vector direction in both positive and negative directions, then traces back to opposite footpoint. Back at input location, steps toward those new field lines (edge_length) along vector direction until hitting distance of minimum approach. Loops don't always close. Returns total edge distance that goes through input location, along with the distances of closest approach. Note ---- vector direction refers to the magnetic unit vector direction Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars direction : string 'north' or 'south' for tracing through northern or southern footpoint locations vector_direction : string 'meridional' or 'zonal' unit vector directions step_size : float (km) Step size (km) used for field line integration max_steps : int Number of steps taken for field line integration edge_length : float (km) Half of total edge length (step) taken at footpoint location. edge_length step in both positive and negative directions. edge_steps : int Number of steps taken from footpoint towards new field line in a given direction (positive/negative) along unit vector Returns ------- np.array, np.array, np.array A closed loop field line path through input location and footpoint in northern/southern hemisphere and back is taken. The return edge length through input location is provided. The distances of closest approach for the positive step along vector direction, and the negative step are returned. 
""" if step_size is None: step_size = 100. if max_steps is None: max_steps = 1000 steps = np.arange(max_steps) if direction == 'south': direct = -1 elif direction == 'north': direct = 1 # use spacecraft location to get ECEF ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts) # prepare output full_local_step = [] min_distance_plus = [] min_distance_minus = [] for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, glats, glons, alts, dates): # going to try and form close loops via field line integration # start at location of interest, map down to northern or southern # footpoints then take symmetric steps along meridional and zonal # directions and trace back from location of interest, step along # field line directions until we intersect or hit the distance of # closest approach to the return field line with the known # distances of footpoint steps, and the closet approach distance # we can determine the scalar mapping of one location to another yr, doy = pysat.utils.getyrdoy(date) double_date = float(yr) + float(doy) / 366. 
# print (glat, glon, alt) # trace to footpoint, starting with input location sc_root = np.array([ecef_x, ecef_y, ecef_z]) trace = field_line_trace(sc_root, double_date, direct, 120., steps=steps, step_size=step_size, max_steps=max_steps) # pull out footpoint location ftpnt = trace[-1, :] ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt) # take step from footpoint along + vector direction plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2], date, direction=vector_direction, num_steps=edge_steps, step_size=edge_length/edge_steps) # trace this back to other footpoint other_plus = field_line_trace(plus_step, double_date, -direct, 0., steps=steps, step_size=step_size, max_steps=max_steps) # take half step from first footpoint along - vector direction minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2], date, direction=vector_direction, scalar=-1, num_steps=edge_steps, step_size=edge_length/edge_steps) # trace this back to other footpoint other_minus = field_line_trace(minus_step, double_date, -direct, 0., steps=steps, step_size=step_size, max_steps=max_steps) # need to determine where the intersection of field line coming back from # footpoint through postive vector direction step and back # in relation to the vector direction from the s/c location. pos_edge_length, _, mind_pos = step_until_intersect(sc_root, other_plus, 1, date, direction=vector_direction, field_step_size=step_size, step_size_goal=edge_length/edge_steps) # take half step from S/C along - vector direction minus_edge_length, _, mind_minus = step_until_intersect(sc_root, other_minus, -1, date, direction=vector_direction, field_step_size=step_size, step_size_goal=edge_length/edge_steps) # collect outputs full_local_step.append(pos_edge_length + minus_edge_length) min_distance_plus.append(mind_pos) min_distance_minus.append(mind_minus) return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
[ "def", "closed_loop_edge_lengths_via_footpoint", "(", "glats", ",", "glons", ",", "alts", ",", "dates", ",", "direction", ",", "vector_direction", ",", "step_size", "=", "None", ",", "max_steps", "=", "None", ",", "edge_length", "=", "25.", ",", "edge_steps", ...
Forms closed loop integration along mag field, satrting at input points and goes through footpoint. At footpoint, steps along vector direction in both positive and negative directions, then traces back to opposite footpoint. Back at input location, steps toward those new field lines (edge_length) along vector direction until hitting distance of minimum approach. Loops don't always close. Returns total edge distance that goes through input location, along with the distances of closest approach. Note ---- vector direction refers to the magnetic unit vector direction Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars direction : string 'north' or 'south' for tracing through northern or southern footpoint locations vector_direction : string 'meridional' or 'zonal' unit vector directions step_size : float (km) Step size (km) used for field line integration max_steps : int Number of steps taken for field line integration edge_length : float (km) Half of total edge length (step) taken at footpoint location. edge_length step in both positive and negative directions. edge_steps : int Number of steps taken from footpoint towards new field line in a given direction (positive/negative) along unit vector Returns ------- np.array, np.array, np.array A closed loop field line path through input location and footpoint in northern/southern hemisphere and back is taken. The return edge length through input location is provided. The distances of closest approach for the positive step along vector direction, and the negative step are returned.
[ "Forms", "closed", "loop", "integration", "along", "mag", "field", "satrting", "at", "input", "points", "and", "goes", "through", "footpoint", ".", "At", "footpoint", "steps", "along", "vector", "direction", "in", "both", "positive", "and", "negative", "directio...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L927-L1072
rstoneback/pysatMagVect
pysatMagVect/_core.py
closed_loop_edge_lengths_via_equator
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates, vector_direction, edge_length=25., edge_steps=5): """ Calculates the distance between apex locations mapping to the input location. Using the input location, the apex location is calculated. Also from the input location, a step along both the positive and negative vector_directions is taken, and the apex locations for those points are calculated. The difference in position between these apex locations is the total centered distance between magnetic field lines at the magnetic apex when starting locally with a field line half distance of edge_length. An alternative method has been implemented, then commented out. This technique takes multiple steps from the origin apex towards the apex locations identified along vector_direction. In principle this is more accurate but more computationally intensive, similar to the footpoint model. A comparison is planned. Note ---- vector direction refers to the magnetic unit vector direction Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars vector_direction : string 'meridional' or 'zonal' unit vector directions step_size : float (km) Step size (km) used for field line integration max_steps : int Number of steps taken for field line integration edge_length : float (km) Half of total edge length (step) taken at footpoint location. edge_length step in both positive and negative directions. edge_steps : int Number of steps taken from footpoint towards new field line in a given direction (positive/negative) along unit vector Returns ------- np.array, ### np.array, np.array The change in field line apex locations. ## Pending ## The return edge length through input location is provided. 
## Pending ## The distances of closest approach for the positive step along vector direction, and the negative step are returned. """ # use spacecraft location to get ECEF ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts) # prepare output apex_edge_length = [] # outputs for alternative calculation full_local_step = [] min_distance_plus = [] min_distance_minus = [] for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, glats, glons, alts, dates): yr, doy = pysat.utils.getyrdoy(date) double_date = float(yr) + float(doy) / 366. # get location of apex for s/c field line apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info( [glat], [glon], [alt], [date]) # apex in ecef (maps to input location) apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]]) # take step from s/c along + vector direction # then get the apex location plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date, direction=vector_direction, num_steps=edge_steps, step_size=edge_length/edge_steps) plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus) plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \ apex_location_info([plus_lat], [plus_lon], [plus_alt], [date]) # plus apex location in ECEF plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]]) # take half step from s/c along - vector direction # then get the apex location minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date, direction=vector_direction, scalar=-1, num_steps=edge_steps, step_size=edge_length/edge_steps) minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus) minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \ apex_location_info([minus_lat], [minus_lon], [minus_alt], [date]) minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]]) # take difference in apex locations apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 + 
(plus_apex_y[0]-minus_apex_y[0])**2 + (plus_apex_z[0]-minus_apex_z[0])**2)) # # take an alternative path to calculation # # do field line trace around pos and neg apexes # # then do intersection with field line projection thing # # # do a short centered field line trace around plus apex location # other_trace = full_field_line(plus_apex_root, double_date, 0., # step_size=1., # max_steps=10, # recurse=False) # # need to determine where the intersection of apex field line # # in relation to the vector direction from the s/c field apex location. # pos_edge_length, _, mind_pos = step_until_intersect(apex_root, # other_trace, # 1, date, # direction=vector_direction, # field_step_size=1., # step_size_goal=edge_length/edge_steps) # # do a short centered field line trace around 'minus' apex location # other_trace = full_field_line(minus_apex_root, double_date, 0., # step_size=1., # max_steps=10, # recurse=False) # # need to determine where the intersection of apex field line # # in relation to the vector direction from the s/c field apex location. # minus_edge_length, _, mind_minus = step_until_intersect(apex_root, # other_trace, # -1, date, # direction=vector_direction, # field_step_size=1., # step_size_goal=edge_length/edge_steps) # full_local_step.append(pos_edge_length + minus_edge_length) # min_distance_plus.append(mind_pos) # min_distance_minus.append(mind_minus) # still sorting out alternative option for this calculation # commented code is 'good' as far as the plan goes # takes more time, so I haven't tested one vs the other yet # having two live methods can lead to problems # THIS IS A TODO (sort it out) return np.array(apex_edge_length)
python
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates, vector_direction, edge_length=25., edge_steps=5): """ Calculates the distance between apex locations mapping to the input location. Using the input location, the apex location is calculated. Also from the input location, a step along both the positive and negative vector_directions is taken, and the apex locations for those points are calculated. The difference in position between these apex locations is the total centered distance between magnetic field lines at the magnetic apex when starting locally with a field line half distance of edge_length. An alternative method has been implemented, then commented out. This technique takes multiple steps from the origin apex towards the apex locations identified along vector_direction. In principle this is more accurate but more computationally intensive, similar to the footpoint model. A comparison is planned. Note ---- vector direction refers to the magnetic unit vector direction Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars vector_direction : string 'meridional' or 'zonal' unit vector directions step_size : float (km) Step size (km) used for field line integration max_steps : int Number of steps taken for field line integration edge_length : float (km) Half of total edge length (step) taken at footpoint location. edge_length step in both positive and negative directions. edge_steps : int Number of steps taken from footpoint towards new field line in a given direction (positive/negative) along unit vector Returns ------- np.array, ### np.array, np.array The change in field line apex locations. ## Pending ## The return edge length through input location is provided. 
## Pending ## The distances of closest approach for the positive step along vector direction, and the negative step are returned. """ # use spacecraft location to get ECEF ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts) # prepare output apex_edge_length = [] # outputs for alternative calculation full_local_step = [] min_distance_plus = [] min_distance_minus = [] for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, glats, glons, alts, dates): yr, doy = pysat.utils.getyrdoy(date) double_date = float(yr) + float(doy) / 366. # get location of apex for s/c field line apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info( [glat], [glon], [alt], [date]) # apex in ecef (maps to input location) apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]]) # take step from s/c along + vector direction # then get the apex location plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date, direction=vector_direction, num_steps=edge_steps, step_size=edge_length/edge_steps) plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus) plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \ apex_location_info([plus_lat], [plus_lon], [plus_alt], [date]) # plus apex location in ECEF plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]]) # take half step from s/c along - vector direction # then get the apex location minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date, direction=vector_direction, scalar=-1, num_steps=edge_steps, step_size=edge_length/edge_steps) minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus) minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \ apex_location_info([minus_lat], [minus_lon], [minus_alt], [date]) minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]]) # take difference in apex locations apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 + 
(plus_apex_y[0]-minus_apex_y[0])**2 + (plus_apex_z[0]-minus_apex_z[0])**2)) # # take an alternative path to calculation # # do field line trace around pos and neg apexes # # then do intersection with field line projection thing # # # do a short centered field line trace around plus apex location # other_trace = full_field_line(plus_apex_root, double_date, 0., # step_size=1., # max_steps=10, # recurse=False) # # need to determine where the intersection of apex field line # # in relation to the vector direction from the s/c field apex location. # pos_edge_length, _, mind_pos = step_until_intersect(apex_root, # other_trace, # 1, date, # direction=vector_direction, # field_step_size=1., # step_size_goal=edge_length/edge_steps) # # do a short centered field line trace around 'minus' apex location # other_trace = full_field_line(minus_apex_root, double_date, 0., # step_size=1., # max_steps=10, # recurse=False) # # need to determine where the intersection of apex field line # # in relation to the vector direction from the s/c field apex location. # minus_edge_length, _, mind_minus = step_until_intersect(apex_root, # other_trace, # -1, date, # direction=vector_direction, # field_step_size=1., # step_size_goal=edge_length/edge_steps) # full_local_step.append(pos_edge_length + minus_edge_length) # min_distance_plus.append(mind_pos) # min_distance_minus.append(mind_minus) # still sorting out alternative option for this calculation # commented code is 'good' as far as the plan goes # takes more time, so I haven't tested one vs the other yet # having two live methods can lead to problems # THIS IS A TODO (sort it out) return np.array(apex_edge_length)
[ "def", "closed_loop_edge_lengths_via_equator", "(", "glats", ",", "glons", ",", "alts", ",", "dates", ",", "vector_direction", ",", "edge_length", "=", "25.", ",", "edge_steps", "=", "5", ")", ":", "# use spacecraft location to get ECEF", "ecef_xs", ",", "ecef_ys", ...
Calculates the distance between apex locations mapping to the input location. Using the input location, the apex location is calculated. Also from the input location, a step along both the positive and negative vector_directions is taken, and the apex locations for those points are calculated. The difference in position between these apex locations is the total centered distance between magnetic field lines at the magnetic apex when starting locally with a field line half distance of edge_length. An alternative method has been implemented, then commented out. This technique takes multiple steps from the origin apex towards the apex locations identified along vector_direction. In principle this is more accurate but more computationally intensive, similar to the footpoint model. A comparison is planned. Note ---- vector direction refers to the magnetic unit vector direction Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars vector_direction : string 'meridional' or 'zonal' unit vector directions step_size : float (km) Step size (km) used for field line integration max_steps : int Number of steps taken for field line integration edge_length : float (km) Half of total edge length (step) taken at footpoint location. edge_length step in both positive and negative directions. edge_steps : int Number of steps taken from footpoint towards new field line in a given direction (positive/negative) along unit vector Returns ------- np.array, ### np.array, np.array The change in field line apex locations. ## Pending ## The return edge length through input location is provided. ## Pending ## The distances of closest approach for the positive step along vector direction, and the negative step are returned.
[ "Calculates", "the", "distance", "between", "apex", "locations", "mapping", "to", "the", "input", "location", ".", "Using", "the", "input", "location", "the", "apex", "location", "is", "calculated", ".", "Also", "from", "the", "input", "location", "a", "step",...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L1075-L1227
rstoneback/pysatMagVect
pysatMagVect/_core.py
scalars_for_mapping_ion_drifts
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None, max_steps=None, e_field_scaling_only=False): """ Calculates scalars for translating ion motions at position glat, glon, and alt, for date, to the footpoints of the field line as well as at the magnetic equator. All inputs are assumed to be 1D arrays. Note ---- Directions refer to the ion motion direction e.g. the zonal scalar applies to zonal ion motions (meridional E field assuming ExB ion motion) Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars e_field_scaling_only : boolean (False) If True, method only calculates the electric field scalar, ignoring changes in magnitude of B. Note ion velocity related to E/B. Returns ------- dict array-like of scalars for translating ion drifts. Keys are, 'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly for southern locations. 'equator_mer_drifts_scalar' and 'equator_zonal_drifts_scalar' cover the mappings to the equator. """ if step_size is None: step_size = 100. 
if max_steps is None: max_steps = 1000 steps = np.arange(max_steps) # use spacecraft location to get ECEF ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts) # prepare output eq_zon_drifts_scalar = [] eq_mer_drifts_scalar = [] # magnetic field info north_mag_scalar = [] south_mag_scalar = [] eq_mag_scalar = [] out = {} # meridional e-field scalar map, can also be # zonal ion drift scalar map # print ('Starting Northern') north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, 'north', 'meridional', step_size=step_size, max_steps=max_steps, edge_length=25., edge_steps=5) north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, 'north', 'zonal', step_size=step_size, max_steps=max_steps, edge_length=25., edge_steps=5) # print ('Starting Southern') south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, 'south', 'meridional', step_size=step_size, max_steps=max_steps, edge_length=25., edge_steps=5) south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, 'south', 'zonal', step_size=step_size, max_steps=max_steps, edge_length=25., edge_steps=5) # print ('Starting Equatorial') # , step_zon_apex2, mind_plus, mind_minus eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates, 'meridional', edge_length=25., edge_steps=5) # , step_mer_apex2, mind_plus, mind_minus eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates, 'zonal', edge_length=25., edge_steps=5) # print ('Done with core') north_zon_drifts_scalar = north_zon_drifts_scalar/50. south_zon_drifts_scalar = south_zon_drifts_scalar/50. north_mer_drifts_scalar = north_mer_drifts_scalar/50. south_mer_drifts_scalar = south_mer_drifts_scalar/50. 
# equatorial eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar if e_field_scaling_only: # prepare output out['north_mer_fields_scalar'] = north_zon_drifts_scalar out['south_mer_fields_scalar'] = south_zon_drifts_scalar out['north_zon_fields_scalar'] = north_mer_drifts_scalar out['south_zon_fields_scalar'] = south_mer_drifts_scalar out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar else: # figure out scaling for drifts based upon change in magnetic field # strength for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, glats, glons, alts, dates): yr, doy = pysat.utils.getyrdoy(date) double_date = float(yr) + float(doy) / 366. # get location of apex for s/c field line apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info( [glat], [glon], [alt], [date]) # trace to northern footpoint sc_root = np.array([ecef_x, ecef_y, ecef_z]) trace_north = field_line_trace(sc_root, double_date, 1., 120., steps=steps, step_size=step_size, max_steps=max_steps) # southern tracing trace_south = field_line_trace(sc_root, double_date, -1., 120., steps=steps, step_size=step_size, max_steps=max_steps) # footpoint location north_ftpnt = trace_north[-1, :] nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt) south_ftpnt = trace_south[-1, :] sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt) # scalar for the northern footpoint electric field based on distances # for drift also need to include the magnetic field, drift = E/B tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt, np.deg2rad(90.-glat), np.deg2rad(glon)) # get mag field and scalar for northern footpoint tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt, np.deg2rad(90.-nft_glat), np.deg2rad(nft_glon)) north_mag_scalar.append(b_sc/b_nft) # equatorial values tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt, 
np.deg2rad(90.-apex_lat), np.deg2rad(apex_lon)) eq_mag_scalar.append(b_sc/b_eq) # scalar for the southern footpoint tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt, np.deg2rad(90.-sft_glat), np.deg2rad(sft_glon)) south_mag_scalar.append(b_sc/b_sft) # make E-Field scalars to drifts # lists to arrays north_mag_scalar = np.array(north_mag_scalar) south_mag_scalar = np.array(south_mag_scalar) eq_mag_scalar = np.array(eq_mag_scalar) # apply to electric field scaling to get ion drift values north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar # equatorial eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar # output out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar out['north_mer_drifts_scalar'] = north_mer_drifts_scalar out['south_mer_drifts_scalar'] = south_mer_drifts_scalar out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar return out
python
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None, max_steps=None, e_field_scaling_only=False): """ Calculates scalars for translating ion motions at position glat, glon, and alt, for date, to the footpoints of the field line as well as at the magnetic equator. All inputs are assumed to be 1D arrays. Note ---- Directions refer to the ion motion direction e.g. the zonal scalar applies to zonal ion motions (meridional E field assuming ExB ion motion) Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars e_field_scaling_only : boolean (False) If True, method only calculates the electric field scalar, ignoring changes in magnitude of B. Note ion velocity related to E/B. Returns ------- dict array-like of scalars for translating ion drifts. Keys are, 'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly for southern locations. 'equator_mer_drifts_scalar' and 'equator_zonal_drifts_scalar' cover the mappings to the equator. """ if step_size is None: step_size = 100. 
if max_steps is None: max_steps = 1000 steps = np.arange(max_steps) # use spacecraft location to get ECEF ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts) # prepare output eq_zon_drifts_scalar = [] eq_mer_drifts_scalar = [] # magnetic field info north_mag_scalar = [] south_mag_scalar = [] eq_mag_scalar = [] out = {} # meridional e-field scalar map, can also be # zonal ion drift scalar map # print ('Starting Northern') north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, 'north', 'meridional', step_size=step_size, max_steps=max_steps, edge_length=25., edge_steps=5) north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, 'north', 'zonal', step_size=step_size, max_steps=max_steps, edge_length=25., edge_steps=5) # print ('Starting Southern') south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, 'south', 'meridional', step_size=step_size, max_steps=max_steps, edge_length=25., edge_steps=5) south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, 'south', 'zonal', step_size=step_size, max_steps=max_steps, edge_length=25., edge_steps=5) # print ('Starting Equatorial') # , step_zon_apex2, mind_plus, mind_minus eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates, 'meridional', edge_length=25., edge_steps=5) # , step_mer_apex2, mind_plus, mind_minus eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates, 'zonal', edge_length=25., edge_steps=5) # print ('Done with core') north_zon_drifts_scalar = north_zon_drifts_scalar/50. south_zon_drifts_scalar = south_zon_drifts_scalar/50. north_mer_drifts_scalar = north_mer_drifts_scalar/50. south_mer_drifts_scalar = south_mer_drifts_scalar/50. 
# equatorial eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar if e_field_scaling_only: # prepare output out['north_mer_fields_scalar'] = north_zon_drifts_scalar out['south_mer_fields_scalar'] = south_zon_drifts_scalar out['north_zon_fields_scalar'] = north_mer_drifts_scalar out['south_zon_fields_scalar'] = south_mer_drifts_scalar out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar else: # figure out scaling for drifts based upon change in magnetic field # strength for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, glats, glons, alts, dates): yr, doy = pysat.utils.getyrdoy(date) double_date = float(yr) + float(doy) / 366. # get location of apex for s/c field line apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info( [glat], [glon], [alt], [date]) # trace to northern footpoint sc_root = np.array([ecef_x, ecef_y, ecef_z]) trace_north = field_line_trace(sc_root, double_date, 1., 120., steps=steps, step_size=step_size, max_steps=max_steps) # southern tracing trace_south = field_line_trace(sc_root, double_date, -1., 120., steps=steps, step_size=step_size, max_steps=max_steps) # footpoint location north_ftpnt = trace_north[-1, :] nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt) south_ftpnt = trace_south[-1, :] sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt) # scalar for the northern footpoint electric field based on distances # for drift also need to include the magnetic field, drift = E/B tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt, np.deg2rad(90.-glat), np.deg2rad(glon)) # get mag field and scalar for northern footpoint tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt, np.deg2rad(90.-nft_glat), np.deg2rad(nft_glon)) north_mag_scalar.append(b_sc/b_nft) # equatorial values tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt, 
np.deg2rad(90.-apex_lat), np.deg2rad(apex_lon)) eq_mag_scalar.append(b_sc/b_eq) # scalar for the southern footpoint tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt, np.deg2rad(90.-sft_glat), np.deg2rad(sft_glon)) south_mag_scalar.append(b_sc/b_sft) # make E-Field scalars to drifts # lists to arrays north_mag_scalar = np.array(north_mag_scalar) south_mag_scalar = np.array(south_mag_scalar) eq_mag_scalar = np.array(eq_mag_scalar) # apply to electric field scaling to get ion drift values north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar # equatorial eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar # output out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar out['north_mer_drifts_scalar'] = north_mer_drifts_scalar out['south_mer_drifts_scalar'] = south_mer_drifts_scalar out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar return out
[ "def", "scalars_for_mapping_ion_drifts", "(", "glats", ",", "glons", ",", "alts", ",", "dates", ",", "step_size", "=", "None", ",", "max_steps", "=", "None", ",", "e_field_scaling_only", "=", "False", ")", ":", "if", "step_size", "is", "None", ":", "step_siz...
Calculates scalars for translating ion motions at position glat, glon, and alt, for date, to the footpoints of the field line as well as at the magnetic equator. All inputs are assumed to be 1D arrays. Note ---- Directions refer to the ion motion direction e.g. the zonal scalar applies to zonal ion motions (meridional E field assuming ExB ion motion) Parameters ---------- glats : list-like of floats (degrees) Geodetic (WGS84) latitude glons : list-like of floats (degrees) Geodetic (WGS84) longitude alts : list-like of floats (km) Geodetic (WGS84) altitude, height above surface dates : list-like of datetimes Date and time for determination of scalars e_field_scaling_only : boolean (False) If True, method only calculates the electric field scalar, ignoring changes in magnitude of B. Note ion velocity related to E/B. Returns ------- dict array-like of scalars for translating ion drifts. Keys are, 'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly for southern locations. 'equator_mer_drifts_scalar' and 'equator_zonal_drifts_scalar' cover the mappings to the equator.
[ "Calculates", "scalars", "for", "translating", "ion", "motions", "at", "position", "glat", "glon", "and", "alt", "for", "date", "to", "the", "footpoints", "of", "the", "field", "line", "as", "well", "as", "at", "the", "magnetic", "equator", ".", "All", "in...
train
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L1241-L1431
natea/django-deployer
django_deployer/main.py
add_fabfile
def add_fabfile(): """ Copy the base fabfile.py to the current working directory. """ fabfile_src = os.path.join(PACKAGE_ROOT, 'fabfile.py') fabfile_dest = os.path.join(os.getcwd(), 'fabfile_deployer.py') if os.path.exists(fabfile_dest): print "`fabfile.py` exists in the current directory. " \ "Please remove or rename it and try again." return shutil.copyfile(fabfile_src, fabfile_dest)
python
def add_fabfile(): """ Copy the base fabfile.py to the current working directory. """ fabfile_src = os.path.join(PACKAGE_ROOT, 'fabfile.py') fabfile_dest = os.path.join(os.getcwd(), 'fabfile_deployer.py') if os.path.exists(fabfile_dest): print "`fabfile.py` exists in the current directory. " \ "Please remove or rename it and try again." return shutil.copyfile(fabfile_src, fabfile_dest)
[ "def", "add_fabfile", "(", ")", ":", "fabfile_src", "=", "os", ".", "path", ".", "join", "(", "PACKAGE_ROOT", ",", "'fabfile.py'", ")", "fabfile_dest", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'fabfile_deployer.py'",...
Copy the base fabfile.py to the current working directory.
[ "Copy", "the", "base", "fabfile", ".", "py", "to", "the", "current", "working", "directory", "." ]
train
https://github.com/natea/django-deployer/blob/5ce7d972db2f8500ec53ad89e7eb312d3360d074/django_deployer/main.py#L11-L23
gmr/infoblox
infoblox/record.py
Record.delete
def delete(self): """Remove the item from the infoblox server. :rtype: bool :raises: AssertionError :raises: ValueError :raises: infoblox.exceptions.ProtocolError """ if not self._ref: raise ValueError('Object has no reference id for deletion') if 'save' not in self._supports: raise AssertionError('Can not save this object type') response = self._session.delete(self._path) if response.status_code == 200: self._ref = None self.clear() return True try: error = response.json() raise exceptions.ProtocolError(error['text']) except ValueError: raise exceptions.ProtocolError(response.content)
python
def delete(self): """Remove the item from the infoblox server. :rtype: bool :raises: AssertionError :raises: ValueError :raises: infoblox.exceptions.ProtocolError """ if not self._ref: raise ValueError('Object has no reference id for deletion') if 'save' not in self._supports: raise AssertionError('Can not save this object type') response = self._session.delete(self._path) if response.status_code == 200: self._ref = None self.clear() return True try: error = response.json() raise exceptions.ProtocolError(error['text']) except ValueError: raise exceptions.ProtocolError(response.content)
[ "def", "delete", "(", "self", ")", ":", "if", "not", "self", ".", "_ref", ":", "raise", "ValueError", "(", "'Object has no reference id for deletion'", ")", "if", "'save'", "not", "in", "self", ".", "_supports", ":", "raise", "AssertionError", "(", "'Can not s...
Remove the item from the infoblox server. :rtype: bool :raises: AssertionError :raises: ValueError :raises: infoblox.exceptions.ProtocolError
[ "Remove", "the", "item", "from", "the", "infoblox", "server", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L51-L73
gmr/infoblox
infoblox/record.py
Record.fetch
def fetch(self): """Attempt to fetch the object from the Infoblox device. If successful the object will be updated and the method will return True. :rtype: bool :raises: infoblox.exceptions.ProtocolError """ LOGGER.debug('Fetching %s, %s', self._path, self._search_values) response = self._session.get(self._path, self._search_values, {'_return_fields': self._return_fields}) if response.status_code == 200: values = response.json() self._assign(values) return bool(values) elif response.status_code >= 400: try: error = response.json() raise exceptions.ProtocolError(error['text']) except ValueError: raise exceptions.ProtocolError(response.content) return False
python
def fetch(self): """Attempt to fetch the object from the Infoblox device. If successful the object will be updated and the method will return True. :rtype: bool :raises: infoblox.exceptions.ProtocolError """ LOGGER.debug('Fetching %s, %s', self._path, self._search_values) response = self._session.get(self._path, self._search_values, {'_return_fields': self._return_fields}) if response.status_code == 200: values = response.json() self._assign(values) return bool(values) elif response.status_code >= 400: try: error = response.json() raise exceptions.ProtocolError(error['text']) except ValueError: raise exceptions.ProtocolError(response.content) return False
[ "def", "fetch", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "'Fetching %s, %s'", ",", "self", ".", "_path", ",", "self", ".", "_search_values", ")", "response", "=", "self", ".", "_session", ".", "get", "(", "self", ".", "_path", ",", "self", ...
Attempt to fetch the object from the Infoblox device. If successful the object will be updated and the method will return True. :rtype: bool :raises: infoblox.exceptions.ProtocolError
[ "Attempt", "to", "fetch", "the", "object", "from", "the", "Infoblox", "device", ".", "If", "successful", "the", "object", "will", "be", "updated", "and", "the", "method", "will", "return", "True", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L75-L96
gmr/infoblox
infoblox/record.py
Record.save
def save(self): """Update the infoblox with new values for the specified object, or add the values if it's a new object all together. :raises: AssertionError :raises: infoblox.exceptions.ProtocolError """ if 'save' not in self._supports: raise AssertionError('Can not save this object type') values = {} for key in [key for key in self.keys() if key not in self._save_ignore]: if not getattr(self, key) and getattr(self, key) != False: continue if isinstance(getattr(self, key, None), list): value = list() for item in getattr(self, key): if isinstance(item, dict): value.append(item) elif hasattr(item, '_save_as'): value.append(item._save_as()) elif hasattr(item, '_ref') and getattr(item, '_ref'): value.append(getattr(item, '_ref')) else: LOGGER.warning('Cant assign %r', item) values[key] = value elif getattr(self, key, None): values[key] = getattr(self, key) if not self._ref: response = self._session.post(self._path, values) else: values['_ref'] = self._ref response = self._session.put(self._path, values) LOGGER.debug('Response: %r, %r', response.status_code, response.content) if 200 <= response.status_code <= 201: self.fetch() return True else: try: error = response.json() raise exceptions.ProtocolError(error['text']) except ValueError: raise exceptions.ProtocolError(response.content)
python
def save(self): """Update the infoblox with new values for the specified object, or add the values if it's a new object all together. :raises: AssertionError :raises: infoblox.exceptions.ProtocolError """ if 'save' not in self._supports: raise AssertionError('Can not save this object type') values = {} for key in [key for key in self.keys() if key not in self._save_ignore]: if not getattr(self, key) and getattr(self, key) != False: continue if isinstance(getattr(self, key, None), list): value = list() for item in getattr(self, key): if isinstance(item, dict): value.append(item) elif hasattr(item, '_save_as'): value.append(item._save_as()) elif hasattr(item, '_ref') and getattr(item, '_ref'): value.append(getattr(item, '_ref')) else: LOGGER.warning('Cant assign %r', item) values[key] = value elif getattr(self, key, None): values[key] = getattr(self, key) if not self._ref: response = self._session.post(self._path, values) else: values['_ref'] = self._ref response = self._session.put(self._path, values) LOGGER.debug('Response: %r, %r', response.status_code, response.content) if 200 <= response.status_code <= 201: self.fetch() return True else: try: error = response.json() raise exceptions.ProtocolError(error['text']) except ValueError: raise exceptions.ProtocolError(response.content)
[ "def", "save", "(", "self", ")", ":", "if", "'save'", "not", "in", "self", ".", "_supports", ":", "raise", "AssertionError", "(", "'Can not save this object type'", ")", "values", "=", "{", "}", "for", "key", "in", "[", "key", "for", "key", "in", "self",...
Update the infoblox with new values for the specified object, or add the values if it's a new object all together. :raises: AssertionError :raises: infoblox.exceptions.ProtocolError
[ "Update", "the", "infoblox", "with", "new", "values", "for", "the", "specified", "object", "or", "add", "the", "values", "if", "it", "s", "a", "new", "object", "all", "together", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L104-L148
gmr/infoblox
infoblox/record.py
Record._assign
def _assign(self, values): """Assign the values passed as either a dict or list to the object if the key for each value matches an available attribute on the object. :param dict values: The values to assign """ LOGGER.debug('Assigning values: %r', values) if not values: return keys = self.keys() if not self._ref: keys.append('_ref') if isinstance(values, dict): for key in keys: if values.get(key): if isinstance(values.get(key), list): items = list() for item in values[key]: if isinstance(item, dict): if '_ref' in item: obj_class = get_class(item['_ref']) if obj_class: items.append(obj_class(self._session, **item)) else: items.append(item) setattr(self, key, items) else: setattr(self, key, values[key]) elif isinstance(values, list): self._assign(values[0]) else: LOGGER.critical('Unhandled return type: %r', values)
python
def _assign(self, values): """Assign the values passed as either a dict or list to the object if the key for each value matches an available attribute on the object. :param dict values: The values to assign """ LOGGER.debug('Assigning values: %r', values) if not values: return keys = self.keys() if not self._ref: keys.append('_ref') if isinstance(values, dict): for key in keys: if values.get(key): if isinstance(values.get(key), list): items = list() for item in values[key]: if isinstance(item, dict): if '_ref' in item: obj_class = get_class(item['_ref']) if obj_class: items.append(obj_class(self._session, **item)) else: items.append(item) setattr(self, key, items) else: setattr(self, key, values[key]) elif isinstance(values, list): self._assign(values[0]) else: LOGGER.critical('Unhandled return type: %r', values)
[ "def", "_assign", "(", "self", ",", "values", ")", ":", "LOGGER", ".", "debug", "(", "'Assigning values: %r'", ",", "values", ")", "if", "not", "values", ":", "return", "keys", "=", "self", ".", "keys", "(", ")", "if", "not", "self", ".", "_ref", ":"...
Assign the values passed as either a dict or list to the object if the key for each value matches an available attribute on the object. :param dict values: The values to assign
[ "Assign", "the", "values", "passed", "as", "either", "a", "dict", "or", "list", "to", "the", "object", "if", "the", "key", "for", "each", "value", "matches", "an", "available", "attribute", "on", "the", "object", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L150-L183
gmr/infoblox
infoblox/record.py
Record._build_search_values
def _build_search_values(self, kwargs): """Build the search criteria dictionary. It will first try and build the values from already set attributes on the object, falling back to the passed in kwargs. :param dict kwargs: Values to build the dict from :rtype: dict """ criteria = {} for key in self._search_by: if getattr(self, key, None): criteria[key] = getattr(self, key) elif key in kwargs and kwargs.get(key): criteria[key] = kwargs.get(key) return criteria
python
def _build_search_values(self, kwargs): """Build the search criteria dictionary. It will first try and build the values from already set attributes on the object, falling back to the passed in kwargs. :param dict kwargs: Values to build the dict from :rtype: dict """ criteria = {} for key in self._search_by: if getattr(self, key, None): criteria[key] = getattr(self, key) elif key in kwargs and kwargs.get(key): criteria[key] = kwargs.get(key) return criteria
[ "def", "_build_search_values", "(", "self", ",", "kwargs", ")", ":", "criteria", "=", "{", "}", "for", "key", "in", "self", ".", "_search_by", ":", "if", "getattr", "(", "self", ",", "key", ",", "None", ")", ":", "criteria", "[", "key", "]", "=", "...
Build the search criteria dictionary. It will first try and build the values from already set attributes on the object, falling back to the passed in kwargs. :param dict kwargs: Values to build the dict from :rtype: dict
[ "Build", "the", "search", "criteria", "dictionary", ".", "It", "will", "first", "try", "and", "build", "the", "values", "from", "already", "set", "attributes", "on", "the", "object", "falling", "back", "to", "the", "passed", "in", "kwargs", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L185-L200
gmr/infoblox
infoblox/record.py
Host.add_ipv4addr
def add_ipv4addr(self, ipv4addr): """Add an IPv4 address to the host. :param str ipv4addr: The IP address to add. :raises: ValueError """ for addr in self.ipv4addrs: if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or (isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)): raise ValueError('Already exists') self.ipv4addrs.append({'ipv4addr': ipv4addr})
python
def add_ipv4addr(self, ipv4addr): """Add an IPv4 address to the host. :param str ipv4addr: The IP address to add. :raises: ValueError """ for addr in self.ipv4addrs: if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or (isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)): raise ValueError('Already exists') self.ipv4addrs.append({'ipv4addr': ipv4addr})
[ "def", "add_ipv4addr", "(", "self", ",", "ipv4addr", ")", ":", "for", "addr", "in", "self", ".", "ipv4addrs", ":", "if", "(", "(", "isinstance", "(", "addr", ",", "dict", ")", "and", "addr", "[", "'ipv4addr'", "]", "==", "ipv4addr", ")", "or", "(", ...
Add an IPv4 address to the host. :param str ipv4addr: The IP address to add. :raises: ValueError
[ "Add", "an", "IPv4", "address", "to", "the", "host", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L265-L276
gmr/infoblox
infoblox/record.py
Host.remove_ipv4addr
def remove_ipv4addr(self, ipv4addr): """Remove an IPv4 address from the host. :param str ipv4addr: The IP address to remove """ for addr in self.ipv4addrs: if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or (isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)): self.ipv4addrs.remove(addr) break
python
def remove_ipv4addr(self, ipv4addr): """Remove an IPv4 address from the host. :param str ipv4addr: The IP address to remove """ for addr in self.ipv4addrs: if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or (isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)): self.ipv4addrs.remove(addr) break
[ "def", "remove_ipv4addr", "(", "self", ",", "ipv4addr", ")", ":", "for", "addr", "in", "self", ".", "ipv4addrs", ":", "if", "(", "(", "isinstance", "(", "addr", ",", "dict", ")", "and", "addr", "[", "'ipv4addr'", "]", "==", "ipv4addr", ")", "or", "("...
Remove an IPv4 address from the host. :param str ipv4addr: The IP address to remove
[ "Remove", "an", "IPv4", "address", "from", "the", "host", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L278-L288
gmr/infoblox
infoblox/record.py
Host.add_ipv6addr
def add_ipv6addr(self, ipv6addr): """Add an IPv6 address to the host. :param str ipv6addr: The IP address to add. :raises: ValueError """ for addr in self.ipv6addrs: if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or (isinstance(addr, HostIPv4) and addr.ipv6addr == ipv6addr)): raise ValueError('Already exists') self.ipv6addrs.append({'ipv6addr': ipv6addr})
python
def add_ipv6addr(self, ipv6addr): """Add an IPv6 address to the host. :param str ipv6addr: The IP address to add. :raises: ValueError """ for addr in self.ipv6addrs: if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or (isinstance(addr, HostIPv4) and addr.ipv6addr == ipv6addr)): raise ValueError('Already exists') self.ipv6addrs.append({'ipv6addr': ipv6addr})
[ "def", "add_ipv6addr", "(", "self", ",", "ipv6addr", ")", ":", "for", "addr", "in", "self", ".", "ipv6addrs", ":", "if", "(", "(", "isinstance", "(", "addr", ",", "dict", ")", "and", "addr", "[", "'ipv6addr'", "]", "==", "ipv6addr", ")", "or", "(", ...
Add an IPv6 address to the host. :param str ipv6addr: The IP address to add. :raises: ValueError
[ "Add", "an", "IPv6", "address", "to", "the", "host", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L290-L301
gmr/infoblox
infoblox/record.py
Host.remove_ipv6addr
def remove_ipv6addr(self, ipv6addr): """Remove an IPv6 address from the host. :param str ipv6addr: The IP address to remove """ for addr in self.ipv6addrs: if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or (isinstance(addr, HostIPv4) and addr.ipv6addr == ipv6addr)): self.ipv6addrs.remove(addr) break
python
def remove_ipv6addr(self, ipv6addr): """Remove an IPv6 address from the host. :param str ipv6addr: The IP address to remove """ for addr in self.ipv6addrs: if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or (isinstance(addr, HostIPv4) and addr.ipv6addr == ipv6addr)): self.ipv6addrs.remove(addr) break
[ "def", "remove_ipv6addr", "(", "self", ",", "ipv6addr", ")", ":", "for", "addr", "in", "self", ".", "ipv6addrs", ":", "if", "(", "(", "isinstance", "(", "addr", ",", "dict", ")", "and", "addr", "[", "'ipv6addr'", "]", "==", "ipv6addr", ")", "or", "("...
Remove an IPv6 address from the host. :param str ipv6addr: The IP address to remove
[ "Remove", "an", "IPv6", "address", "from", "the", "host", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L303-L313
kejbaly2/metrique
metrique/sqlalchemy.py
SQLAlchemyProxy._sqla_postgresql
def _sqla_postgresql(self, uri, version=None, isolation_level="READ COMMITTED"): ''' expected uri form: postgresql+psycopg2://%s:%s@%s:%s/%s' % ( username, password, host, port, db) ''' isolation_level = isolation_level or "READ COMMITTED" kwargs = dict(isolation_level=isolation_level) # FIXME: version of postgresql < 9.2 don't have pg.JSON! # check and use JSONTypedLite instead # override default dict and list column types types = {list: pg.ARRAY, tuple: pg.ARRAY, set: pg.ARRAY, dict: JSONDict, datetime: UTCEpoch} self.type_map.update(types) bs = self.config['batch_size'] # 999 batch_size is default for sqlite, postgres handles more at once self.config['batch_size'] = 5000 if bs == 999 else bs self._lock_required = False # default schema name is 'public' for postgres dsn = self.config['db_schema'] self.config['db_schema'] = dsn or 'public' return uri, kwargs
python
def _sqla_postgresql(self, uri, version=None, isolation_level="READ COMMITTED"): ''' expected uri form: postgresql+psycopg2://%s:%s@%s:%s/%s' % ( username, password, host, port, db) ''' isolation_level = isolation_level or "READ COMMITTED" kwargs = dict(isolation_level=isolation_level) # FIXME: version of postgresql < 9.2 don't have pg.JSON! # check and use JSONTypedLite instead # override default dict and list column types types = {list: pg.ARRAY, tuple: pg.ARRAY, set: pg.ARRAY, dict: JSONDict, datetime: UTCEpoch} self.type_map.update(types) bs = self.config['batch_size'] # 999 batch_size is default for sqlite, postgres handles more at once self.config['batch_size'] = 5000 if bs == 999 else bs self._lock_required = False # default schema name is 'public' for postgres dsn = self.config['db_schema'] self.config['db_schema'] = dsn or 'public' return uri, kwargs
[ "def", "_sqla_postgresql", "(", "self", ",", "uri", ",", "version", "=", "None", ",", "isolation_level", "=", "\"READ COMMITTED\"", ")", ":", "isolation_level", "=", "isolation_level", "or", "\"READ COMMITTED\"", "kwargs", "=", "dict", "(", "isolation_level", "=",...
expected uri form: postgresql+psycopg2://%s:%s@%s:%s/%s' % ( username, password, host, port, db)
[ "expected", "uri", "form", ":", "postgresql", "+", "psycopg2", ":", "//", "%s", ":", "%s" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L381-L403
kejbaly2/metrique
metrique/sqlalchemy.py
SQLAlchemyProxy.autoschema
def autoschema(self, objects, **kwargs): ''' wrapper around utils.autoschema function ''' return autoschema(objects=objects, exclude_keys=self.RESTRICTED_KEYS, **kwargs)
python
def autoschema(self, objects, **kwargs): ''' wrapper around utils.autoschema function ''' return autoschema(objects=objects, exclude_keys=self.RESTRICTED_KEYS, **kwargs)
[ "def", "autoschema", "(", "self", ",", "objects", ",", "*", "*", "kwargs", ")", ":", "return", "autoschema", "(", "objects", "=", "objects", ",", "exclude_keys", "=", "self", ".", "RESTRICTED_KEYS", ",", "*", "*", "kwargs", ")" ]
wrapper around utils.autoschema function
[ "wrapper", "around", "utils", ".", "autoschema", "function" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L405-L408
kejbaly2/metrique
metrique/sqlalchemy.py
SQLAlchemyProxy.count
def count(self, query=None, date=None, table=None): ''' Run a query on the given cube and return only the count of resulting matches. :param query: The query in pql :param date: date (metrique date range) that should be queried If date==None then the most recent versions of the objects will be queried. :param collection: cube name :param owner: username of cube owner ''' table = table or self.config.get('table') sql_count = select([func.count()]) query = self._parse_query(table=table, query=query, date=date, fields='id', alias='anon_x') if query is not None: query = sql_count.select_from(query) else: table = self.get_table(table) query = sql_count query = query.select_from(table) return self.session_auto.execute(query).scalar()
python
def count(self, query=None, date=None, table=None): ''' Run a query on the given cube and return only the count of resulting matches. :param query: The query in pql :param date: date (metrique date range) that should be queried If date==None then the most recent versions of the objects will be queried. :param collection: cube name :param owner: username of cube owner ''' table = table or self.config.get('table') sql_count = select([func.count()]) query = self._parse_query(table=table, query=query, date=date, fields='id', alias='anon_x') if query is not None: query = sql_count.select_from(query) else: table = self.get_table(table) query = sql_count query = query.select_from(table) return self.session_auto.execute(query).scalar()
[ "def", "count", "(", "self", ",", "query", "=", "None", ",", "date", "=", "None", ",", "table", "=", "None", ")", ":", "table", "=", "table", "or", "self", ".", "config", ".", "get", "(", "'table'", ")", "sql_count", "=", "select", "(", "[", "fun...
Run a query on the given cube and return only the count of resulting matches. :param query: The query in pql :param date: date (metrique date range) that should be queried If date==None then the most recent versions of the objects will be queried. :param collection: cube name :param owner: username of cube owner
[ "Run", "a", "query", "on", "the", "given", "cube", "and", "return", "only", "the", "count", "of", "resulting", "matches", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L641-L664
kejbaly2/metrique
metrique/sqlalchemy.py
SQLAlchemyProxy.deptree
def deptree(self, field, oids, date=None, level=None, table=None): ''' Dependency tree builder. Recursively fetchs objects that are children of the initial set of parent object ids provided. :param field: Field that contains the 'parent of' data :param oids: Object oids to build depedency tree for :param date: date (metrique date range) that should be queried. If date==None then the most recent versions of the objects will be queried. :param level: limit depth of recursion ''' table = self.get_table(table) fringe = str2list(oids) checked = set(fringe) loop_k = 0 while len(fringe) > 0: if level and loop_k == abs(level): break query = '_oid in %s' % list(fringe) docs = self.find(table=table, query=query, fields=[field], date=date, raw=True) fringe = {oid for doc in docs for oid in (doc[field] or []) if oid not in checked} checked |= fringe loop_k += 1 return sorted(checked)
python
def deptree(self, field, oids, date=None, level=None, table=None): ''' Dependency tree builder. Recursively fetchs objects that are children of the initial set of parent object ids provided. :param field: Field that contains the 'parent of' data :param oids: Object oids to build depedency tree for :param date: date (metrique date range) that should be queried. If date==None then the most recent versions of the objects will be queried. :param level: limit depth of recursion ''' table = self.get_table(table) fringe = str2list(oids) checked = set(fringe) loop_k = 0 while len(fringe) > 0: if level and loop_k == abs(level): break query = '_oid in %s' % list(fringe) docs = self.find(table=table, query=query, fields=[field], date=date, raw=True) fringe = {oid for doc in docs for oid in (doc[field] or []) if oid not in checked} checked |= fringe loop_k += 1 return sorted(checked)
[ "def", "deptree", "(", "self", ",", "field", ",", "oids", ",", "date", "=", "None", ",", "level", "=", "None", ",", "table", "=", "None", ")", ":", "table", "=", "self", ".", "get_table", "(", "table", ")", "fringe", "=", "str2list", "(", "oids", ...
Dependency tree builder. Recursively fetchs objects that are children of the initial set of parent object ids provided. :param field: Field that contains the 'parent of' data :param oids: Object oids to build depedency tree for :param date: date (metrique date range) that should be queried. If date==None then the most recent versions of the objects will be queried. :param level: limit depth of recursion
[ "Dependency", "tree", "builder", ".", "Recursively", "fetchs", "objects", "that", "are", "children", "of", "the", "initial", "set", "of", "parent", "object", "ids", "provided", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L666-L692
kejbaly2/metrique
metrique/sqlalchemy.py
SQLAlchemyProxy.get_last_field
def get_last_field(self, field, table=None): '''Shortcut for querying to get the last field value for a given owner, cube. :param field: field name to query ''' field = field if is_array(field) else [field] table = self.get_table(table, except_=False) if table is None: last = None else: is_defined(field, 'field must be defined!') last = self.find(table=table, fields=field, scalar=True, sort=field, limit=1, descending=True, date='~', default_fields=False) logger.debug("last %s.%s: %s" % (table, list2str(field), last)) return last
python
def get_last_field(self, field, table=None): '''Shortcut for querying to get the last field value for a given owner, cube. :param field: field name to query ''' field = field if is_array(field) else [field] table = self.get_table(table, except_=False) if table is None: last = None else: is_defined(field, 'field must be defined!') last = self.find(table=table, fields=field, scalar=True, sort=field, limit=1, descending=True, date='~', default_fields=False) logger.debug("last %s.%s: %s" % (table, list2str(field), last)) return last
[ "def", "get_last_field", "(", "self", ",", "field", ",", "table", "=", "None", ")", ":", "field", "=", "field", "if", "is_array", "(", "field", ")", "else", "[", "field", "]", "table", "=", "self", ".", "get_table", "(", "table", ",", "except_", "=",...
Shortcut for querying to get the last field value for a given owner, cube. :param field: field name to query
[ "Shortcut", "for", "querying", "to", "get", "the", "last", "field", "value", "for", "a", "given", "owner", "cube", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L787-L803
kejbaly2/metrique
metrique/sqlalchemy.py
SQLAlchemyProxy.index
def index(self, fields, name=None, table=None, **kwargs): ''' Build a new index on a cube. Examples: + index('field_name') :param fields: A single field or a list of (key, direction) pairs :param name: (optional) Custom name to use for this index :param collection: cube name :param owner: username of cube owner ''' table = self.get_table(table) name = self._index_default_name(fields, name) fields = parse.parse_fields(fields) fields = self.columns(table, fields, reflect=True) session = self.session_new() index = Index(name, *fields) logger.info('Writing new index %s: %s' % (name, fields)) result = index.create(self.engine) session.commit() return result
python
def index(self, fields, name=None, table=None, **kwargs): ''' Build a new index on a cube. Examples: + index('field_name') :param fields: A single field or a list of (key, direction) pairs :param name: (optional) Custom name to use for this index :param collection: cube name :param owner: username of cube owner ''' table = self.get_table(table) name = self._index_default_name(fields, name) fields = parse.parse_fields(fields) fields = self.columns(table, fields, reflect=True) session = self.session_new() index = Index(name, *fields) logger.info('Writing new index %s: %s' % (name, fields)) result = index.create(self.engine) session.commit() return result
[ "def", "index", "(", "self", ",", "fields", ",", "name", "=", "None", ",", "table", "=", "None", ",", "*", "*", "kwargs", ")", ":", "table", "=", "self", ".", "get_table", "(", "table", ")", "name", "=", "self", ".", "_index_default_name", "(", "fi...
Build a new index on a cube. Examples: + index('field_name') :param fields: A single field or a list of (key, direction) pairs :param name: (optional) Custom name to use for this index :param collection: cube name :param owner: username of cube owner
[ "Build", "a", "new", "index", "on", "a", "cube", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L826-L847
kejbaly2/metrique
metrique/sqlalchemy.py
SQLAlchemyProxy.index_list
def index_list(self): ''' List all cube indexes :param collection: cube name :param owner: username of cube owner ''' logger.info('Listing indexes') _ix = {} _i = self.inspector for tbl in _i.get_table_names(): _ix.setdefault(tbl, []) for ix in _i.get_indexes(tbl): _ix[tbl].append(ix) return _ix
python
def index_list(self): ''' List all cube indexes :param collection: cube name :param owner: username of cube owner ''' logger.info('Listing indexes') _ix = {} _i = self.inspector for tbl in _i.get_table_names(): _ix.setdefault(tbl, []) for ix in _i.get_indexes(tbl): _ix[tbl].append(ix) return _ix
[ "def", "index_list", "(", "self", ")", ":", "logger", ".", "info", "(", "'Listing indexes'", ")", "_ix", "=", "{", "}", "_i", "=", "self", ".", "inspector", "for", "tbl", "in", "_i", ".", "get_table_names", "(", ")", ":", "_ix", ".", "setdefault", "(...
List all cube indexes :param collection: cube name :param owner: username of cube owner
[ "List", "all", "cube", "indexes" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L849-L863
kejbaly2/metrique
metrique/sqlalchemy.py
SQLAlchemyProxy.ls
def ls(self, startswith=None): ''' List all cubes available to the calling client. :param startswith: string to use in a simple "startswith" query filter :returns list: sorted list of cube names ''' logger.info('Listing cubes starting with "%s")' % startswith) startswith = unicode(startswith or '') tables = sorted(name for name in self.db_tables if name.startswith(startswith)) return tables
python
def ls(self, startswith=None): ''' List all cubes available to the calling client. :param startswith: string to use in a simple "startswith" query filter :returns list: sorted list of cube names ''' logger.info('Listing cubes starting with "%s")' % startswith) startswith = unicode(startswith or '') tables = sorted(name for name in self.db_tables if name.startswith(startswith)) return tables
[ "def", "ls", "(", "self", ",", "startswith", "=", "None", ")", ":", "logger", ".", "info", "(", "'Listing cubes starting with \"%s\")'", "%", "startswith", ")", "startswith", "=", "unicode", "(", "startswith", "or", "''", ")", "tables", "=", "sorted", "(", ...
List all cubes available to the calling client. :param startswith: string to use in a simple "startswith" query filter :returns list: sorted list of cube names
[ "List", "all", "cubes", "available", "to", "the", "calling", "client", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L877-L888
kejbaly2/metrique
metrique/sqlalchemy.py
SQLAlchemyProxy.share
def share(self, with_user, roles=None, table=None): ''' Give cube access rights to another user Not, this method is NOT supported by SQLite3! ''' table = self.get_table(table) is_true(table is not None, 'invalid table: %s' % table) with_user = validate_username(with_user) roles = roles or ['SELECT'] roles = validate_roles(roles, self.VALID_SHARE_ROLES) roles = list2str(roles) logger.info('Sharing cube %s with %s (%s)' % (table, with_user, roles)) sql = 'GRANT %s ON %s TO %s' % (roles, table, with_user) return self.session_auto.execute(sql)
python
def share(self, with_user, roles=None, table=None): ''' Give cube access rights to another user Not, this method is NOT supported by SQLite3! ''' table = self.get_table(table) is_true(table is not None, 'invalid table: %s' % table) with_user = validate_username(with_user) roles = roles or ['SELECT'] roles = validate_roles(roles, self.VALID_SHARE_ROLES) roles = list2str(roles) logger.info('Sharing cube %s with %s (%s)' % (table, with_user, roles)) sql = 'GRANT %s ON %s TO %s' % (roles, table, with_user) return self.session_auto.execute(sql)
[ "def", "share", "(", "self", ",", "with_user", ",", "roles", "=", "None", ",", "table", "=", "None", ")", ":", "table", "=", "self", ".", "get_table", "(", "table", ")", "is_true", "(", "table", "is", "not", "None", ",", "'invalid table: %s'", "%", "...
Give cube access rights to another user Not, this method is NOT supported by SQLite3!
[ "Give", "cube", "access", "rights", "to", "another", "user" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L890-L904
biocore/burrito-fillings
bfillings/muscle_v38.py
muscle_seqs
def muscle_seqs(seqs, add_seq_names=False, out_filename=None, input_handler=None, params={}, WorkingDir=tempfile.gettempdir(), SuppressStderr=None, SuppressStdout=None): """Muscle align list of sequences. seqs: a list of sequences as strings or objects, you must set add_seq_names=True or sequences in a multiline string, as read() from a fasta file or sequences in a list of lines, as readlines() from a fasta file or a fasta seq filename. == for eg, testcode for guessing #guess_input_handler should correctly identify input gih = guess_input_handler self.assertEqual(gih('abc.txt'), '_input_as_string') self.assertEqual(gih('>ab\nTCAG'), '_input_as_multiline_string') self.assertEqual(gih(['ACC','TGA'], True), '_input_as_seqs') self.assertEqual(gih(['>a','ACC','>b','TGA']), '_input_as_lines') == docstring for blast_seqs, apply to muscle_seqs == seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules for data are as follows. If it's s list, treat as lines, unless add_seq_names is true (in which case treat as list of seqs). If it's a string, test whether it has newlines. If it doesn't have newlines, assume it's a filename. If it does have newlines, it can't be a filename, so assume it's a multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. 
if False, it assumes seqs is a list of lines of some proper format that the program can handle Addl docs coming soon """ if out_filename: params["-out"] = out_filename #else: # params["-out"] = get_tmp_filename(WorkingDir) ih = input_handler or guess_input_handler(seqs, add_seq_names) muscle_app = Muscle( params=params, InputHandler=ih, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) return muscle_app(seqs)
python
def muscle_seqs(seqs, add_seq_names=False, out_filename=None, input_handler=None, params={}, WorkingDir=tempfile.gettempdir(), SuppressStderr=None, SuppressStdout=None): """Muscle align list of sequences. seqs: a list of sequences as strings or objects, you must set add_seq_names=True or sequences in a multiline string, as read() from a fasta file or sequences in a list of lines, as readlines() from a fasta file or a fasta seq filename. == for eg, testcode for guessing #guess_input_handler should correctly identify input gih = guess_input_handler self.assertEqual(gih('abc.txt'), '_input_as_string') self.assertEqual(gih('>ab\nTCAG'), '_input_as_multiline_string') self.assertEqual(gih(['ACC','TGA'], True), '_input_as_seqs') self.assertEqual(gih(['>a','ACC','>b','TGA']), '_input_as_lines') == docstring for blast_seqs, apply to muscle_seqs == seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules for data are as follows. If it's s list, treat as lines, unless add_seq_names is true (in which case treat as list of seqs). If it's a string, test whether it has newlines. If it doesn't have newlines, assume it's a filename. If it does have newlines, it can't be a filename, so assume it's a multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. 
if False, it assumes seqs is a list of lines of some proper format that the program can handle Addl docs coming soon """ if out_filename: params["-out"] = out_filename #else: # params["-out"] = get_tmp_filename(WorkingDir) ih = input_handler or guess_input_handler(seqs, add_seq_names) muscle_app = Muscle( params=params, InputHandler=ih, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) return muscle_app(seqs)
[ "def", "muscle_seqs", "(", "seqs", ",", "add_seq_names", "=", "False", ",", "out_filename", "=", "None", ",", "input_handler", "=", "None", ",", "params", "=", "{", "}", ",", "WorkingDir", "=", "tempfile", ".", "gettempdir", "(", ")", ",", "SuppressStderr"...
Muscle align list of sequences. seqs: a list of sequences as strings or objects, you must set add_seq_names=True or sequences in a multiline string, as read() from a fasta file or sequences in a list of lines, as readlines() from a fasta file or a fasta seq filename. == for eg, testcode for guessing #guess_input_handler should correctly identify input gih = guess_input_handler self.assertEqual(gih('abc.txt'), '_input_as_string') self.assertEqual(gih('>ab\nTCAG'), '_input_as_multiline_string') self.assertEqual(gih(['ACC','TGA'], True), '_input_as_seqs') self.assertEqual(gih(['>a','ACC','>b','TGA']), '_input_as_lines') == docstring for blast_seqs, apply to muscle_seqs == seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules for data are as follows. If it's s list, treat as lines, unless add_seq_names is true (in which case treat as list of seqs). If it's a string, test whether it has newlines. If it doesn't have newlines, assume it's a filename. If it does have newlines, it can't be a filename, so assume it's a multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. if False, it assumes seqs is a list of lines of some proper format that the program can handle Addl docs coming soon
[ "Muscle", "align", "list", "of", "sequences", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L340-L396
biocore/burrito-fillings
bfillings/muscle_v38.py
cluster_seqs
def cluster_seqs(seqs, neighbor_join=False, params={}, add_seq_names=True, WorkingDir=tempfile.gettempdir(), SuppressStderr=None, SuppressStdout=None, max_chars=1000000, max_hours=1.0, constructor=PhyloNode, clean_up=True ): """Muscle cluster list of sequences. seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. Addl docs coming soon """ num_seqs = len(seqs) if num_seqs < 2: raise ValueError, "Muscle requres 2 or more sequences to cluster." num_chars = sum(map(len, seqs)) if num_chars > max_chars: params["-maxiters"] = 2 params["-diags1"] = True params["-sv"] = True #params["-distance1"] = "kmer6_6" #params["-distance1"] = "kmer20_3" #params["-distance1"] = "kbit20_3" print "lots of chars, using fast align", num_chars params["-maxhours"] = max_hours #params["-maxiters"] = 10 #cluster_type = "upgmb" #if neighbor_join: # cluster_type = "neighborjoining" params["-clusteronly"] = True params["-tree1"] = get_tmp_filename(WorkingDir) muscle_res = muscle_seqs(seqs, params=params, add_seq_names=add_seq_names, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) tree = DndParser(muscle_res["Tree1Out"], constructor=constructor) if clean_up: muscle_res.cleanUp() return tree
python
def cluster_seqs(seqs, neighbor_join=False, params={}, add_seq_names=True, WorkingDir=tempfile.gettempdir(), SuppressStderr=None, SuppressStdout=None, max_chars=1000000, max_hours=1.0, constructor=PhyloNode, clean_up=True ): """Muscle cluster list of sequences. seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. Addl docs coming soon """ num_seqs = len(seqs) if num_seqs < 2: raise ValueError, "Muscle requres 2 or more sequences to cluster." num_chars = sum(map(len, seqs)) if num_chars > max_chars: params["-maxiters"] = 2 params["-diags1"] = True params["-sv"] = True #params["-distance1"] = "kmer6_6" #params["-distance1"] = "kmer20_3" #params["-distance1"] = "kbit20_3" print "lots of chars, using fast align", num_chars params["-maxhours"] = max_hours #params["-maxiters"] = 10 #cluster_type = "upgmb" #if neighbor_join: # cluster_type = "neighborjoining" params["-clusteronly"] = True params["-tree1"] = get_tmp_filename(WorkingDir) muscle_res = muscle_seqs(seqs, params=params, add_seq_names=add_seq_names, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) tree = DndParser(muscle_res["Tree1Out"], constructor=constructor) if clean_up: muscle_res.cleanUp() return tree
[ "def", "cluster_seqs", "(", "seqs", ",", "neighbor_join", "=", "False", ",", "params", "=", "{", "}", ",", "add_seq_names", "=", "True", ",", "WorkingDir", "=", "tempfile", ".", "gettempdir", "(", ")", ",", "SuppressStderr", "=", "None", ",", "SuppressStdo...
Muscle cluster list of sequences. seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. Addl docs coming soon
[ "Muscle", "cluster", "list", "of", "sequences", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L399-L455
biocore/burrito-fillings
bfillings/muscle_v38.py
aln_tree_seqs
def aln_tree_seqs(seqs, input_handler=None, tree_type='neighborjoining', params={}, add_seq_names=True, WorkingDir=tempfile.gettempdir(), SuppressStderr=None, SuppressStdout=None, max_hours=5.0, constructor=PhyloNode, clean_up=True ): """Muscle align sequences and report tree from iteration2. Unlike cluster_seqs, returns tree2 which is the tree made during the second muscle iteration (it should be more accurate that the cluster from the first iteration which is made fast based on k-mer words) seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. tree_type: can be either neighborjoining (default) or upgmb for UPGMA clean_up: When true, will clean up output files """ params["-maxhours"] = max_hours if tree_type: params["-cluster2"] = tree_type params["-tree2"] = get_tmp_filename(WorkingDir) params["-out"] = get_tmp_filename(WorkingDir) muscle_res = muscle_seqs(seqs, input_handler=input_handler, params=params, add_seq_names=add_seq_names, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) tree = DndParser(muscle_res["Tree2Out"], constructor=constructor) aln = [line for line in muscle_res["MuscleOut"]] if clean_up: muscle_res.cleanUp() return tree, aln
python
def aln_tree_seqs(seqs, input_handler=None, tree_type='neighborjoining', params={}, add_seq_names=True, WorkingDir=tempfile.gettempdir(), SuppressStderr=None, SuppressStdout=None, max_hours=5.0, constructor=PhyloNode, clean_up=True ): """Muscle align sequences and report tree from iteration2. Unlike cluster_seqs, returns tree2 which is the tree made during the second muscle iteration (it should be more accurate that the cluster from the first iteration which is made fast based on k-mer words) seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. tree_type: can be either neighborjoining (default) or upgmb for UPGMA clean_up: When true, will clean up output files """ params["-maxhours"] = max_hours if tree_type: params["-cluster2"] = tree_type params["-tree2"] = get_tmp_filename(WorkingDir) params["-out"] = get_tmp_filename(WorkingDir) muscle_res = muscle_seqs(seqs, input_handler=input_handler, params=params, add_seq_names=add_seq_names, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) tree = DndParser(muscle_res["Tree2Out"], constructor=constructor) aln = [line for line in muscle_res["MuscleOut"]] if clean_up: muscle_res.cleanUp() return tree, aln
[ "def", "aln_tree_seqs", "(", "seqs", ",", "input_handler", "=", "None", ",", "tree_type", "=", "'neighborjoining'", ",", "params", "=", "{", "}", ",", "add_seq_names", "=", "True", ",", "WorkingDir", "=", "tempfile", ".", "gettempdir", "(", ")", ",", "Supp...
Muscle align sequences and report tree from iteration2. Unlike cluster_seqs, returns tree2 which is the tree made during the second muscle iteration (it should be more accurate that the cluster from the first iteration which is made fast based on k-mer words) seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. tree_type: can be either neighborjoining (default) or upgmb for UPGMA clean_up: When true, will clean up output files
[ "Muscle", "align", "sequences", "and", "report", "tree", "from", "iteration2", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L457-L499
biocore/burrito-fillings
bfillings/muscle_v38.py
align_unaligned_seqs
def align_unaligned_seqs(seqs, moltype=DNA, params=None): """Returns an Alignment object from seqs. seqs: SequenceCollection object, or data that can be used to build one. moltype: a MolType object. DNA, RNA, or PROTEIN. params: dict of parameters to pass in to the Muscle app controller. Result will be an Alignment object. """ if not params: params = {} #create SequenceCollection object from seqs seq_collection = SequenceCollection(seqs,MolType=moltype) #Create mapping between abbreviated IDs and full IDs int_map, int_keys = seq_collection.getIntMap() #Create SequenceCollection from int_map. int_map = SequenceCollection(int_map,MolType=moltype) #get temporary filename params.update({'-out':get_tmp_filename()}) #Create Muscle app. app = Muscle(InputHandler='_input_as_multiline_string',\ params=params, WorkingDir=tempfile.gettempdir()) #Get results using int_map as input to app res = app(int_map.toFasta()) #Get alignment as dict out of results alignment = dict(parse_fasta(res['MuscleOut'])) #Make new dict mapping original IDs new_alignment = {} for k,v in alignment.items(): new_alignment[int_keys[k]]=v #Create an Alignment object from alignment dict new_alignment = Alignment(new_alignment,MolType=moltype) #Clean up res.cleanUp() del(seq_collection,int_map,int_keys,app,res,alignment,params) return new_alignment
python
def align_unaligned_seqs(seqs, moltype=DNA, params=None): """Returns an Alignment object from seqs. seqs: SequenceCollection object, or data that can be used to build one. moltype: a MolType object. DNA, RNA, or PROTEIN. params: dict of parameters to pass in to the Muscle app controller. Result will be an Alignment object. """ if not params: params = {} #create SequenceCollection object from seqs seq_collection = SequenceCollection(seqs,MolType=moltype) #Create mapping between abbreviated IDs and full IDs int_map, int_keys = seq_collection.getIntMap() #Create SequenceCollection from int_map. int_map = SequenceCollection(int_map,MolType=moltype) #get temporary filename params.update({'-out':get_tmp_filename()}) #Create Muscle app. app = Muscle(InputHandler='_input_as_multiline_string',\ params=params, WorkingDir=tempfile.gettempdir()) #Get results using int_map as input to app res = app(int_map.toFasta()) #Get alignment as dict out of results alignment = dict(parse_fasta(res['MuscleOut'])) #Make new dict mapping original IDs new_alignment = {} for k,v in alignment.items(): new_alignment[int_keys[k]]=v #Create an Alignment object from alignment dict new_alignment = Alignment(new_alignment,MolType=moltype) #Clean up res.cleanUp() del(seq_collection,int_map,int_keys,app,res,alignment,params) return new_alignment
[ "def", "align_unaligned_seqs", "(", "seqs", ",", "moltype", "=", "DNA", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "#create SequenceCollection object from seqs", "seq_collection", "=", "SequenceCollection", "(", "s...
Returns an Alignment object from seqs. seqs: SequenceCollection object, or data that can be used to build one. moltype: a MolType object. DNA, RNA, or PROTEIN. params: dict of parameters to pass in to the Muscle app controller. Result will be an Alignment object.
[ "Returns", "an", "Alignment", "object", "from", "seqs", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L531-L569
biocore/burrito-fillings
bfillings/muscle_v38.py
align_and_build_tree
def align_and_build_tree(seqs, moltype, best_tree=False, params=None): """Returns an alignment and a tree from Sequences object seqs. seqs: a cogent.core.alignment.SequenceCollection object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: if True (default:False), uses a slower but more accurate algorithm to build the tree. params: dict of parameters to pass in to the Muscle app controller. The result will be a tuple containing a cogent.core.alignment.Alignment and a cogent.core.tree.PhyloNode object (or None for the alignment and/or tree if either fails). """ aln = align_unaligned_seqs(seqs, moltype=moltype, params=params) tree = build_tree_from_alignment(aln, moltype, best_tree, params) return {'Align':aln, 'Tree':tree}
python
def align_and_build_tree(seqs, moltype, best_tree=False, params=None): """Returns an alignment and a tree from Sequences object seqs. seqs: a cogent.core.alignment.SequenceCollection object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: if True (default:False), uses a slower but more accurate algorithm to build the tree. params: dict of parameters to pass in to the Muscle app controller. The result will be a tuple containing a cogent.core.alignment.Alignment and a cogent.core.tree.PhyloNode object (or None for the alignment and/or tree if either fails). """ aln = align_unaligned_seqs(seqs, moltype=moltype, params=params) tree = build_tree_from_alignment(aln, moltype, best_tree, params) return {'Align':aln, 'Tree':tree}
[ "def", "align_and_build_tree", "(", "seqs", ",", "moltype", ",", "best_tree", "=", "False", ",", "params", "=", "None", ")", ":", "aln", "=", "align_unaligned_seqs", "(", "seqs", ",", "moltype", "=", "moltype", ",", "params", "=", "params", ")", "tree", ...
Returns an alignment and a tree from Sequences object seqs. seqs: a cogent.core.alignment.SequenceCollection object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: if True (default:False), uses a slower but more accurate algorithm to build the tree. params: dict of parameters to pass in to the Muscle app controller. The result will be a tuple containing a cogent.core.alignment.Alignment and a cogent.core.tree.PhyloNode object (or None for the alignment and/or tree if either fails).
[ "Returns", "an", "alignment", "and", "a", "tree", "from", "Sequences", "object", "seqs", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L572-L591
biocore/burrito-fillings
bfillings/muscle_v38.py
build_tree_from_alignment
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params=None): """Returns a tree from Alignment object aln. aln: a cogent.core.alignment.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: unsupported params: dict of parameters to pass in to the Muscle app controller. The result will be an cogent.core.tree.PhyloNode object, or None if tree fails. """ # Create instance of app controller, enable tree, disable alignment app = Muscle(InputHandler='_input_as_multiline_string', params=params, \ WorkingDir=tempfile.gettempdir()) app.Parameters['-clusteronly'].on() app.Parameters['-tree1'].on(get_tmp_filename(app.WorkingDir)) app.Parameters['-seqtype'].on(moltype.label) seq_collection = SequenceCollection(aln, MolType=moltype) #Create mapping between abbreviated IDs and full IDs int_map, int_keys = seq_collection.getIntMap() #Create SequenceCollection from int_map. int_map = SequenceCollection(int_map,MolType=moltype) # Collect result result = app(int_map.toFasta()) # Build tree tree = DndParser(result['Tree1Out'].read(), constructor=PhyloNode) for tip in tree.tips(): tip.Name = int_keys[tip.Name] # Clean up result.cleanUp() del(seq_collection, app, result) return tree
python
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params=None): """Returns a tree from Alignment object aln. aln: a cogent.core.alignment.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: unsupported params: dict of parameters to pass in to the Muscle app controller. The result will be an cogent.core.tree.PhyloNode object, or None if tree fails. """ # Create instance of app controller, enable tree, disable alignment app = Muscle(InputHandler='_input_as_multiline_string', params=params, \ WorkingDir=tempfile.gettempdir()) app.Parameters['-clusteronly'].on() app.Parameters['-tree1'].on(get_tmp_filename(app.WorkingDir)) app.Parameters['-seqtype'].on(moltype.label) seq_collection = SequenceCollection(aln, MolType=moltype) #Create mapping between abbreviated IDs and full IDs int_map, int_keys = seq_collection.getIntMap() #Create SequenceCollection from int_map. int_map = SequenceCollection(int_map,MolType=moltype) # Collect result result = app(int_map.toFasta()) # Build tree tree = DndParser(result['Tree1Out'].read(), constructor=PhyloNode) for tip in tree.tips(): tip.Name = int_keys[tip.Name] # Clean up result.cleanUp() del(seq_collection, app, result) return tree
[ "def", "build_tree_from_alignment", "(", "aln", ",", "moltype", "=", "DNA", ",", "best_tree", "=", "False", ",", "params", "=", "None", ")", ":", "# Create instance of app controller, enable tree, disable alignment", "app", "=", "Muscle", "(", "InputHandler", "=", "...
Returns a tree from Alignment object aln. aln: a cogent.core.alignment.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: unsupported params: dict of parameters to pass in to the Muscle app controller. The result will be an cogent.core.tree.PhyloNode object, or None if tree fails.
[ "Returns", "a", "tree", "from", "Alignment", "object", "aln", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L593-L637
biocore/burrito-fillings
bfillings/muscle_v38.py
add_seqs_to_alignment
def add_seqs_to_alignment(seqs, aln, params=None): """Returns an Alignment object from seqs and existing Alignment. seqs: a cogent.core.alignment.SequenceCollection object, or data that can be used to build one. aln: a cogent.core.alignment.Alignment object, or data that can be used to build one params: dict of parameters to pass in to the Muscle app controller. """ if not params: params = {} #create SequenceCollection object from seqs seqs_collection = SequenceCollection(seqs) #Create mapping between abbreviated IDs and full IDs seqs_int_map, seqs_int_keys = seqs_collection.getIntMap(prefix='seq_') #Create SequenceCollection from int_map. seqs_int_map = SequenceCollection(seqs_int_map) #create SequenceCollection object from aln aln_collection = SequenceCollection(aln) #Create mapping between abbreviated IDs and full IDs aln_int_map, aln_int_keys = aln_collection.getIntMap(prefix='aln_') #Create SequenceCollection from int_map. aln_int_map = SequenceCollection(aln_int_map) #set output and profile options params.update({'-out':get_tmp_filename(), '-profile':True}) #save seqs to tmp file seqs_filename = get_tmp_filename() seqs_out = open(seqs_filename,'w') seqs_out.write(seqs_int_map.toFasta()) seqs_out.close() #save aln to tmp file aln_filename = get_tmp_filename() aln_out = open(aln_filename, 'w') aln_out.write(aln_int_map.toFasta()) aln_out.close() #Create Muscle app and get results app = Muscle(InputHandler='_input_as_multifile', params=params, WorkingDir=tempfile.gettempdir()) res = app((aln_filename, seqs_filename)) #Get alignment as dict out of results alignment = dict(parse_fasta(res['MuscleOut'])) #Make new dict mapping original IDs new_alignment = {} for k,v in alignment.items(): if k in seqs_int_keys: new_alignment[seqs_int_keys[k]] = v else: new_alignment[aln_int_keys[k]] = v #Create an Alignment object from alignment dict new_alignment = Alignment(new_alignment) #Clean up res.cleanUp() del(seqs_collection, seqs_int_map, seqs_int_keys) del(aln_collection, 
aln_int_map, aln_int_keys) del(app, res, alignment, params) remove(seqs_filename) remove(aln_filename) return new_alignment
python
def add_seqs_to_alignment(seqs, aln, params=None): """Returns an Alignment object from seqs and existing Alignment. seqs: a cogent.core.alignment.SequenceCollection object, or data that can be used to build one. aln: a cogent.core.alignment.Alignment object, or data that can be used to build one params: dict of parameters to pass in to the Muscle app controller. """ if not params: params = {} #create SequenceCollection object from seqs seqs_collection = SequenceCollection(seqs) #Create mapping between abbreviated IDs and full IDs seqs_int_map, seqs_int_keys = seqs_collection.getIntMap(prefix='seq_') #Create SequenceCollection from int_map. seqs_int_map = SequenceCollection(seqs_int_map) #create SequenceCollection object from aln aln_collection = SequenceCollection(aln) #Create mapping between abbreviated IDs and full IDs aln_int_map, aln_int_keys = aln_collection.getIntMap(prefix='aln_') #Create SequenceCollection from int_map. aln_int_map = SequenceCollection(aln_int_map) #set output and profile options params.update({'-out':get_tmp_filename(), '-profile':True}) #save seqs to tmp file seqs_filename = get_tmp_filename() seqs_out = open(seqs_filename,'w') seqs_out.write(seqs_int_map.toFasta()) seqs_out.close() #save aln to tmp file aln_filename = get_tmp_filename() aln_out = open(aln_filename, 'w') aln_out.write(aln_int_map.toFasta()) aln_out.close() #Create Muscle app and get results app = Muscle(InputHandler='_input_as_multifile', params=params, WorkingDir=tempfile.gettempdir()) res = app((aln_filename, seqs_filename)) #Get alignment as dict out of results alignment = dict(parse_fasta(res['MuscleOut'])) #Make new dict mapping original IDs new_alignment = {} for k,v in alignment.items(): if k in seqs_int_keys: new_alignment[seqs_int_keys[k]] = v else: new_alignment[aln_int_keys[k]] = v #Create an Alignment object from alignment dict new_alignment = Alignment(new_alignment) #Clean up res.cleanUp() del(seqs_collection, seqs_int_map, seqs_int_keys) del(aln_collection, 
aln_int_map, aln_int_keys) del(app, res, alignment, params) remove(seqs_filename) remove(aln_filename) return new_alignment
[ "def", "add_seqs_to_alignment", "(", "seqs", ",", "aln", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "#create SequenceCollection object from seqs", "seqs_collection", "=", "SequenceCollection", "(", "seqs", ")", "#C...
Returns an Alignment object from seqs and existing Alignment. seqs: a cogent.core.alignment.SequenceCollection object, or data that can be used to build one. aln: a cogent.core.alignment.Alignment object, or data that can be used to build one params: dict of parameters to pass in to the Muscle app controller.
[ "Returns", "an", "Alignment", "object", "from", "seqs", "and", "existing", "Alignment", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L639-L708
biocore/burrito-fillings
bfillings/muscle_v38.py
align_two_alignments
def align_two_alignments(aln1, aln2, params=None): """Returns an Alignment object from two existing Alignments. aln1, aln2: cogent.core.alignment.Alignment objects, or data that can be used to build them. params: dict of parameters to pass in to the Muscle app controller. """ if not params: params = {} #create SequenceCollection object from aln1 aln1_collection = SequenceCollection(aln1) #Create mapping between abbreviated IDs and full IDs aln1_int_map, aln1_int_keys = aln1_collection.getIntMap(prefix='aln1_') #Create SequenceCollection from int_map. aln1_int_map = SequenceCollection(aln1_int_map) #create SequenceCollection object from aln2 aln2_collection = SequenceCollection(aln2) #Create mapping between abbreviated IDs and full IDs aln2_int_map, aln2_int_keys = aln2_collection.getIntMap(prefix='aln2_') #Create SequenceCollection from int_map. aln2_int_map = SequenceCollection(aln2_int_map) #set output and profile options params.update({'-out':get_tmp_filename(), '-profile':True}) #save aln1 to tmp file aln1_filename = get_tmp_filename() aln1_out = open(aln1_filename,'w') aln1_out.write(aln1_int_map.toFasta()) aln1_out.close() #save aln2 to tmp file aln2_filename = get_tmp_filename() aln2_out = open(aln2_filename, 'w') aln2_out.write(aln2_int_map.toFasta()) aln2_out.close() #Create Muscle app and get results app = Muscle(InputHandler='_input_as_multifile', params=params, WorkingDir=tempfile.gettempdir()) res = app((aln1_filename, aln2_filename)) #Get alignment as dict out of results alignment = dict(parse_fasta(res['MuscleOut'])) #Make new dict mapping original IDs new_alignment = {} for k,v in alignment.items(): if k in aln1_int_keys: new_alignment[aln1_int_keys[k]] = v else: new_alignment[aln2_int_keys[k]] = v #Create an Alignment object from alignment dict new_alignment = Alignment(new_alignment) #Clean up res.cleanUp() del(aln1_collection, aln1_int_map, aln1_int_keys) del(aln2_collection, aln2_int_map, aln2_int_keys) del(app, res, alignment, params) 
remove(aln1_filename) remove(aln2_filename) return new_alignment
python
def align_two_alignments(aln1, aln2, params=None): """Returns an Alignment object from two existing Alignments. aln1, aln2: cogent.core.alignment.Alignment objects, or data that can be used to build them. params: dict of parameters to pass in to the Muscle app controller. """ if not params: params = {} #create SequenceCollection object from aln1 aln1_collection = SequenceCollection(aln1) #Create mapping between abbreviated IDs and full IDs aln1_int_map, aln1_int_keys = aln1_collection.getIntMap(prefix='aln1_') #Create SequenceCollection from int_map. aln1_int_map = SequenceCollection(aln1_int_map) #create SequenceCollection object from aln2 aln2_collection = SequenceCollection(aln2) #Create mapping between abbreviated IDs and full IDs aln2_int_map, aln2_int_keys = aln2_collection.getIntMap(prefix='aln2_') #Create SequenceCollection from int_map. aln2_int_map = SequenceCollection(aln2_int_map) #set output and profile options params.update({'-out':get_tmp_filename(), '-profile':True}) #save aln1 to tmp file aln1_filename = get_tmp_filename() aln1_out = open(aln1_filename,'w') aln1_out.write(aln1_int_map.toFasta()) aln1_out.close() #save aln2 to tmp file aln2_filename = get_tmp_filename() aln2_out = open(aln2_filename, 'w') aln2_out.write(aln2_int_map.toFasta()) aln2_out.close() #Create Muscle app and get results app = Muscle(InputHandler='_input_as_multifile', params=params, WorkingDir=tempfile.gettempdir()) res = app((aln1_filename, aln2_filename)) #Get alignment as dict out of results alignment = dict(parse_fasta(res['MuscleOut'])) #Make new dict mapping original IDs new_alignment = {} for k,v in alignment.items(): if k in aln1_int_keys: new_alignment[aln1_int_keys[k]] = v else: new_alignment[aln2_int_keys[k]] = v #Create an Alignment object from alignment dict new_alignment = Alignment(new_alignment) #Clean up res.cleanUp() del(aln1_collection, aln1_int_map, aln1_int_keys) del(aln2_collection, aln2_int_map, aln2_int_keys) del(app, res, alignment, params) 
remove(aln1_filename) remove(aln2_filename) return new_alignment
[ "def", "align_two_alignments", "(", "aln1", ",", "aln2", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "#create SequenceCollection object from aln1", "aln1_collection", "=", "SequenceCollection", "(", "aln1", ")", "#C...
Returns an Alignment object from two existing Alignments. aln1, aln2: cogent.core.alignment.Alignment objects, or data that can be used to build them. params: dict of parameters to pass in to the Muscle app controller.
[ "Returns", "an", "Alignment", "object", "from", "two", "existing", "Alignments", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L710-L777
biocore/burrito-fillings
bfillings/muscle_v38.py
Muscle._input_as_multifile
def _input_as_multifile(self, data): """For use with the -profile option This input handler expects data to be a tuple containing two filenames. Index 0 will be set to -in1 and index 1 to -in2 """ if data: try: filename1, filename2 = data except: raise ValueError, "Expected two filenames" self.Parameters['-in'].off() self.Parameters['-in1'].on(filename1) self.Parameters['-in2'].on(filename2) return ''
python
def _input_as_multifile(self, data): """For use with the -profile option This input handler expects data to be a tuple containing two filenames. Index 0 will be set to -in1 and index 1 to -in2 """ if data: try: filename1, filename2 = data except: raise ValueError, "Expected two filenames" self.Parameters['-in'].off() self.Parameters['-in1'].on(filename1) self.Parameters['-in2'].on(filename2) return ''
[ "def", "_input_as_multifile", "(", "self", ",", "data", ")", ":", "if", "data", ":", "try", ":", "filename1", ",", "filename2", "=", "data", "except", ":", "raise", "ValueError", ",", "\"Expected two filenames\"", "self", ".", "Parameters", "[", "'-in'", "]"...
For use with the -profile option This input handler expects data to be a tuple containing two filenames. Index 0 will be set to -in1 and index 1 to -in2
[ "For", "use", "with", "the", "-", "profile", "option" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L276-L291
mokelly/wabbit_wappa
examples/capitalization_demo.py
get_example
def get_example(): """Make an example for training and testing. Outputs a tuple (label, features) where label is +1 if capital letters are the majority, and -1 otherwise; and features is a list of letters. """ features = random.sample(string.ascii_letters, NUM_SAMPLES) num_capitalized = len([ letter for letter in features if letter in string.ascii_uppercase ]) num_lowercase = len([ letter for letter in features if letter in string.ascii_lowercase ]) if num_capitalized > num_lowercase: label = 1 else: label = -1 return (label, features)
python
def get_example(): """Make an example for training and testing. Outputs a tuple (label, features) where label is +1 if capital letters are the majority, and -1 otherwise; and features is a list of letters. """ features = random.sample(string.ascii_letters, NUM_SAMPLES) num_capitalized = len([ letter for letter in features if letter in string.ascii_uppercase ]) num_lowercase = len([ letter for letter in features if letter in string.ascii_lowercase ]) if num_capitalized > num_lowercase: label = 1 else: label = -1 return (label, features)
[ "def", "get_example", "(", ")", ":", "features", "=", "random", ".", "sample", "(", "string", ".", "ascii_letters", ",", "NUM_SAMPLES", ")", "num_capitalized", "=", "len", "(", "[", "letter", "for", "letter", "in", "features", "if", "letter", "in", "string...
Make an example for training and testing. Outputs a tuple (label, features) where label is +1 if capital letters are the majority, and -1 otherwise; and features is a list of letters.
[ "Make", "an", "example", "for", "training", "and", "testing", ".", "Outputs", "a", "tuple", "(", "label", "features", ")", "where", "label", "is", "+", "1", "if", "capital", "letters", "are", "the", "majority", "and", "-", "1", "otherwise", ";", "and", ...
train
https://github.com/mokelly/wabbit_wappa/blob/dfe5bf6d6036079e473c4148335cd6f339d0299b/examples/capitalization_demo.py#L21-L33
dailymuse/oz
oz/aws_cdn/__init__.py
static_url
def static_url(redis, path): """Gets the static path for a file""" file_hash = get_cache_buster(redis, path) return "%s/%s?v=%s" % (oz.settings["static_host"], path, file_hash)
python
def static_url(redis, path): """Gets the static path for a file""" file_hash = get_cache_buster(redis, path) return "%s/%s?v=%s" % (oz.settings["static_host"], path, file_hash)
[ "def", "static_url", "(", "redis", ",", "path", ")", ":", "file_hash", "=", "get_cache_buster", "(", "redis", ",", "path", ")", "return", "\"%s/%s?v=%s\"", "%", "(", "oz", ".", "settings", "[", "\"static_host\"", "]", ",", "path", ",", "file_hash", ")" ]
Gets the static path for a file
[ "Gets", "the", "static", "path", "for", "a", "file" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/__init__.py#L24-L27
dailymuse/oz
oz/aws_cdn/__init__.py
get_cache_buster
def get_cache_buster(redis, path): """Gets the cache buster value for a given file path""" return escape.to_unicode(redis.hget("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path))
python
def get_cache_buster(redis, path): """Gets the cache buster value for a given file path""" return escape.to_unicode(redis.hget("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path))
[ "def", "get_cache_buster", "(", "redis", ",", "path", ")", ":", "return", "escape", ".", "to_unicode", "(", "redis", ".", "hget", "(", "\"cache-buster:{}:v3\"", ".", "format", "(", "oz", ".", "settings", "[", "\"s3_bucket\"", "]", ")", ",", "path", ")", ...
Gets the cache buster value for a given file path
[ "Gets", "the", "cache", "buster", "value", "for", "a", "given", "file", "path" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/__init__.py#L29-L31
dailymuse/oz
oz/aws_cdn/__init__.py
set_cache_buster
def set_cache_buster(redis, path, hash): """Sets the cache buster value for a given file path""" redis.hset("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path, hash)
python
def set_cache_buster(redis, path, hash): """Sets the cache buster value for a given file path""" redis.hset("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path, hash)
[ "def", "set_cache_buster", "(", "redis", ",", "path", ",", "hash", ")", ":", "redis", ".", "hset", "(", "\"cache-buster:{}:v3\"", ".", "format", "(", "oz", ".", "settings", "[", "\"s3_bucket\"", "]", ")", ",", "path", ",", "hash", ")" ]
Sets the cache buster value for a given file path
[ "Sets", "the", "cache", "buster", "value", "for", "a", "given", "file", "path" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/__init__.py#L33-L35
dailymuse/oz
oz/aws_cdn/__init__.py
get_bucket
def get_bucket(s3_bucket=None, validate=False): """Gets a bucket from specified settings""" global S3Connection if S3Connection != None: settings = oz.settings s3_bucket = s3_bucket or settings["s3_bucket"] opts = {} if settings["s3_host"]: opts["host"] = settings["s3_host"] if settings["aws_access_key"] and settings["aws_secret_key"]: opts["aws_access_key_id"] = settings["aws_access_key"] opts["aws_secret_access_key"] = settings["aws_secret_key"] return S3Connection(**opts).get_bucket(s3_bucket, validate=validate) else: raise Exception("S3 not supported in this environment as boto is not installed")
python
def get_bucket(s3_bucket=None, validate=False): """Gets a bucket from specified settings""" global S3Connection if S3Connection != None: settings = oz.settings s3_bucket = s3_bucket or settings["s3_bucket"] opts = {} if settings["s3_host"]: opts["host"] = settings["s3_host"] if settings["aws_access_key"] and settings["aws_secret_key"]: opts["aws_access_key_id"] = settings["aws_access_key"] opts["aws_secret_access_key"] = settings["aws_secret_key"] return S3Connection(**opts).get_bucket(s3_bucket, validate=validate) else: raise Exception("S3 not supported in this environment as boto is not installed")
[ "def", "get_bucket", "(", "s3_bucket", "=", "None", ",", "validate", "=", "False", ")", ":", "global", "S3Connection", "if", "S3Connection", "!=", "None", ":", "settings", "=", "oz", ".", "settings", "s3_bucket", "=", "s3_bucket", "or", "settings", "[", "\...
Gets a bucket from specified settings
[ "Gets", "a", "bucket", "from", "specified", "settings" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/__init__.py#L41-L56
dailymuse/oz
oz/aws_cdn/__init__.py
get_file
def get_file(path, s3_bucket=None): """Gets a file""" bucket_name = s3_bucket or oz.settings["s3_bucket"] if bucket_name: bucket = get_bucket(bucket_name) key = bucket.get_key(path) if not key: key = bucket.new_key(path) return S3File(key) else: return LocalFile(oz.settings["static_path"], path)
python
def get_file(path, s3_bucket=None): """Gets a file""" bucket_name = s3_bucket or oz.settings["s3_bucket"] if bucket_name: bucket = get_bucket(bucket_name) key = bucket.get_key(path) if not key: key = bucket.new_key(path) return S3File(key) else: return LocalFile(oz.settings["static_path"], path)
[ "def", "get_file", "(", "path", ",", "s3_bucket", "=", "None", ")", ":", "bucket_name", "=", "s3_bucket", "or", "oz", ".", "settings", "[", "\"s3_bucket\"", "]", "if", "bucket_name", ":", "bucket", "=", "get_bucket", "(", "bucket_name", ")", "key", "=", ...
Gets a file
[ "Gets", "a", "file" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/__init__.py#L58-L70
dailymuse/oz
oz/aws_cdn/__init__.py
LocalFile.copy
def copy(self, new_path, replace=False): """ Uses shutil to copy a file over """ new_full_path = os.path.join(self.static_path, new_path) if replace or not os.path.exists(new_full_path): shutil.copy2(self.full_path, new_full_path) return True return False
python
def copy(self, new_path, replace=False): """ Uses shutil to copy a file over """ new_full_path = os.path.join(self.static_path, new_path) if replace or not os.path.exists(new_full_path): shutil.copy2(self.full_path, new_full_path) return True return False
[ "def", "copy", "(", "self", ",", "new_path", ",", "replace", "=", "False", ")", ":", "new_full_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "static_path", ",", "new_path", ")", "if", "replace", "or", "not", "os", ".", "path", ".", ...
Uses shutil to copy a file over
[ "Uses", "shutil", "to", "copy", "a", "file", "over" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/__init__.py#L147-L153
dailymuse/oz
oz/aws_cdn/__init__.py
S3File.copy
def copy(self, new_path, replace=False): """Uses boto to copy the file to the new path instead of uploading another file to the new key""" if replace or not get_file(new_path).exists(): self.key.copy(self.key.bucket, new_path) return True return False
python
def copy(self, new_path, replace=False): """Uses boto to copy the file to the new path instead of uploading another file to the new key""" if replace or not get_file(new_path).exists(): self.key.copy(self.key.bucket, new_path) return True return False
[ "def", "copy", "(", "self", ",", "new_path", ",", "replace", "=", "False", ")", ":", "if", "replace", "or", "not", "get_file", "(", "new_path", ")", ".", "exists", "(", ")", ":", "self", ".", "key", ".", "copy", "(", "self", ".", "key", ".", "buc...
Uses boto to copy the file to the new path instead of uploading another file to the new key
[ "Uses", "boto", "to", "copy", "the", "file", "to", "the", "new", "path", "instead", "of", "uploading", "another", "file", "to", "the", "new", "key" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/__init__.py#L184-L189
dailymuse/oz
oz/redis/__init__.py
create_connection
def create_connection(): """Sets up a redis configuration""" global _cached_connection settings = oz.settings if settings["redis_cache_connections"] and _cached_connection != None: return _cached_connection else: conn = redis.StrictRedis( host=settings["redis_host"], port=settings["redis_port"], db=settings["redis_db"], password=settings["redis_password"], decode_responses=settings["redis_decode_responses"], ssl=settings["redis_use_ssl"], ssl_keyfile=settings["redis_ssl_keyfile"], ssl_certfile=settings["redis_ssl_certfile"], ssl_cert_reqs=settings["redis_ssl_cert_reqs"], ssl_ca_certs=settings["redis_ssl_ca_certs"] ) if settings["redis_cache_connections"]: _cached_connection = conn return conn
python
def create_connection(): """Sets up a redis configuration""" global _cached_connection settings = oz.settings if settings["redis_cache_connections"] and _cached_connection != None: return _cached_connection else: conn = redis.StrictRedis( host=settings["redis_host"], port=settings["redis_port"], db=settings["redis_db"], password=settings["redis_password"], decode_responses=settings["redis_decode_responses"], ssl=settings["redis_use_ssl"], ssl_keyfile=settings["redis_ssl_keyfile"], ssl_certfile=settings["redis_ssl_certfile"], ssl_cert_reqs=settings["redis_ssl_cert_reqs"], ssl_ca_certs=settings["redis_ssl_ca_certs"] ) if settings["redis_cache_connections"]: _cached_connection = conn return conn
[ "def", "create_connection", "(", ")", ":", "global", "_cached_connection", "settings", "=", "oz", ".", "settings", "if", "settings", "[", "\"redis_cache_connections\"", "]", "and", "_cached_connection", "!=", "None", ":", "return", "_cached_connection", "else", ":",...
Sets up a redis configuration
[ "Sets", "up", "a", "redis", "configuration" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/redis/__init__.py#L12-L37
biocore/burrito-fillings
bfillings/usearch.py
clusters_from_blast_uc_file
def clusters_from_blast_uc_file(uc_lines, otu_id_field=1): """ Parses out hit/miss sequences from usearch blast uc file All lines should be 'H'it or 'N'o hit. Returns a dict of OTU ids: sequence labels of the hits, and a list of all sequence labels that miss. uc_lines = open file object of uc file otu_id_field: uc field to use as the otu id. 1 is usearch's ClusterNr field, and 9 is usearch's TargetLabel field """ hit_miss_index = 0 cluster_id_index = otu_id_field seq_label_index = 8 otus = {} unassigned_seqs = [] for line in uc_lines: # skip empty, comment lines if line.startswith('#') or len(line.strip()) == 0: continue curr_line = line.split('\t') if curr_line[hit_miss_index] == 'N': # only retaining actual sequence label unassigned_seqs.append(curr_line[seq_label_index].split()[0]) if curr_line[hit_miss_index] == 'H': curr_seq_label = curr_line[seq_label_index].split()[0] curr_otu_id = curr_line[cluster_id_index].split()[0] # Append sequence label to dictionary, or create key try: otus[curr_otu_id].append(curr_seq_label) except KeyError: otus[curr_otu_id] = [curr_seq_label] return otus, unassigned_seqs
python
def clusters_from_blast_uc_file(uc_lines, otu_id_field=1): """ Parses out hit/miss sequences from usearch blast uc file All lines should be 'H'it or 'N'o hit. Returns a dict of OTU ids: sequence labels of the hits, and a list of all sequence labels that miss. uc_lines = open file object of uc file otu_id_field: uc field to use as the otu id. 1 is usearch's ClusterNr field, and 9 is usearch's TargetLabel field """ hit_miss_index = 0 cluster_id_index = otu_id_field seq_label_index = 8 otus = {} unassigned_seqs = [] for line in uc_lines: # skip empty, comment lines if line.startswith('#') or len(line.strip()) == 0: continue curr_line = line.split('\t') if curr_line[hit_miss_index] == 'N': # only retaining actual sequence label unassigned_seqs.append(curr_line[seq_label_index].split()[0]) if curr_line[hit_miss_index] == 'H': curr_seq_label = curr_line[seq_label_index].split()[0] curr_otu_id = curr_line[cluster_id_index].split()[0] # Append sequence label to dictionary, or create key try: otus[curr_otu_id].append(curr_seq_label) except KeyError: otus[curr_otu_id] = [curr_seq_label] return otus, unassigned_seqs
[ "def", "clusters_from_blast_uc_file", "(", "uc_lines", ",", "otu_id_field", "=", "1", ")", ":", "hit_miss_index", "=", "0", "cluster_id_index", "=", "otu_id_field", "seq_label_index", "=", "8", "otus", "=", "{", "}", "unassigned_seqs", "=", "[", "]", "for", "l...
Parses out hit/miss sequences from usearch blast uc file All lines should be 'H'it or 'N'o hit. Returns a dict of OTU ids: sequence labels of the hits, and a list of all sequence labels that miss. uc_lines = open file object of uc file otu_id_field: uc field to use as the otu id. 1 is usearch's ClusterNr field, and 9 is usearch's TargetLabel field
[ "Parses", "out", "hit", "/", "miss", "sequences", "from", "usearch", "blast", "uc", "file" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L253-L294
biocore/burrito-fillings
bfillings/usearch.py
usearch_fasta_sort_from_filepath
def usearch_fasta_sort_from_filepath( fasta_filepath, output_filepath=None, log_name="sortlen.log", HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """Generates sorted fasta file via usearch --mergesort. fasta_filepath: filepath to input fasta file output_filepath: filepath for output sorted fasta file. log_name: string to specify log filename HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created.""" if not output_filepath: _, output_filepath = mkstemp(prefix='usearch_fasta_sort', suffix='.fasta') log_filepath = join(working_dir, log_name) params = {} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) data = {'--mergesort': fasta_filepath, '--output': output_filepath, } if not remove_usearch_logs: data['--log'] = log_filepath app_result = app(data) return app_result, output_filepath
python
def usearch_fasta_sort_from_filepath( fasta_filepath, output_filepath=None, log_name="sortlen.log", HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """Generates sorted fasta file via usearch --mergesort. fasta_filepath: filepath to input fasta file output_filepath: filepath for output sorted fasta file. log_name: string to specify log filename HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created.""" if not output_filepath: _, output_filepath = mkstemp(prefix='usearch_fasta_sort', suffix='.fasta') log_filepath = join(working_dir, log_name) params = {} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) data = {'--mergesort': fasta_filepath, '--output': output_filepath, } if not remove_usearch_logs: data['--log'] = log_filepath app_result = app(data) return app_result, output_filepath
[ "def", "usearch_fasta_sort_from_filepath", "(", "fasta_filepath", ",", "output_filepath", "=", "None", ",", "log_name", "=", "\"sortlen.log\"", ",", "HALT_EXEC", "=", "False", ",", "save_intermediate_files", "=", "False", ",", "remove_usearch_logs", "=", "False", ",",...
Generates sorted fasta file via usearch --mergesort. fasta_filepath: filepath to input fasta file output_filepath: filepath for output sorted fasta file. log_name: string to specify log filename HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created.
[ "Generates", "sorted", "fasta", "file", "via", "usearch", "--", "mergesort", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L299-L333
biocore/burrito-fillings
bfillings/usearch.py
usearch_sort_by_abundance
def usearch_sort_by_abundance( fasta_filepath, output_filepath=None, sizein=True, sizeout=True, minsize=0, log_name="abundance_sort.log", usersort=False, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """ Sorts fasta file by abundance fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output abundance sorted fasta filepath sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring minsize = minimum size of cluster to retain. log_name = string to specify log filename usersort = Use if not sorting by abundance or usearch will raise an error HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. """ if not output_filepath: _, output_filepath = mkstemp(prefix='usearch_abundance_sorted', suffix='.fasta') log_filepath = join( working_dir, "minsize_" + str(minsize) + "_" + log_name) params = {} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() if minsize: app.Parameters['--minsize'].on(minsize) if sizein: app.Parameters['--sizein'].on() if sizeout: app.Parameters['--sizeout'].on() data = {'--sortsize': fasta_filepath, '--output': output_filepath } if not remove_usearch_logs: data['--log'] = log_filepath # Can have no data following this filter step, which will raise an # application error, try to catch it here to raise meaningful message. try: app_result = app(data) except ApplicationError: raise ValueError('No data following filter steps, please check ' + 'parameter settings for usearch_qf.') return app_result, output_filepath
python
def usearch_sort_by_abundance( fasta_filepath, output_filepath=None, sizein=True, sizeout=True, minsize=0, log_name="abundance_sort.log", usersort=False, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """ Sorts fasta file by abundance fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output abundance sorted fasta filepath sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring minsize = minimum size of cluster to retain. log_name = string to specify log filename usersort = Use if not sorting by abundance or usearch will raise an error HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. """ if not output_filepath: _, output_filepath = mkstemp(prefix='usearch_abundance_sorted', suffix='.fasta') log_filepath = join( working_dir, "minsize_" + str(minsize) + "_" + log_name) params = {} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() if minsize: app.Parameters['--minsize'].on(minsize) if sizein: app.Parameters['--sizein'].on() if sizeout: app.Parameters['--sizeout'].on() data = {'--sortsize': fasta_filepath, '--output': output_filepath } if not remove_usearch_logs: data['--log'] = log_filepath # Can have no data following this filter step, which will raise an # application error, try to catch it here to raise meaningful message. try: app_result = app(data) except ApplicationError: raise ValueError('No data following filter steps, please check ' + 'parameter settings for usearch_qf.') return app_result, output_filepath
[ "def", "usearch_sort_by_abundance", "(", "fasta_filepath", ",", "output_filepath", "=", "None", ",", "sizein", "=", "True", ",", "sizeout", "=", "True", ",", "minsize", "=", "0", ",", "log_name", "=", "\"abundance_sort.log\"", ",", "usersort", "=", "False", ",...
Sorts fasta file by abundance fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output abundance sorted fasta filepath sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring minsize = minimum size of cluster to retain. log_name = string to specify log filename usersort = Use if not sorting by abundance or usearch will raise an error HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created.
[ "Sorts", "fasta", "file", "by", "abundance" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L477-L541
biocore/burrito-fillings
bfillings/usearch.py
usearch_cluster_error_correction
def usearch_cluster_error_correction( fasta_filepath, output_filepath=None, output_uc_filepath=None, percent_id_err=0.97, sizein=True, sizeout=True, w=64, slots=16769023, maxrejects=64, log_name="usearch_cluster_err_corrected.log", usersort=False, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """ Cluster for err. correction at percent_id_err, output consensus fasta fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output error corrected fasta filepath percent_id_err = minimum identity percent. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. log_name = string specifying output log name usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. """ if not output_filepath: _, output_filepath = mkstemp(prefix='usearch_cluster_err_corrected', suffix='.fasta') log_filepath = join(working_dir, log_name) params = {'--sizein': sizein, '--sizeout': sizeout, '--id': percent_id_err, '--w': w, '--slots': slots, '--maxrejects': maxrejects} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() data = {'--cluster': fasta_filepath, '--consout': output_filepath } if not remove_usearch_logs: data['--log'] = log_filepath if output_uc_filepath: data['--uc'] = output_uc_filepath app_result = app(data) return app_result, output_filepath
python
def usearch_cluster_error_correction( fasta_filepath, output_filepath=None, output_uc_filepath=None, percent_id_err=0.97, sizein=True, sizeout=True, w=64, slots=16769023, maxrejects=64, log_name="usearch_cluster_err_corrected.log", usersort=False, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """ Cluster for err. correction at percent_id_err, output consensus fasta fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output error corrected fasta filepath percent_id_err = minimum identity percent. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. log_name = string specifying output log name usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. """ if not output_filepath: _, output_filepath = mkstemp(prefix='usearch_cluster_err_corrected', suffix='.fasta') log_filepath = join(working_dir, log_name) params = {'--sizein': sizein, '--sizeout': sizeout, '--id': percent_id_err, '--w': w, '--slots': slots, '--maxrejects': maxrejects} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() data = {'--cluster': fasta_filepath, '--consout': output_filepath } if not remove_usearch_logs: data['--log'] = log_filepath if output_uc_filepath: data['--uc'] = output_uc_filepath app_result = app(data) return app_result, output_filepath
[ "def", "usearch_cluster_error_correction", "(", "fasta_filepath", ",", "output_filepath", "=", "None", ",", "output_uc_filepath", "=", "None", ",", "percent_id_err", "=", "0.97", ",", "sizein", "=", "True", ",", "sizeout", "=", "True", ",", "w", "=", "64", ","...
Cluster for err. correction at percent_id_err, output consensus fasta fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output error corrected fasta filepath percent_id_err = minimum identity percent. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. log_name = string specifying output log name usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created.
[ "Cluster", "for", "err", ".", "correction", "at", "percent_id_err", "output", "consensus", "fasta" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L544-L607
biocore/burrito-fillings
bfillings/usearch.py
usearch_chimera_filter_de_novo
def usearch_chimera_filter_de_novo( fasta_filepath, output_chimera_filepath=None, output_non_chimera_filepath=None, abundance_skew=2.0, log_name="uchime_de_novo_chimera_filtering.log", usersort=False, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """ Chimera filter de novo, output chimeras and non-chimeras to fastas fasta_filepath = input fasta file, generally a dereplicated fasta output_chimera_filepath = output chimera filepath output_non_chimera_filepath = output non chimera filepath abundance_skew = abundance skew setting for de novo filtering. usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. """ if not output_chimera_filepath: _, output_chimera_filepath = mkstemp(prefix='uchime_chimeras_', suffix='.fasta') if not output_non_chimera_filepath: _, output_non_chimera_filepath = mkstemp(prefix='uchime_non_chimeras_', suffix='.fasta') log_filepath = join(working_dir, log_name) params = {'--abskew': abundance_skew} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() data = {'--uchime': fasta_filepath, '--chimeras': output_chimera_filepath, '--nonchimeras': output_non_chimera_filepath } if not remove_usearch_logs: data['--log'] = log_filepath app_result = app(data) if not save_intermediate_files: remove_files([output_chimera_filepath]) return app_result, output_non_chimera_filepath
python
def usearch_chimera_filter_de_novo( fasta_filepath, output_chimera_filepath=None, output_non_chimera_filepath=None, abundance_skew=2.0, log_name="uchime_de_novo_chimera_filtering.log", usersort=False, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, working_dir=None): """ Chimera filter de novo, output chimeras and non-chimeras to fastas fasta_filepath = input fasta file, generally a dereplicated fasta output_chimera_filepath = output chimera filepath output_non_chimera_filepath = output non chimera filepath abundance_skew = abundance skew setting for de novo filtering. usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. """ if not output_chimera_filepath: _, output_chimera_filepath = mkstemp(prefix='uchime_chimeras_', suffix='.fasta') if not output_non_chimera_filepath: _, output_non_chimera_filepath = mkstemp(prefix='uchime_non_chimeras_', suffix='.fasta') log_filepath = join(working_dir, log_name) params = {'--abskew': abundance_skew} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() data = {'--uchime': fasta_filepath, '--chimeras': output_chimera_filepath, '--nonchimeras': output_non_chimera_filepath } if not remove_usearch_logs: data['--log'] = log_filepath app_result = app(data) if not save_intermediate_files: remove_files([output_chimera_filepath]) return app_result, output_non_chimera_filepath
[ "def", "usearch_chimera_filter_de_novo", "(", "fasta_filepath", ",", "output_chimera_filepath", "=", "None", ",", "output_non_chimera_filepath", "=", "None", ",", "abundance_skew", "=", "2.0", ",", "log_name", "=", "\"uchime_de_novo_chimera_filtering.log\"", ",", "usersort"...
Chimera filter de novo, output chimeras and non-chimeras to fastas fasta_filepath = input fasta file, generally a dereplicated fasta output_chimera_filepath = output chimera filepath output_non_chimera_filepath = output non chimera filepath abundance_skew = abundance skew setting for de novo filtering. usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created.
[ "Chimera", "filter", "de", "novo", "output", "chimeras", "and", "non", "-", "chimeras", "to", "fastas" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L610-L662
biocore/burrito-fillings
bfillings/usearch.py
usearch_cluster_seqs_ref
def usearch_cluster_seqs_ref( fasta_filepath, output_filepath=None, percent_id=0.97, sizein=True, sizeout=True, w=64, slots=16769023, maxrejects=64, log_name="usearch_cluster_seqs.log", usersort=True, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, suppress_new_clusters=False, refseqs_fp=None, output_dir=None, working_dir=None, rev=False): """ Cluster seqs at percent_id, output consensus fasta Also appends de novo clustered seqs if suppress_new_clusters is False. Forced to handle reference + de novo in hackish fashion as usearch does not work as listed in the helpstrings. Any failures are clustered de novo, and given unique cluster IDs. fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output reference clustered uc filepath percent_id = minimum identity percent. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. log_name = string specifying output log name usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. In post chimera checked sequences, the seqs are sorted by abundance, so this should be set to True. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. suppress_new_clusters: Disables de novo OTUs when ref based OTU picking enabled. 
refseqs_fp: Filepath for ref based OTU picking output_dir: output directory rev = search plus and minus strands of sequences """ if not output_filepath: _, output_filepath = mkstemp(prefix='usearch_cluster_ref_based', suffix='.uc') log_filepath = join(working_dir, log_name) uc_filepath = join(working_dir, "clustered_seqs_post_chimera.uc") params = {'--sizein': sizein, '--sizeout': sizeout, '--id': percent_id, '--w': w, '--slots': slots, '--maxrejects': maxrejects} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() if rev: app.Parameters['--rev'].on() data = {'--query': fasta_filepath, '--uc': uc_filepath, '--db': refseqs_fp } if not remove_usearch_logs: data['--log'] = log_filepath app_result = app(data) files_to_remove = [] # Need to create fasta file of all hits (with reference IDs), # recluster failures if new clusters allowed, and create complete fasta # file, with unique fasta label IDs. if suppress_new_clusters: output_fna_filepath = join(output_dir, 'ref_clustered_seqs.fasta') output_filepath, labels_hits = get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H", output_dir=output_dir, output_fna_filepath=output_fna_filepath) files_to_remove.append(uc_filepath) else: # Get fasta of successful ref based clusters output_fna_clustered = join(output_dir, 'ref_clustered_seqs.fasta') output_filepath_ref_clusters, labels_hits =\ get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H", output_dir=output_dir, output_fna_filepath=output_fna_clustered) # get failures and recluster output_fna_failures =\ join(output_dir, 'ref_clustered_seqs_failures.fasta') output_filepath_failures, labels_hits =\ get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="N", output_dir=output_dir, output_fna_filepath=output_fna_failures) # de novo cluster the failures app_result, output_filepath_clustered_failures =\ usearch_cluster_seqs(output_fna_failures, output_filepath= join( output_dir, 
'clustered_seqs_reference_failures.fasta'), percent_id=percent_id, sizein=sizein, sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects, save_intermediate_files=save_intermediate_files, remove_usearch_logs=remove_usearch_logs, working_dir=working_dir) output_filepath = concatenate_fastas(output_fna_clustered, output_fna_failures, output_concat_filepath=join( output_dir, 'concatenated_reference_denovo_clusters.fasta')) files_to_remove.append(output_fna_clustered) files_to_remove.append(output_fna_failures) files_to_remove.append(output_filepath_clustered_failures) if not save_intermediate_files: remove_files(files_to_remove) return app_result, output_filepath
python
def usearch_cluster_seqs_ref( fasta_filepath, output_filepath=None, percent_id=0.97, sizein=True, sizeout=True, w=64, slots=16769023, maxrejects=64, log_name="usearch_cluster_seqs.log", usersort=True, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, suppress_new_clusters=False, refseqs_fp=None, output_dir=None, working_dir=None, rev=False): """ Cluster seqs at percent_id, output consensus fasta Also appends de novo clustered seqs if suppress_new_clusters is False. Forced to handle reference + de novo in hackish fashion as usearch does not work as listed in the helpstrings. Any failures are clustered de novo, and given unique cluster IDs. fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output reference clustered uc filepath percent_id = minimum identity percent. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. log_name = string specifying output log name usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. In post chimera checked sequences, the seqs are sorted by abundance, so this should be set to True. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. suppress_new_clusters: Disables de novo OTUs when ref based OTU picking enabled. 
refseqs_fp: Filepath for ref based OTU picking output_dir: output directory rev = search plus and minus strands of sequences """ if not output_filepath: _, output_filepath = mkstemp(prefix='usearch_cluster_ref_based', suffix='.uc') log_filepath = join(working_dir, log_name) uc_filepath = join(working_dir, "clustered_seqs_post_chimera.uc") params = {'--sizein': sizein, '--sizeout': sizeout, '--id': percent_id, '--w': w, '--slots': slots, '--maxrejects': maxrejects} app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC) if usersort: app.Parameters['--usersort'].on() if rev: app.Parameters['--rev'].on() data = {'--query': fasta_filepath, '--uc': uc_filepath, '--db': refseqs_fp } if not remove_usearch_logs: data['--log'] = log_filepath app_result = app(data) files_to_remove = [] # Need to create fasta file of all hits (with reference IDs), # recluster failures if new clusters allowed, and create complete fasta # file, with unique fasta label IDs. if suppress_new_clusters: output_fna_filepath = join(output_dir, 'ref_clustered_seqs.fasta') output_filepath, labels_hits = get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H", output_dir=output_dir, output_fna_filepath=output_fna_filepath) files_to_remove.append(uc_filepath) else: # Get fasta of successful ref based clusters output_fna_clustered = join(output_dir, 'ref_clustered_seqs.fasta') output_filepath_ref_clusters, labels_hits =\ get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H", output_dir=output_dir, output_fna_filepath=output_fna_clustered) # get failures and recluster output_fna_failures =\ join(output_dir, 'ref_clustered_seqs_failures.fasta') output_filepath_failures, labels_hits =\ get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="N", output_dir=output_dir, output_fna_filepath=output_fna_failures) # de novo cluster the failures app_result, output_filepath_clustered_failures =\ usearch_cluster_seqs(output_fna_failures, output_filepath= join( output_dir, 
'clustered_seqs_reference_failures.fasta'), percent_id=percent_id, sizein=sizein, sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects, save_intermediate_files=save_intermediate_files, remove_usearch_logs=remove_usearch_logs, working_dir=working_dir) output_filepath = concatenate_fastas(output_fna_clustered, output_fna_failures, output_concat_filepath=join( output_dir, 'concatenated_reference_denovo_clusters.fasta')) files_to_remove.append(output_fna_clustered) files_to_remove.append(output_fna_failures) files_to_remove.append(output_filepath_clustered_failures) if not save_intermediate_files: remove_files(files_to_remove) return app_result, output_filepath
[ "def", "usearch_cluster_seqs_ref", "(", "fasta_filepath", ",", "output_filepath", "=", "None", ",", "percent_id", "=", "0.97", ",", "sizein", "=", "True", ",", "sizeout", "=", "True", ",", "w", "=", "64", ",", "slots", "=", "16769023", ",", "maxrejects", "...
Cluster seqs at percent_id, output consensus fasta Also appends de novo clustered seqs if suppress_new_clusters is False. Forced to handle reference + de novo in hackish fashion as usearch does not work as listed in the helpstrings. Any failures are clustered de novo, and given unique cluster IDs. fasta_filepath = input fasta file, generally a dereplicated fasta output_filepath = output reference clustered uc filepath percent_id = minimum identity percent. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. log_name = string specifying output log name usersort = Enable if input fasta not sorted by length purposefully, lest usearch will raise an error. In post chimera checked sequences, the seqs are sorted by abundance, so this should be set to True. HALT_EXEC: Used for debugging app controller save_intermediate_files: Preserve all intermediate files created. suppress_new_clusters: Disables de novo OTUs when ref based OTU picking enabled. refseqs_fp: Filepath for ref based OTU picking output_dir: output directory rev = search plus and minus strands of sequences
[ "Cluster", "seqs", "at", "percent_id", "output", "consensus", "fasta" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L801-L931
biocore/burrito-fillings
bfillings/usearch.py
concatenate_fastas
def concatenate_fastas(output_fna_clustered, output_fna_failures, output_concat_filepath): """ Concatenates two input fastas, writes to output_concat_filepath output_fna_clustered: fasta of successful ref clusters output_fna_failures: de novo fasta of cluster failures output_concat_filepath: path to write combined fastas to """ output_fp = open(output_concat_filepath, "w") for label, seq in parse_fasta(open(output_fna_clustered, "U")): output_fp.write(">%s\n%s\n" % (label, seq)) for label, seq in parse_fasta(open(output_fna_failures, "U")): output_fp.write(">%s\n%s\n" % (label, seq)) return output_concat_filepath
python
def concatenate_fastas(output_fna_clustered, output_fna_failures, output_concat_filepath): """ Concatenates two input fastas, writes to output_concat_filepath output_fna_clustered: fasta of successful ref clusters output_fna_failures: de novo fasta of cluster failures output_concat_filepath: path to write combined fastas to """ output_fp = open(output_concat_filepath, "w") for label, seq in parse_fasta(open(output_fna_clustered, "U")): output_fp.write(">%s\n%s\n" % (label, seq)) for label, seq in parse_fasta(open(output_fna_failures, "U")): output_fp.write(">%s\n%s\n" % (label, seq)) return output_concat_filepath
[ "def", "concatenate_fastas", "(", "output_fna_clustered", ",", "output_fna_failures", ",", "output_concat_filepath", ")", ":", "output_fp", "=", "open", "(", "output_concat_filepath", ",", "\"w\"", ")", "for", "label", ",", "seq", "in", "parse_fasta", "(", "open", ...
Concatenates two input fastas, writes to output_concat_filepath output_fna_clustered: fasta of successful ref clusters output_fna_failures: de novo fasta of cluster failures output_concat_filepath: path to write combined fastas to
[ "Concatenates", "two", "input", "fastas", "writes", "to", "output_concat_filepath" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L934-L951
biocore/burrito-fillings
bfillings/usearch.py
enumerate_otus
def enumerate_otus(fasta_filepath, output_filepath=None, label_prefix="", label_suffix="", retain_label_as_comment=False, count_start=0): """ Writes unique, sequential count to OTUs fasta_filepath = input fasta filepath output_filepath = output fasta filepath label_prefix = string to place before enumeration label_suffix = string to place after enumeration retain_label_as_comment = if True, will place existing label in sequence comment, after a tab count_start = number to start enumerating OTUs with """ fasta_i = open(fasta_filepath, "U") if not output_filepath: _, output_filepath = mkstemp(prefix='enumerated_seqs_', suffix='.fasta') fasta_o = open(output_filepath, "w") for label, seq in parse_fasta(fasta_i): curr_label = ">" + label_prefix + str(count_start) + label_suffix if retain_label_as_comment: curr_label += '\t' + label fasta_o.write(curr_label.strip() + '\n') fasta_o.write(seq.strip() + '\n') count_start += 1 return output_filepath
python
def enumerate_otus(fasta_filepath, output_filepath=None, label_prefix="", label_suffix="", retain_label_as_comment=False, count_start=0): """ Writes unique, sequential count to OTUs fasta_filepath = input fasta filepath output_filepath = output fasta filepath label_prefix = string to place before enumeration label_suffix = string to place after enumeration retain_label_as_comment = if True, will place existing label in sequence comment, after a tab count_start = number to start enumerating OTUs with """ fasta_i = open(fasta_filepath, "U") if not output_filepath: _, output_filepath = mkstemp(prefix='enumerated_seqs_', suffix='.fasta') fasta_o = open(output_filepath, "w") for label, seq in parse_fasta(fasta_i): curr_label = ">" + label_prefix + str(count_start) + label_suffix if retain_label_as_comment: curr_label += '\t' + label fasta_o.write(curr_label.strip() + '\n') fasta_o.write(seq.strip() + '\n') count_start += 1 return output_filepath
[ "def", "enumerate_otus", "(", "fasta_filepath", ",", "output_filepath", "=", "None", ",", "label_prefix", "=", "\"\"", ",", "label_suffix", "=", "\"\"", ",", "retain_label_as_comment", "=", "False", ",", "count_start", "=", "0", ")", ":", "fasta_i", "=", "open...
Writes unique, sequential count to OTUs fasta_filepath = input fasta filepath output_filepath = output fasta filepath label_prefix = string to place before enumeration label_suffix = string to place after enumeration retain_label_as_comment = if True, will place existing label in sequence comment, after a tab count_start = number to start enumerating OTUs with
[ "Writes", "unique", "sequential", "count", "to", "OTUs" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L954-L988
biocore/burrito-fillings
bfillings/usearch.py
get_fasta_from_uc_file
def get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H", output_fna_filepath=None, label_prefix="", output_dir=None): """ writes fasta of sequences from uc file of type hit_type fasta_filepath: Filepath of original query fasta file uc_filepath: Filepath of .uc file created by usearch post error filtering hit_type: type to read from first field of .uc file, "H" for hits, "N" for no hits. output_fna_filepath = fasta output filepath label_prefix = Added before each fasta label, important when doing ref based OTU picking plus de novo clustering to preserve label matching. output_dir: output directory """ hit_type_index = 0 seq_label_index = 8 target_label_index = 9 labels_hits = {} labels_to_keep = [] for line in open(uc_filepath, "U"): if line.startswith("#") or len(line.strip()) == 0: continue curr_line = line.split('\t') if curr_line[0] == hit_type: labels_hits[curr_line[seq_label_index]] =\ curr_line[target_label_index].strip() labels_to_keep.append(curr_line[seq_label_index]) labels_to_keep = set(labels_to_keep) out_fna = open(output_fna_filepath, "w") for label, seq in parse_fasta(open(fasta_filepath, "U")): if label in labels_to_keep: if hit_type == "H": out_fna.write(">" + labels_hits[label] + "\n%s\n" % seq) if hit_type == "N": out_fna.write(">" + label + "\n%s\n" % seq) return output_fna_filepath, labels_hits
python
def get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H", output_fna_filepath=None, label_prefix="", output_dir=None): """ writes fasta of sequences from uc file of type hit_type fasta_filepath: Filepath of original query fasta file uc_filepath: Filepath of .uc file created by usearch post error filtering hit_type: type to read from first field of .uc file, "H" for hits, "N" for no hits. output_fna_filepath = fasta output filepath label_prefix = Added before each fasta label, important when doing ref based OTU picking plus de novo clustering to preserve label matching. output_dir: output directory """ hit_type_index = 0 seq_label_index = 8 target_label_index = 9 labels_hits = {} labels_to_keep = [] for line in open(uc_filepath, "U"): if line.startswith("#") or len(line.strip()) == 0: continue curr_line = line.split('\t') if curr_line[0] == hit_type: labels_hits[curr_line[seq_label_index]] =\ curr_line[target_label_index].strip() labels_to_keep.append(curr_line[seq_label_index]) labels_to_keep = set(labels_to_keep) out_fna = open(output_fna_filepath, "w") for label, seq in parse_fasta(open(fasta_filepath, "U")): if label in labels_to_keep: if hit_type == "H": out_fna.write(">" + labels_hits[label] + "\n%s\n" % seq) if hit_type == "N": out_fna.write(">" + label + "\n%s\n" % seq) return output_fna_filepath, labels_hits
[ "def", "get_fasta_from_uc_file", "(", "fasta_filepath", ",", "uc_filepath", ",", "hit_type", "=", "\"H\"", ",", "output_fna_filepath", "=", "None", ",", "label_prefix", "=", "\"\"", ",", "output_dir", "=", "None", ")", ":", "hit_type_index", "=", "0", "seq_label...
writes fasta of sequences from uc file of type hit_type fasta_filepath: Filepath of original query fasta file uc_filepath: Filepath of .uc file created by usearch post error filtering hit_type: type to read from first field of .uc file, "H" for hits, "N" for no hits. output_fna_filepath = fasta output filepath label_prefix = Added before each fasta label, important when doing ref based OTU picking plus de novo clustering to preserve label matching. output_dir: output directory
[ "writes", "fasta", "of", "sequences", "from", "uc", "file", "of", "type", "hit_type" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L991-L1036