repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
greenbone/ospd
ospd/misc.py
valid_uuid
def valid_uuid(value): """ Check if value is a valid UUID. """ try: uuid.UUID(value, version=4) return True except (TypeError, ValueError, AttributeError): return False
python
def valid_uuid(value): """ Check if value is a valid UUID. """ try: uuid.UUID(value, version=4) return True except (TypeError, ValueError, AttributeError): return False
[ "def", "valid_uuid", "(", "value", ")", ":", "try", ":", "uuid", ".", "UUID", "(", "value", ",", "version", "=", "4", ")", "return", "True", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "return", "False" ]
Check if value is a valid UUID.
[ "Check", "if", "value", "is", "a", "valid", "UUID", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L762-L769
train
16,900
greenbone/ospd
ospd/misc.py
create_args_parser
def create_args_parser(description): """ Create a command-line arguments parser for OSPD. """ parser = argparse.ArgumentParser(description=description) def network_port(string): """ Check if provided string is a valid network port. """ value = int(string) if not 0 < value <= 65535: raise argparse.ArgumentTypeError( 'port must be in ]0,65535] interval') return value def cacert_file(cacert): """ Check if provided file is a valid CA Certificate """ try: context = ssl.create_default_context(cafile=cacert) except AttributeError: # Python version < 2.7.9 return cacert except IOError: raise argparse.ArgumentTypeError('CA Certificate not found') try: not_after = context.get_ca_certs()[0]['notAfter'] not_after = ssl.cert_time_to_seconds(not_after) not_before = context.get_ca_certs()[0]['notBefore'] not_before = ssl.cert_time_to_seconds(not_before) except (KeyError, IndexError): raise argparse.ArgumentTypeError('CA Certificate is erroneous') if not_after < int(time.time()): raise argparse.ArgumentTypeError('CA Certificate expired') if not_before > int(time.time()): raise argparse.ArgumentTypeError('CA Certificate not active yet') return cacert def log_level(string): """ Check if provided string is a valid log level. """ value = getattr(logging, string.upper(), None) if not isinstance(value, int): raise argparse.ArgumentTypeError( 'log level must be one of {debug,info,warning,error,critical}') return value def filename(string): """ Check if provided string is a valid file path. """ if not os.path.isfile(string): raise argparse.ArgumentTypeError( '%s is not a valid file path' % string) return string parser.add_argument('-p', '--port', default=PORT, type=network_port, help='TCP Port to listen on. Default: {0}'.format(PORT)) parser.add_argument('-b', '--bind-address', default=ADDRESS, help='Address to listen on. 
Default: {0}' .format(ADDRESS)) parser.add_argument('-u', '--unix-socket', help='Unix file socket to listen on.') parser.add_argument('-k', '--key-file', type=filename, help='Server key file. Default: {0}'.format(KEY_FILE)) parser.add_argument('-c', '--cert-file', type=filename, help='Server cert file. Default: {0}'.format(CERT_FILE)) parser.add_argument('--ca-file', type=cacert_file, help='CA cert file. Default: {0}'.format(CA_FILE)) parser.add_argument('-L', '--log-level', default='warning', type=log_level, help='Wished level of logging. Default: WARNING') parser.add_argument('--foreground', action='store_true', help='Run in foreground and logs all messages to console.') parser.add_argument('-l', '--log-file', type=filename, help='Path to the logging file.') parser.add_argument('--version', action='store_true', help='Print version then exit.') return parser
python
def create_args_parser(description): """ Create a command-line arguments parser for OSPD. """ parser = argparse.ArgumentParser(description=description) def network_port(string): """ Check if provided string is a valid network port. """ value = int(string) if not 0 < value <= 65535: raise argparse.ArgumentTypeError( 'port must be in ]0,65535] interval') return value def cacert_file(cacert): """ Check if provided file is a valid CA Certificate """ try: context = ssl.create_default_context(cafile=cacert) except AttributeError: # Python version < 2.7.9 return cacert except IOError: raise argparse.ArgumentTypeError('CA Certificate not found') try: not_after = context.get_ca_certs()[0]['notAfter'] not_after = ssl.cert_time_to_seconds(not_after) not_before = context.get_ca_certs()[0]['notBefore'] not_before = ssl.cert_time_to_seconds(not_before) except (KeyError, IndexError): raise argparse.ArgumentTypeError('CA Certificate is erroneous') if not_after < int(time.time()): raise argparse.ArgumentTypeError('CA Certificate expired') if not_before > int(time.time()): raise argparse.ArgumentTypeError('CA Certificate not active yet') return cacert def log_level(string): """ Check if provided string is a valid log level. """ value = getattr(logging, string.upper(), None) if not isinstance(value, int): raise argparse.ArgumentTypeError( 'log level must be one of {debug,info,warning,error,critical}') return value def filename(string): """ Check if provided string is a valid file path. """ if not os.path.isfile(string): raise argparse.ArgumentTypeError( '%s is not a valid file path' % string) return string parser.add_argument('-p', '--port', default=PORT, type=network_port, help='TCP Port to listen on. Default: {0}'.format(PORT)) parser.add_argument('-b', '--bind-address', default=ADDRESS, help='Address to listen on. 
Default: {0}' .format(ADDRESS)) parser.add_argument('-u', '--unix-socket', help='Unix file socket to listen on.') parser.add_argument('-k', '--key-file', type=filename, help='Server key file. Default: {0}'.format(KEY_FILE)) parser.add_argument('-c', '--cert-file', type=filename, help='Server cert file. Default: {0}'.format(CERT_FILE)) parser.add_argument('--ca-file', type=cacert_file, help='CA cert file. Default: {0}'.format(CA_FILE)) parser.add_argument('-L', '--log-level', default='warning', type=log_level, help='Wished level of logging. Default: WARNING') parser.add_argument('--foreground', action='store_true', help='Run in foreground and logs all messages to console.') parser.add_argument('-l', '--log-file', type=filename, help='Path to the logging file.') parser.add_argument('--version', action='store_true', help='Print version then exit.') return parser
[ "def", "create_args_parser", "(", "description", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "description", ")", "def", "network_port", "(", "string", ")", ":", "\"\"\" Check if provided string is a valid network port. \"\"\"", "value", "=", "int", "(", "string", ")", "if", "not", "0", "<", "value", "<=", "65535", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "'port must be in ]0,65535] interval'", ")", "return", "value", "def", "cacert_file", "(", "cacert", ")", ":", "\"\"\" Check if provided file is a valid CA Certificate \"\"\"", "try", ":", "context", "=", "ssl", ".", "create_default_context", "(", "cafile", "=", "cacert", ")", "except", "AttributeError", ":", "# Python version < 2.7.9", "return", "cacert", "except", "IOError", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "'CA Certificate not found'", ")", "try", ":", "not_after", "=", "context", ".", "get_ca_certs", "(", ")", "[", "0", "]", "[", "'notAfter'", "]", "not_after", "=", "ssl", ".", "cert_time_to_seconds", "(", "not_after", ")", "not_before", "=", "context", ".", "get_ca_certs", "(", ")", "[", "0", "]", "[", "'notBefore'", "]", "not_before", "=", "ssl", ".", "cert_time_to_seconds", "(", "not_before", ")", "except", "(", "KeyError", ",", "IndexError", ")", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "'CA Certificate is erroneous'", ")", "if", "not_after", "<", "int", "(", "time", ".", "time", "(", ")", ")", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "'CA Certificate expired'", ")", "if", "not_before", ">", "int", "(", "time", ".", "time", "(", ")", ")", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "'CA Certificate not active yet'", ")", "return", "cacert", "def", "log_level", "(", "string", ")", ":", "\"\"\" Check if provided string is a valid log level. 
\"\"\"", "value", "=", "getattr", "(", "logging", ",", "string", ".", "upper", "(", ")", ",", "None", ")", "if", "not", "isinstance", "(", "value", ",", "int", ")", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "'log level must be one of {debug,info,warning,error,critical}'", ")", "return", "value", "def", "filename", "(", "string", ")", ":", "\"\"\" Check if provided string is a valid file path. \"\"\"", "if", "not", "os", ".", "path", ".", "isfile", "(", "string", ")", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "'%s is not a valid file path'", "%", "string", ")", "return", "string", "parser", ".", "add_argument", "(", "'-p'", ",", "'--port'", ",", "default", "=", "PORT", ",", "type", "=", "network_port", ",", "help", "=", "'TCP Port to listen on. Default: {0}'", ".", "format", "(", "PORT", ")", ")", "parser", ".", "add_argument", "(", "'-b'", ",", "'--bind-address'", ",", "default", "=", "ADDRESS", ",", "help", "=", "'Address to listen on. Default: {0}'", ".", "format", "(", "ADDRESS", ")", ")", "parser", ".", "add_argument", "(", "'-u'", ",", "'--unix-socket'", ",", "help", "=", "'Unix file socket to listen on.'", ")", "parser", ".", "add_argument", "(", "'-k'", ",", "'--key-file'", ",", "type", "=", "filename", ",", "help", "=", "'Server key file. Default: {0}'", ".", "format", "(", "KEY_FILE", ")", ")", "parser", ".", "add_argument", "(", "'-c'", ",", "'--cert-file'", ",", "type", "=", "filename", ",", "help", "=", "'Server cert file. Default: {0}'", ".", "format", "(", "CERT_FILE", ")", ")", "parser", ".", "add_argument", "(", "'--ca-file'", ",", "type", "=", "cacert_file", ",", "help", "=", "'CA cert file. Default: {0}'", ".", "format", "(", "CA_FILE", ")", ")", "parser", ".", "add_argument", "(", "'-L'", ",", "'--log-level'", ",", "default", "=", "'warning'", ",", "type", "=", "log_level", ",", "help", "=", "'Wished level of logging. 
Default: WARNING'", ")", "parser", ".", "add_argument", "(", "'--foreground'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Run in foreground and logs all messages to console.'", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--log-file'", ",", "type", "=", "filename", ",", "help", "=", "'Path to the logging file.'", ")", "parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Print version then exit.'", ")", "return", "parser" ]
Create a command-line arguments parser for OSPD.
[ "Create", "a", "command", "-", "line", "arguments", "parser", "for", "OSPD", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L772-L846
train
16,901
greenbone/ospd
ospd/misc.py
go_to_background
def go_to_background(): """ Daemonize the running process. """ try: if os.fork(): sys.exit() except OSError as errmsg: LOGGER.error('Fork failed: {0}'.format(errmsg)) sys.exit('Fork failed')
python
def go_to_background(): """ Daemonize the running process. """ try: if os.fork(): sys.exit() except OSError as errmsg: LOGGER.error('Fork failed: {0}'.format(errmsg)) sys.exit('Fork failed')
[ "def", "go_to_background", "(", ")", ":", "try", ":", "if", "os", ".", "fork", "(", ")", ":", "sys", ".", "exit", "(", ")", "except", "OSError", "as", "errmsg", ":", "LOGGER", ".", "error", "(", "'Fork failed: {0}'", ".", "format", "(", "errmsg", ")", ")", "sys", ".", "exit", "(", "'Fork failed'", ")" ]
Daemonize the running process.
[ "Daemonize", "the", "running", "process", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L849-L856
train
16,902
greenbone/ospd
ospd/misc.py
get_common_args
def get_common_args(parser, args=None): """ Return list of OSPD common command-line arguments from parser, after validating provided values or setting default ones. """ options = parser.parse_args(args) # TCP Port to listen on. port = options.port # Network address to bind listener to address = options.bind_address # Unix file socket to listen on unix_socket = options.unix_socket # Debug level. log_level = options.log_level # Server key path. keyfile = options.key_file or KEY_FILE # Server cert path. certfile = options.cert_file or CERT_FILE # CA cert path. cafile = options.ca_file or CA_FILE common_args = dict() common_args['port'] = port common_args['address'] = address common_args['unix_socket'] = unix_socket common_args['keyfile'] = keyfile common_args['certfile'] = certfile common_args['cafile'] = cafile common_args['log_level'] = log_level common_args['foreground'] = options.foreground common_args['log_file'] = options.log_file common_args['version'] = options.version return common_args
python
def get_common_args(parser, args=None): """ Return list of OSPD common command-line arguments from parser, after validating provided values or setting default ones. """ options = parser.parse_args(args) # TCP Port to listen on. port = options.port # Network address to bind listener to address = options.bind_address # Unix file socket to listen on unix_socket = options.unix_socket # Debug level. log_level = options.log_level # Server key path. keyfile = options.key_file or KEY_FILE # Server cert path. certfile = options.cert_file or CERT_FILE # CA cert path. cafile = options.ca_file or CA_FILE common_args = dict() common_args['port'] = port common_args['address'] = address common_args['unix_socket'] = unix_socket common_args['keyfile'] = keyfile common_args['certfile'] = certfile common_args['cafile'] = cafile common_args['log_level'] = log_level common_args['foreground'] = options.foreground common_args['log_file'] = options.log_file common_args['version'] = options.version return common_args
[ "def", "get_common_args", "(", "parser", ",", "args", "=", "None", ")", ":", "options", "=", "parser", ".", "parse_args", "(", "args", ")", "# TCP Port to listen on.", "port", "=", "options", ".", "port", "# Network address to bind listener to", "address", "=", "options", ".", "bind_address", "# Unix file socket to listen on", "unix_socket", "=", "options", ".", "unix_socket", "# Debug level.", "log_level", "=", "options", ".", "log_level", "# Server key path.", "keyfile", "=", "options", ".", "key_file", "or", "KEY_FILE", "# Server cert path.", "certfile", "=", "options", ".", "cert_file", "or", "CERT_FILE", "# CA cert path.", "cafile", "=", "options", ".", "ca_file", "or", "CA_FILE", "common_args", "=", "dict", "(", ")", "common_args", "[", "'port'", "]", "=", "port", "common_args", "[", "'address'", "]", "=", "address", "common_args", "[", "'unix_socket'", "]", "=", "unix_socket", "common_args", "[", "'keyfile'", "]", "=", "keyfile", "common_args", "[", "'certfile'", "]", "=", "certfile", "common_args", "[", "'cafile'", "]", "=", "cafile", "common_args", "[", "'log_level'", "]", "=", "log_level", "common_args", "[", "'foreground'", "]", "=", "options", ".", "foreground", "common_args", "[", "'log_file'", "]", "=", "options", ".", "log_file", "common_args", "[", "'version'", "]", "=", "options", ".", "version", "return", "common_args" ]
Return list of OSPD common command-line arguments from parser, after validating provided values or setting default ones.
[ "Return", "list", "of", "OSPD", "common", "command", "-", "line", "arguments", "from", "parser", "after", "validating", "provided", "values", "or", "setting", "default", "ones", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L859-L899
train
16,903
greenbone/ospd
ospd/misc.py
print_version
def print_version(wrapper): """ Prints the server version and license information.""" scanner_name = wrapper.get_scanner_name() server_version = wrapper.get_server_version() print("OSP Server for {0} version {1}".format(scanner_name, server_version)) protocol_version = wrapper.get_protocol_version() print("OSP Version: {0}".format(protocol_version)) daemon_name = wrapper.get_daemon_name() daemon_version = wrapper.get_daemon_version() print("Using: {0} {1}".format(daemon_name, daemon_version)) print("Copyright (C) 2014, 2015 Greenbone Networks GmbH\n" "License GPLv2+: GNU GPL version 2 or later\n" "This is free software: you are free to change" " and redistribute it.\n" "There is NO WARRANTY, to the extent permitted by law.")
python
def print_version(wrapper): """ Prints the server version and license information.""" scanner_name = wrapper.get_scanner_name() server_version = wrapper.get_server_version() print("OSP Server for {0} version {1}".format(scanner_name, server_version)) protocol_version = wrapper.get_protocol_version() print("OSP Version: {0}".format(protocol_version)) daemon_name = wrapper.get_daemon_name() daemon_version = wrapper.get_daemon_version() print("Using: {0} {1}".format(daemon_name, daemon_version)) print("Copyright (C) 2014, 2015 Greenbone Networks GmbH\n" "License GPLv2+: GNU GPL version 2 or later\n" "This is free software: you are free to change" " and redistribute it.\n" "There is NO WARRANTY, to the extent permitted by law.")
[ "def", "print_version", "(", "wrapper", ")", ":", "scanner_name", "=", "wrapper", ".", "get_scanner_name", "(", ")", "server_version", "=", "wrapper", ".", "get_server_version", "(", ")", "print", "(", "\"OSP Server for {0} version {1}\"", ".", "format", "(", "scanner_name", ",", "server_version", ")", ")", "protocol_version", "=", "wrapper", ".", "get_protocol_version", "(", ")", "print", "(", "\"OSP Version: {0}\"", ".", "format", "(", "protocol_version", ")", ")", "daemon_name", "=", "wrapper", ".", "get_daemon_name", "(", ")", "daemon_version", "=", "wrapper", ".", "get_daemon_version", "(", ")", "print", "(", "\"Using: {0} {1}\"", ".", "format", "(", "daemon_name", ",", "daemon_version", ")", ")", "print", "(", "\"Copyright (C) 2014, 2015 Greenbone Networks GmbH\\n\"", "\"License GPLv2+: GNU GPL version 2 or later\\n\"", "\"This is free software: you are free to change\"", "\" and redistribute it.\\n\"", "\"There is NO WARRANTY, to the extent permitted by law.\"", ")" ]
Prints the server version and license information.
[ "Prints", "the", "server", "version", "and", "license", "information", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L902-L917
train
16,904
greenbone/ospd
ospd/misc.py
main
def main(name, klass): """ OSPD Main function. """ # Common args parser. parser = create_args_parser(name) # Common args cargs = get_common_args(parser) logging.getLogger().setLevel(cargs['log_level']) wrapper = klass(certfile=cargs['certfile'], keyfile=cargs['keyfile'], cafile=cargs['cafile']) if cargs['version']: print_version(wrapper) sys.exit() if cargs['foreground']: console = logging.StreamHandler() console.setFormatter( logging.Formatter( '%(asctime)s %(name)s: %(levelname)s: %(message)s')) logging.getLogger().addHandler(console) elif cargs['log_file']: logfile = logging.handlers.WatchedFileHandler(cargs['log_file']) logfile.setFormatter( logging.Formatter( '%(asctime)s %(name)s: %(levelname)s: %(message)s')) logging.getLogger().addHandler(logfile) go_to_background() else: syslog = logging.handlers.SysLogHandler('/dev/log') syslog.setFormatter( logging.Formatter('%(name)s: %(levelname)s: %(message)s')) logging.getLogger().addHandler(syslog) # Duplicate syslog's file descriptor to stout/stderr. syslog_fd = syslog.socket.fileno() os.dup2(syslog_fd, 1) os.dup2(syslog_fd, 2) go_to_background() if not wrapper.check(): return 1 return wrapper.run(cargs['address'], cargs['port'], cargs['unix_socket'])
python
def main(name, klass): """ OSPD Main function. """ # Common args parser. parser = create_args_parser(name) # Common args cargs = get_common_args(parser) logging.getLogger().setLevel(cargs['log_level']) wrapper = klass(certfile=cargs['certfile'], keyfile=cargs['keyfile'], cafile=cargs['cafile']) if cargs['version']: print_version(wrapper) sys.exit() if cargs['foreground']: console = logging.StreamHandler() console.setFormatter( logging.Formatter( '%(asctime)s %(name)s: %(levelname)s: %(message)s')) logging.getLogger().addHandler(console) elif cargs['log_file']: logfile = logging.handlers.WatchedFileHandler(cargs['log_file']) logfile.setFormatter( logging.Formatter( '%(asctime)s %(name)s: %(levelname)s: %(message)s')) logging.getLogger().addHandler(logfile) go_to_background() else: syslog = logging.handlers.SysLogHandler('/dev/log') syslog.setFormatter( logging.Formatter('%(name)s: %(levelname)s: %(message)s')) logging.getLogger().addHandler(syslog) # Duplicate syslog's file descriptor to stout/stderr. syslog_fd = syslog.socket.fileno() os.dup2(syslog_fd, 1) os.dup2(syslog_fd, 2) go_to_background() if not wrapper.check(): return 1 return wrapper.run(cargs['address'], cargs['port'], cargs['unix_socket'])
[ "def", "main", "(", "name", ",", "klass", ")", ":", "# Common args parser.", "parser", "=", "create_args_parser", "(", "name", ")", "# Common args", "cargs", "=", "get_common_args", "(", "parser", ")", "logging", ".", "getLogger", "(", ")", ".", "setLevel", "(", "cargs", "[", "'log_level'", "]", ")", "wrapper", "=", "klass", "(", "certfile", "=", "cargs", "[", "'certfile'", "]", ",", "keyfile", "=", "cargs", "[", "'keyfile'", "]", ",", "cafile", "=", "cargs", "[", "'cafile'", "]", ")", "if", "cargs", "[", "'version'", "]", ":", "print_version", "(", "wrapper", ")", "sys", ".", "exit", "(", ")", "if", "cargs", "[", "'foreground'", "]", ":", "console", "=", "logging", ".", "StreamHandler", "(", ")", "console", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "'%(asctime)s %(name)s: %(levelname)s: %(message)s'", ")", ")", "logging", ".", "getLogger", "(", ")", ".", "addHandler", "(", "console", ")", "elif", "cargs", "[", "'log_file'", "]", ":", "logfile", "=", "logging", ".", "handlers", ".", "WatchedFileHandler", "(", "cargs", "[", "'log_file'", "]", ")", "logfile", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "'%(asctime)s %(name)s: %(levelname)s: %(message)s'", ")", ")", "logging", ".", "getLogger", "(", ")", ".", "addHandler", "(", "logfile", ")", "go_to_background", "(", ")", "else", ":", "syslog", "=", "logging", ".", "handlers", ".", "SysLogHandler", "(", "'/dev/log'", ")", "syslog", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "'%(name)s: %(levelname)s: %(message)s'", ")", ")", "logging", ".", "getLogger", "(", ")", ".", "addHandler", "(", "syslog", ")", "# Duplicate syslog's file descriptor to stout/stderr.", "syslog_fd", "=", "syslog", ".", "socket", ".", "fileno", "(", ")", "os", ".", "dup2", "(", "syslog_fd", ",", "1", ")", "os", ".", "dup2", "(", "syslog_fd", ",", "2", ")", "go_to_background", "(", ")", "if", "not", "wrapper", ".", "check", "(", ")", ":", "return", "1", "return", "wrapper", ".", "run", "(", 
"cargs", "[", "'address'", "]", ",", "cargs", "[", "'port'", "]", ",", "cargs", "[", "'unix_socket'", "]", ")" ]
OSPD Main function.
[ "OSPD", "Main", "function", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L920-L962
train
16,905
greenbone/ospd
ospd/misc.py
ScanCollection.add_result
def add_result(self, scan_id, result_type, host='', name='', value='', port='', test_id='', severity='', qod=''): """ Add a result to a scan in the table. """ assert scan_id assert len(name) or len(value) result = dict() result['type'] = result_type result['name'] = name result['severity'] = severity result['test_id'] = test_id result['value'] = value result['host'] = host result['port'] = port result['qod'] = qod results = self.scans_table[scan_id]['results'] results.append(result) # Set scan_info's results to propagate results to parent process. self.scans_table[scan_id]['results'] = results
python
def add_result(self, scan_id, result_type, host='', name='', value='', port='', test_id='', severity='', qod=''): """ Add a result to a scan in the table. """ assert scan_id assert len(name) or len(value) result = dict() result['type'] = result_type result['name'] = name result['severity'] = severity result['test_id'] = test_id result['value'] = value result['host'] = host result['port'] = port result['qod'] = qod results = self.scans_table[scan_id]['results'] results.append(result) # Set scan_info's results to propagate results to parent process. self.scans_table[scan_id]['results'] = results
[ "def", "add_result", "(", "self", ",", "scan_id", ",", "result_type", ",", "host", "=", "''", ",", "name", "=", "''", ",", "value", "=", "''", ",", "port", "=", "''", ",", "test_id", "=", "''", ",", "severity", "=", "''", ",", "qod", "=", "''", ")", ":", "assert", "scan_id", "assert", "len", "(", "name", ")", "or", "len", "(", "value", ")", "result", "=", "dict", "(", ")", "result", "[", "'type'", "]", "=", "result_type", "result", "[", "'name'", "]", "=", "name", "result", "[", "'severity'", "]", "=", "severity", "result", "[", "'test_id'", "]", "=", "test_id", "result", "[", "'value'", "]", "=", "value", "result", "[", "'host'", "]", "=", "host", "result", "[", "'port'", "]", "=", "port", "result", "[", "'qod'", "]", "=", "qod", "results", "=", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'results'", "]", "results", ".", "append", "(", "result", ")", "# Set scan_info's results to propagate results to parent process.", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'results'", "]", "=", "results" ]
Add a result to a scan in the table.
[ "Add", "a", "result", "to", "a", "scan", "in", "the", "table", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L87-L105
train
16,906
greenbone/ospd
ospd/misc.py
ScanCollection.get_hosts_unfinished
def get_hosts_unfinished(self, scan_id): """ Get a list of finished hosts.""" unfinished_hosts = list() for target in self.scans_table[scan_id]['finished_hosts']: unfinished_hosts.extend(target_str_to_list(target)) for target in self.scans_table[scan_id]['finished_hosts']: for host in self.scans_table[scan_id]['finished_hosts'][target]: unfinished_hosts.remove(host) return unfinished_hosts
python
def get_hosts_unfinished(self, scan_id): """ Get a list of finished hosts.""" unfinished_hosts = list() for target in self.scans_table[scan_id]['finished_hosts']: unfinished_hosts.extend(target_str_to_list(target)) for target in self.scans_table[scan_id]['finished_hosts']: for host in self.scans_table[scan_id]['finished_hosts'][target]: unfinished_hosts.remove(host) return unfinished_hosts
[ "def", "get_hosts_unfinished", "(", "self", ",", "scan_id", ")", ":", "unfinished_hosts", "=", "list", "(", ")", "for", "target", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'finished_hosts'", "]", ":", "unfinished_hosts", ".", "extend", "(", "target_str_to_list", "(", "target", ")", ")", "for", "target", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'finished_hosts'", "]", ":", "for", "host", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'finished_hosts'", "]", "[", "target", "]", ":", "unfinished_hosts", ".", "remove", "(", "host", ")", "return", "unfinished_hosts" ]
Get a list of finished hosts.
[ "Get", "a", "list", "of", "finished", "hosts", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L130-L140
train
16,907
greenbone/ospd
ospd/misc.py
ScanCollection.results_iterator
def results_iterator(self, scan_id, pop_res): """ Returns an iterator over scan_id scan's results. If pop_res is True, it removed the fetched results from the list. """ if pop_res: result_aux = self.scans_table[scan_id]['results'] self.scans_table[scan_id]['results'] = list() return iter(result_aux) return iter(self.scans_table[scan_id]['results'])
python
def results_iterator(self, scan_id, pop_res): """ Returns an iterator over scan_id scan's results. If pop_res is True, it removed the fetched results from the list. """ if pop_res: result_aux = self.scans_table[scan_id]['results'] self.scans_table[scan_id]['results'] = list() return iter(result_aux) return iter(self.scans_table[scan_id]['results'])
[ "def", "results_iterator", "(", "self", ",", "scan_id", ",", "pop_res", ")", ":", "if", "pop_res", ":", "result_aux", "=", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'results'", "]", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'results'", "]", "=", "list", "(", ")", "return", "iter", "(", "result_aux", ")", "return", "iter", "(", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'results'", "]", ")" ]
Returns an iterator over scan_id scan's results. If pop_res is True, it removed the fetched results from the list.
[ "Returns", "an", "iterator", "over", "scan_id", "scan", "s", "results", ".", "If", "pop_res", "is", "True", "it", "removed", "the", "fetched", "results", "from", "the", "list", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L142-L151
train
16,908
greenbone/ospd
ospd/misc.py
ScanCollection.del_results_for_stopped_hosts
def del_results_for_stopped_hosts(self, scan_id): """ Remove results from the result table for those host """ unfinished_hosts = self.get_hosts_unfinished(scan_id) for result in self.results_iterator(scan_id, False): if result['host'] in unfinished_hosts: self.remove_single_result(scan_id, result)
python
def del_results_for_stopped_hosts(self, scan_id): """ Remove results from the result table for those host """ unfinished_hosts = self.get_hosts_unfinished(scan_id) for result in self.results_iterator(scan_id, False): if result['host'] in unfinished_hosts: self.remove_single_result(scan_id, result)
[ "def", "del_results_for_stopped_hosts", "(", "self", ",", "scan_id", ")", ":", "unfinished_hosts", "=", "self", ".", "get_hosts_unfinished", "(", "scan_id", ")", "for", "result", "in", "self", ".", "results_iterator", "(", "scan_id", ",", "False", ")", ":", "if", "result", "[", "'host'", "]", "in", "unfinished_hosts", ":", "self", ".", "remove_single_result", "(", "scan_id", ",", "result", ")" ]
Remove results from the result table for those host
[ "Remove", "results", "from", "the", "result", "table", "for", "those", "host" ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L162-L168
train
16,909
greenbone/ospd
ospd/misc.py
ScanCollection.create_scan
def create_scan(self, scan_id='', targets='', options=None, vts=''): """ Creates a new scan with provided scan information. """ if self.data_manager is None: self.data_manager = multiprocessing.Manager() # Check if it is possible to resume task. To avoid to resume, the # scan must be deleted from the scans_table. if scan_id and self.id_exists(scan_id) and ( self.get_status(scan_id) == ScanStatus.STOPPED): return self.resume_scan(scan_id, options) if not options: options = dict() scan_info = self.data_manager.dict() scan_info['results'] = list() scan_info['finished_hosts'] = dict( [[target, []] for target, _, _ in targets]) scan_info['progress'] = 0 scan_info['target_progress'] = dict( [[target, {}] for target, _, _ in targets]) scan_info['targets'] = targets scan_info['vts'] = vts scan_info['options'] = options scan_info['start_time'] = int(time.time()) scan_info['end_time'] = "0" scan_info['status'] = ScanStatus.INIT if scan_id is None or scan_id == '': scan_id = str(uuid.uuid4()) scan_info['scan_id'] = scan_id self.scans_table[scan_id] = scan_info return scan_id
python
def create_scan(self, scan_id='', targets='', options=None, vts=''): """ Creates a new scan with provided scan information. """ if self.data_manager is None: self.data_manager = multiprocessing.Manager() # Check if it is possible to resume task. To avoid to resume, the # scan must be deleted from the scans_table. if scan_id and self.id_exists(scan_id) and ( self.get_status(scan_id) == ScanStatus.STOPPED): return self.resume_scan(scan_id, options) if not options: options = dict() scan_info = self.data_manager.dict() scan_info['results'] = list() scan_info['finished_hosts'] = dict( [[target, []] for target, _, _ in targets]) scan_info['progress'] = 0 scan_info['target_progress'] = dict( [[target, {}] for target, _, _ in targets]) scan_info['targets'] = targets scan_info['vts'] = vts scan_info['options'] = options scan_info['start_time'] = int(time.time()) scan_info['end_time'] = "0" scan_info['status'] = ScanStatus.INIT if scan_id is None or scan_id == '': scan_id = str(uuid.uuid4()) scan_info['scan_id'] = scan_id self.scans_table[scan_id] = scan_info return scan_id
[ "def", "create_scan", "(", "self", ",", "scan_id", "=", "''", ",", "targets", "=", "''", ",", "options", "=", "None", ",", "vts", "=", "''", ")", ":", "if", "self", ".", "data_manager", "is", "None", ":", "self", ".", "data_manager", "=", "multiprocessing", ".", "Manager", "(", ")", "# Check if it is possible to resume task. To avoid to resume, the", "# scan must be deleted from the scans_table.", "if", "scan_id", "and", "self", ".", "id_exists", "(", "scan_id", ")", "and", "(", "self", ".", "get_status", "(", "scan_id", ")", "==", "ScanStatus", ".", "STOPPED", ")", ":", "return", "self", ".", "resume_scan", "(", "scan_id", ",", "options", ")", "if", "not", "options", ":", "options", "=", "dict", "(", ")", "scan_info", "=", "self", ".", "data_manager", ".", "dict", "(", ")", "scan_info", "[", "'results'", "]", "=", "list", "(", ")", "scan_info", "[", "'finished_hosts'", "]", "=", "dict", "(", "[", "[", "target", ",", "[", "]", "]", "for", "target", ",", "_", ",", "_", "in", "targets", "]", ")", "scan_info", "[", "'progress'", "]", "=", "0", "scan_info", "[", "'target_progress'", "]", "=", "dict", "(", "[", "[", "target", ",", "{", "}", "]", "for", "target", ",", "_", ",", "_", "in", "targets", "]", ")", "scan_info", "[", "'targets'", "]", "=", "targets", "scan_info", "[", "'vts'", "]", "=", "vts", "scan_info", "[", "'options'", "]", "=", "options", "scan_info", "[", "'start_time'", "]", "=", "int", "(", "time", ".", "time", "(", ")", ")", "scan_info", "[", "'end_time'", "]", "=", "\"0\"", "scan_info", "[", "'status'", "]", "=", "ScanStatus", ".", "INIT", "if", "scan_id", "is", "None", "or", "scan_id", "==", "''", ":", "scan_id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "scan_info", "[", "'scan_id'", "]", "=", "scan_id", "self", ".", "scans_table", "[", "scan_id", "]", "=", "scan_info", "return", "scan_id" ]
Creates a new scan with provided scan information.
[ "Creates", "a", "new", "scan", "with", "provided", "scan", "information", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L191-L222
train
16,910
greenbone/ospd
ospd/misc.py
ScanCollection.set_option
def set_option(self, scan_id, name, value): """ Set a scan_id scan's name option to value. """ self.scans_table[scan_id]['options'][name] = value
python
def set_option(self, scan_id, name, value): """ Set a scan_id scan's name option to value. """ self.scans_table[scan_id]['options'][name] = value
[ "def", "set_option", "(", "self", ",", "scan_id", ",", "name", ",", "value", ")", ":", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'options'", "]", "[", "name", "]", "=", "value" ]
Set a scan_id scan's name option to value.
[ "Set", "a", "scan_id", "scan", "s", "name", "option", "to", "value", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L238-L241
train
16,911
greenbone/ospd
ospd/misc.py
ScanCollection.get_target_progress
def get_target_progress(self, scan_id, target): """ Get a target's current progress value. The value is calculated with the progress of each single host in the target.""" total_hosts = len(target_str_to_list(target)) host_progresses = self.scans_table[scan_id]['target_progress'].get(target) try: t_prog = sum(host_progresses.values()) / total_hosts except ZeroDivisionError: LOGGER.error("Zero division error in ", get_target_progress.__name__) raise return t_prog
python
def get_target_progress(self, scan_id, target): """ Get a target's current progress value. The value is calculated with the progress of each single host in the target.""" total_hosts = len(target_str_to_list(target)) host_progresses = self.scans_table[scan_id]['target_progress'].get(target) try: t_prog = sum(host_progresses.values()) / total_hosts except ZeroDivisionError: LOGGER.error("Zero division error in ", get_target_progress.__name__) raise return t_prog
[ "def", "get_target_progress", "(", "self", ",", "scan_id", ",", "target", ")", ":", "total_hosts", "=", "len", "(", "target_str_to_list", "(", "target", ")", ")", "host_progresses", "=", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'target_progress'", "]", ".", "get", "(", "target", ")", "try", ":", "t_prog", "=", "sum", "(", "host_progresses", ".", "values", "(", ")", ")", "/", "total_hosts", "except", "ZeroDivisionError", ":", "LOGGER", ".", "error", "(", "\"Zero division error in \"", ",", "get_target_progress", ".", "__name__", ")", "raise", "return", "t_prog" ]
Get a target's current progress value. The value is calculated with the progress of each single host in the target.
[ "Get", "a", "target", "s", "current", "progress", "value", ".", "The", "value", "is", "calculated", "with", "the", "progress", "of", "each", "single", "host", "in", "the", "target", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L248-L260
train
16,912
greenbone/ospd
ospd/misc.py
ScanCollection.get_target_list
def get_target_list(self, scan_id): """ Get a scan's target list. """ target_list = [] for target, _, _ in self.scans_table[scan_id]['targets']: target_list.append(target) return target_list
python
def get_target_list(self, scan_id): """ Get a scan's target list. """ target_list = [] for target, _, _ in self.scans_table[scan_id]['targets']: target_list.append(target) return target_list
[ "def", "get_target_list", "(", "self", ",", "scan_id", ")", ":", "target_list", "=", "[", "]", "for", "target", ",", "_", ",", "_", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'targets'", "]", ":", "target_list", ".", "append", "(", "target", ")", "return", "target_list" ]
Get a scan's target list.
[ "Get", "a", "scan", "s", "target", "list", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L272-L278
train
16,913
greenbone/ospd
ospd/misc.py
ScanCollection.get_ports
def get_ports(self, scan_id, target): """ Get a scan's ports list. If a target is specified it will return the corresponding port for it. If not, it returns the port item of the first nested list in the target's list. """ if target: for item in self.scans_table[scan_id]['targets']: if target == item[0]: return item[1] return self.scans_table[scan_id]['targets'][0][1]
python
def get_ports(self, scan_id, target): """ Get a scan's ports list. If a target is specified it will return the corresponding port for it. If not, it returns the port item of the first nested list in the target's list. """ if target: for item in self.scans_table[scan_id]['targets']: if target == item[0]: return item[1] return self.scans_table[scan_id]['targets'][0][1]
[ "def", "get_ports", "(", "self", ",", "scan_id", ",", "target", ")", ":", "if", "target", ":", "for", "item", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'targets'", "]", ":", "if", "target", "==", "item", "[", "0", "]", ":", "return", "item", "[", "1", "]", "return", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'targets'", "]", "[", "0", "]", "[", "1", "]" ]
Get a scan's ports list. If a target is specified it will return the corresponding port for it. If not, it returns the port item of the first nested list in the target's list.
[ "Get", "a", "scan", "s", "ports", "list", ".", "If", "a", "target", "is", "specified", "it", "will", "return", "the", "corresponding", "port", "for", "it", ".", "If", "not", "it", "returns", "the", "port", "item", "of", "the", "first", "nested", "list", "in", "the", "target", "s", "list", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L280-L291
train
16,914
greenbone/ospd
ospd/misc.py
ScanCollection.get_credentials
def get_credentials(self, scan_id, target): """ Get a scan's credential list. It return dictionary with the corresponding credential for a given target. """ if target: for item in self.scans_table[scan_id]['targets']: if target == item[0]: return item[2]
python
def get_credentials(self, scan_id, target): """ Get a scan's credential list. It return dictionary with the corresponding credential for a given target. """ if target: for item in self.scans_table[scan_id]['targets']: if target == item[0]: return item[2]
[ "def", "get_credentials", "(", "self", ",", "scan_id", ",", "target", ")", ":", "if", "target", ":", "for", "item", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'targets'", "]", ":", "if", "target", "==", "item", "[", "0", "]", ":", "return", "item", "[", "2", "]" ]
Get a scan's credential list. It return dictionary with the corresponding credential for a given target.
[ "Get", "a", "scan", "s", "credential", "list", ".", "It", "return", "dictionary", "with", "the", "corresponding", "credential", "for", "a", "given", "target", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L293-L300
train
16,915
greenbone/ospd
ospd/misc.py
ScanCollection.delete_scan
def delete_scan(self, scan_id): """ Delete a scan if fully finished. """ if self.get_status(scan_id) == ScanStatus.RUNNING: return False self.scans_table.pop(scan_id) if len(self.scans_table) == 0: del self.data_manager self.data_manager = None return True
python
def delete_scan(self, scan_id): """ Delete a scan if fully finished. """ if self.get_status(scan_id) == ScanStatus.RUNNING: return False self.scans_table.pop(scan_id) if len(self.scans_table) == 0: del self.data_manager self.data_manager = None return True
[ "def", "delete_scan", "(", "self", ",", "scan_id", ")", ":", "if", "self", ".", "get_status", "(", "scan_id", ")", "==", "ScanStatus", ".", "RUNNING", ":", "return", "False", "self", ".", "scans_table", ".", "pop", "(", "scan_id", ")", "if", "len", "(", "self", ".", "scans_table", ")", "==", "0", ":", "del", "self", ".", "data_manager", "self", ".", "data_manager", "=", "None", "return", "True" ]
Delete a scan if fully finished.
[ "Delete", "a", "scan", "if", "fully", "finished", "." ]
cef773166b15a19c17764721d3fe404fa0e107bf
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L312-L321
train
16,916
Grokzen/pykwalify
pykwalify/types.py
is_timestamp
def is_timestamp(obj): """ Yaml either have automatically converted it to a datetime object or it is a string that will be validated later. """ return isinstance(obj, datetime.datetime) or is_string(obj) or is_int(obj) or is_float(obj)
python
def is_timestamp(obj): """ Yaml either have automatically converted it to a datetime object or it is a string that will be validated later. """ return isinstance(obj, datetime.datetime) or is_string(obj) or is_int(obj) or is_float(obj)
[ "def", "is_timestamp", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", "or", "is_string", "(", "obj", ")", "or", "is_int", "(", "obj", ")", "or", "is_float", "(", "obj", ")" ]
Yaml either have automatically converted it to a datetime object or it is a string that will be validated later.
[ "Yaml", "either", "have", "automatically", "converted", "it", "to", "a", "datetime", "object", "or", "it", "is", "a", "string", "that", "will", "be", "validated", "later", "." ]
02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7
https://github.com/Grokzen/pykwalify/blob/02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7/pykwalify/types.py#L131-L136
train
16,917
Grokzen/pykwalify
pykwalify/__init__.py
init_logging
def init_logging(log_level): """ Init logging settings with default set to INFO """ log_level = log_level_to_string_map[min(log_level, 5)] msg = "%(levelname)s - %(name)s:%(lineno)s - %(message)s" if log_level in os.environ else "%(levelname)s - %(message)s" logging_conf = { "version": 1, "root": { "level": log_level, "handlers": ["console"] }, "handlers": { "console": { "class": "logging.StreamHandler", "level": log_level, "formatter": "simple", "stream": "ext://sys.stdout" } }, "formatters": { "simple": { "format": " {0}".format(msg) } } } logging.config.dictConfig(logging_conf)
python
def init_logging(log_level): """ Init logging settings with default set to INFO """ log_level = log_level_to_string_map[min(log_level, 5)] msg = "%(levelname)s - %(name)s:%(lineno)s - %(message)s" if log_level in os.environ else "%(levelname)s - %(message)s" logging_conf = { "version": 1, "root": { "level": log_level, "handlers": ["console"] }, "handlers": { "console": { "class": "logging.StreamHandler", "level": log_level, "formatter": "simple", "stream": "ext://sys.stdout" } }, "formatters": { "simple": { "format": " {0}".format(msg) } } } logging.config.dictConfig(logging_conf)
[ "def", "init_logging", "(", "log_level", ")", ":", "log_level", "=", "log_level_to_string_map", "[", "min", "(", "log_level", ",", "5", ")", "]", "msg", "=", "\"%(levelname)s - %(name)s:%(lineno)s - %(message)s\"", "if", "log_level", "in", "os", ".", "environ", "else", "\"%(levelname)s - %(message)s\"", "logging_conf", "=", "{", "\"version\"", ":", "1", ",", "\"root\"", ":", "{", "\"level\"", ":", "log_level", ",", "\"handlers\"", ":", "[", "\"console\"", "]", "}", ",", "\"handlers\"", ":", "{", "\"console\"", ":", "{", "\"class\"", ":", "\"logging.StreamHandler\"", ",", "\"level\"", ":", "log_level", ",", "\"formatter\"", ":", "\"simple\"", ",", "\"stream\"", ":", "\"ext://sys.stdout\"", "}", "}", ",", "\"formatters\"", ":", "{", "\"simple\"", ":", "{", "\"format\"", ":", "\" {0}\"", ".", "format", "(", "msg", ")", "}", "}", "}", "logging", ".", "config", ".", "dictConfig", "(", "logging_conf", ")" ]
Init logging settings with default set to INFO
[ "Init", "logging", "settings", "with", "default", "set", "to", "INFO" ]
02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7
https://github.com/Grokzen/pykwalify/blob/02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7/pykwalify/__init__.py#L25-L54
train
16,918
Grokzen/pykwalify
pykwalify/rule.py
Rule.keywords
def keywords(self): """ Returns a list of all keywords that this rule object has defined. A keyword is considered defined if the value it returns != None. """ defined_keywords = [ ('allowempty_map', 'allowempty_map'), ('assertion', 'assertion'), ('default', 'default'), ('class', 'class'), ('desc', 'desc'), ('enum', 'enum'), ('example', 'example'), ('extensions', 'extensions'), ('format', 'format'), ('func', 'func'), ('ident', 'ident'), ('include_name', 'include'), ('length', 'length'), ('map_regex_rule', 'map_regex_rule'), ('mapping', 'mapping'), ('matching', 'matching'), ('matching_rule', 'matching_rule'), ('name', 'name'), ('nullable', 'nullable') ('parent', 'parent'), ('pattern', 'pattern'), ('pattern_regexp', 'pattern_regexp'), ('range', 'range'), ('regex_mappings', 'regex_mappings'), ('required', 'required'), ('schema', 'schema'), ('schema_str', 'schema_str'), ('sequence', 'sequence'), ('type', 'type'), ('type_class', 'type_class'), ('unique', 'unique'), ('version', 'version'), ] found_keywords = [] for var_name, keyword_name in defined_keywords: if getattr(self, var_name, None): found_keywords.append(keyword_name) return found_keywords
python
def keywords(self): """ Returns a list of all keywords that this rule object has defined. A keyword is considered defined if the value it returns != None. """ defined_keywords = [ ('allowempty_map', 'allowempty_map'), ('assertion', 'assertion'), ('default', 'default'), ('class', 'class'), ('desc', 'desc'), ('enum', 'enum'), ('example', 'example'), ('extensions', 'extensions'), ('format', 'format'), ('func', 'func'), ('ident', 'ident'), ('include_name', 'include'), ('length', 'length'), ('map_regex_rule', 'map_regex_rule'), ('mapping', 'mapping'), ('matching', 'matching'), ('matching_rule', 'matching_rule'), ('name', 'name'), ('nullable', 'nullable') ('parent', 'parent'), ('pattern', 'pattern'), ('pattern_regexp', 'pattern_regexp'), ('range', 'range'), ('regex_mappings', 'regex_mappings'), ('required', 'required'), ('schema', 'schema'), ('schema_str', 'schema_str'), ('sequence', 'sequence'), ('type', 'type'), ('type_class', 'type_class'), ('unique', 'unique'), ('version', 'version'), ] found_keywords = [] for var_name, keyword_name in defined_keywords: if getattr(self, var_name, None): found_keywords.append(keyword_name) return found_keywords
[ "def", "keywords", "(", "self", ")", ":", "defined_keywords", "=", "[", "(", "'allowempty_map'", ",", "'allowempty_map'", ")", ",", "(", "'assertion'", ",", "'assertion'", ")", ",", "(", "'default'", ",", "'default'", ")", ",", "(", "'class'", ",", "'class'", ")", ",", "(", "'desc'", ",", "'desc'", ")", ",", "(", "'enum'", ",", "'enum'", ")", ",", "(", "'example'", ",", "'example'", ")", ",", "(", "'extensions'", ",", "'extensions'", ")", ",", "(", "'format'", ",", "'format'", ")", ",", "(", "'func'", ",", "'func'", ")", ",", "(", "'ident'", ",", "'ident'", ")", ",", "(", "'include_name'", ",", "'include'", ")", ",", "(", "'length'", ",", "'length'", ")", ",", "(", "'map_regex_rule'", ",", "'map_regex_rule'", ")", ",", "(", "'mapping'", ",", "'mapping'", ")", ",", "(", "'matching'", ",", "'matching'", ")", ",", "(", "'matching_rule'", ",", "'matching_rule'", ")", ",", "(", "'name'", ",", "'name'", ")", ",", "(", "'nullable'", ",", "'nullable'", ")", "(", "'parent'", ",", "'parent'", ")", ",", "(", "'pattern'", ",", "'pattern'", ")", ",", "(", "'pattern_regexp'", ",", "'pattern_regexp'", ")", ",", "(", "'range'", ",", "'range'", ")", ",", "(", "'regex_mappings'", ",", "'regex_mappings'", ")", ",", "(", "'required'", ",", "'required'", ")", ",", "(", "'schema'", ",", "'schema'", ")", ",", "(", "'schema_str'", ",", "'schema_str'", ")", ",", "(", "'sequence'", ",", "'sequence'", ")", ",", "(", "'type'", ",", "'type'", ")", ",", "(", "'type_class'", ",", "'type_class'", ")", ",", "(", "'unique'", ",", "'unique'", ")", ",", "(", "'version'", ",", "'version'", ")", ",", "]", "found_keywords", "=", "[", "]", "for", "var_name", ",", "keyword_name", "in", "defined_keywords", ":", "if", "getattr", "(", "self", ",", "var_name", ",", "None", ")", ":", "found_keywords", ".", "append", "(", "keyword_name", ")", "return", "found_keywords" ]
Returns a list of all keywords that this rule object has defined. A keyword is considered defined if the value it returns != None.
[ "Returns", "a", "list", "of", "all", "keywords", "that", "this", "rule", "object", "has", "defined", ".", "A", "keyword", "is", "considered", "defined", "if", "the", "value", "it", "returns", "!", "=", "None", "." ]
02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7
https://github.com/Grokzen/pykwalify/blob/02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7/pykwalify/rule.py#L319-L364
train
16,919
Grokzen/pykwalify
pykwalify/core.py
Core._load_extensions
def _load_extensions(self): """ Load all extension files into the namespace pykwalify.ext """ log.debug(u"loading all extensions : %s", self.extensions) self.loaded_extensions = [] for f in self.extensions: if not os.path.isabs(f): f = os.path.abspath(f) if not os.path.exists(f): raise CoreError(u"Extension file: {0} not found on disk".format(f)) self.loaded_extensions.append(imp.load_source("", f)) log.debug(self.loaded_extensions) log.debug([dir(m) for m in self.loaded_extensions])
python
def _load_extensions(self): """ Load all extension files into the namespace pykwalify.ext """ log.debug(u"loading all extensions : %s", self.extensions) self.loaded_extensions = [] for f in self.extensions: if not os.path.isabs(f): f = os.path.abspath(f) if not os.path.exists(f): raise CoreError(u"Extension file: {0} not found on disk".format(f)) self.loaded_extensions.append(imp.load_source("", f)) log.debug(self.loaded_extensions) log.debug([dir(m) for m in self.loaded_extensions])
[ "def", "_load_extensions", "(", "self", ")", ":", "log", ".", "debug", "(", "u\"loading all extensions : %s\"", ",", "self", ".", "extensions", ")", "self", ".", "loaded_extensions", "=", "[", "]", "for", "f", "in", "self", ".", "extensions", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "f", ")", ":", "f", "=", "os", ".", "path", ".", "abspath", "(", "f", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "raise", "CoreError", "(", "u\"Extension file: {0} not found on disk\"", ".", "format", "(", "f", ")", ")", "self", ".", "loaded_extensions", ".", "append", "(", "imp", ".", "load_source", "(", "\"\"", ",", "f", ")", ")", "log", ".", "debug", "(", "self", ".", "loaded_extensions", ")", "log", ".", "debug", "(", "[", "dir", "(", "m", ")", "for", "m", "in", "self", ".", "loaded_extensions", "]", ")" ]
Load all extension files into the namespace pykwalify.ext
[ "Load", "all", "extension", "files", "into", "the", "namespace", "pykwalify", ".", "ext" ]
02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7
https://github.com/Grokzen/pykwalify/blob/02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7/pykwalify/core.py#L131-L149
train
16,920
Grokzen/pykwalify
pykwalify/core.py
Core._handle_func
def _handle_func(self, value, rule, path, done=None): """ Helper function that should check if func is specified for this rule and then handle it for all cases in a generic way. """ func = rule.func # func keyword is not defined so nothing to do if not func: return found_method = False for extension in self.loaded_extensions: method = getattr(extension, func, None) if method: found_method = True # No exception will should be caught. If one is raised it should bubble up all the way. ret = method(value, rule, path) if ret is not True and ret is not None: msg = '%s. Path: {path}' % unicode(ret) self.errors.append(SchemaError.SchemaErrorEntry( msg=msg, path=path, value=None)) # If False or None or some other object that is interpreted as False if not ret: raise CoreError(u"Error when running extension function : {0}".format(func)) # Only run the first matched function. Sinc loading order is determined # it should be easy to determine which file is used before others break if not found_method: raise CoreError(u"Did not find method '{0}' in any loaded extension file".format(func))
python
def _handle_func(self, value, rule, path, done=None): """ Helper function that should check if func is specified for this rule and then handle it for all cases in a generic way. """ func = rule.func # func keyword is not defined so nothing to do if not func: return found_method = False for extension in self.loaded_extensions: method = getattr(extension, func, None) if method: found_method = True # No exception will should be caught. If one is raised it should bubble up all the way. ret = method(value, rule, path) if ret is not True and ret is not None: msg = '%s. Path: {path}' % unicode(ret) self.errors.append(SchemaError.SchemaErrorEntry( msg=msg, path=path, value=None)) # If False or None or some other object that is interpreted as False if not ret: raise CoreError(u"Error when running extension function : {0}".format(func)) # Only run the first matched function. Sinc loading order is determined # it should be easy to determine which file is used before others break if not found_method: raise CoreError(u"Did not find method '{0}' in any loaded extension file".format(func))
[ "def", "_handle_func", "(", "self", ",", "value", ",", "rule", ",", "path", ",", "done", "=", "None", ")", ":", "func", "=", "rule", ".", "func", "# func keyword is not defined so nothing to do", "if", "not", "func", ":", "return", "found_method", "=", "False", "for", "extension", "in", "self", ".", "loaded_extensions", ":", "method", "=", "getattr", "(", "extension", ",", "func", ",", "None", ")", "if", "method", ":", "found_method", "=", "True", "# No exception will should be caught. If one is raised it should bubble up all the way.", "ret", "=", "method", "(", "value", ",", "rule", ",", "path", ")", "if", "ret", "is", "not", "True", "and", "ret", "is", "not", "None", ":", "msg", "=", "'%s. Path: {path}'", "%", "unicode", "(", "ret", ")", "self", ".", "errors", ".", "append", "(", "SchemaError", ".", "SchemaErrorEntry", "(", "msg", "=", "msg", ",", "path", "=", "path", ",", "value", "=", "None", ")", ")", "# If False or None or some other object that is interpreted as False", "if", "not", "ret", ":", "raise", "CoreError", "(", "u\"Error when running extension function : {0}\"", ".", "format", "(", "func", ")", ")", "# Only run the first matched function. Sinc loading order is determined", "# it should be easy to determine which file is used before others", "break", "if", "not", "found_method", ":", "raise", "CoreError", "(", "u\"Did not find method '{0}' in any loaded extension file\"", ".", "format", "(", "func", ")", ")" ]
Helper function that should check if func is specified for this rule and then handle it for all cases in a generic way.
[ "Helper", "function", "that", "should", "check", "if", "func", "is", "specified", "for", "this", "rule", "and", "then", "handle", "it", "for", "all", "cases", "in", "a", "generic", "way", "." ]
02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7
https://github.com/Grokzen/pykwalify/blob/02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7/pykwalify/core.py#L241-L277
train
16,921
Grokzen/pykwalify
pykwalify/core.py
Core._validate_range
def _validate_range(self, max_, min_, max_ex, min_ex, value, path, prefix): """ Validate that value is within range values. """ if not isinstance(value, int) and not isinstance(value, float): raise CoreError("Value must be a integer type") log.debug( u"Validate range : %s : %s : %s : %s : %s : %s", max_, min_, max_ex, min_ex, value, path, ) if max_ is not None and max_ < value: self.errors.append(SchemaError.SchemaErrorEntry( msg=u"Type '{prefix}' has size of '{value}', greater than max limit '{max_}'. Path: '{path}'", path=path, value=nativestr(value) if tt['str'](value) else value, prefix=prefix, max_=max_)) if min_ is not None and min_ > value: self.errors.append(SchemaError.SchemaErrorEntry( msg=u"Type '{prefix}' has size of '{value}', less than min limit '{min_}'. Path: '{path}'", path=path, value=nativestr(value) if tt['str'](value) else value, prefix=prefix, min_=min_)) if max_ex is not None and max_ex <= value: self.errors.append(SchemaError.SchemaErrorEntry( msg=u"Type '{prefix}' has size of '{value}', greater than or equals to max limit(exclusive) '{max_ex}'. Path: '{path}'", path=path, value=nativestr(value) if tt['str'](value) else value, prefix=prefix, max_ex=max_ex)) if min_ex is not None and min_ex >= value: self.errors.append(SchemaError.SchemaErrorEntry( msg=u"Type '{prefix}' has size of '{value}', less than or equals to min limit(exclusive) '{min_ex}'. Path: '{path}'", path=path, value=nativestr(value) if tt['str'](value) else value, prefix=prefix, min_ex=min_ex))
python
def _validate_range(self, max_, min_, max_ex, min_ex, value, path, prefix): """ Validate that value is within range values. """ if not isinstance(value, int) and not isinstance(value, float): raise CoreError("Value must be a integer type") log.debug( u"Validate range : %s : %s : %s : %s : %s : %s", max_, min_, max_ex, min_ex, value, path, ) if max_ is not None and max_ < value: self.errors.append(SchemaError.SchemaErrorEntry( msg=u"Type '{prefix}' has size of '{value}', greater than max limit '{max_}'. Path: '{path}'", path=path, value=nativestr(value) if tt['str'](value) else value, prefix=prefix, max_=max_)) if min_ is not None and min_ > value: self.errors.append(SchemaError.SchemaErrorEntry( msg=u"Type '{prefix}' has size of '{value}', less than min limit '{min_}'. Path: '{path}'", path=path, value=nativestr(value) if tt['str'](value) else value, prefix=prefix, min_=min_)) if max_ex is not None and max_ex <= value: self.errors.append(SchemaError.SchemaErrorEntry( msg=u"Type '{prefix}' has size of '{value}', greater than or equals to max limit(exclusive) '{max_ex}'. Path: '{path}'", path=path, value=nativestr(value) if tt['str'](value) else value, prefix=prefix, max_ex=max_ex)) if min_ex is not None and min_ex >= value: self.errors.append(SchemaError.SchemaErrorEntry( msg=u"Type '{prefix}' has size of '{value}', less than or equals to min limit(exclusive) '{min_ex}'. Path: '{path}'", path=path, value=nativestr(value) if tt['str'](value) else value, prefix=prefix, min_ex=min_ex))
[ "def", "_validate_range", "(", "self", ",", "max_", ",", "min_", ",", "max_ex", ",", "min_ex", ",", "value", ",", "path", ",", "prefix", ")", ":", "if", "not", "isinstance", "(", "value", ",", "int", ")", "and", "not", "isinstance", "(", "value", ",", "float", ")", ":", "raise", "CoreError", "(", "\"Value must be a integer type\"", ")", "log", ".", "debug", "(", "u\"Validate range : %s : %s : %s : %s : %s : %s\"", ",", "max_", ",", "min_", ",", "max_ex", ",", "min_ex", ",", "value", ",", "path", ",", ")", "if", "max_", "is", "not", "None", "and", "max_", "<", "value", ":", "self", ".", "errors", ".", "append", "(", "SchemaError", ".", "SchemaErrorEntry", "(", "msg", "=", "u\"Type '{prefix}' has size of '{value}', greater than max limit '{max_}'. Path: '{path}'\"", ",", "path", "=", "path", ",", "value", "=", "nativestr", "(", "value", ")", "if", "tt", "[", "'str'", "]", "(", "value", ")", "else", "value", ",", "prefix", "=", "prefix", ",", "max_", "=", "max_", ")", ")", "if", "min_", "is", "not", "None", "and", "min_", ">", "value", ":", "self", ".", "errors", ".", "append", "(", "SchemaError", ".", "SchemaErrorEntry", "(", "msg", "=", "u\"Type '{prefix}' has size of '{value}', less than min limit '{min_}'. Path: '{path}'\"", ",", "path", "=", "path", ",", "value", "=", "nativestr", "(", "value", ")", "if", "tt", "[", "'str'", "]", "(", "value", ")", "else", "value", ",", "prefix", "=", "prefix", ",", "min_", "=", "min_", ")", ")", "if", "max_ex", "is", "not", "None", "and", "max_ex", "<=", "value", ":", "self", ".", "errors", ".", "append", "(", "SchemaError", ".", "SchemaErrorEntry", "(", "msg", "=", "u\"Type '{prefix}' has size of '{value}', greater than or equals to max limit(exclusive) '{max_ex}'. 
Path: '{path}'\"", ",", "path", "=", "path", ",", "value", "=", "nativestr", "(", "value", ")", "if", "tt", "[", "'str'", "]", "(", "value", ")", "else", "value", ",", "prefix", "=", "prefix", ",", "max_ex", "=", "max_ex", ")", ")", "if", "min_ex", "is", "not", "None", "and", "min_ex", ">=", "value", ":", "self", ".", "errors", ".", "append", "(", "SchemaError", ".", "SchemaErrorEntry", "(", "msg", "=", "u\"Type '{prefix}' has size of '{value}', less than or equals to min limit(exclusive) '{min_ex}'. Path: '{path}'\"", ",", "path", "=", "path", ",", "value", "=", "nativestr", "(", "value", ")", "if", "tt", "[", "'str'", "]", "(", "value", ")", "else", "value", ",", "prefix", "=", "prefix", ",", "min_ex", "=", "min_ex", ")", ")" ]
Validate that value is within range values.
[ "Validate", "that", "value", "is", "within", "range", "values", "." ]
02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7
https://github.com/Grokzen/pykwalify/blob/02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7/pykwalify/core.py#L919-L966
train
16,922
Grokzen/pykwalify
pykwalify/cli.py
run
def run(cli_args): """ Split the functionality into 2 methods. One for parsing the cli and one that runs the application. """ from .core import Core c = Core( source_file=cli_args["--data-file"], schema_files=cli_args["--schema-file"], extensions=cli_args['--extension'], strict_rule_validation=cli_args['--strict-rule-validation'], fix_ruby_style_regex=cli_args['--fix-ruby-style-regex'], allow_assertions=cli_args['--allow-assertions'], file_encoding=cli_args['--encoding'], ) c.validate() return c
python
def run(cli_args): """ Split the functionality into 2 methods. One for parsing the cli and one that runs the application. """ from .core import Core c = Core( source_file=cli_args["--data-file"], schema_files=cli_args["--schema-file"], extensions=cli_args['--extension'], strict_rule_validation=cli_args['--strict-rule-validation'], fix_ruby_style_regex=cli_args['--fix-ruby-style-regex'], allow_assertions=cli_args['--allow-assertions'], file_encoding=cli_args['--encoding'], ) c.validate() return c
[ "def", "run", "(", "cli_args", ")", ":", "from", ".", "core", "import", "Core", "c", "=", "Core", "(", "source_file", "=", "cli_args", "[", "\"--data-file\"", "]", ",", "schema_files", "=", "cli_args", "[", "\"--schema-file\"", "]", ",", "extensions", "=", "cli_args", "[", "'--extension'", "]", ",", "strict_rule_validation", "=", "cli_args", "[", "'--strict-rule-validation'", "]", ",", "fix_ruby_style_regex", "=", "cli_args", "[", "'--fix-ruby-style-regex'", "]", ",", "allow_assertions", "=", "cli_args", "[", "'--allow-assertions'", "]", ",", "file_encoding", "=", "cli_args", "[", "'--encoding'", "]", ",", ")", "c", ".", "validate", "(", ")", "return", "c" ]
Split the functionality into 2 methods. One for parsing the cli and one that runs the application.
[ "Split", "the", "functionality", "into", "2", "methods", "." ]
02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7
https://github.com/Grokzen/pykwalify/blob/02b7e21eafb97926f17b7c33e2ee7b3ea67c3ef7/pykwalify/cli.py#L68-L86
train
16,923
daler/gffutils
gffutils/pybedtools_integration.py
to_bedtool
def to_bedtool(iterator): """ Convert any iterator into a pybedtools.BedTool object. Note that the supplied iterator is not consumed by this function. To save to a temp file or to a known location, use the `.saveas()` method of the returned BedTool object. """ def gen(): for i in iterator: yield helpers.asinterval(i) return pybedtools.BedTool(gen())
python
def to_bedtool(iterator): """ Convert any iterator into a pybedtools.BedTool object. Note that the supplied iterator is not consumed by this function. To save to a temp file or to a known location, use the `.saveas()` method of the returned BedTool object. """ def gen(): for i in iterator: yield helpers.asinterval(i) return pybedtools.BedTool(gen())
[ "def", "to_bedtool", "(", "iterator", ")", ":", "def", "gen", "(", ")", ":", "for", "i", "in", "iterator", ":", "yield", "helpers", ".", "asinterval", "(", "i", ")", "return", "pybedtools", ".", "BedTool", "(", "gen", "(", ")", ")" ]
Convert any iterator into a pybedtools.BedTool object. Note that the supplied iterator is not consumed by this function. To save to a temp file or to a known location, use the `.saveas()` method of the returned BedTool object.
[ "Convert", "any", "iterator", "into", "a", "pybedtools", ".", "BedTool", "object", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/pybedtools_integration.py#L12-L23
train
16,924
daler/gffutils
gffutils/pybedtools_integration.py
tsses
def tsses(db, merge_overlapping=False, attrs=None, attrs_sep=":", merge_kwargs=None, as_bed6=False, bedtools_227_or_later=True): """ Create 1-bp transcription start sites for all transcripts in the database and return as a sorted pybedtools.BedTool object pointing to a temporary file. To save the file to a known location, use the `.moveto()` method on the resulting `pybedtools.BedTool` object. To extend regions upstream/downstream, see the `.slop()` method on the resulting `pybedtools.BedTool object`. Requires pybedtools. Parameters ---------- db : gffutils.FeatureDB The database to use as_bed6 : bool If True, output file is in BED6 format; otherwise it remains in the GFF/GTF format and dialect of the file used to create the database. Note that the merge options below necessarily force `as_bed6=True`. merge_overlapping : bool If True, output will be in BED format. Overlapping TSSes will be merged into a single feature, and their names will be collapsed using `merge_sep` and placed in the new name field. merge_kwargs : dict If `merge_overlapping=True`, these keyword arguments are passed to pybedtools.BedTool.merge(), which are in turn sent to `bedtools merge`. The merge operates on a BED6 file which will have had the name field constructed as specified by other arguments here. See the available options for your installed version of BEDTools; the defaults used here are `merge_kwargs=dict(o='distinct', c=4, s=True)`. Any provided `merge_kwargs` are used to *update* the default. It is recommended to not override `c=4` and `s=True`, otherwise the post-merge fixing may not work correctly. Good candidates for tweaking are `d` (merge distance), `o` (operation), `delim` (delimiter to use for collapse operations). attrs : str or list Only has an effect when `as_bed6=True` or `merge_overlapping=True`. Determines what goes in the name field of an output BED file. By default, "gene_id" for GTF databases and "ID" for GFF. If a list of attributes is supplied, e.g. 
["gene_id", "transcript_id"], then these will be joined by `attr_join_sep` and then placed in the name field. attrs_sep: str If `as_bed6=True` or `merge_overlapping=True`, then use this character to separate attributes in the name field of the output BED. If also using `merge_overlapping=True`, you'll probably want this to be different than `merge_sep` in order to parse things out later. bedtools_227_or_later : bool In version 2.27, BEDTools changed the output for merge. By default, this function expects BEDTools version 2.27 or later, but set this to False to assume the older behavior. For testing purposes, the environment variable GFFUTILS_USES_BEDTOOLS_227_OR_LATER is set to either "true" or "false" and is used to override this argument. Examples -------- >>> import gffutils >>> db = gffutils.create_db( ... gffutils.example_filename('FBgn0031208.gtf'), ... ":memory:", ... keep_order=True, ... verbose=False) Default settings -- no merging, and report a separate TSS on each line even if they overlap (as in the first two): >>> print(tsses(db)) # doctest: +NORMALIZE_WHITESPACE chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300689"; chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300690"; chr2L gffutils_derived transcript_TSS 11000 11000 . - . gene_id "Fk_gene_1"; transcript_id "transcript_Fk_gene_1"; chr2L gffutils_derived transcript_TSS 12500 12500 . - . gene_id "Fk_gene_2"; transcript_id "transcript_Fk_gene_2"; <BLANKLINE> Default merging, showing the first two TSSes merged and reported as a single unique TSS for the gene. Note the conversion to BED: >>> x = tsses(db, merge_overlapping=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 11000 Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2 . - <BLANKLINE> Report both gene ID and transcript ID in the name. 
In some cases this can be easier to parse than the original GTF or GFF file. With no merging specified, we must add `as_bed6=True` to see the names in BED format. >>> x = tsses(db, attrs=['gene_id', 'transcript_id'], as_bed6=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208:FBtr0300689 . + chr2L 7528 7529 FBgn0031208:FBtr0300690 . + chr2L 10999 11000 Fk_gene_1:transcript_Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2:transcript_Fk_gene_2 . - <BLANKLINE> Use a 3kb merge distance so the last 2 features are merged together: >>> x = tsses(db, merge_overlapping=True, merge_kwargs=dict(d=3000)) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 12500 Fk_gene_1,Fk_gene_2 . - <BLANKLINE> The set of unique TSSes for each gene, +1kb upstream and 500bp downstream: >>> x = tsses(db, merge_overlapping=True) >>> x = x.slop(l=1000, r=500, s=True, genome='dm3') >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 6528 8029 FBgn0031208 . + chr2L 10499 12000 Fk_gene_1 . - chr2L 11999 13500 Fk_gene_2 . - <BLANKLINE> """ _override = os.environ.get('GFFUTILS_USES_BEDTOOLS_227_OR_LATER', None) if _override is not None: if _override == 'true': bedtools_227_or_later = True elif _override == 'false': bedtools_227_or_later = False else: raise ValueError( "Unknown value for GFFUTILS_USES_BEDTOOLS_227_OR_LATER " "environment variable: {0}".format(_override)) if bedtools_227_or_later: _merge_kwargs = dict(o='distinct', s=True, c='4,5,6') else: _merge_kwargs = dict(o='distinct', s=True, c='4') if merge_kwargs is not None: _merge_kwargs.update(merge_kwargs) def gen(): """ Generator of pybedtools.Intervals representing TSSes. 
""" for gene in db.features_of_type('gene'): for transcript in db.children(gene, level=1): if transcript.strand == '-': transcript.start = transcript.stop else: transcript.stop = transcript.start transcript.featuretype = transcript.featuretype + '_TSS' yield helpers.asinterval(transcript) # GFF/GTF format x = pybedtools.BedTool(gen()).sort() # Figure out default attrs to use, depending on the original format. if attrs is None: if db.dialect['fmt'] == 'gtf': attrs = 'gene_id' else: attrs = 'ID' if merge_overlapping or as_bed6: if isinstance(attrs, six.string_types): attrs = [attrs] def to_bed(f): """ Given a pybedtools.Interval, return a new Interval with the name set according to the kwargs provided above. """ name = attrs_sep.join([f.attrs[i] for i in attrs]) return pybedtools.Interval( f.chrom, f.start, f.stop, name, str(f.score), f.strand) x = x.each(to_bed).saveas() if merge_overlapping: if bedtools_227_or_later: x = x.merge(**_merge_kwargs) else: def fix_merge(f): f = featurefuncs.extend_fields(f, 6) return pybedtools.Interval( f.chrom, f.start, f.stop, f[4], '.', f[3]) x = x.merge(**_merge_kwargs).saveas().each(fix_merge).saveas() return x
python
def tsses(db, merge_overlapping=False, attrs=None, attrs_sep=":", merge_kwargs=None, as_bed6=False, bedtools_227_or_later=True): """ Create 1-bp transcription start sites for all transcripts in the database and return as a sorted pybedtools.BedTool object pointing to a temporary file. To save the file to a known location, use the `.moveto()` method on the resulting `pybedtools.BedTool` object. To extend regions upstream/downstream, see the `.slop()` method on the resulting `pybedtools.BedTool object`. Requires pybedtools. Parameters ---------- db : gffutils.FeatureDB The database to use as_bed6 : bool If True, output file is in BED6 format; otherwise it remains in the GFF/GTF format and dialect of the file used to create the database. Note that the merge options below necessarily force `as_bed6=True`. merge_overlapping : bool If True, output will be in BED format. Overlapping TSSes will be merged into a single feature, and their names will be collapsed using `merge_sep` and placed in the new name field. merge_kwargs : dict If `merge_overlapping=True`, these keyword arguments are passed to pybedtools.BedTool.merge(), which are in turn sent to `bedtools merge`. The merge operates on a BED6 file which will have had the name field constructed as specified by other arguments here. See the available options for your installed version of BEDTools; the defaults used here are `merge_kwargs=dict(o='distinct', c=4, s=True)`. Any provided `merge_kwargs` are used to *update* the default. It is recommended to not override `c=4` and `s=True`, otherwise the post-merge fixing may not work correctly. Good candidates for tweaking are `d` (merge distance), `o` (operation), `delim` (delimiter to use for collapse operations). attrs : str or list Only has an effect when `as_bed6=True` or `merge_overlapping=True`. Determines what goes in the name field of an output BED file. By default, "gene_id" for GTF databases and "ID" for GFF. If a list of attributes is supplied, e.g. 
["gene_id", "transcript_id"], then these will be joined by `attr_join_sep` and then placed in the name field. attrs_sep: str If `as_bed6=True` or `merge_overlapping=True`, then use this character to separate attributes in the name field of the output BED. If also using `merge_overlapping=True`, you'll probably want this to be different than `merge_sep` in order to parse things out later. bedtools_227_or_later : bool In version 2.27, BEDTools changed the output for merge. By default, this function expects BEDTools version 2.27 or later, but set this to False to assume the older behavior. For testing purposes, the environment variable GFFUTILS_USES_BEDTOOLS_227_OR_LATER is set to either "true" or "false" and is used to override this argument. Examples -------- >>> import gffutils >>> db = gffutils.create_db( ... gffutils.example_filename('FBgn0031208.gtf'), ... ":memory:", ... keep_order=True, ... verbose=False) Default settings -- no merging, and report a separate TSS on each line even if they overlap (as in the first two): >>> print(tsses(db)) # doctest: +NORMALIZE_WHITESPACE chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300689"; chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300690"; chr2L gffutils_derived transcript_TSS 11000 11000 . - . gene_id "Fk_gene_1"; transcript_id "transcript_Fk_gene_1"; chr2L gffutils_derived transcript_TSS 12500 12500 . - . gene_id "Fk_gene_2"; transcript_id "transcript_Fk_gene_2"; <BLANKLINE> Default merging, showing the first two TSSes merged and reported as a single unique TSS for the gene. Note the conversion to BED: >>> x = tsses(db, merge_overlapping=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 11000 Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2 . - <BLANKLINE> Report both gene ID and transcript ID in the name. 
In some cases this can be easier to parse than the original GTF or GFF file. With no merging specified, we must add `as_bed6=True` to see the names in BED format. >>> x = tsses(db, attrs=['gene_id', 'transcript_id'], as_bed6=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208:FBtr0300689 . + chr2L 7528 7529 FBgn0031208:FBtr0300690 . + chr2L 10999 11000 Fk_gene_1:transcript_Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2:transcript_Fk_gene_2 . - <BLANKLINE> Use a 3kb merge distance so the last 2 features are merged together: >>> x = tsses(db, merge_overlapping=True, merge_kwargs=dict(d=3000)) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 12500 Fk_gene_1,Fk_gene_2 . - <BLANKLINE> The set of unique TSSes for each gene, +1kb upstream and 500bp downstream: >>> x = tsses(db, merge_overlapping=True) >>> x = x.slop(l=1000, r=500, s=True, genome='dm3') >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 6528 8029 FBgn0031208 . + chr2L 10499 12000 Fk_gene_1 . - chr2L 11999 13500 Fk_gene_2 . - <BLANKLINE> """ _override = os.environ.get('GFFUTILS_USES_BEDTOOLS_227_OR_LATER', None) if _override is not None: if _override == 'true': bedtools_227_or_later = True elif _override == 'false': bedtools_227_or_later = False else: raise ValueError( "Unknown value for GFFUTILS_USES_BEDTOOLS_227_OR_LATER " "environment variable: {0}".format(_override)) if bedtools_227_or_later: _merge_kwargs = dict(o='distinct', s=True, c='4,5,6') else: _merge_kwargs = dict(o='distinct', s=True, c='4') if merge_kwargs is not None: _merge_kwargs.update(merge_kwargs) def gen(): """ Generator of pybedtools.Intervals representing TSSes. 
""" for gene in db.features_of_type('gene'): for transcript in db.children(gene, level=1): if transcript.strand == '-': transcript.start = transcript.stop else: transcript.stop = transcript.start transcript.featuretype = transcript.featuretype + '_TSS' yield helpers.asinterval(transcript) # GFF/GTF format x = pybedtools.BedTool(gen()).sort() # Figure out default attrs to use, depending on the original format. if attrs is None: if db.dialect['fmt'] == 'gtf': attrs = 'gene_id' else: attrs = 'ID' if merge_overlapping or as_bed6: if isinstance(attrs, six.string_types): attrs = [attrs] def to_bed(f): """ Given a pybedtools.Interval, return a new Interval with the name set according to the kwargs provided above. """ name = attrs_sep.join([f.attrs[i] for i in attrs]) return pybedtools.Interval( f.chrom, f.start, f.stop, name, str(f.score), f.strand) x = x.each(to_bed).saveas() if merge_overlapping: if bedtools_227_or_later: x = x.merge(**_merge_kwargs) else: def fix_merge(f): f = featurefuncs.extend_fields(f, 6) return pybedtools.Interval( f.chrom, f.start, f.stop, f[4], '.', f[3]) x = x.merge(**_merge_kwargs).saveas().each(fix_merge).saveas() return x
[ "def", "tsses", "(", "db", ",", "merge_overlapping", "=", "False", ",", "attrs", "=", "None", ",", "attrs_sep", "=", "\":\"", ",", "merge_kwargs", "=", "None", ",", "as_bed6", "=", "False", ",", "bedtools_227_or_later", "=", "True", ")", ":", "_override", "=", "os", ".", "environ", ".", "get", "(", "'GFFUTILS_USES_BEDTOOLS_227_OR_LATER'", ",", "None", ")", "if", "_override", "is", "not", "None", ":", "if", "_override", "==", "'true'", ":", "bedtools_227_or_later", "=", "True", "elif", "_override", "==", "'false'", ":", "bedtools_227_or_later", "=", "False", "else", ":", "raise", "ValueError", "(", "\"Unknown value for GFFUTILS_USES_BEDTOOLS_227_OR_LATER \"", "\"environment variable: {0}\"", ".", "format", "(", "_override", ")", ")", "if", "bedtools_227_or_later", ":", "_merge_kwargs", "=", "dict", "(", "o", "=", "'distinct'", ",", "s", "=", "True", ",", "c", "=", "'4,5,6'", ")", "else", ":", "_merge_kwargs", "=", "dict", "(", "o", "=", "'distinct'", ",", "s", "=", "True", ",", "c", "=", "'4'", ")", "if", "merge_kwargs", "is", "not", "None", ":", "_merge_kwargs", ".", "update", "(", "merge_kwargs", ")", "def", "gen", "(", ")", ":", "\"\"\"\n Generator of pybedtools.Intervals representing TSSes.\n \"\"\"", "for", "gene", "in", "db", ".", "features_of_type", "(", "'gene'", ")", ":", "for", "transcript", "in", "db", ".", "children", "(", "gene", ",", "level", "=", "1", ")", ":", "if", "transcript", ".", "strand", "==", "'-'", ":", "transcript", ".", "start", "=", "transcript", ".", "stop", "else", ":", "transcript", ".", "stop", "=", "transcript", ".", "start", "transcript", ".", "featuretype", "=", "transcript", ".", "featuretype", "+", "'_TSS'", "yield", "helpers", ".", "asinterval", "(", "transcript", ")", "# GFF/GTF format", "x", "=", "pybedtools", ".", "BedTool", "(", "gen", "(", ")", ")", ".", "sort", "(", ")", "# Figure out default attrs to use, depending on the original format.", "if", "attrs", "is", "None", ":", "if", "db", ".", "dialect", "[", "'fmt'", "]", 
"==", "'gtf'", ":", "attrs", "=", "'gene_id'", "else", ":", "attrs", "=", "'ID'", "if", "merge_overlapping", "or", "as_bed6", ":", "if", "isinstance", "(", "attrs", ",", "six", ".", "string_types", ")", ":", "attrs", "=", "[", "attrs", "]", "def", "to_bed", "(", "f", ")", ":", "\"\"\"\n Given a pybedtools.Interval, return a new Interval with the name\n set according to the kwargs provided above.\n \"\"\"", "name", "=", "attrs_sep", ".", "join", "(", "[", "f", ".", "attrs", "[", "i", "]", "for", "i", "in", "attrs", "]", ")", "return", "pybedtools", ".", "Interval", "(", "f", ".", "chrom", ",", "f", ".", "start", ",", "f", ".", "stop", ",", "name", ",", "str", "(", "f", ".", "score", ")", ",", "f", ".", "strand", ")", "x", "=", "x", ".", "each", "(", "to_bed", ")", ".", "saveas", "(", ")", "if", "merge_overlapping", ":", "if", "bedtools_227_or_later", ":", "x", "=", "x", ".", "merge", "(", "*", "*", "_merge_kwargs", ")", "else", ":", "def", "fix_merge", "(", "f", ")", ":", "f", "=", "featurefuncs", ".", "extend_fields", "(", "f", ",", "6", ")", "return", "pybedtools", ".", "Interval", "(", "f", ".", "chrom", ",", "f", ".", "start", ",", "f", ".", "stop", ",", "f", "[", "4", "]", ",", "'.'", ",", "f", "[", "3", "]", ")", "x", "=", "x", ".", "merge", "(", "*", "*", "_merge_kwargs", ")", ".", "saveas", "(", ")", ".", "each", "(", "fix_merge", ")", ".", "saveas", "(", ")", "return", "x" ]
Create 1-bp transcription start sites for all transcripts in the database and return as a sorted pybedtools.BedTool object pointing to a temporary file. To save the file to a known location, use the `.moveto()` method on the resulting `pybedtools.BedTool` object. To extend regions upstream/downstream, see the `.slop()` method on the resulting `pybedtools.BedTool object`. Requires pybedtools. Parameters ---------- db : gffutils.FeatureDB The database to use as_bed6 : bool If True, output file is in BED6 format; otherwise it remains in the GFF/GTF format and dialect of the file used to create the database. Note that the merge options below necessarily force `as_bed6=True`. merge_overlapping : bool If True, output will be in BED format. Overlapping TSSes will be merged into a single feature, and their names will be collapsed using `merge_sep` and placed in the new name field. merge_kwargs : dict If `merge_overlapping=True`, these keyword arguments are passed to pybedtools.BedTool.merge(), which are in turn sent to `bedtools merge`. The merge operates on a BED6 file which will have had the name field constructed as specified by other arguments here. See the available options for your installed version of BEDTools; the defaults used here are `merge_kwargs=dict(o='distinct', c=4, s=True)`. Any provided `merge_kwargs` are used to *update* the default. It is recommended to not override `c=4` and `s=True`, otherwise the post-merge fixing may not work correctly. Good candidates for tweaking are `d` (merge distance), `o` (operation), `delim` (delimiter to use for collapse operations). attrs : str or list Only has an effect when `as_bed6=True` or `merge_overlapping=True`. Determines what goes in the name field of an output BED file. By default, "gene_id" for GTF databases and "ID" for GFF. If a list of attributes is supplied, e.g. ["gene_id", "transcript_id"], then these will be joined by `attr_join_sep` and then placed in the name field. 
attrs_sep: str If `as_bed6=True` or `merge_overlapping=True`, then use this character to separate attributes in the name field of the output BED. If also using `merge_overlapping=True`, you'll probably want this to be different than `merge_sep` in order to parse things out later. bedtools_227_or_later : bool In version 2.27, BEDTools changed the output for merge. By default, this function expects BEDTools version 2.27 or later, but set this to False to assume the older behavior. For testing purposes, the environment variable GFFUTILS_USES_BEDTOOLS_227_OR_LATER is set to either "true" or "false" and is used to override this argument. Examples -------- >>> import gffutils >>> db = gffutils.create_db( ... gffutils.example_filename('FBgn0031208.gtf'), ... ":memory:", ... keep_order=True, ... verbose=False) Default settings -- no merging, and report a separate TSS on each line even if they overlap (as in the first two): >>> print(tsses(db)) # doctest: +NORMALIZE_WHITESPACE chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300689"; chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300690"; chr2L gffutils_derived transcript_TSS 11000 11000 . - . gene_id "Fk_gene_1"; transcript_id "transcript_Fk_gene_1"; chr2L gffutils_derived transcript_TSS 12500 12500 . - . gene_id "Fk_gene_2"; transcript_id "transcript_Fk_gene_2"; <BLANKLINE> Default merging, showing the first two TSSes merged and reported as a single unique TSS for the gene. Note the conversion to BED: >>> x = tsses(db, merge_overlapping=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 11000 Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2 . - <BLANKLINE> Report both gene ID and transcript ID in the name. In some cases this can be easier to parse than the original GTF or GFF file. With no merging specified, we must add `as_bed6=True` to see the names in BED format. 
>>> x = tsses(db, attrs=['gene_id', 'transcript_id'], as_bed6=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208:FBtr0300689 . + chr2L 7528 7529 FBgn0031208:FBtr0300690 . + chr2L 10999 11000 Fk_gene_1:transcript_Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2:transcript_Fk_gene_2 . - <BLANKLINE> Use a 3kb merge distance so the last 2 features are merged together: >>> x = tsses(db, merge_overlapping=True, merge_kwargs=dict(d=3000)) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 12500 Fk_gene_1,Fk_gene_2 . - <BLANKLINE> The set of unique TSSes for each gene, +1kb upstream and 500bp downstream: >>> x = tsses(db, merge_overlapping=True) >>> x = x.slop(l=1000, r=500, s=True, genome='dm3') >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 6528 8029 FBgn0031208 . + chr2L 10499 12000 Fk_gene_1 . - chr2L 11999 13500 Fk_gene_2 . - <BLANKLINE>
[ "Create", "1", "-", "bp", "transcription", "start", "sites", "for", "all", "transcripts", "in", "the", "database", "and", "return", "as", "a", "sorted", "pybedtools", ".", "BedTool", "object", "pointing", "to", "a", "temporary", "file", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/pybedtools_integration.py#L26-L237
train
16,925
daler/gffutils
gffutils/gffwriter.py
GFFWriter.close
def close(self): """ Close the stream. Assumes stream has 'close' method. """ self.out_stream.close() # If we're asked to write in place, substitute the named # temporary file for the current file if self.in_place: shutil.move(self.temp_file.name, self.out)
python
def close(self): """ Close the stream. Assumes stream has 'close' method. """ self.out_stream.close() # If we're asked to write in place, substitute the named # temporary file for the current file if self.in_place: shutil.move(self.temp_file.name, self.out)
[ "def", "close", "(", "self", ")", ":", "self", ".", "out_stream", ".", "close", "(", ")", "# If we're asked to write in place, substitute the named", "# temporary file for the current file", "if", "self", ".", "in_place", ":", "shutil", ".", "move", "(", "self", ".", "temp_file", ".", "name", ",", "self", ".", "out", ")" ]
Close the stream. Assumes stream has 'close' method.
[ "Close", "the", "stream", ".", "Assumes", "stream", "has", "close", "method", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/gffwriter.py#L162-L170
train
16,926
daler/gffutils
gffutils/biopython_integration.py
to_seqfeature
def to_seqfeature(feature): """ Converts a gffutils.Feature object to a Bio.SeqFeature object. The GFF fields `source`, `score`, `seqid`, and `frame` are stored as qualifiers. GFF `attributes` are also stored as qualifiers. Parameters ---------- feature : Feature object, or string If string, assume it is a GFF or GTF-format line; otherwise just use the provided feature directly. """ if isinstance(feature, six.string_types): feature = feature_from_line(feature) qualifiers = { 'source': [feature.source], 'score': [feature.score], 'seqid': [feature.seqid], 'frame': [feature.frame], } qualifiers.update(feature.attributes) return SeqFeature( # Convert from GFF 1-based to standard Python 0-based indexing used by # BioPython FeatureLocation(feature.start - 1, feature.stop), id=feature.id, type=feature.featuretype, strand=_biopython_strand[feature.strand], qualifiers=qualifiers )
python
def to_seqfeature(feature): """ Converts a gffutils.Feature object to a Bio.SeqFeature object. The GFF fields `source`, `score`, `seqid`, and `frame` are stored as qualifiers. GFF `attributes` are also stored as qualifiers. Parameters ---------- feature : Feature object, or string If string, assume it is a GFF or GTF-format line; otherwise just use the provided feature directly. """ if isinstance(feature, six.string_types): feature = feature_from_line(feature) qualifiers = { 'source': [feature.source], 'score': [feature.score], 'seqid': [feature.seqid], 'frame': [feature.frame], } qualifiers.update(feature.attributes) return SeqFeature( # Convert from GFF 1-based to standard Python 0-based indexing used by # BioPython FeatureLocation(feature.start - 1, feature.stop), id=feature.id, type=feature.featuretype, strand=_biopython_strand[feature.strand], qualifiers=qualifiers )
[ "def", "to_seqfeature", "(", "feature", ")", ":", "if", "isinstance", "(", "feature", ",", "six", ".", "string_types", ")", ":", "feature", "=", "feature_from_line", "(", "feature", ")", "qualifiers", "=", "{", "'source'", ":", "[", "feature", ".", "source", "]", ",", "'score'", ":", "[", "feature", ".", "score", "]", ",", "'seqid'", ":", "[", "feature", ".", "seqid", "]", ",", "'frame'", ":", "[", "feature", ".", "frame", "]", ",", "}", "qualifiers", ".", "update", "(", "feature", ".", "attributes", ")", "return", "SeqFeature", "(", "# Convert from GFF 1-based to standard Python 0-based indexing used by", "# BioPython", "FeatureLocation", "(", "feature", ".", "start", "-", "1", ",", "feature", ".", "stop", ")", ",", "id", "=", "feature", ".", "id", ",", "type", "=", "feature", ".", "featuretype", ",", "strand", "=", "_biopython_strand", "[", "feature", ".", "strand", "]", ",", "qualifiers", "=", "qualifiers", ")" ]
Converts a gffutils.Feature object to a Bio.SeqFeature object. The GFF fields `source`, `score`, `seqid`, and `frame` are stored as qualifiers. GFF `attributes` are also stored as qualifiers. Parameters ---------- feature : Feature object, or string If string, assume it is a GFF or GTF-format line; otherwise just use the provided feature directly.
[ "Converts", "a", "gffutils", ".", "Feature", "object", "to", "a", "Bio", ".", "SeqFeature", "object", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/biopython_integration.py#L21-L52
train
16,927
daler/gffutils
gffutils/biopython_integration.py
from_seqfeature
def from_seqfeature(s, **kwargs): """ Converts a Bio.SeqFeature object to a gffutils.Feature object. The GFF fields `source`, `score`, `seqid`, and `frame` are assumed to be stored as qualifiers. Any other qualifiers will be assumed to be GFF attributes. """ source = s.qualifiers.get('source', '.')[0] score = s.qualifiers.get('score', '.')[0] seqid = s.qualifiers.get('seqid', '.')[0] frame = s.qualifiers.get('frame', '.')[0] strand = _feature_strand[s.strand] # BioPython parses 1-based GenBank positions into 0-based for use within # Python. We need to convert back to 1-based GFF format here. start = s.location.start.position + 1 stop = s.location.end.position featuretype = s.type id = s.id attributes = dict(s.qualifiers) attributes.pop('source', '.') attributes.pop('score', '.') attributes.pop('seqid', '.') attributes.pop('frame', '.') return Feature(seqid, source, featuretype, start, stop, score, strand, frame, attributes, id=id, **kwargs)
python
def from_seqfeature(s, **kwargs): """ Converts a Bio.SeqFeature object to a gffutils.Feature object. The GFF fields `source`, `score`, `seqid`, and `frame` are assumed to be stored as qualifiers. Any other qualifiers will be assumed to be GFF attributes. """ source = s.qualifiers.get('source', '.')[0] score = s.qualifiers.get('score', '.')[0] seqid = s.qualifiers.get('seqid', '.')[0] frame = s.qualifiers.get('frame', '.')[0] strand = _feature_strand[s.strand] # BioPython parses 1-based GenBank positions into 0-based for use within # Python. We need to convert back to 1-based GFF format here. start = s.location.start.position + 1 stop = s.location.end.position featuretype = s.type id = s.id attributes = dict(s.qualifiers) attributes.pop('source', '.') attributes.pop('score', '.') attributes.pop('seqid', '.') attributes.pop('frame', '.') return Feature(seqid, source, featuretype, start, stop, score, strand, frame, attributes, id=id, **kwargs)
[ "def", "from_seqfeature", "(", "s", ",", "*", "*", "kwargs", ")", ":", "source", "=", "s", ".", "qualifiers", ".", "get", "(", "'source'", ",", "'.'", ")", "[", "0", "]", "score", "=", "s", ".", "qualifiers", ".", "get", "(", "'score'", ",", "'.'", ")", "[", "0", "]", "seqid", "=", "s", ".", "qualifiers", ".", "get", "(", "'seqid'", ",", "'.'", ")", "[", "0", "]", "frame", "=", "s", ".", "qualifiers", ".", "get", "(", "'frame'", ",", "'.'", ")", "[", "0", "]", "strand", "=", "_feature_strand", "[", "s", ".", "strand", "]", "# BioPython parses 1-based GenBank positions into 0-based for use within", "# Python. We need to convert back to 1-based GFF format here.", "start", "=", "s", ".", "location", ".", "start", ".", "position", "+", "1", "stop", "=", "s", ".", "location", ".", "end", ".", "position", "featuretype", "=", "s", ".", "type", "id", "=", "s", ".", "id", "attributes", "=", "dict", "(", "s", ".", "qualifiers", ")", "attributes", ".", "pop", "(", "'source'", ",", "'.'", ")", "attributes", ".", "pop", "(", "'score'", ",", "'.'", ")", "attributes", ".", "pop", "(", "'seqid'", ",", "'.'", ")", "attributes", ".", "pop", "(", "'frame'", ",", "'.'", ")", "return", "Feature", "(", "seqid", ",", "source", ",", "featuretype", ",", "start", ",", "stop", ",", "score", ",", "strand", ",", "frame", ",", "attributes", ",", "id", "=", "id", ",", "*", "*", "kwargs", ")" ]
Converts a Bio.SeqFeature object to a gffutils.Feature object. The GFF fields `source`, `score`, `seqid`, and `frame` are assumed to be stored as qualifiers. Any other qualifiers will be assumed to be GFF attributes.
[ "Converts", "a", "Bio", ".", "SeqFeature", "object", "to", "a", "gffutils", ".", "Feature", "object", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/biopython_integration.py#L55-L81
train
16,928
daler/gffutils
gffutils/interface.py
FeatureDB.set_pragmas
def set_pragmas(self, pragmas): """ Set pragmas for the current database connection. Parameters ---------- pragmas : dict Dictionary of pragmas; see constants.default_pragmas for a template and http://www.sqlite.org/pragma.html for a full list. """ self.pragmas = pragmas c = self.conn.cursor() c.executescript( ';\n'.join( ['PRAGMA %s=%s' % i for i in self.pragmas.items()] ) ) self.conn.commit()
python
def set_pragmas(self, pragmas): """ Set pragmas for the current database connection. Parameters ---------- pragmas : dict Dictionary of pragmas; see constants.default_pragmas for a template and http://www.sqlite.org/pragma.html for a full list. """ self.pragmas = pragmas c = self.conn.cursor() c.executescript( ';\n'.join( ['PRAGMA %s=%s' % i for i in self.pragmas.items()] ) ) self.conn.commit()
[ "def", "set_pragmas", "(", "self", ",", "pragmas", ")", ":", "self", ".", "pragmas", "=", "pragmas", "c", "=", "self", ".", "conn", ".", "cursor", "(", ")", "c", ".", "executescript", "(", "';\\n'", ".", "join", "(", "[", "'PRAGMA %s=%s'", "%", "i", "for", "i", "in", "self", ".", "pragmas", ".", "items", "(", ")", "]", ")", ")", "self", ".", "conn", ".", "commit", "(", ")" ]
Set pragmas for the current database connection. Parameters ---------- pragmas : dict Dictionary of pragmas; see constants.default_pragmas for a template and http://www.sqlite.org/pragma.html for a full list.
[ "Set", "pragmas", "for", "the", "current", "database", "connection", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L163-L180
train
16,929
daler/gffutils
gffutils/interface.py
FeatureDB._feature_returner
def _feature_returner(self, **kwargs): """ Returns a feature, adding additional database-specific defaults """ kwargs.setdefault('dialect', self.dialect) kwargs.setdefault('keep_order', self.keep_order) kwargs.setdefault('sort_attribute_values', self.sort_attribute_values) return Feature(**kwargs)
python
def _feature_returner(self, **kwargs): """ Returns a feature, adding additional database-specific defaults """ kwargs.setdefault('dialect', self.dialect) kwargs.setdefault('keep_order', self.keep_order) kwargs.setdefault('sort_attribute_values', self.sort_attribute_values) return Feature(**kwargs)
[ "def", "_feature_returner", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'dialect'", ",", "self", ".", "dialect", ")", "kwargs", ".", "setdefault", "(", "'keep_order'", ",", "self", ".", "keep_order", ")", "kwargs", ".", "setdefault", "(", "'sort_attribute_values'", ",", "self", ".", "sort_attribute_values", ")", "return", "Feature", "(", "*", "*", "kwargs", ")" ]
Returns a feature, adding additional database-specific defaults
[ "Returns", "a", "feature", "adding", "additional", "database", "-", "specific", "defaults" ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L182-L189
train
16,930
daler/gffutils
gffutils/interface.py
FeatureDB.schema
def schema(self): """ Returns the database schema as a string. """ c = self.conn.cursor() c.execute( ''' SELECT sql FROM sqlite_master ''') results = [] for i, in c: if i is not None: results.append(i) return '\n'.join(results)
python
def schema(self): """ Returns the database schema as a string. """ c = self.conn.cursor() c.execute( ''' SELECT sql FROM sqlite_master ''') results = [] for i, in c: if i is not None: results.append(i) return '\n'.join(results)
[ "def", "schema", "(", "self", ")", ":", "c", "=", "self", ".", "conn", ".", "cursor", "(", ")", "c", ".", "execute", "(", "'''\n SELECT sql FROM sqlite_master\n '''", ")", "results", "=", "[", "]", "for", "i", ",", "in", "c", ":", "if", "i", "is", "not", "None", ":", "results", ".", "append", "(", "i", ")", "return", "'\\n'", ".", "join", "(", "results", ")" ]
Returns the database schema as a string.
[ "Returns", "the", "database", "schema", "as", "a", "string", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L199-L212
train
16,931
daler/gffutils
gffutils/interface.py
FeatureDB.featuretypes
def featuretypes(self): """ Iterate over feature types found in the database. Returns ------- A generator object that yields featuretypes (as strings) """ c = self.conn.cursor() c.execute( ''' SELECT DISTINCT featuretype from features ''') for i, in c: yield i
python
def featuretypes(self): """ Iterate over feature types found in the database. Returns ------- A generator object that yields featuretypes (as strings) """ c = self.conn.cursor() c.execute( ''' SELECT DISTINCT featuretype from features ''') for i, in c: yield i
[ "def", "featuretypes", "(", "self", ")", ":", "c", "=", "self", ".", "conn", ".", "cursor", "(", ")", "c", ".", "execute", "(", "'''\n SELECT DISTINCT featuretype from features\n '''", ")", "for", "i", ",", "in", "c", ":", "yield", "i" ]
Iterate over feature types found in the database. Returns ------- A generator object that yields featuretypes (as strings)
[ "Iterate", "over", "feature", "types", "found", "in", "the", "database", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L339-L353
train
16,932
daler/gffutils
gffutils/interface.py
FeatureDB.execute
def execute(self, query): """ Execute arbitrary queries on the db. .. seealso:: :class:`FeatureDB.schema` may be helpful when writing your own queries. Parameters ---------- query : str Query to execute -- trailing ";" optional. Returns ------- A sqlite3.Cursor object that can be iterated over. """ c = self.conn.cursor() return c.execute(query)
python
def execute(self, query): """ Execute arbitrary queries on the db. .. seealso:: :class:`FeatureDB.schema` may be helpful when writing your own queries. Parameters ---------- query : str Query to execute -- trailing ";" optional. Returns ------- A sqlite3.Cursor object that can be iterated over. """ c = self.conn.cursor() return c.execute(query)
[ "def", "execute", "(", "self", ",", "query", ")", ":", "c", "=", "self", ".", "conn", ".", "cursor", "(", ")", "return", "c", ".", "execute", "(", "query", ")" ]
Execute arbitrary queries on the db. .. seealso:: :class:`FeatureDB.schema` may be helpful when writing your own queries. Parameters ---------- query : str Query to execute -- trailing ";" optional. Returns ------- A sqlite3.Cursor object that can be iterated over.
[ "Execute", "arbitrary", "queries", "on", "the", "db", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L440-L461
train
16,933
daler/gffutils
gffutils/interface.py
FeatureDB.interfeatures
def interfeatures(self, features, new_featuretype=None, merge_attributes=True, dialect=None, attribute_func=None, update_attributes=None): """ Construct new features representing the space between features. For example, if `features` is a list of exons, then this method will return the introns. If `features` is a list of genes, then this method will return the intergenic regions. Providing N features will return N - 1 new features. This method purposefully does *not* do any merging or sorting of coordinates, so you may want to use :meth:`FeatureDB.merge` first, or when selecting features use the `order_by` kwarg, e.g., `db.features_of_type('gene', order_by=('seqid', 'start'))`. Parameters ---------- features : iterable of :class:`feature.Feature` instances Sorted, merged iterable new_featuretype : string or None The new features will all be of this type, or, if None (default) then the featuretypes will be constructed from the neighboring features, e.g., `inter_exon_exon`. merge_attributes : bool If True, new features' attributes will be a merge of the neighboring features' attributes. This is useful if you have provided a list of exons; the introns will then retain the transcript and/or gene parents as a single item. Otherwise, if False, the attribute will be a comma-separated list of values, potentially listing the same gene ID twice. attribute_func : callable or None If None, then nothing special is done to the attributes. If callable, then the callable accepts two attribute dictionaries and returns a single attribute dictionary. If `merge_attributes` is True, then `attribute_func` is called before `merge_attributes`. This could be useful for manually managing IDs for the new features. update_attributes : dict After attributes have been modified and merged, this dictionary can be used to replace parts of the attributes dictionary. 
Returns ------- A generator that yields :class:`Feature` objects """ for i, f in enumerate(features): # no inter-feature for the first one if i == 0: interfeature_start = f.stop last_feature = f continue interfeature_stop = f.start if new_featuretype is None: new_featuretype = 'inter_%s_%s' % ( last_feature.featuretype, f.featuretype) if last_feature.strand != f.strand: new_strand = '.' else: new_strand = f.strand if last_feature.chrom != f.chrom: # We've moved to a new chromosome. For example, if we're # getting intergenic regions from all genes, they will be on # different chromosomes. We still assume sorted features, but # don't complain if they're on different chromosomes -- just # move on. last_feature = f continue strand = new_strand chrom = last_feature.chrom # Shrink interfeature_start += 1 interfeature_stop -= 1 if merge_attributes: new_attributes = helpers.merge_attributes( last_feature.attributes, f.attributes) else: new_attributes = {} if update_attributes: new_attributes.update(update_attributes) new_bin = bins.bins( interfeature_start, interfeature_stop, one=True) _id = None fields = dict( seqid=chrom, source='gffutils_derived', featuretype=new_featuretype, start=interfeature_start, end=interfeature_stop, score='.', strand=strand, frame='.', attributes=new_attributes, bin=new_bin) if dialect is None: # Support for @classmethod -- if calling from the class, then # self.dialect is not defined, so defer to Feature's default # (which will be constants.dialect, or GFF3). try: dialect = self.dialect except AttributeError: dialect = None yield self._feature_returner(**fields) interfeature_start = f.stop
python
def interfeatures(self, features, new_featuretype=None, merge_attributes=True, dialect=None, attribute_func=None, update_attributes=None): """ Construct new features representing the space between features. For example, if `features` is a list of exons, then this method will return the introns. If `features` is a list of genes, then this method will return the intergenic regions. Providing N features will return N - 1 new features. This method purposefully does *not* do any merging or sorting of coordinates, so you may want to use :meth:`FeatureDB.merge` first, or when selecting features use the `order_by` kwarg, e.g., `db.features_of_type('gene', order_by=('seqid', 'start'))`. Parameters ---------- features : iterable of :class:`feature.Feature` instances Sorted, merged iterable new_featuretype : string or None The new features will all be of this type, or, if None (default) then the featuretypes will be constructed from the neighboring features, e.g., `inter_exon_exon`. merge_attributes : bool If True, new features' attributes will be a merge of the neighboring features' attributes. This is useful if you have provided a list of exons; the introns will then retain the transcript and/or gene parents as a single item. Otherwise, if False, the attribute will be a comma-separated list of values, potentially listing the same gene ID twice. attribute_func : callable or None If None, then nothing special is done to the attributes. If callable, then the callable accepts two attribute dictionaries and returns a single attribute dictionary. If `merge_attributes` is True, then `attribute_func` is called before `merge_attributes`. This could be useful for manually managing IDs for the new features. update_attributes : dict After attributes have been modified and merged, this dictionary can be used to replace parts of the attributes dictionary. 
Returns ------- A generator that yields :class:`Feature` objects """ for i, f in enumerate(features): # no inter-feature for the first one if i == 0: interfeature_start = f.stop last_feature = f continue interfeature_stop = f.start if new_featuretype is None: new_featuretype = 'inter_%s_%s' % ( last_feature.featuretype, f.featuretype) if last_feature.strand != f.strand: new_strand = '.' else: new_strand = f.strand if last_feature.chrom != f.chrom: # We've moved to a new chromosome. For example, if we're # getting intergenic regions from all genes, they will be on # different chromosomes. We still assume sorted features, but # don't complain if they're on different chromosomes -- just # move on. last_feature = f continue strand = new_strand chrom = last_feature.chrom # Shrink interfeature_start += 1 interfeature_stop -= 1 if merge_attributes: new_attributes = helpers.merge_attributes( last_feature.attributes, f.attributes) else: new_attributes = {} if update_attributes: new_attributes.update(update_attributes) new_bin = bins.bins( interfeature_start, interfeature_stop, one=True) _id = None fields = dict( seqid=chrom, source='gffutils_derived', featuretype=new_featuretype, start=interfeature_start, end=interfeature_stop, score='.', strand=strand, frame='.', attributes=new_attributes, bin=new_bin) if dialect is None: # Support for @classmethod -- if calling from the class, then # self.dialect is not defined, so defer to Feature's default # (which will be constants.dialect, or GFF3). try: dialect = self.dialect except AttributeError: dialect = None yield self._feature_returner(**fields) interfeature_start = f.stop
[ "def", "interfeatures", "(", "self", ",", "features", ",", "new_featuretype", "=", "None", ",", "merge_attributes", "=", "True", ",", "dialect", "=", "None", ",", "attribute_func", "=", "None", ",", "update_attributes", "=", "None", ")", ":", "for", "i", ",", "f", "in", "enumerate", "(", "features", ")", ":", "# no inter-feature for the first one", "if", "i", "==", "0", ":", "interfeature_start", "=", "f", ".", "stop", "last_feature", "=", "f", "continue", "interfeature_stop", "=", "f", ".", "start", "if", "new_featuretype", "is", "None", ":", "new_featuretype", "=", "'inter_%s_%s'", "%", "(", "last_feature", ".", "featuretype", ",", "f", ".", "featuretype", ")", "if", "last_feature", ".", "strand", "!=", "f", ".", "strand", ":", "new_strand", "=", "'.'", "else", ":", "new_strand", "=", "f", ".", "strand", "if", "last_feature", ".", "chrom", "!=", "f", ".", "chrom", ":", "# We've moved to a new chromosome. For example, if we're", "# getting intergenic regions from all genes, they will be on", "# different chromosomes. 
We still assume sorted features, but", "# don't complain if they're on different chromosomes -- just", "# move on.", "last_feature", "=", "f", "continue", "strand", "=", "new_strand", "chrom", "=", "last_feature", ".", "chrom", "# Shrink", "interfeature_start", "+=", "1", "interfeature_stop", "-=", "1", "if", "merge_attributes", ":", "new_attributes", "=", "helpers", ".", "merge_attributes", "(", "last_feature", ".", "attributes", ",", "f", ".", "attributes", ")", "else", ":", "new_attributes", "=", "{", "}", "if", "update_attributes", ":", "new_attributes", ".", "update", "(", "update_attributes", ")", "new_bin", "=", "bins", ".", "bins", "(", "interfeature_start", ",", "interfeature_stop", ",", "one", "=", "True", ")", "_id", "=", "None", "fields", "=", "dict", "(", "seqid", "=", "chrom", ",", "source", "=", "'gffutils_derived'", ",", "featuretype", "=", "new_featuretype", ",", "start", "=", "interfeature_start", ",", "end", "=", "interfeature_stop", ",", "score", "=", "'.'", ",", "strand", "=", "strand", ",", "frame", "=", "'.'", ",", "attributes", "=", "new_attributes", ",", "bin", "=", "new_bin", ")", "if", "dialect", "is", "None", ":", "# Support for @classmethod -- if calling from the class, then", "# self.dialect is not defined, so defer to Feature's default", "# (which will be constants.dialect, or GFF3).", "try", ":", "dialect", "=", "self", ".", "dialect", "except", "AttributeError", ":", "dialect", "=", "None", "yield", "self", ".", "_feature_returner", "(", "*", "*", "fields", ")", "interfeature_start", "=", "f", ".", "stop" ]
Construct new features representing the space between features. For example, if `features` is a list of exons, then this method will return the introns. If `features` is a list of genes, then this method will return the intergenic regions. Providing N features will return N - 1 new features. This method purposefully does *not* do any merging or sorting of coordinates, so you may want to use :meth:`FeatureDB.merge` first, or when selecting features use the `order_by` kwarg, e.g., `db.features_of_type('gene', order_by=('seqid', 'start'))`. Parameters ---------- features : iterable of :class:`feature.Feature` instances Sorted, merged iterable new_featuretype : string or None The new features will all be of this type, or, if None (default) then the featuretypes will be constructed from the neighboring features, e.g., `inter_exon_exon`. merge_attributes : bool If True, new features' attributes will be a merge of the neighboring features' attributes. This is useful if you have provided a list of exons; the introns will then retain the transcript and/or gene parents as a single item. Otherwise, if False, the attribute will be a comma-separated list of values, potentially listing the same gene ID twice. attribute_func : callable or None If None, then nothing special is done to the attributes. If callable, then the callable accepts two attribute dictionaries and returns a single attribute dictionary. If `merge_attributes` is True, then `attribute_func` is called before `merge_attributes`. This could be useful for manually managing IDs for the new features. update_attributes : dict After attributes have been modified and merged, this dictionary can be used to replace parts of the attributes dictionary. Returns ------- A generator that yields :class:`Feature` objects
[ "Construct", "new", "features", "representing", "the", "space", "between", "features", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L650-L766
train
16,934
daler/gffutils
gffutils/interface.py
FeatureDB.update
def update(self, data, make_backup=True, **kwargs): """ Update database with features in `data`. data : str, iterable, FeatureDB instance If FeatureDB, all data will be used. If string, assume it's a filename of a GFF or GTF file. Otherwise, assume it's an iterable of Feature objects. The classes in gffutils.iterators may be helpful in this case. make_backup : bool If True, and the database you're about to update is a file on disk, makes a copy of the existing database and saves it with a .bak extension. Notes ----- Other kwargs are used in the same way as in gffutils.create_db; see the help for that function for details. Returns ------- FeatureDB with updated features. """ from gffutils import create from gffutils import iterators if make_backup: if isinstance(self.dbfn, six.string_types): shutil.copy2(self.dbfn, self.dbfn + '.bak') # get iterator-specific kwargs _iterator_kwargs = {} for k, v in kwargs.items(): if k in constants._iterator_kwargs: _iterator_kwargs[k] = v # Handle all sorts of input data = iterators.DataIterator(data, **_iterator_kwargs) if self.dialect['fmt'] == 'gtf': if 'id_spec' not in kwargs: kwargs['id_spec'] = { 'gene': 'gene_id', 'transcript': 'transcript_id'} db = create._GTFDBCreator( data=data, dbfn=self.dbfn, dialect=self.dialect, **kwargs) elif self.dialect['fmt'] == 'gff3': if 'id_spec' not in kwargs: kwargs['id_spec'] = 'ID' db = create._GFFDBCreator( data=data, dbfn=self.dbfn, dialect=self.dialect, **kwargs) else: raise ValueError db._populate_from_lines(data) db._update_relations() db._finalize() return db
python
def update(self, data, make_backup=True, **kwargs): """ Update database with features in `data`. data : str, iterable, FeatureDB instance If FeatureDB, all data will be used. If string, assume it's a filename of a GFF or GTF file. Otherwise, assume it's an iterable of Feature objects. The classes in gffutils.iterators may be helpful in this case. make_backup : bool If True, and the database you're about to update is a file on disk, makes a copy of the existing database and saves it with a .bak extension. Notes ----- Other kwargs are used in the same way as in gffutils.create_db; see the help for that function for details. Returns ------- FeatureDB with updated features. """ from gffutils import create from gffutils import iterators if make_backup: if isinstance(self.dbfn, six.string_types): shutil.copy2(self.dbfn, self.dbfn + '.bak') # get iterator-specific kwargs _iterator_kwargs = {} for k, v in kwargs.items(): if k in constants._iterator_kwargs: _iterator_kwargs[k] = v # Handle all sorts of input data = iterators.DataIterator(data, **_iterator_kwargs) if self.dialect['fmt'] == 'gtf': if 'id_spec' not in kwargs: kwargs['id_spec'] = { 'gene': 'gene_id', 'transcript': 'transcript_id'} db = create._GTFDBCreator( data=data, dbfn=self.dbfn, dialect=self.dialect, **kwargs) elif self.dialect['fmt'] == 'gff3': if 'id_spec' not in kwargs: kwargs['id_spec'] = 'ID' db = create._GFFDBCreator( data=data, dbfn=self.dbfn, dialect=self.dialect, **kwargs) else: raise ValueError db._populate_from_lines(data) db._update_relations() db._finalize() return db
[ "def", "update", "(", "self", ",", "data", ",", "make_backup", "=", "True", ",", "*", "*", "kwargs", ")", ":", "from", "gffutils", "import", "create", "from", "gffutils", "import", "iterators", "if", "make_backup", ":", "if", "isinstance", "(", "self", ".", "dbfn", ",", "six", ".", "string_types", ")", ":", "shutil", ".", "copy2", "(", "self", ".", "dbfn", ",", "self", ".", "dbfn", "+", "'.bak'", ")", "# get iterator-specific kwargs", "_iterator_kwargs", "=", "{", "}", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", "in", "constants", ".", "_iterator_kwargs", ":", "_iterator_kwargs", "[", "k", "]", "=", "v", "# Handle all sorts of input", "data", "=", "iterators", ".", "DataIterator", "(", "data", ",", "*", "*", "_iterator_kwargs", ")", "if", "self", ".", "dialect", "[", "'fmt'", "]", "==", "'gtf'", ":", "if", "'id_spec'", "not", "in", "kwargs", ":", "kwargs", "[", "'id_spec'", "]", "=", "{", "'gene'", ":", "'gene_id'", ",", "'transcript'", ":", "'transcript_id'", "}", "db", "=", "create", ".", "_GTFDBCreator", "(", "data", "=", "data", ",", "dbfn", "=", "self", ".", "dbfn", ",", "dialect", "=", "self", ".", "dialect", ",", "*", "*", "kwargs", ")", "elif", "self", ".", "dialect", "[", "'fmt'", "]", "==", "'gff3'", ":", "if", "'id_spec'", "not", "in", "kwargs", ":", "kwargs", "[", "'id_spec'", "]", "=", "'ID'", "db", "=", "create", ".", "_GFFDBCreator", "(", "data", "=", "data", ",", "dbfn", "=", "self", ".", "dbfn", ",", "dialect", "=", "self", ".", "dialect", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "ValueError", "db", ".", "_populate_from_lines", "(", "data", ")", "db", ".", "_update_relations", "(", ")", "db", ".", "_finalize", "(", ")", "return", "db" ]
Update database with features in `data`. data : str, iterable, FeatureDB instance If FeatureDB, all data will be used. If string, assume it's a filename of a GFF or GTF file. Otherwise, assume it's an iterable of Feature objects. The classes in gffutils.iterators may be helpful in this case. make_backup : bool If True, and the database you're about to update is a file on disk, makes a copy of the existing database and saves it with a .bak extension. Notes ----- Other kwargs are used in the same way as in gffutils.create_db; see the help for that function for details. Returns ------- FeatureDB with updated features.
[ "Update", "database", "with", "features", "in", "data", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L814-L871
train
16,935
daler/gffutils
gffutils/interface.py
FeatureDB.create_introns
def create_introns(self, exon_featuretype='exon', grandparent_featuretype='gene', parent_featuretype=None, new_featuretype='intron', merge_attributes=True): """ Create introns from existing annotations. Parameters ---------- exon_featuretype : string Feature type to use in order to infer introns. Typically `"exon"`. grandparent_featuretype : string If `grandparent_featuretype` is not None, then group exons by children of this featuretype. If `granparent_featuretype` is "gene" (default), then introns will be created for all first-level children of genes. This may include mRNA, rRNA, ncRNA, etc. If you only want to infer introns from one of these featuretypes (e.g., mRNA), then use the `parent_featuretype` kwarg which is mutually exclusive with `grandparent_featuretype`. parent_featuretype : string If `parent_featuretype` is not None, then only use this featuretype to infer introns. Use this if you only want a subset of featuretypes to have introns (e.g., "mRNA" only, and not ncRNA or rRNA). Mutually exclusive with `grandparent_featuretype`. new_featuretype : string Feature type to use for the inferred introns; default is `"intron"`. merge_attributes : bool Whether or not to merge attributes from all exons. If False then no attributes will be created for the introns. 
Returns ------- A generator object that yields :class:`Feature` objects representing new introns Notes ----- The returned generator can be passed directly to the :meth:`FeatureDB.update` method to permanently add them to the database, e.g., :: db.update(db.create_introns()) """ if (grandparent_featuretype and parent_featuretype) or ( grandparent_featuretype is None and parent_featuretype is None ): raise ValueError("exactly one of `grandparent_featuretype` or " "`parent_featuretype` should be provided") if grandparent_featuretype: def child_gen(): for gene in self.features_of_type(grandparent_featuretype): for child in self.children(gene, level=1): yield child elif parent_featuretype: def child_gen(): for child in self.features_of_type(parent_featuretype): yield child for child in child_gen(): exons = self.children(child, level=1, featuretype=exon_featuretype, order_by='start') for intron in self.interfeatures( exons, new_featuretype=new_featuretype, merge_attributes=merge_attributes, dialect=self.dialect ): yield intron
python
def create_introns(self, exon_featuretype='exon', grandparent_featuretype='gene', parent_featuretype=None, new_featuretype='intron', merge_attributes=True): """ Create introns from existing annotations. Parameters ---------- exon_featuretype : string Feature type to use in order to infer introns. Typically `"exon"`. grandparent_featuretype : string If `grandparent_featuretype` is not None, then group exons by children of this featuretype. If `granparent_featuretype` is "gene" (default), then introns will be created for all first-level children of genes. This may include mRNA, rRNA, ncRNA, etc. If you only want to infer introns from one of these featuretypes (e.g., mRNA), then use the `parent_featuretype` kwarg which is mutually exclusive with `grandparent_featuretype`. parent_featuretype : string If `parent_featuretype` is not None, then only use this featuretype to infer introns. Use this if you only want a subset of featuretypes to have introns (e.g., "mRNA" only, and not ncRNA or rRNA). Mutually exclusive with `grandparent_featuretype`. new_featuretype : string Feature type to use for the inferred introns; default is `"intron"`. merge_attributes : bool Whether or not to merge attributes from all exons. If False then no attributes will be created for the introns. 
Returns ------- A generator object that yields :class:`Feature` objects representing new introns Notes ----- The returned generator can be passed directly to the :meth:`FeatureDB.update` method to permanently add them to the database, e.g., :: db.update(db.create_introns()) """ if (grandparent_featuretype and parent_featuretype) or ( grandparent_featuretype is None and parent_featuretype is None ): raise ValueError("exactly one of `grandparent_featuretype` or " "`parent_featuretype` should be provided") if grandparent_featuretype: def child_gen(): for gene in self.features_of_type(grandparent_featuretype): for child in self.children(gene, level=1): yield child elif parent_featuretype: def child_gen(): for child in self.features_of_type(parent_featuretype): yield child for child in child_gen(): exons = self.children(child, level=1, featuretype=exon_featuretype, order_by='start') for intron in self.interfeatures( exons, new_featuretype=new_featuretype, merge_attributes=merge_attributes, dialect=self.dialect ): yield intron
[ "def", "create_introns", "(", "self", ",", "exon_featuretype", "=", "'exon'", ",", "grandparent_featuretype", "=", "'gene'", ",", "parent_featuretype", "=", "None", ",", "new_featuretype", "=", "'intron'", ",", "merge_attributes", "=", "True", ")", ":", "if", "(", "grandparent_featuretype", "and", "parent_featuretype", ")", "or", "(", "grandparent_featuretype", "is", "None", "and", "parent_featuretype", "is", "None", ")", ":", "raise", "ValueError", "(", "\"exactly one of `grandparent_featuretype` or \"", "\"`parent_featuretype` should be provided\"", ")", "if", "grandparent_featuretype", ":", "def", "child_gen", "(", ")", ":", "for", "gene", "in", "self", ".", "features_of_type", "(", "grandparent_featuretype", ")", ":", "for", "child", "in", "self", ".", "children", "(", "gene", ",", "level", "=", "1", ")", ":", "yield", "child", "elif", "parent_featuretype", ":", "def", "child_gen", "(", ")", ":", "for", "child", "in", "self", ".", "features_of_type", "(", "parent_featuretype", ")", ":", "yield", "child", "for", "child", "in", "child_gen", "(", ")", ":", "exons", "=", "self", ".", "children", "(", "child", ",", "level", "=", "1", ",", "featuretype", "=", "exon_featuretype", ",", "order_by", "=", "'start'", ")", "for", "intron", "in", "self", ".", "interfeatures", "(", "exons", ",", "new_featuretype", "=", "new_featuretype", ",", "merge_attributes", "=", "merge_attributes", ",", "dialect", "=", "self", ".", "dialect", ")", ":", "yield", "intron" ]
Create introns from existing annotations. Parameters ---------- exon_featuretype : string Feature type to use in order to infer introns. Typically `"exon"`. grandparent_featuretype : string If `grandparent_featuretype` is not None, then group exons by children of this featuretype. If `granparent_featuretype` is "gene" (default), then introns will be created for all first-level children of genes. This may include mRNA, rRNA, ncRNA, etc. If you only want to infer introns from one of these featuretypes (e.g., mRNA), then use the `parent_featuretype` kwarg which is mutually exclusive with `grandparent_featuretype`. parent_featuretype : string If `parent_featuretype` is not None, then only use this featuretype to infer introns. Use this if you only want a subset of featuretypes to have introns (e.g., "mRNA" only, and not ncRNA or rRNA). Mutually exclusive with `grandparent_featuretype`. new_featuretype : string Feature type to use for the inferred introns; default is `"intron"`. merge_attributes : bool Whether or not to merge attributes from all exons. If False then no attributes will be created for the introns. Returns ------- A generator object that yields :class:`Feature` objects representing new introns Notes ----- The returned generator can be passed directly to the :meth:`FeatureDB.update` method to permanently add them to the database, e.g., :: db.update(db.create_introns())
[ "Create", "introns", "from", "existing", "annotations", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L945-L1017
train
16,936
daler/gffutils
gffutils/interface.py
FeatureDB.merge
def merge(self, features, ignore_strand=False): """ Merge overlapping features together. Parameters ---------- features : iterator of Feature instances ignore_strand : bool If True, features on multiple strands will be merged, and the final strand will be set to '.'. Otherwise, ValueError will be raised if trying to merge features on differnt strands. Returns ------- A generator object that yields :class:`Feature` objects representing the newly merged features. """ # Consume iterator up front... features = list(features) if len(features) == 0: raise StopIteration # Either set all strands to '+' or check for strand-consistency. if ignore_strand: strand = '.' else: strands = [i.strand for i in features] if len(set(strands)) > 1: raise ValueError('Specify ignore_strand=True to force merging ' 'of multiple strands') strand = strands[0] # Sanity check to make sure all features are from the same chromosome. chroms = [i.chrom for i in features] if len(set(chroms)) > 1: raise NotImplementedError('Merging multiple chromosomes not ' 'implemented') chrom = chroms[0] # To start, we create a merged feature of just the first feature. current_merged_start = features[0].start current_merged_stop = features[0].stop # We don't need to check the first one, so start at feature #2. for feature in features[1:]: # Does this feature start within the currently merged feature?... if feature.start <= current_merged_stop + 1: # ...It starts within, so leave current_merged_start where it # is. Does it extend any farther? if feature.stop >= current_merged_stop: # Extends further, so set a new stop position current_merged_stop = feature.stop else: # If feature.stop < current_merged_stop, it's completely # within the previous feature. Nothing more to do. continue else: # The start position is outside the merged feature, so we're # done with the current merged feature. Prepare for output... 
merged_feature = dict( seqid=feature.chrom, source='.', featuretype=feature.featuretype, start=current_merged_start, end=current_merged_stop, score='.', strand=strand, frame='.', attributes='') yield self._feature_returner(**merged_feature) # and we start a new one, initializing with this feature's # start and stop. current_merged_start = feature.start current_merged_stop = feature.stop # need to yield the last one. if len(features) == 1: feature = features[0] merged_feature = dict( seqid=feature.chrom, source='.', featuretype=feature.featuretype, start=current_merged_start, end=current_merged_stop, score='.', strand=strand, frame='.', attributes='') yield self._feature_returner(**merged_feature)
python
def merge(self, features, ignore_strand=False): """ Merge overlapping features together. Parameters ---------- features : iterator of Feature instances ignore_strand : bool If True, features on multiple strands will be merged, and the final strand will be set to '.'. Otherwise, ValueError will be raised if trying to merge features on differnt strands. Returns ------- A generator object that yields :class:`Feature` objects representing the newly merged features. """ # Consume iterator up front... features = list(features) if len(features) == 0: raise StopIteration # Either set all strands to '+' or check for strand-consistency. if ignore_strand: strand = '.' else: strands = [i.strand for i in features] if len(set(strands)) > 1: raise ValueError('Specify ignore_strand=True to force merging ' 'of multiple strands') strand = strands[0] # Sanity check to make sure all features are from the same chromosome. chroms = [i.chrom for i in features] if len(set(chroms)) > 1: raise NotImplementedError('Merging multiple chromosomes not ' 'implemented') chrom = chroms[0] # To start, we create a merged feature of just the first feature. current_merged_start = features[0].start current_merged_stop = features[0].stop # We don't need to check the first one, so start at feature #2. for feature in features[1:]: # Does this feature start within the currently merged feature?... if feature.start <= current_merged_stop + 1: # ...It starts within, so leave current_merged_start where it # is. Does it extend any farther? if feature.stop >= current_merged_stop: # Extends further, so set a new stop position current_merged_stop = feature.stop else: # If feature.stop < current_merged_stop, it's completely # within the previous feature. Nothing more to do. continue else: # The start position is outside the merged feature, so we're # done with the current merged feature. Prepare for output... 
merged_feature = dict( seqid=feature.chrom, source='.', featuretype=feature.featuretype, start=current_merged_start, end=current_merged_stop, score='.', strand=strand, frame='.', attributes='') yield self._feature_returner(**merged_feature) # and we start a new one, initializing with this feature's # start and stop. current_merged_start = feature.start current_merged_stop = feature.stop # need to yield the last one. if len(features) == 1: feature = features[0] merged_feature = dict( seqid=feature.chrom, source='.', featuretype=feature.featuretype, start=current_merged_start, end=current_merged_stop, score='.', strand=strand, frame='.', attributes='') yield self._feature_returner(**merged_feature)
[ "def", "merge", "(", "self", ",", "features", ",", "ignore_strand", "=", "False", ")", ":", "# Consume iterator up front...", "features", "=", "list", "(", "features", ")", "if", "len", "(", "features", ")", "==", "0", ":", "raise", "StopIteration", "# Either set all strands to '+' or check for strand-consistency.", "if", "ignore_strand", ":", "strand", "=", "'.'", "else", ":", "strands", "=", "[", "i", ".", "strand", "for", "i", "in", "features", "]", "if", "len", "(", "set", "(", "strands", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "'Specify ignore_strand=True to force merging '", "'of multiple strands'", ")", "strand", "=", "strands", "[", "0", "]", "# Sanity check to make sure all features are from the same chromosome.", "chroms", "=", "[", "i", ".", "chrom", "for", "i", "in", "features", "]", "if", "len", "(", "set", "(", "chroms", ")", ")", ">", "1", ":", "raise", "NotImplementedError", "(", "'Merging multiple chromosomes not '", "'implemented'", ")", "chrom", "=", "chroms", "[", "0", "]", "# To start, we create a merged feature of just the first feature.", "current_merged_start", "=", "features", "[", "0", "]", ".", "start", "current_merged_stop", "=", "features", "[", "0", "]", ".", "stop", "# We don't need to check the first one, so start at feature #2.", "for", "feature", "in", "features", "[", "1", ":", "]", ":", "# Does this feature start within the currently merged feature?...", "if", "feature", ".", "start", "<=", "current_merged_stop", "+", "1", ":", "# ...It starts within, so leave current_merged_start where it", "# is. Does it extend any farther?", "if", "feature", ".", "stop", ">=", "current_merged_stop", ":", "# Extends further, so set a new stop position", "current_merged_stop", "=", "feature", ".", "stop", "else", ":", "# If feature.stop < current_merged_stop, it's completely", "# within the previous feature. 
Nothing more to do.", "continue", "else", ":", "# The start position is outside the merged feature, so we're", "# done with the current merged feature. Prepare for output...", "merged_feature", "=", "dict", "(", "seqid", "=", "feature", ".", "chrom", ",", "source", "=", "'.'", ",", "featuretype", "=", "feature", ".", "featuretype", ",", "start", "=", "current_merged_start", ",", "end", "=", "current_merged_stop", ",", "score", "=", "'.'", ",", "strand", "=", "strand", ",", "frame", "=", "'.'", ",", "attributes", "=", "''", ")", "yield", "self", ".", "_feature_returner", "(", "*", "*", "merged_feature", ")", "# and we start a new one, initializing with this feature's", "# start and stop.", "current_merged_start", "=", "feature", ".", "start", "current_merged_stop", "=", "feature", ".", "stop", "# need to yield the last one.", "if", "len", "(", "features", ")", "==", "1", ":", "feature", "=", "features", "[", "0", "]", "merged_feature", "=", "dict", "(", "seqid", "=", "feature", ".", "chrom", ",", "source", "=", "'.'", ",", "featuretype", "=", "feature", ".", "featuretype", ",", "start", "=", "current_merged_start", ",", "end", "=", "current_merged_stop", ",", "score", "=", "'.'", ",", "strand", "=", "strand", ",", "frame", "=", "'.'", ",", "attributes", "=", "''", ")", "yield", "self", ".", "_feature_returner", "(", "*", "*", "merged_feature", ")" ]
Merge overlapping features together. Parameters ---------- features : iterator of Feature instances ignore_strand : bool If True, features on multiple strands will be merged, and the final strand will be set to '.'. Otherwise, ValueError will be raised if trying to merge features on differnt strands. Returns ------- A generator object that yields :class:`Feature` objects representing the newly merged features.
[ "Merge", "overlapping", "features", "together", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L1019-L1112
train
16,937
daler/gffutils
gffutils/interface.py
FeatureDB.children_bp
def children_bp(self, feature, child_featuretype='exon', merge=False, ignore_strand=False): """ Total bp of all children of a featuretype. Useful for getting the exonic bp of an mRNA. Parameters ---------- feature : str or Feature instance child_featuretype : str Which featuretype to consider. For example, to get exonic bp of an mRNA, use `child_featuretype='exon'`. merge : bool Whether or not to merge child features together before summing them. ignore_strand : bool If True, then overlapping features on different strands will be merged together; otherwise, merging features with different strands will result in a ValueError. Returns ------- Integer representing the total number of bp. """ children = self.children(feature, featuretype=child_featuretype, order_by='start') if merge: children = self.merge(children, ignore_strand=ignore_strand) total = 0 for child in children: total += len(child) return total
python
def children_bp(self, feature, child_featuretype='exon', merge=False, ignore_strand=False): """ Total bp of all children of a featuretype. Useful for getting the exonic bp of an mRNA. Parameters ---------- feature : str or Feature instance child_featuretype : str Which featuretype to consider. For example, to get exonic bp of an mRNA, use `child_featuretype='exon'`. merge : bool Whether or not to merge child features together before summing them. ignore_strand : bool If True, then overlapping features on different strands will be merged together; otherwise, merging features with different strands will result in a ValueError. Returns ------- Integer representing the total number of bp. """ children = self.children(feature, featuretype=child_featuretype, order_by='start') if merge: children = self.merge(children, ignore_strand=ignore_strand) total = 0 for child in children: total += len(child) return total
[ "def", "children_bp", "(", "self", ",", "feature", ",", "child_featuretype", "=", "'exon'", ",", "merge", "=", "False", ",", "ignore_strand", "=", "False", ")", ":", "children", "=", "self", ".", "children", "(", "feature", ",", "featuretype", "=", "child_featuretype", ",", "order_by", "=", "'start'", ")", "if", "merge", ":", "children", "=", "self", ".", "merge", "(", "children", ",", "ignore_strand", "=", "ignore_strand", ")", "total", "=", "0", "for", "child", "in", "children", ":", "total", "+=", "len", "(", "child", ")", "return", "total" ]
Total bp of all children of a featuretype. Useful for getting the exonic bp of an mRNA. Parameters ---------- feature : str or Feature instance child_featuretype : str Which featuretype to consider. For example, to get exonic bp of an mRNA, use `child_featuretype='exon'`. merge : bool Whether or not to merge child features together before summing them. ignore_strand : bool If True, then overlapping features on different strands will be merged together; otherwise, merging features with different strands will result in a ValueError. Returns ------- Integer representing the total number of bp.
[ "Total", "bp", "of", "all", "children", "of", "a", "featuretype", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L1114-L1152
train
16,938
daler/gffutils
gffutils/interface.py
FeatureDB.bed12
def bed12(self, feature, block_featuretype=['exon'], thick_featuretype=['CDS'], thin_featuretype=None, name_field='ID', color=None): """ Converts `feature` into a BED12 format. GFF and GTF files do not necessarily define genes consistently, so this method provides flexiblity in specifying what to call a "transcript". Parameters ---------- feature : str or Feature instance In most cases, this feature should be a transcript rather than a gene. block_featuretype : str or list Which featuretype to use as the exons. These are represented as blocks in the BED12 format. Typically 'exon'. Use the `thick_featuretype` and `thin_featuretype` arguments to control the display of CDS as thicker blocks and UTRs as thinner blocks. Note that the features for `thick` or `thin` are *not* automatically included in the blocks; if you do want them included, then those featuretypes should be added to this `block_features` list. If no child features of type `block_featuretype` are found, then the full `feature` is returned in BED12 format as if it had a single exon. thick_featuretype : str or list Child featuretype(s) to use in order to determine the boundaries of the "thick" blocks. In BED12 format, these represent coding sequences; typically this would be set to "CDS". This argument is mutually exclusive with `thin_featuretype`. Specifically, the BED12 thickStart will be the start coord of the first `thick` item and the thickEnd will be the stop coord of the last `thick` item. thin_featuretype : str or list Child featuretype(s) to use in order to determine the boundaries of the "thin" blocks. In BED12 format, these represent untranslated regions. Typically "utr" or ['three_prime_UTR', 'five_prime_UTR']. Mutually exclusive with `thick_featuretype`. Specifically, the BED12 thickStart field will be the stop coord of the first `thin` item and the thickEnd field will be the start coord of the last `thin` item. name_field : str Which attribute of `feature` to use as the feature's name. 
If this field is not present, a "." placeholder will be used instead. color : None or str If None, then use black (0,0,0) as the RGB color; otherwise this should be a comma-separated string of R,G,B values each of which are integers in the range 0-255. """ if thick_featuretype and thin_featuretype: raise ValueError("Can only specify one of `thick_featuertype` or " "`thin_featuretype`") exons = list(self.children(feature, featuretype=block_featuretype, order_by='start')) if len(exons) == 0: exons = [feature] feature = self[feature] first = exons[0].start last = exons[-1].stop if first != feature.start: raise ValueError( "Start of first exon (%s) does not match start of feature (%s)" % (first, feature.start)) if last != feature.stop: raise ValueError( "End of last exon (%s) does not match end of feature (%s)" % (last, feature.stop)) if color is None: color = '0,0,0' color = color.replace(' ', '').strip() # Use field names as defined at # http://genome.ucsc.edu/FAQ/FAQformat.html#format1 chrom = feature.chrom chromStart = feature.start - 1 chromEnd = feature.stop orig = constants.always_return_list constants.always_return_list = True try: name = feature[name_field][0] except KeyError: name = "." 
constants.always_return_list = orig score = feature.score if score == '.': score = '0' strand = feature.strand itemRgb = color blockCount = len(exons) blockSizes = [len(i) for i in exons] blockStarts = [i.start - 1 - chromStart for i in exons] if thick_featuretype: thick = list(self.children(feature, featuretype=thick_featuretype, order_by='start')) if len(thick) == 0: thickStart = feature.start thickEnd = feature.stop else: thickStart = thick[0].start - 1 # BED 0-based coords thickEnd = thick[-1].stop if thin_featuretype: thin = list(self.children(feature, featuretype=thin_featuretype, order_by='start')) if len(thin) == 0: thickStart = feature.start thickEnd = feature.stop else: thickStart = thin[0].stop thickEnd = thin[-1].start - 1 # BED 0-based coords tst = chromStart + blockStarts[-1] + blockSizes[-1] assert tst == chromEnd, "tst=%s; chromEnd=%s" % (tst, chromEnd) fields = [ chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, ','.join(map(str, blockSizes)), ','.join(map(str, blockStarts))] return '\t'.join(map(str, fields))
python
def bed12(self, feature, block_featuretype=['exon'], thick_featuretype=['CDS'], thin_featuretype=None, name_field='ID', color=None): """ Converts `feature` into a BED12 format. GFF and GTF files do not necessarily define genes consistently, so this method provides flexiblity in specifying what to call a "transcript". Parameters ---------- feature : str or Feature instance In most cases, this feature should be a transcript rather than a gene. block_featuretype : str or list Which featuretype to use as the exons. These are represented as blocks in the BED12 format. Typically 'exon'. Use the `thick_featuretype` and `thin_featuretype` arguments to control the display of CDS as thicker blocks and UTRs as thinner blocks. Note that the features for `thick` or `thin` are *not* automatically included in the blocks; if you do want them included, then those featuretypes should be added to this `block_features` list. If no child features of type `block_featuretype` are found, then the full `feature` is returned in BED12 format as if it had a single exon. thick_featuretype : str or list Child featuretype(s) to use in order to determine the boundaries of the "thick" blocks. In BED12 format, these represent coding sequences; typically this would be set to "CDS". This argument is mutually exclusive with `thin_featuretype`. Specifically, the BED12 thickStart will be the start coord of the first `thick` item and the thickEnd will be the stop coord of the last `thick` item. thin_featuretype : str or list Child featuretype(s) to use in order to determine the boundaries of the "thin" blocks. In BED12 format, these represent untranslated regions. Typically "utr" or ['three_prime_UTR', 'five_prime_UTR']. Mutually exclusive with `thick_featuretype`. Specifically, the BED12 thickStart field will be the stop coord of the first `thin` item and the thickEnd field will be the start coord of the last `thin` item. name_field : str Which attribute of `feature` to use as the feature's name. 
If this field is not present, a "." placeholder will be used instead. color : None or str If None, then use black (0,0,0) as the RGB color; otherwise this should be a comma-separated string of R,G,B values each of which are integers in the range 0-255. """ if thick_featuretype and thin_featuretype: raise ValueError("Can only specify one of `thick_featuertype` or " "`thin_featuretype`") exons = list(self.children(feature, featuretype=block_featuretype, order_by='start')) if len(exons) == 0: exons = [feature] feature = self[feature] first = exons[0].start last = exons[-1].stop if first != feature.start: raise ValueError( "Start of first exon (%s) does not match start of feature (%s)" % (first, feature.start)) if last != feature.stop: raise ValueError( "End of last exon (%s) does not match end of feature (%s)" % (last, feature.stop)) if color is None: color = '0,0,0' color = color.replace(' ', '').strip() # Use field names as defined at # http://genome.ucsc.edu/FAQ/FAQformat.html#format1 chrom = feature.chrom chromStart = feature.start - 1 chromEnd = feature.stop orig = constants.always_return_list constants.always_return_list = True try: name = feature[name_field][0] except KeyError: name = "." 
constants.always_return_list = orig score = feature.score if score == '.': score = '0' strand = feature.strand itemRgb = color blockCount = len(exons) blockSizes = [len(i) for i in exons] blockStarts = [i.start - 1 - chromStart for i in exons] if thick_featuretype: thick = list(self.children(feature, featuretype=thick_featuretype, order_by='start')) if len(thick) == 0: thickStart = feature.start thickEnd = feature.stop else: thickStart = thick[0].start - 1 # BED 0-based coords thickEnd = thick[-1].stop if thin_featuretype: thin = list(self.children(feature, featuretype=thin_featuretype, order_by='start')) if len(thin) == 0: thickStart = feature.start thickEnd = feature.stop else: thickStart = thin[0].stop thickEnd = thin[-1].start - 1 # BED 0-based coords tst = chromStart + blockStarts[-1] + blockSizes[-1] assert tst == chromEnd, "tst=%s; chromEnd=%s" % (tst, chromEnd) fields = [ chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, ','.join(map(str, blockSizes)), ','.join(map(str, blockStarts))] return '\t'.join(map(str, fields))
[ "def", "bed12", "(", "self", ",", "feature", ",", "block_featuretype", "=", "[", "'exon'", "]", ",", "thick_featuretype", "=", "[", "'CDS'", "]", ",", "thin_featuretype", "=", "None", ",", "name_field", "=", "'ID'", ",", "color", "=", "None", ")", ":", "if", "thick_featuretype", "and", "thin_featuretype", ":", "raise", "ValueError", "(", "\"Can only specify one of `thick_featuertype` or \"", "\"`thin_featuretype`\"", ")", "exons", "=", "list", "(", "self", ".", "children", "(", "feature", ",", "featuretype", "=", "block_featuretype", ",", "order_by", "=", "'start'", ")", ")", "if", "len", "(", "exons", ")", "==", "0", ":", "exons", "=", "[", "feature", "]", "feature", "=", "self", "[", "feature", "]", "first", "=", "exons", "[", "0", "]", ".", "start", "last", "=", "exons", "[", "-", "1", "]", ".", "stop", "if", "first", "!=", "feature", ".", "start", ":", "raise", "ValueError", "(", "\"Start of first exon (%s) does not match start of feature (%s)\"", "%", "(", "first", ",", "feature", ".", "start", ")", ")", "if", "last", "!=", "feature", ".", "stop", ":", "raise", "ValueError", "(", "\"End of last exon (%s) does not match end of feature (%s)\"", "%", "(", "last", ",", "feature", ".", "stop", ")", ")", "if", "color", "is", "None", ":", "color", "=", "'0,0,0'", "color", "=", "color", ".", "replace", "(", "' '", ",", "''", ")", ".", "strip", "(", ")", "# Use field names as defined at", "# http://genome.ucsc.edu/FAQ/FAQformat.html#format1", "chrom", "=", "feature", ".", "chrom", "chromStart", "=", "feature", ".", "start", "-", "1", "chromEnd", "=", "feature", ".", "stop", "orig", "=", "constants", ".", "always_return_list", "constants", ".", "always_return_list", "=", "True", "try", ":", "name", "=", "feature", "[", "name_field", "]", "[", "0", "]", "except", "KeyError", ":", "name", "=", "\".\"", "constants", ".", "always_return_list", "=", "orig", "score", "=", "feature", ".", "score", "if", "score", "==", "'.'", ":", "score", "=", "'0'", "strand", "=", 
"feature", ".", "strand", "itemRgb", "=", "color", "blockCount", "=", "len", "(", "exons", ")", "blockSizes", "=", "[", "len", "(", "i", ")", "for", "i", "in", "exons", "]", "blockStarts", "=", "[", "i", ".", "start", "-", "1", "-", "chromStart", "for", "i", "in", "exons", "]", "if", "thick_featuretype", ":", "thick", "=", "list", "(", "self", ".", "children", "(", "feature", ",", "featuretype", "=", "thick_featuretype", ",", "order_by", "=", "'start'", ")", ")", "if", "len", "(", "thick", ")", "==", "0", ":", "thickStart", "=", "feature", ".", "start", "thickEnd", "=", "feature", ".", "stop", "else", ":", "thickStart", "=", "thick", "[", "0", "]", ".", "start", "-", "1", "# BED 0-based coords", "thickEnd", "=", "thick", "[", "-", "1", "]", ".", "stop", "if", "thin_featuretype", ":", "thin", "=", "list", "(", "self", ".", "children", "(", "feature", ",", "featuretype", "=", "thin_featuretype", ",", "order_by", "=", "'start'", ")", ")", "if", "len", "(", "thin", ")", "==", "0", ":", "thickStart", "=", "feature", ".", "start", "thickEnd", "=", "feature", ".", "stop", "else", ":", "thickStart", "=", "thin", "[", "0", "]", ".", "stop", "thickEnd", "=", "thin", "[", "-", "1", "]", ".", "start", "-", "1", "# BED 0-based coords", "tst", "=", "chromStart", "+", "blockStarts", "[", "-", "1", "]", "+", "blockSizes", "[", "-", "1", "]", "assert", "tst", "==", "chromEnd", ",", "\"tst=%s; chromEnd=%s\"", "%", "(", "tst", ",", "chromEnd", ")", "fields", "=", "[", "chrom", ",", "chromStart", ",", "chromEnd", ",", "name", ",", "score", ",", "strand", ",", "thickStart", ",", "thickEnd", ",", "itemRgb", ",", "blockCount", ",", "','", ".", "join", "(", "map", "(", "str", ",", "blockSizes", ")", ")", ",", "','", ".", "join", "(", "map", "(", "str", ",", "blockStarts", ")", ")", "]", "return", "'\\t'", ".", "join", "(", "map", "(", "str", ",", "fields", ")", ")" ]
Converts `feature` into a BED12 format. GFF and GTF files do not necessarily define genes consistently, so this method provides flexiblity in specifying what to call a "transcript". Parameters ---------- feature : str or Feature instance In most cases, this feature should be a transcript rather than a gene. block_featuretype : str or list Which featuretype to use as the exons. These are represented as blocks in the BED12 format. Typically 'exon'. Use the `thick_featuretype` and `thin_featuretype` arguments to control the display of CDS as thicker blocks and UTRs as thinner blocks. Note that the features for `thick` or `thin` are *not* automatically included in the blocks; if you do want them included, then those featuretypes should be added to this `block_features` list. If no child features of type `block_featuretype` are found, then the full `feature` is returned in BED12 format as if it had a single exon. thick_featuretype : str or list Child featuretype(s) to use in order to determine the boundaries of the "thick" blocks. In BED12 format, these represent coding sequences; typically this would be set to "CDS". This argument is mutually exclusive with `thin_featuretype`. Specifically, the BED12 thickStart will be the start coord of the first `thick` item and the thickEnd will be the stop coord of the last `thick` item. thin_featuretype : str or list Child featuretype(s) to use in order to determine the boundaries of the "thin" blocks. In BED12 format, these represent untranslated regions. Typically "utr" or ['three_prime_UTR', 'five_prime_UTR']. Mutually exclusive with `thick_featuretype`. Specifically, the BED12 thickStart field will be the stop coord of the first `thin` item and the thickEnd field will be the start coord of the last `thin` item. name_field : str Which attribute of `feature` to use as the feature's name. If this field is not present, a "." placeholder will be used instead. 
color : None or str If None, then use black (0,0,0) as the RGB color; otherwise this should be a comma-separated string of R,G,B values each of which are integers in the range 0-255.
[ "Converts", "feature", "into", "a", "BED12", "format", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/interface.py#L1154-L1299
train
16,939
daler/gffutils
gffutils/iterators.py
DataIterator
def DataIterator(data, checklines=10, transform=None, force_dialect_check=False, from_string=False, **kwargs): """ Iterate over features, no matter how they are provided. Parameters ---------- data : str, iterable of Feature objs, FeatureDB `data` can be a string (filename, URL, or contents of a file, if from_string=True), any arbitrary iterable of features, or a FeatureDB (in which case its all_features() method will be called). checklines : int Number of lines to check in order to infer a dialect. transform : None or callable If not None, `transform` should accept a Feature object as its only argument and return either a (possibly modified) Feature object or a value that evaluates to False. If the return value is False, the feature will be skipped. force_dialect_check : bool If True, check the dialect of every feature. Thorough, but can be slow. from_string : bool If True, `data` should be interpreted as the contents of a file rather than the filename itself. dialect : None or dict Provide the dialect, which will override auto-detected dialects. If provided, you should probably also use `force_dialect_check=False` and `checklines=0` but this is not enforced. """ _kwargs = dict(data=data, checklines=checklines, transform=transform, force_dialect_check=force_dialect_check, **kwargs) if isinstance(data, six.string_types): if from_string: return _StringIterator(**_kwargs) else: if os.path.exists(data): return _FileIterator(**_kwargs) elif is_url(data): return _UrlIterator(**_kwargs) elif isinstance(data, FeatureDB): _kwargs['data'] = data.all_features() return _FeatureIterator(**_kwargs) else: return _FeatureIterator(**_kwargs)
python
def DataIterator(data, checklines=10, transform=None, force_dialect_check=False, from_string=False, **kwargs): """ Iterate over features, no matter how they are provided. Parameters ---------- data : str, iterable of Feature objs, FeatureDB `data` can be a string (filename, URL, or contents of a file, if from_string=True), any arbitrary iterable of features, or a FeatureDB (in which case its all_features() method will be called). checklines : int Number of lines to check in order to infer a dialect. transform : None or callable If not None, `transform` should accept a Feature object as its only argument and return either a (possibly modified) Feature object or a value that evaluates to False. If the return value is False, the feature will be skipped. force_dialect_check : bool If True, check the dialect of every feature. Thorough, but can be slow. from_string : bool If True, `data` should be interpreted as the contents of a file rather than the filename itself. dialect : None or dict Provide the dialect, which will override auto-detected dialects. If provided, you should probably also use `force_dialect_check=False` and `checklines=0` but this is not enforced. """ _kwargs = dict(data=data, checklines=checklines, transform=transform, force_dialect_check=force_dialect_check, **kwargs) if isinstance(data, six.string_types): if from_string: return _StringIterator(**_kwargs) else: if os.path.exists(data): return _FileIterator(**_kwargs) elif is_url(data): return _UrlIterator(**_kwargs) elif isinstance(data, FeatureDB): _kwargs['data'] = data.all_features() return _FeatureIterator(**_kwargs) else: return _FeatureIterator(**_kwargs)
[ "def", "DataIterator", "(", "data", ",", "checklines", "=", "10", ",", "transform", "=", "None", ",", "force_dialect_check", "=", "False", ",", "from_string", "=", "False", ",", "*", "*", "kwargs", ")", ":", "_kwargs", "=", "dict", "(", "data", "=", "data", ",", "checklines", "=", "checklines", ",", "transform", "=", "transform", ",", "force_dialect_check", "=", "force_dialect_check", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "data", ",", "six", ".", "string_types", ")", ":", "if", "from_string", ":", "return", "_StringIterator", "(", "*", "*", "_kwargs", ")", "else", ":", "if", "os", ".", "path", ".", "exists", "(", "data", ")", ":", "return", "_FileIterator", "(", "*", "*", "_kwargs", ")", "elif", "is_url", "(", "data", ")", ":", "return", "_UrlIterator", "(", "*", "*", "_kwargs", ")", "elif", "isinstance", "(", "data", ",", "FeatureDB", ")", ":", "_kwargs", "[", "'data'", "]", "=", "data", ".", "all_features", "(", ")", "return", "_FeatureIterator", "(", "*", "*", "_kwargs", ")", "else", ":", "return", "_FeatureIterator", "(", "*", "*", "_kwargs", ")" ]
Iterate over features, no matter how they are provided. Parameters ---------- data : str, iterable of Feature objs, FeatureDB `data` can be a string (filename, URL, or contents of a file, if from_string=True), any arbitrary iterable of features, or a FeatureDB (in which case its all_features() method will be called). checklines : int Number of lines to check in order to infer a dialect. transform : None or callable If not None, `transform` should accept a Feature object as its only argument and return either a (possibly modified) Feature object or a value that evaluates to False. If the return value is False, the feature will be skipped. force_dialect_check : bool If True, check the dialect of every feature. Thorough, but can be slow. from_string : bool If True, `data` should be interpreted as the contents of a file rather than the filename itself. dialect : None or dict Provide the dialect, which will override auto-detected dialects. If provided, you should probably also use `force_dialect_check=False` and `checklines=0` but this is not enforced.
[ "Iterate", "over", "features", "no", "matter", "how", "they", "are", "provided", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/iterators.py#L229-L279
train
16,940
daler/gffutils
gffutils/inspect.py
inspect
def inspect(data, look_for=['featuretype', 'chrom', 'attribute_keys', 'feature_count'], limit=None, verbose=True): """ Inspect a GFF or GTF data source. This function is useful for figuring out the different featuretypes found in a file (for potential removal before creating a FeatureDB). Returns a dictionary with a key for each item in `look_for` and a corresponding value that is a dictionary of how many of each unique item were found. There will always be a `feature_count` key, indicating how many features were looked at (if `limit` is provided, then `feature_count` will be the same as `limit`). For example, if `look_for` is ['chrom', 'featuretype'], then the result will be a dictionary like:: { 'chrom': { 'chr1': 500, 'chr2': 435, 'chr3': 200, ... ... }. 'featuretype': { 'gene': 150, 'exon': 324, ... }, 'feature_count': 5000 } Parameters ---------- data : str, FeatureDB instance, or iterator of Features If `data` is a string, assume it's a GFF or GTF filename. If it's a FeatureDB instance, then its `all_features()` method will be automatically called. Otherwise, assume it's an iterable of Feature objects. look_for : list List of things to keep track of. Options are: - any attribute of a Feature object, such as chrom, source, start, stop, strand. - "attribute_keys", which will look at all the individual attribute keys of each feature limit : int Number of features to look at. Default is no limit. verbose : bool Report how many features have been processed. 
Returns ------- dict """ results = {} obj_attrs = [] for i in look_for: if i not in ['attribute_keys', 'feature_count']: obj_attrs.append(i) results[i] = Counter() attr_keys = 'attribute_keys' in look_for d = iterators.DataIterator(data) feature_count = 0 for f in d: if verbose: sys.stderr.write('\r%s features inspected' % feature_count) sys.stderr.flush() for obj_attr in obj_attrs: results[obj_attr].update([getattr(f, obj_attr)]) if attr_keys: results['attribute_keys'].update(f.attributes.keys()) feature_count += 1 if limit and feature_count == limit: break new_results = {} for k, v in results.items(): new_results[k] = dict(v) new_results['feature_count'] = feature_count return new_results
python
def inspect(data, look_for=['featuretype', 'chrom', 'attribute_keys', 'feature_count'], limit=None, verbose=True): """ Inspect a GFF or GTF data source. This function is useful for figuring out the different featuretypes found in a file (for potential removal before creating a FeatureDB). Returns a dictionary with a key for each item in `look_for` and a corresponding value that is a dictionary of how many of each unique item were found. There will always be a `feature_count` key, indicating how many features were looked at (if `limit` is provided, then `feature_count` will be the same as `limit`). For example, if `look_for` is ['chrom', 'featuretype'], then the result will be a dictionary like:: { 'chrom': { 'chr1': 500, 'chr2': 435, 'chr3': 200, ... ... }. 'featuretype': { 'gene': 150, 'exon': 324, ... }, 'feature_count': 5000 } Parameters ---------- data : str, FeatureDB instance, or iterator of Features If `data` is a string, assume it's a GFF or GTF filename. If it's a FeatureDB instance, then its `all_features()` method will be automatically called. Otherwise, assume it's an iterable of Feature objects. look_for : list List of things to keep track of. Options are: - any attribute of a Feature object, such as chrom, source, start, stop, strand. - "attribute_keys", which will look at all the individual attribute keys of each feature limit : int Number of features to look at. Default is no limit. verbose : bool Report how many features have been processed. 
Returns ------- dict """ results = {} obj_attrs = [] for i in look_for: if i not in ['attribute_keys', 'feature_count']: obj_attrs.append(i) results[i] = Counter() attr_keys = 'attribute_keys' in look_for d = iterators.DataIterator(data) feature_count = 0 for f in d: if verbose: sys.stderr.write('\r%s features inspected' % feature_count) sys.stderr.flush() for obj_attr in obj_attrs: results[obj_attr].update([getattr(f, obj_attr)]) if attr_keys: results['attribute_keys'].update(f.attributes.keys()) feature_count += 1 if limit and feature_count == limit: break new_results = {} for k, v in results.items(): new_results[k] = dict(v) new_results['feature_count'] = feature_count return new_results
[ "def", "inspect", "(", "data", ",", "look_for", "=", "[", "'featuretype'", ",", "'chrom'", ",", "'attribute_keys'", ",", "'feature_count'", "]", ",", "limit", "=", "None", ",", "verbose", "=", "True", ")", ":", "results", "=", "{", "}", "obj_attrs", "=", "[", "]", "for", "i", "in", "look_for", ":", "if", "i", "not", "in", "[", "'attribute_keys'", ",", "'feature_count'", "]", ":", "obj_attrs", ".", "append", "(", "i", ")", "results", "[", "i", "]", "=", "Counter", "(", ")", "attr_keys", "=", "'attribute_keys'", "in", "look_for", "d", "=", "iterators", ".", "DataIterator", "(", "data", ")", "feature_count", "=", "0", "for", "f", "in", "d", ":", "if", "verbose", ":", "sys", ".", "stderr", ".", "write", "(", "'\\r%s features inspected'", "%", "feature_count", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "for", "obj_attr", "in", "obj_attrs", ":", "results", "[", "obj_attr", "]", ".", "update", "(", "[", "getattr", "(", "f", ",", "obj_attr", ")", "]", ")", "if", "attr_keys", ":", "results", "[", "'attribute_keys'", "]", ".", "update", "(", "f", ".", "attributes", ".", "keys", "(", ")", ")", "feature_count", "+=", "1", "if", "limit", "and", "feature_count", "==", "limit", ":", "break", "new_results", "=", "{", "}", "for", "k", ",", "v", "in", "results", ".", "items", "(", ")", ":", "new_results", "[", "k", "]", "=", "dict", "(", "v", ")", "new_results", "[", "'feature_count'", "]", "=", "feature_count", "return", "new_results" ]
Inspect a GFF or GTF data source. This function is useful for figuring out the different featuretypes found in a file (for potential removal before creating a FeatureDB). Returns a dictionary with a key for each item in `look_for` and a corresponding value that is a dictionary of how many of each unique item were found. There will always be a `feature_count` key, indicating how many features were looked at (if `limit` is provided, then `feature_count` will be the same as `limit`). For example, if `look_for` is ['chrom', 'featuretype'], then the result will be a dictionary like:: { 'chrom': { 'chr1': 500, 'chr2': 435, 'chr3': 200, ... ... }. 'featuretype': { 'gene': 150, 'exon': 324, ... }, 'feature_count': 5000 } Parameters ---------- data : str, FeatureDB instance, or iterator of Features If `data` is a string, assume it's a GFF or GTF filename. If it's a FeatureDB instance, then its `all_features()` method will be automatically called. Otherwise, assume it's an iterable of Feature objects. look_for : list List of things to keep track of. Options are: - any attribute of a Feature object, such as chrom, source, start, stop, strand. - "attribute_keys", which will look at all the individual attribute keys of each feature limit : int Number of features to look at. Default is no limit. verbose : bool Report how many features have been processed. Returns ------- dict
[ "Inspect", "a", "GFF", "or", "GTF", "data", "source", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/inspect.py#L7-L105
train
16,941
daler/gffutils
gffutils/scripts/gffutils-flybase-convert.py
clean_gff
def clean_gff(gff, cleaned, add_chr=False, chroms_to_ignore=None, featuretypes_to_ignore=None): """ Cleans a GFF file by removing features on unwanted chromosomes and of unwanted featuretypes. Optionally adds "chr" to chrom names. """ logger.info("Cleaning GFF") chroms_to_ignore = chroms_to_ignore or [] featuretypes_to_ignore = featuretypes_to_ignore or [] with open(cleaned, 'w') as fout: for i in gffutils.iterators.DataIterator(gff): if add_chr: i.chrom = "chr" + i.chrom if i.chrom in chroms_to_ignore: continue if i.featuretype in featuretypes_to_ignore: continue fout.write(str(i) + '\n') return cleaned
python
def clean_gff(gff, cleaned, add_chr=False, chroms_to_ignore=None, featuretypes_to_ignore=None): """ Cleans a GFF file by removing features on unwanted chromosomes and of unwanted featuretypes. Optionally adds "chr" to chrom names. """ logger.info("Cleaning GFF") chroms_to_ignore = chroms_to_ignore or [] featuretypes_to_ignore = featuretypes_to_ignore or [] with open(cleaned, 'w') as fout: for i in gffutils.iterators.DataIterator(gff): if add_chr: i.chrom = "chr" + i.chrom if i.chrom in chroms_to_ignore: continue if i.featuretype in featuretypes_to_ignore: continue fout.write(str(i) + '\n') return cleaned
[ "def", "clean_gff", "(", "gff", ",", "cleaned", ",", "add_chr", "=", "False", ",", "chroms_to_ignore", "=", "None", ",", "featuretypes_to_ignore", "=", "None", ")", ":", "logger", ".", "info", "(", "\"Cleaning GFF\"", ")", "chroms_to_ignore", "=", "chroms_to_ignore", "or", "[", "]", "featuretypes_to_ignore", "=", "featuretypes_to_ignore", "or", "[", "]", "with", "open", "(", "cleaned", ",", "'w'", ")", "as", "fout", ":", "for", "i", "in", "gffutils", ".", "iterators", ".", "DataIterator", "(", "gff", ")", ":", "if", "add_chr", ":", "i", ".", "chrom", "=", "\"chr\"", "+", "i", ".", "chrom", "if", "i", ".", "chrom", "in", "chroms_to_ignore", ":", "continue", "if", "i", ".", "featuretype", "in", "featuretypes_to_ignore", ":", "continue", "fout", ".", "write", "(", "str", "(", "i", ")", "+", "'\\n'", ")", "return", "cleaned" ]
Cleans a GFF file by removing features on unwanted chromosomes and of unwanted featuretypes. Optionally adds "chr" to chrom names.
[ "Cleans", "a", "GFF", "file", "by", "removing", "features", "on", "unwanted", "chromosomes", "and", "of", "unwanted", "featuretypes", ".", "Optionally", "adds", "chr", "to", "chrom", "names", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/scripts/gffutils-flybase-convert.py#L25-L45
train
16,942
daler/gffutils
gffutils/feature.py
feature_from_line
def feature_from_line(line, dialect=None, strict=True, keep_order=False): """ Given a line from a GFF file, return a Feature object Parameters ---------- line : string strict : bool If True (default), assume `line` is a single, tab-delimited string that has at least 9 fields. If False, then the input can have a more flexible format, useful for creating single ad hoc features or for writing tests. In this case, `line` can be a multi-line string (as long as it has a single non-empty line), and, as long as there are only 9 fields (standard GFF/GTF), then it's OK to use spaces instead of tabs to separate fields in `line`. But if >9 fields are to be used, then tabs must be used. keep_order, dialect Passed directly to :class:`Feature`; see docstring for that class for description Returns ------- A new :class:`Feature` object. """ if not strict: lines = line.splitlines(False) _lines = [] for i in lines: i = i.strip() if len(i) > 0: _lines.append(i) assert len(_lines) == 1, _lines line = _lines[0] if '\t' in line: fields = line.rstrip('\n\r').split('\t') else: fields = line.rstrip('\n\r').split(None, 8) else: fields = line.rstrip('\n\r').split('\t') try: attr_string = fields[8] except IndexError: attr_string = "" attrs, _dialect = parser._split_keyvals(attr_string, dialect=dialect) d = dict(list(zip(constants._gffkeys, fields))) d['attributes'] = attrs d['extra'] = fields[9:] d['keep_order'] = keep_order if dialect is None: dialect = _dialect return Feature(dialect=dialect, **d)
python
def feature_from_line(line, dialect=None, strict=True, keep_order=False): """ Given a line from a GFF file, return a Feature object Parameters ---------- line : string strict : bool If True (default), assume `line` is a single, tab-delimited string that has at least 9 fields. If False, then the input can have a more flexible format, useful for creating single ad hoc features or for writing tests. In this case, `line` can be a multi-line string (as long as it has a single non-empty line), and, as long as there are only 9 fields (standard GFF/GTF), then it's OK to use spaces instead of tabs to separate fields in `line`. But if >9 fields are to be used, then tabs must be used. keep_order, dialect Passed directly to :class:`Feature`; see docstring for that class for description Returns ------- A new :class:`Feature` object. """ if not strict: lines = line.splitlines(False) _lines = [] for i in lines: i = i.strip() if len(i) > 0: _lines.append(i) assert len(_lines) == 1, _lines line = _lines[0] if '\t' in line: fields = line.rstrip('\n\r').split('\t') else: fields = line.rstrip('\n\r').split(None, 8) else: fields = line.rstrip('\n\r').split('\t') try: attr_string = fields[8] except IndexError: attr_string = "" attrs, _dialect = parser._split_keyvals(attr_string, dialect=dialect) d = dict(list(zip(constants._gffkeys, fields))) d['attributes'] = attrs d['extra'] = fields[9:] d['keep_order'] = keep_order if dialect is None: dialect = _dialect return Feature(dialect=dialect, **d)
[ "def", "feature_from_line", "(", "line", ",", "dialect", "=", "None", ",", "strict", "=", "True", ",", "keep_order", "=", "False", ")", ":", "if", "not", "strict", ":", "lines", "=", "line", ".", "splitlines", "(", "False", ")", "_lines", "=", "[", "]", "for", "i", "in", "lines", ":", "i", "=", "i", ".", "strip", "(", ")", "if", "len", "(", "i", ")", ">", "0", ":", "_lines", ".", "append", "(", "i", ")", "assert", "len", "(", "_lines", ")", "==", "1", ",", "_lines", "line", "=", "_lines", "[", "0", "]", "if", "'\\t'", "in", "line", ":", "fields", "=", "line", ".", "rstrip", "(", "'\\n\\r'", ")", ".", "split", "(", "'\\t'", ")", "else", ":", "fields", "=", "line", ".", "rstrip", "(", "'\\n\\r'", ")", ".", "split", "(", "None", ",", "8", ")", "else", ":", "fields", "=", "line", ".", "rstrip", "(", "'\\n\\r'", ")", ".", "split", "(", "'\\t'", ")", "try", ":", "attr_string", "=", "fields", "[", "8", "]", "except", "IndexError", ":", "attr_string", "=", "\"\"", "attrs", ",", "_dialect", "=", "parser", ".", "_split_keyvals", "(", "attr_string", ",", "dialect", "=", "dialect", ")", "d", "=", "dict", "(", "list", "(", "zip", "(", "constants", ".", "_gffkeys", ",", "fields", ")", ")", ")", "d", "[", "'attributes'", "]", "=", "attrs", "d", "[", "'extra'", "]", "=", "fields", "[", "9", ":", "]", "d", "[", "'keep_order'", "]", "=", "keep_order", "if", "dialect", "is", "None", ":", "dialect", "=", "_dialect", "return", "Feature", "(", "dialect", "=", "dialect", ",", "*", "*", "d", ")" ]
Given a line from a GFF file, return a Feature object Parameters ---------- line : string strict : bool If True (default), assume `line` is a single, tab-delimited string that has at least 9 fields. If False, then the input can have a more flexible format, useful for creating single ad hoc features or for writing tests. In this case, `line` can be a multi-line string (as long as it has a single non-empty line), and, as long as there are only 9 fields (standard GFF/GTF), then it's OK to use spaces instead of tabs to separate fields in `line`. But if >9 fields are to be used, then tabs must be used. keep_order, dialect Passed directly to :class:`Feature`; see docstring for that class for description Returns ------- A new :class:`Feature` object.
[ "Given", "a", "line", "from", "a", "GFF", "file", "return", "a", "Feature", "object" ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/feature.py#L356-L411
train
16,943
daler/gffutils
gffutils/feature.py
Feature.calc_bin
def calc_bin(self, _bin=None): """ Calculate the smallest UCSC genomic bin that will contain this feature. """ if _bin is None: try: _bin = bins.bins(self.start, self.end, one=True) except TypeError: _bin = None return _bin
python
def calc_bin(self, _bin=None): """ Calculate the smallest UCSC genomic bin that will contain this feature. """ if _bin is None: try: _bin = bins.bins(self.start, self.end, one=True) except TypeError: _bin = None return _bin
[ "def", "calc_bin", "(", "self", ",", "_bin", "=", "None", ")", ":", "if", "_bin", "is", "None", ":", "try", ":", "_bin", "=", "bins", ".", "bins", "(", "self", ".", "start", ",", "self", ".", "end", ",", "one", "=", "True", ")", "except", "TypeError", ":", "_bin", "=", "None", "return", "_bin" ]
Calculate the smallest UCSC genomic bin that will contain this feature.
[ "Calculate", "the", "smallest", "UCSC", "genomic", "bin", "that", "will", "contain", "this", "feature", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/feature.py#L182-L191
train
16,944
daler/gffutils
gffutils/feature.py
Feature.astuple
def astuple(self, encoding=None): """ Return a tuple suitable for import into a database. Attributes field and extra field jsonified into strings. The order of fields is such that they can be supplied as arguments for the query defined in :attr:`gffutils.constants._INSERT`. If `encoding` is not None, then convert string fields to unicode using the provided encoding. Returns ------- Tuple """ if not encoding: return ( self.id, self.seqid, self.source, self.featuretype, self.start, self.end, self.score, self.strand, self.frame, helpers._jsonify(self.attributes), helpers._jsonify(self.extra), self.calc_bin() ) return ( self.id.decode(encoding), self.seqid.decode(encoding), self.source.decode(encoding), self.featuretype.decode(encoding), self.start, self.end, self.score.decode(encoding), self.strand.decode(encoding), self.frame.decode(encoding), helpers._jsonify(self.attributes).decode(encoding), helpers._jsonify(self.extra).decode(encoding), self.calc_bin() )
python
def astuple(self, encoding=None): """ Return a tuple suitable for import into a database. Attributes field and extra field jsonified into strings. The order of fields is such that they can be supplied as arguments for the query defined in :attr:`gffutils.constants._INSERT`. If `encoding` is not None, then convert string fields to unicode using the provided encoding. Returns ------- Tuple """ if not encoding: return ( self.id, self.seqid, self.source, self.featuretype, self.start, self.end, self.score, self.strand, self.frame, helpers._jsonify(self.attributes), helpers._jsonify(self.extra), self.calc_bin() ) return ( self.id.decode(encoding), self.seqid.decode(encoding), self.source.decode(encoding), self.featuretype.decode(encoding), self.start, self.end, self.score.decode(encoding), self.strand.decode(encoding), self.frame.decode(encoding), helpers._jsonify(self.attributes).decode(encoding), helpers._jsonify(self.extra).decode(encoding), self.calc_bin() )
[ "def", "astuple", "(", "self", ",", "encoding", "=", "None", ")", ":", "if", "not", "encoding", ":", "return", "(", "self", ".", "id", ",", "self", ".", "seqid", ",", "self", ".", "source", ",", "self", ".", "featuretype", ",", "self", ".", "start", ",", "self", ".", "end", ",", "self", ".", "score", ",", "self", ".", "strand", ",", "self", ".", "frame", ",", "helpers", ".", "_jsonify", "(", "self", ".", "attributes", ")", ",", "helpers", ".", "_jsonify", "(", "self", ".", "extra", ")", ",", "self", ".", "calc_bin", "(", ")", ")", "return", "(", "self", ".", "id", ".", "decode", "(", "encoding", ")", ",", "self", ".", "seqid", ".", "decode", "(", "encoding", ")", ",", "self", ".", "source", ".", "decode", "(", "encoding", ")", ",", "self", ".", "featuretype", ".", "decode", "(", "encoding", ")", ",", "self", ".", "start", ",", "self", ".", "end", ",", "self", ".", "score", ".", "decode", "(", "encoding", ")", ",", "self", ".", "strand", ".", "decode", "(", "encoding", ")", ",", "self", ".", "frame", ".", "decode", "(", "encoding", ")", ",", "helpers", ".", "_jsonify", "(", "self", ".", "attributes", ")", ".", "decode", "(", "encoding", ")", ",", "helpers", ".", "_jsonify", "(", "self", ".", "extra", ")", ".", "decode", "(", "encoding", ")", ",", "self", ".", "calc_bin", "(", ")", ")" ]
Return a tuple suitable for import into a database. Attributes field and extra field jsonified into strings. The order of fields is such that they can be supplied as arguments for the query defined in :attr:`gffutils.constants._INSERT`. If `encoding` is not None, then convert string fields to unicode using the provided encoding. Returns ------- Tuple
[ "Return", "a", "tuple", "suitable", "for", "import", "into", "a", "database", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/feature.py#L293-L322
train
16,945
daler/gffutils
gffutils/feature.py
Feature.sequence
def sequence(self, fasta, use_strand=True): """ Retrieves the sequence of this feature as a string. Uses the pyfaidx package. Parameters ---------- fasta : str If str, then it's a FASTA-format filename; otherwise assume it's a pyfaidx.Fasta object. use_strand : bool If True (default), the sequence returned will be reverse-complemented for minus-strand features. Returns ------- string """ if isinstance(fasta, six.string_types): fasta = Fasta(fasta, as_raw=False) # recall GTF/GFF is 1-based closed; pyfaidx uses Python slice notation # and is therefore 0-based half-open. seq = fasta[self.chrom][self.start-1:self.stop] if use_strand and self.strand == '-': seq = seq.reverse.complement return seq.seq
python
def sequence(self, fasta, use_strand=True): """ Retrieves the sequence of this feature as a string. Uses the pyfaidx package. Parameters ---------- fasta : str If str, then it's a FASTA-format filename; otherwise assume it's a pyfaidx.Fasta object. use_strand : bool If True (default), the sequence returned will be reverse-complemented for minus-strand features. Returns ------- string """ if isinstance(fasta, six.string_types): fasta = Fasta(fasta, as_raw=False) # recall GTF/GFF is 1-based closed; pyfaidx uses Python slice notation # and is therefore 0-based half-open. seq = fasta[self.chrom][self.start-1:self.stop] if use_strand and self.strand == '-': seq = seq.reverse.complement return seq.seq
[ "def", "sequence", "(", "self", ",", "fasta", ",", "use_strand", "=", "True", ")", ":", "if", "isinstance", "(", "fasta", ",", "six", ".", "string_types", ")", ":", "fasta", "=", "Fasta", "(", "fasta", ",", "as_raw", "=", "False", ")", "# recall GTF/GFF is 1-based closed; pyfaidx uses Python slice notation", "# and is therefore 0-based half-open.", "seq", "=", "fasta", "[", "self", ".", "chrom", "]", "[", "self", ".", "start", "-", "1", ":", "self", ".", "stop", "]", "if", "use_strand", "and", "self", ".", "strand", "==", "'-'", ":", "seq", "=", "seq", ".", "reverse", ".", "complement", "return", "seq", ".", "seq" ]
Retrieves the sequence of this feature as a string. Uses the pyfaidx package. Parameters ---------- fasta : str If str, then it's a FASTA-format filename; otherwise assume it's a pyfaidx.Fasta object. use_strand : bool If True (default), the sequence returned will be reverse-complemented for minus-strand features. Returns ------- string
[ "Retrieves", "the", "sequence", "of", "this", "feature", "as", "a", "string", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/feature.py#L324-L353
train
16,946
daler/gffutils
gffutils/helpers.py
infer_dialect
def infer_dialect(attributes): """ Infer the dialect based on the attributes. Parameters ---------- attributes : str or iterable A single attributes string from a GTF or GFF line, or an iterable of such strings. Returns ------- Dictionary representing the inferred dialect """ if isinstance(attributes, six.string_types): attributes = [attributes] dialects = [parser._split_keyvals(i)[1] for i in attributes] return _choose_dialect(dialects)
python
def infer_dialect(attributes): """ Infer the dialect based on the attributes. Parameters ---------- attributes : str or iterable A single attributes string from a GTF or GFF line, or an iterable of such strings. Returns ------- Dictionary representing the inferred dialect """ if isinstance(attributes, six.string_types): attributes = [attributes] dialects = [parser._split_keyvals(i)[1] for i in attributes] return _choose_dialect(dialects)
[ "def", "infer_dialect", "(", "attributes", ")", ":", "if", "isinstance", "(", "attributes", ",", "six", ".", "string_types", ")", ":", "attributes", "=", "[", "attributes", "]", "dialects", "=", "[", "parser", ".", "_split_keyvals", "(", "i", ")", "[", "1", "]", "for", "i", "in", "attributes", "]", "return", "_choose_dialect", "(", "dialects", ")" ]
Infer the dialect based on the attributes. Parameters ---------- attributes : str or iterable A single attributes string from a GTF or GFF line, or an iterable of such strings. Returns ------- Dictionary representing the inferred dialect
[ "Infer", "the", "dialect", "based", "on", "the", "attributes", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L25-L42
train
16,947
daler/gffutils
gffutils/helpers.py
_choose_dialect
def _choose_dialect(dialects): """ Given a list of dialects, choose the one to use as the "canonical" version. If `dialects` is an empty list, then use the default GFF3 dialect Parameters ---------- dialects : iterable iterable of dialect dictionaries Returns ------- dict """ # NOTE: can use helpers.dialect_compare if you need to make this more # complex.... # For now, this function favors the first dialect, and then appends the # order of additional fields seen in the attributes of other lines giving # priority to dialects that come first in the iterable. if len(dialects) == 0: return constants.dialect final_order = [] for dialect in dialects: for o in dialect['order']: if o not in final_order: final_order.append(o) dialect = dialects[0] dialect['order'] = final_order return dialect
python
def _choose_dialect(dialects): """ Given a list of dialects, choose the one to use as the "canonical" version. If `dialects` is an empty list, then use the default GFF3 dialect Parameters ---------- dialects : iterable iterable of dialect dictionaries Returns ------- dict """ # NOTE: can use helpers.dialect_compare if you need to make this more # complex.... # For now, this function favors the first dialect, and then appends the # order of additional fields seen in the attributes of other lines giving # priority to dialects that come first in the iterable. if len(dialects) == 0: return constants.dialect final_order = [] for dialect in dialects: for o in dialect['order']: if o not in final_order: final_order.append(o) dialect = dialects[0] dialect['order'] = final_order return dialect
[ "def", "_choose_dialect", "(", "dialects", ")", ":", "# NOTE: can use helpers.dialect_compare if you need to make this more", "# complex....", "# For now, this function favors the first dialect, and then appends the", "# order of additional fields seen in the attributes of other lines giving", "# priority to dialects that come first in the iterable.", "if", "len", "(", "dialects", ")", "==", "0", ":", "return", "constants", ".", "dialect", "final_order", "=", "[", "]", "for", "dialect", "in", "dialects", ":", "for", "o", "in", "dialect", "[", "'order'", "]", ":", "if", "o", "not", "in", "final_order", ":", "final_order", ".", "append", "(", "o", ")", "dialect", "=", "dialects", "[", "0", "]", "dialect", "[", "'order'", "]", "=", "final_order", "return", "dialect" ]
Given a list of dialects, choose the one to use as the "canonical" version. If `dialects` is an empty list, then use the default GFF3 dialect Parameters ---------- dialects : iterable iterable of dialect dictionaries Returns ------- dict
[ "Given", "a", "list", "of", "dialects", "choose", "the", "one", "to", "use", "as", "the", "canonical", "version", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L45-L75
train
16,948
daler/gffutils
gffutils/helpers.py
_bin_from_dict
def _bin_from_dict(d): """ Given a dictionary yielded by the parser, return the genomic "UCSC" bin """ try: start = int(d['start']) end = int(d['end']) return bins.bins(start, end, one=True) # e.g., if "." except ValueError: return None
python
def _bin_from_dict(d): """ Given a dictionary yielded by the parser, return the genomic "UCSC" bin """ try: start = int(d['start']) end = int(d['end']) return bins.bins(start, end, one=True) # e.g., if "." except ValueError: return None
[ "def", "_bin_from_dict", "(", "d", ")", ":", "try", ":", "start", "=", "int", "(", "d", "[", "'start'", "]", ")", "end", "=", "int", "(", "d", "[", "'end'", "]", ")", "return", "bins", ".", "bins", "(", "start", ",", "end", ",", "one", "=", "True", ")", "# e.g., if \".\"", "except", "ValueError", ":", "return", "None" ]
Given a dictionary yielded by the parser, return the genomic "UCSC" bin
[ "Given", "a", "dictionary", "yielded", "by", "the", "parser", "return", "the", "genomic", "UCSC", "bin" ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L242-L253
train
16,949
daler/gffutils
gffutils/helpers.py
_jsonify
def _jsonify(x): """Use most compact form of JSON""" if isinstance(x, dict_class): return json.dumps(x._d, separators=(',', ':')) return json.dumps(x, separators=(',', ':'))
python
def _jsonify(x): """Use most compact form of JSON""" if isinstance(x, dict_class): return json.dumps(x._d, separators=(',', ':')) return json.dumps(x, separators=(',', ':'))
[ "def", "_jsonify", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "dict_class", ")", ":", "return", "json", ".", "dumps", "(", "x", ".", "_d", ",", "separators", "=", "(", "','", ",", "':'", ")", ")", "return", "json", ".", "dumps", "(", "x", ",", "separators", "=", "(", "','", ",", "':'", ")", ")" ]
Use most compact form of JSON
[ "Use", "most", "compact", "form", "of", "JSON" ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L256-L260
train
16,950
daler/gffutils
gffutils/helpers.py
_unjsonify
def _unjsonify(x, isattributes=False): """Convert JSON string to an ordered defaultdict.""" if isattributes: obj = json.loads(x) return dict_class(obj) return json.loads(x)
python
def _unjsonify(x, isattributes=False): """Convert JSON string to an ordered defaultdict.""" if isattributes: obj = json.loads(x) return dict_class(obj) return json.loads(x)
[ "def", "_unjsonify", "(", "x", ",", "isattributes", "=", "False", ")", ":", "if", "isattributes", ":", "obj", "=", "json", ".", "loads", "(", "x", ")", "return", "dict_class", "(", "obj", ")", "return", "json", ".", "loads", "(", "x", ")" ]
Convert JSON string to an ordered defaultdict.
[ "Convert", "JSON", "string", "to", "an", "ordered", "defaultdict", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L263-L268
train
16,951
daler/gffutils
gffutils/helpers.py
_feature_to_fields
def _feature_to_fields(f, jsonify=True): """ Convert feature to tuple, for faster sqlite3 import """ x = [] for k in constants._keys: v = getattr(f, k) if jsonify and (k in ('attributes', 'extra')): x.append(_jsonify(v)) else: x.append(v) return tuple(x)
python
def _feature_to_fields(f, jsonify=True): """ Convert feature to tuple, for faster sqlite3 import """ x = [] for k in constants._keys: v = getattr(f, k) if jsonify and (k in ('attributes', 'extra')): x.append(_jsonify(v)) else: x.append(v) return tuple(x)
[ "def", "_feature_to_fields", "(", "f", ",", "jsonify", "=", "True", ")", ":", "x", "=", "[", "]", "for", "k", "in", "constants", ".", "_keys", ":", "v", "=", "getattr", "(", "f", ",", "k", ")", "if", "jsonify", "and", "(", "k", "in", "(", "'attributes'", ",", "'extra'", ")", ")", ":", "x", ".", "append", "(", "_jsonify", "(", "v", ")", ")", "else", ":", "x", ".", "append", "(", "v", ")", "return", "tuple", "(", "x", ")" ]
Convert feature to tuple, for faster sqlite3 import
[ "Convert", "feature", "to", "tuple", "for", "faster", "sqlite3", "import" ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L271-L282
train
16,952
daler/gffutils
gffutils/helpers.py
_dict_to_fields
def _dict_to_fields(d, jsonify=True): """ Convert dict to tuple, for faster sqlite3 import """ x = [] for k in constants._keys: v = d[k] if jsonify and (k in ('attributes', 'extra')): x.append(_jsonify(v)) else: x.append(v) return tuple(x)
python
def _dict_to_fields(d, jsonify=True): """ Convert dict to tuple, for faster sqlite3 import """ x = [] for k in constants._keys: v = d[k] if jsonify and (k in ('attributes', 'extra')): x.append(_jsonify(v)) else: x.append(v) return tuple(x)
[ "def", "_dict_to_fields", "(", "d", ",", "jsonify", "=", "True", ")", ":", "x", "=", "[", "]", "for", "k", "in", "constants", ".", "_keys", ":", "v", "=", "d", "[", "k", "]", "if", "jsonify", "and", "(", "k", "in", "(", "'attributes'", ",", "'extra'", ")", ")", ":", "x", ".", "append", "(", "_jsonify", "(", "v", ")", ")", "else", ":", "x", ".", "append", "(", "v", ")", "return", "tuple", "(", "x", ")" ]
Convert dict to tuple, for faster sqlite3 import
[ "Convert", "dict", "to", "tuple", "for", "faster", "sqlite3", "import" ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L285-L296
train
16,953
daler/gffutils
gffutils/helpers.py
merge_attributes
def merge_attributes(attr1, attr2): """ Merges two attribute dictionaries into a single dictionary. Parameters ---------- `attr1`, `attr2` : dict Returns ------- dict """ new_d = copy.deepcopy(attr1) new_d.update(attr2) #all of attr2 key : values just overwrote attr1, fix it for k, v in new_d.items(): if not isinstance(v, list): new_d[k] = [v] for k, v in six.iteritems(attr1): if k in attr2: if not isinstance(v, list): v = [v] new_d[k].extend(v) return dict((k, sorted(set(v))) for k, v in new_d.items())
python
def merge_attributes(attr1, attr2): """ Merges two attribute dictionaries into a single dictionary. Parameters ---------- `attr1`, `attr2` : dict Returns ------- dict """ new_d = copy.deepcopy(attr1) new_d.update(attr2) #all of attr2 key : values just overwrote attr1, fix it for k, v in new_d.items(): if not isinstance(v, list): new_d[k] = [v] for k, v in six.iteritems(attr1): if k in attr2: if not isinstance(v, list): v = [v] new_d[k].extend(v) return dict((k, sorted(set(v))) for k, v in new_d.items())
[ "def", "merge_attributes", "(", "attr1", ",", "attr2", ")", ":", "new_d", "=", "copy", ".", "deepcopy", "(", "attr1", ")", "new_d", ".", "update", "(", "attr2", ")", "#all of attr2 key : values just overwrote attr1, fix it", "for", "k", ",", "v", "in", "new_d", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "v", ",", "list", ")", ":", "new_d", "[", "k", "]", "=", "[", "v", "]", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "attr1", ")", ":", "if", "k", "in", "attr2", ":", "if", "not", "isinstance", "(", "v", ",", "list", ")", ":", "v", "=", "[", "v", "]", "new_d", "[", "k", "]", ".", "extend", "(", "v", ")", "return", "dict", "(", "(", "k", ",", "sorted", "(", "set", "(", "v", ")", ")", ")", "for", "k", ",", "v", "in", "new_d", ".", "items", "(", ")", ")" ]
Merges two attribute dictionaries into a single dictionary. Parameters ---------- `attr1`, `attr2` : dict Returns ------- dict
[ "Merges", "two", "attribute", "dictionaries", "into", "a", "single", "dictionary", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L307-L333
train
16,954
daler/gffutils
gffutils/helpers.py
dialect_compare
def dialect_compare(dialect1, dialect2): """ Compares two dialects. """ orig = set(dialect1.items()) new = set(dialect2.items()) return dict( added=dict(list(new.difference(orig))), removed=dict(list(orig.difference(new))) )
python
def dialect_compare(dialect1, dialect2): """ Compares two dialects. """ orig = set(dialect1.items()) new = set(dialect2.items()) return dict( added=dict(list(new.difference(orig))), removed=dict(list(orig.difference(new))) )
[ "def", "dialect_compare", "(", "dialect1", ",", "dialect2", ")", ":", "orig", "=", "set", "(", "dialect1", ".", "items", "(", ")", ")", "new", "=", "set", "(", "dialect2", ".", "items", "(", ")", ")", "return", "dict", "(", "added", "=", "dict", "(", "list", "(", "new", ".", "difference", "(", "orig", ")", ")", ")", ",", "removed", "=", "dict", "(", "list", "(", "orig", ".", "difference", "(", "new", ")", ")", ")", ")" ]
Compares two dialects.
[ "Compares", "two", "dialects", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L336-L345
train
16,955
daler/gffutils
gffutils/helpers.py
sanitize_gff_db
def sanitize_gff_db(db, gid_field="gid"): """ Sanitize given GFF db. Returns a sanitized GFF db. Sanitizing means: - Ensuring that start < stop for all features - Standardizing gene units by adding a 'gid' attribute that makes the file grep-able TODO: Do something with negative coordinates? """ def sanitized_iterator(): # Iterate through the database by each gene's records for gene_recs in db.iter_by_parent_childs(): # The gene's ID gene_id = gene_recs[0].id for rec in gene_recs: # Fixup coordinates if necessary if rec.start > rec.stop: rec.start, rec.stop = rec.stop, rec.start # Add a gene id field to each gene's records rec.attributes[gid_field] = [gene_id] yield rec # Return sanitized GFF database sanitized_db = \ gffutils.create_db(sanitized_iterator(), ":memory:", verbose=False) return sanitized_db
python
def sanitize_gff_db(db, gid_field="gid"): """ Sanitize given GFF db. Returns a sanitized GFF db. Sanitizing means: - Ensuring that start < stop for all features - Standardizing gene units by adding a 'gid' attribute that makes the file grep-able TODO: Do something with negative coordinates? """ def sanitized_iterator(): # Iterate through the database by each gene's records for gene_recs in db.iter_by_parent_childs(): # The gene's ID gene_id = gene_recs[0].id for rec in gene_recs: # Fixup coordinates if necessary if rec.start > rec.stop: rec.start, rec.stop = rec.stop, rec.start # Add a gene id field to each gene's records rec.attributes[gid_field] = [gene_id] yield rec # Return sanitized GFF database sanitized_db = \ gffutils.create_db(sanitized_iterator(), ":memory:", verbose=False) return sanitized_db
[ "def", "sanitize_gff_db", "(", "db", ",", "gid_field", "=", "\"gid\"", ")", ":", "def", "sanitized_iterator", "(", ")", ":", "# Iterate through the database by each gene's records", "for", "gene_recs", "in", "db", ".", "iter_by_parent_childs", "(", ")", ":", "# The gene's ID", "gene_id", "=", "gene_recs", "[", "0", "]", ".", "id", "for", "rec", "in", "gene_recs", ":", "# Fixup coordinates if necessary", "if", "rec", ".", "start", ">", "rec", ".", "stop", ":", "rec", ".", "start", ",", "rec", ".", "stop", "=", "rec", ".", "stop", ",", "rec", ".", "start", "# Add a gene id field to each gene's records", "rec", ".", "attributes", "[", "gid_field", "]", "=", "[", "gene_id", "]", "yield", "rec", "# Return sanitized GFF database", "sanitized_db", "=", "gffutils", ".", "create_db", "(", "sanitized_iterator", "(", ")", ",", "\":memory:\"", ",", "verbose", "=", "False", ")", "return", "sanitized_db" ]
Sanitize given GFF db. Returns a sanitized GFF db. Sanitizing means: - Ensuring that start < stop for all features - Standardizing gene units by adding a 'gid' attribute that makes the file grep-able TODO: Do something with negative coordinates?
[ "Sanitize", "given", "GFF", "db", ".", "Returns", "a", "sanitized", "GFF", "db", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L348-L376
train
16,956
daler/gffutils
gffutils/helpers.py
sanitize_gff_file
def sanitize_gff_file(gff_fname, in_memory=True, in_place=False): """ Sanitize a GFF file. """ db = None if is_gff_db(gff_fname): # It's a database filename, so load it db = gffutils.FeatureDB(gff_fname) else: # Need to create a database for file if in_memory: db = gffutils.create_db(gff_fname, ":memory:", verbose=False) else: db = get_gff_db(gff_fname) if in_place: gff_out = gffwriter.GFFWriter(gff_fname, in_place=in_place) else: gff_out = gffwriter.GFFWriter(sys.stdout) sanitized_db = sanitize_gff_db(db) for gene_rec in sanitized_db.all_features(featuretype="gene"): gff_out.write_gene_recs(sanitized_db, gene_rec.id) gff_out.close()
python
def sanitize_gff_file(gff_fname, in_memory=True, in_place=False): """ Sanitize a GFF file. """ db = None if is_gff_db(gff_fname): # It's a database filename, so load it db = gffutils.FeatureDB(gff_fname) else: # Need to create a database for file if in_memory: db = gffutils.create_db(gff_fname, ":memory:", verbose=False) else: db = get_gff_db(gff_fname) if in_place: gff_out = gffwriter.GFFWriter(gff_fname, in_place=in_place) else: gff_out = gffwriter.GFFWriter(sys.stdout) sanitized_db = sanitize_gff_db(db) for gene_rec in sanitized_db.all_features(featuretype="gene"): gff_out.write_gene_recs(sanitized_db, gene_rec.id) gff_out.close()
[ "def", "sanitize_gff_file", "(", "gff_fname", ",", "in_memory", "=", "True", ",", "in_place", "=", "False", ")", ":", "db", "=", "None", "if", "is_gff_db", "(", "gff_fname", ")", ":", "# It's a database filename, so load it", "db", "=", "gffutils", ".", "FeatureDB", "(", "gff_fname", ")", "else", ":", "# Need to create a database for file", "if", "in_memory", ":", "db", "=", "gffutils", ".", "create_db", "(", "gff_fname", ",", "\":memory:\"", ",", "verbose", "=", "False", ")", "else", ":", "db", "=", "get_gff_db", "(", "gff_fname", ")", "if", "in_place", ":", "gff_out", "=", "gffwriter", ".", "GFFWriter", "(", "gff_fname", ",", "in_place", "=", "in_place", ")", "else", ":", "gff_out", "=", "gffwriter", ".", "GFFWriter", "(", "sys", ".", "stdout", ")", "sanitized_db", "=", "sanitize_gff_db", "(", "db", ")", "for", "gene_rec", "in", "sanitized_db", ".", "all_features", "(", "featuretype", "=", "\"gene\"", ")", ":", "gff_out", ".", "write_gene_recs", "(", "sanitized_db", ",", "gene_rec", ".", "id", ")", "gff_out", ".", "close", "(", ")" ]
Sanitize a GFF file.
[ "Sanitize", "a", "GFF", "file", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L379-L404
train
16,957
daler/gffutils
gffutils/helpers.py
is_gff_db
def is_gff_db(db_fname): """ Return True if the given filename is a GFF database. For now, rely on .db extension. """ if not os.path.isfile(db_fname): return False if db_fname.endswith(".db"): return True return False
python
def is_gff_db(db_fname): """ Return True if the given filename is a GFF database. For now, rely on .db extension. """ if not os.path.isfile(db_fname): return False if db_fname.endswith(".db"): return True return False
[ "def", "is_gff_db", "(", "db_fname", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "db_fname", ")", ":", "return", "False", "if", "db_fname", ".", "endswith", "(", "\".db\"", ")", ":", "return", "True", "return", "False" ]
Return True if the given filename is a GFF database. For now, rely on .db extension.
[ "Return", "True", "if", "the", "given", "filename", "is", "a", "GFF", "database", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L415-L425
train
16,958
daler/gffutils
gffutils/helpers.py
get_gff_db
def get_gff_db(gff_fname, ext=".db"): """ Get db for GFF file. If the database has a .db file, load that. Otherwise, create a named temporary file, serialize the db to that, and return the loaded database. """ if not os.path.isfile(gff_fname): # Not sure how we should deal with errors normally in # gffutils -- Ryan? raise ValueError("GFF %s does not exist." % (gff_fname)) candidate_db_fname = "%s.%s" % (gff_fname, ext) if os.path.isfile(candidate_db_fname): # Standard .db file found, so return it return candidate_db_fname # Otherwise, we need to create a temporary but non-deleted # file to store the db in. It'll be up to the user # of the function the delete the file when done. ## NOTE: Ryan must have a good scheme for dealing with this ## since pybedtools does something similar under the hood, i.e. ## creating temporary files as needed without over proliferation db_fname = tempfile.NamedTemporaryFile(delete=False) # Create the database for the gff file (suppress output # when using function internally) print("Creating db for %s" % (gff_fname)) t1 = time.time() db = gffutils.create_db(gff_fname, db_fname.name, merge_strategy="merge", verbose=False) t2 = time.time() print(" - Took %.2f seconds" % (t2 - t1)) return db
python
def get_gff_db(gff_fname, ext=".db"): """ Get db for GFF file. If the database has a .db file, load that. Otherwise, create a named temporary file, serialize the db to that, and return the loaded database. """ if not os.path.isfile(gff_fname): # Not sure how we should deal with errors normally in # gffutils -- Ryan? raise ValueError("GFF %s does not exist." % (gff_fname)) candidate_db_fname = "%s.%s" % (gff_fname, ext) if os.path.isfile(candidate_db_fname): # Standard .db file found, so return it return candidate_db_fname # Otherwise, we need to create a temporary but non-deleted # file to store the db in. It'll be up to the user # of the function the delete the file when done. ## NOTE: Ryan must have a good scheme for dealing with this ## since pybedtools does something similar under the hood, i.e. ## creating temporary files as needed without over proliferation db_fname = tempfile.NamedTemporaryFile(delete=False) # Create the database for the gff file (suppress output # when using function internally) print("Creating db for %s" % (gff_fname)) t1 = time.time() db = gffutils.create_db(gff_fname, db_fname.name, merge_strategy="merge", verbose=False) t2 = time.time() print(" - Took %.2f seconds" % (t2 - t1)) return db
[ "def", "get_gff_db", "(", "gff_fname", ",", "ext", "=", "\".db\"", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "gff_fname", ")", ":", "# Not sure how we should deal with errors normally in", "# gffutils -- Ryan?", "raise", "ValueError", "(", "\"GFF %s does not exist.\"", "%", "(", "gff_fname", ")", ")", "candidate_db_fname", "=", "\"%s.%s\"", "%", "(", "gff_fname", ",", "ext", ")", "if", "os", ".", "path", ".", "isfile", "(", "candidate_db_fname", ")", ":", "# Standard .db file found, so return it", "return", "candidate_db_fname", "# Otherwise, we need to create a temporary but non-deleted", "# file to store the db in. It'll be up to the user", "# of the function the delete the file when done.", "## NOTE: Ryan must have a good scheme for dealing with this", "## since pybedtools does something similar under the hood, i.e.", "## creating temporary files as needed without over proliferation", "db_fname", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "# Create the database for the gff file (suppress output", "# when using function internally)", "print", "(", "\"Creating db for %s\"", "%", "(", "gff_fname", ")", ")", "t1", "=", "time", ".", "time", "(", ")", "db", "=", "gffutils", ".", "create_db", "(", "gff_fname", ",", "db_fname", ".", "name", ",", "merge_strategy", "=", "\"merge\"", ",", "verbose", "=", "False", ")", "t2", "=", "time", ".", "time", "(", ")", "print", "(", "\" - Took %.2f seconds\"", "%", "(", "t2", "-", "t1", ")", ")", "return", "db" ]
Get db for GFF file. If the database has a .db file, load that. Otherwise, create a named temporary file, serialize the db to that, and return the loaded database.
[ "Get", "db", "for", "GFF", "file", ".", "If", "the", "database", "has", "a", ".", "db", "file", "load", "that", ".", "Otherwise", "create", "a", "named", "temporary", "file", "serialize", "the", "db", "to", "that", "and", "return", "the", "loaded", "database", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L475-L506
train
16,959
daler/gffutils
gffutils/parser.py
_reconstruct
def _reconstruct(keyvals, dialect, keep_order=False, sort_attribute_values=False): """ Reconstructs the original attributes string according to the dialect. Parameters ========== keyvals : dict Attributes from a GFF/GTF feature dialect : dict Dialect containing info on how to reconstruct a string version of the attributes keep_order : bool If True, then perform sorting of attribute keys to ensure they are in the same order as those provided in the original file. Default is False, which saves time especially on large data sets. sort_attribute_values : bool If True, then sort values to ensure they will always be in the same order. Mostly only useful for testing; default is False. """ if not dialect: raise AttributeStringError() if not keyvals: return "" parts = [] # Re-encode when reconstructing attributes if constants.ignore_url_escape_characters or dialect['fmt'] != 'gff3': attributes = keyvals else: attributes = {} for k, v in keyvals.items(): attributes[k] = [] for i in v: attributes[k].append(''.join([quoter[j] for j in i])) # May need to split multiple values into multiple key/val pairs if dialect['repeated keys']: items = [] for key, val in attributes.items(): if len(val) > 1: for v in val: items.append((key, [v])) else: items.append((key, val)) else: items = list(attributes.items()) def sort_key(x): # sort keys by their order in the dialect; anything not in there will # be in arbitrary order at the end. 
try: return dialect['order'].index(x[0]) except ValueError: return 1e6 if keep_order: items.sort(key=sort_key) for key, val in items: # Multival sep is usually a comma: if val: if sort_attribute_values: val = sorted(val) val_str = dialect['multival separator'].join(val) if val_str: # Surround with quotes if needed if dialect['quoted GFF2 values']: val_str = '"%s"' % val_str # Typically "=" for GFF3 or " " otherwise part = dialect['keyval separator'].join([key, val_str]) else: if dialect['fmt'] == 'gtf': part = dialect['keyval separator'].join([key, '""']) else: part = key parts.append(part) # Typically ";" or "; " parts_str = dialect['field separator'].join(parts) # Sometimes need to add this if dialect['trailing semicolon']: parts_str += ';' return parts_str
python
def _reconstruct(keyvals, dialect, keep_order=False, sort_attribute_values=False): """ Reconstructs the original attributes string according to the dialect. Parameters ========== keyvals : dict Attributes from a GFF/GTF feature dialect : dict Dialect containing info on how to reconstruct a string version of the attributes keep_order : bool If True, then perform sorting of attribute keys to ensure they are in the same order as those provided in the original file. Default is False, which saves time especially on large data sets. sort_attribute_values : bool If True, then sort values to ensure they will always be in the same order. Mostly only useful for testing; default is False. """ if not dialect: raise AttributeStringError() if not keyvals: return "" parts = [] # Re-encode when reconstructing attributes if constants.ignore_url_escape_characters or dialect['fmt'] != 'gff3': attributes = keyvals else: attributes = {} for k, v in keyvals.items(): attributes[k] = [] for i in v: attributes[k].append(''.join([quoter[j] for j in i])) # May need to split multiple values into multiple key/val pairs if dialect['repeated keys']: items = [] for key, val in attributes.items(): if len(val) > 1: for v in val: items.append((key, [v])) else: items.append((key, val)) else: items = list(attributes.items()) def sort_key(x): # sort keys by their order in the dialect; anything not in there will # be in arbitrary order at the end. 
try: return dialect['order'].index(x[0]) except ValueError: return 1e6 if keep_order: items.sort(key=sort_key) for key, val in items: # Multival sep is usually a comma: if val: if sort_attribute_values: val = sorted(val) val_str = dialect['multival separator'].join(val) if val_str: # Surround with quotes if needed if dialect['quoted GFF2 values']: val_str = '"%s"' % val_str # Typically "=" for GFF3 or " " otherwise part = dialect['keyval separator'].join([key, val_str]) else: if dialect['fmt'] == 'gtf': part = dialect['keyval separator'].join([key, '""']) else: part = key parts.append(part) # Typically ";" or "; " parts_str = dialect['field separator'].join(parts) # Sometimes need to add this if dialect['trailing semicolon']: parts_str += ';' return parts_str
[ "def", "_reconstruct", "(", "keyvals", ",", "dialect", ",", "keep_order", "=", "False", ",", "sort_attribute_values", "=", "False", ")", ":", "if", "not", "dialect", ":", "raise", "AttributeStringError", "(", ")", "if", "not", "keyvals", ":", "return", "\"\"", "parts", "=", "[", "]", "# Re-encode when reconstructing attributes", "if", "constants", ".", "ignore_url_escape_characters", "or", "dialect", "[", "'fmt'", "]", "!=", "'gff3'", ":", "attributes", "=", "keyvals", "else", ":", "attributes", "=", "{", "}", "for", "k", ",", "v", "in", "keyvals", ".", "items", "(", ")", ":", "attributes", "[", "k", "]", "=", "[", "]", "for", "i", "in", "v", ":", "attributes", "[", "k", "]", ".", "append", "(", "''", ".", "join", "(", "[", "quoter", "[", "j", "]", "for", "j", "in", "i", "]", ")", ")", "# May need to split multiple values into multiple key/val pairs", "if", "dialect", "[", "'repeated keys'", "]", ":", "items", "=", "[", "]", "for", "key", ",", "val", "in", "attributes", ".", "items", "(", ")", ":", "if", "len", "(", "val", ")", ">", "1", ":", "for", "v", "in", "val", ":", "items", ".", "append", "(", "(", "key", ",", "[", "v", "]", ")", ")", "else", ":", "items", ".", "append", "(", "(", "key", ",", "val", ")", ")", "else", ":", "items", "=", "list", "(", "attributes", ".", "items", "(", ")", ")", "def", "sort_key", "(", "x", ")", ":", "# sort keys by their order in the dialect; anything not in there will", "# be in arbitrary order at the end.", "try", ":", "return", "dialect", "[", "'order'", "]", ".", "index", "(", "x", "[", "0", "]", ")", "except", "ValueError", ":", "return", "1e6", "if", "keep_order", ":", "items", ".", "sort", "(", "key", "=", "sort_key", ")", "for", "key", ",", "val", "in", "items", ":", "# Multival sep is usually a comma:", "if", "val", ":", "if", "sort_attribute_values", ":", "val", "=", "sorted", "(", "val", ")", "val_str", "=", "dialect", "[", "'multival separator'", "]", ".", "join", "(", "val", ")", "if", "val_str", ":", "# Surround 
with quotes if needed", "if", "dialect", "[", "'quoted GFF2 values'", "]", ":", "val_str", "=", "'\"%s\"'", "%", "val_str", "# Typically \"=\" for GFF3 or \" \" otherwise", "part", "=", "dialect", "[", "'keyval separator'", "]", ".", "join", "(", "[", "key", ",", "val_str", "]", ")", "else", ":", "if", "dialect", "[", "'fmt'", "]", "==", "'gtf'", ":", "part", "=", "dialect", "[", "'keyval separator'", "]", ".", "join", "(", "[", "key", ",", "'\"\"'", "]", ")", "else", ":", "part", "=", "key", "parts", ".", "append", "(", "part", ")", "# Typically \";\" or \"; \"", "parts_str", "=", "dialect", "[", "'field separator'", "]", ".", "join", "(", "parts", ")", "# Sometimes need to add this", "if", "dialect", "[", "'trailing semicolon'", "]", ":", "parts_str", "+=", "';'", "return", "parts_str" ]
Reconstructs the original attributes string according to the dialect. Parameters ========== keyvals : dict Attributes from a GFF/GTF feature dialect : dict Dialect containing info on how to reconstruct a string version of the attributes keep_order : bool If True, then perform sorting of attribute keys to ensure they are in the same order as those provided in the original file. Default is False, which saves time especially on large data sets. sort_attribute_values : bool If True, then sort values to ensure they will always be in the same order. Mostly only useful for testing; default is False.
[ "Reconstructs", "the", "original", "attributes", "string", "according", "to", "the", "dialect", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/parser.py#L76-L169
train
16,960
daler/gffutils
gffutils/create.py
create_db
def create_db(data, dbfn, id_spec=None, force=False, verbose=False, checklines=10, merge_strategy='error', transform=None, gtf_transcript_key='transcript_id', gtf_gene_key='gene_id', gtf_subfeature='exon', force_gff=False, force_dialect_check=False, from_string=False, keep_order=False, text_factory=sqlite3.OptimizedUnicode, force_merge_fields=None, pragmas=constants.default_pragmas, sort_attribute_values=False, dialect=None, _keep_tempfiles=False, infer_gene_extent=True, disable_infer_genes=False, disable_infer_transcripts=False, **kwargs): """ Create a database from a GFF or GTF file. For more details on when and how to use the kwargs below, see the examples in the online documentation (:ref:`examples`). Parameters ---------- data : string or iterable If a string (and `from_string` is False), then `data` is the path to the original GFF or GTF file. If a string and `from_string` is True, then assume `data` is the actual data to use. Otherwise, it's an iterable of Feature objects. dbfn : string Path to the database that will be created. Can be the special string ":memory:" to create an in-memory database. id_spec : string, list, dict, callable, or None This parameter guides what will be used as the primary key for the database, which in turn determines how you will access individual features by name from the database. If `id_spec=None`, then auto-increment primary keys based on the feature type (e.g., "gene_1", "gene_2"). This is also the fallback behavior for the other values below. If `id_spec` is a string, then look for this key in the attributes. If it exists, then use its value as the primary key, otherwise autoincrement based on the feature type. For many GFF3 files, "ID" usually works well. If `id_spec` is a list or tuple of keys, then check for each one in order, using the first one found. For GFF3, this might be ["ID", "Name"], which would use the ID if it exists, otherwise the Name, otherwise autoincrement based on the feature type. 
If `id_spec` is a dictionary, then it is a mapping of feature types to what should be used as the ID. For example, for GTF files, `{'gene': 'gene_id', 'transcript': 'transcript_id'}` may be useful. The values of this dictionary can also be a list, e.g., `{'gene': ['gene_id', 'geneID']}` If `id_spec` is a callable object, then it accepts a dictionary from the iterator and returns one of the following: * None (in which case the feature type will be auto-incremented) * string (which will be used as the primary key) * special string starting with "autoincrement:X", where "X" is a string that will be used for auto-incrementing. For example, if "autoincrement:chr10", then the first feature will be "chr10_1", the second "chr10_2", and so on. force : bool If `False` (default), then raise an exception if `dbfn` already exists. Use `force=True` to overwrite any existing databases. verbose : bool Report percent complete and other feedback on how the db creation is progressing. In order to report percent complete, the entire file needs to be read once to see how many items there are; for large files you may want to use `verbose=False` to avoid this. checklines : int Number of lines to check the dialect. merge_strategy : str One of {merge, create_unique, error, warning, replace}. This parameter specifies the behavior when two items have an identical primary key. Using `merge_strategy="merge"`, then there will be a single entry in the database, but the attributes of all features with the same primary key will be merged. Using `merge_strategy="create_unique"`, then the first entry will use the original primary key, but the second entry will have a unique, autoincremented primary key assigned to it Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID` exception will be raised. This means you will have to edit the file yourself to fix the duplicated IDs. Using `merge_strategy="warning"`, a warning will be printed to the logger, and the duplicate feature will be skipped. 
Using `merge_strategy="replace"` will replace the entire existing feature with the new feature. transform : callable Function (or other callable object) that accepts a `Feature` object and returns a (possibly modified) `Feature` object. gtf_transcript_key, gtf_gene_key : string Which attribute to use as the transcript ID and gene ID respectively for GTF files. Default is `transcript_id` and `gene_id` according to the GTF spec. gtf_subfeature : string Feature type to use as a "gene component" when inferring gene and transcript extents for GTF files. Default is `exon` according to the GTF spec. force_gff : bool If True, do not do automatic format detection -- only use GFF. force_dialect_check : bool If True, the dialect will be checkef for every feature (instead of just `checklines` features). This can be slow, but may be necessary for inconsistently-formatted input files. from_string : bool If True, then treat `data` as actual data (rather than the path to a file). keep_order : bool If True, all features returned from this instance will have the order of their attributes maintained. This can be turned on or off database-wide by setting the `keep_order` attribute or with this kwarg, or on a feature-by-feature basis by setting the `keep_order` attribute of an individual feature. Note that a single order of attributes will be used for all features. Specifically, the order will be determined by the order of attribute keys in the first `checklines` of the input data. See helpers._choose_dialect for more information on this. Default is False, since this includes a sorting step that can get time-consuming for many features. infer_gene_extent : bool DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and `disable_infer_genes` for more granular control. disable_infer_transcripts, disable_infer_genes : bool Only used for GTF files. By default -- and according to the GTF spec -- we assume that there are no transcript or gene features in the file. 
gffutils then infers the extent of each transcript based on its constituent exons and infers the extent of each gene bases on its constituent transcripts. This default behavior is problematic if the input file already contains transcript or gene features (like recent GENCODE GTF files for human), since 1) the work to infer extents is unnecessary, and 2) trying to insert an inferred feature back into the database triggers gffutils' feature-merging routines, which can get time consuming. The solution is to use `disable_infer_transcripts=True` if your GTF already has transcripts in it, and/or `disable_infer_genes=True` if it already has genes in it. This can result in dramatic (100x) speedup. Prior to version 0.8.4, setting `infer_gene_extents=False` would disable both transcript and gene inference simultaneously. As of version 0.8.4, these argument allow more granular control. force_merge_fields : list If merge_strategy="merge", then features will only be merged if their non-attribute values are identical (same chrom, source, start, stop, score, strand, phase). Using `force_merge_fields`, you can override this behavior to allow merges even when fields are different. This list can contain one or more of ['seqid', 'source', 'featuretype', 'score', 'strand', 'frame']. The resulting merged fields will be strings of comma-separated values. Note that 'start' and 'end' are not available, since these fields need to be integers. text_factory : callable Text factory to use for the sqlite3 database. See https://docs.python.org/2/library/\ sqlite3.html#sqlite3.Connection.text_factory for details. The default sqlite3.OptimizedUnicode will return Unicode objects only for non-ASCII data, and bytestrings otherwise. pragmas : dict Dictionary of pragmas used when creating the sqlite3 database. See http://www.sqlite.org/pragma.html for a list of available pragmas. The defaults are stored in constants.default_pragmas, which can be used as a template for supplying a custom dictionary. 
sort_attribute_values : bool All features returned from the database will have their attribute values sorted. Typically this is only useful for testing, since this can get time-consuming for large numbers of features. _keep_tempfiles : bool or string False by default to clean up intermediate tempfiles created during GTF import. If True, then keep these tempfile for testing or debugging. If string, then keep the tempfile for testing, but also use the string as the suffix fo the tempfile. This can be useful for testing in parallel environments. Returns ------- New :class:`FeatureDB` object. """ _locals = locals() # Check if any older kwargs made it in deprecation_handler(kwargs) kwargs = dict((i, _locals[i]) for i in constants._iterator_kwargs) # First construct an iterator so that we can identify the file format. # DataIterator figures out what kind of data was provided (string of lines, # filename, or iterable of Features) and checks `checklines` lines to # identify the dialect. iterator = iterators.DataIterator(**kwargs) kwargs.update(**_locals) if dialect is None: dialect = iterator.dialect # However, a side-effect of this is that if `data` was a generator, then # we've just consumed `checklines` items (see # iterators.BaseIterator.__init__, which calls iterators.peek). # # But it also chains those consumed items back onto the beginning, and the # result is available as as iterator._iter. 
# # That's what we should be using now for `data: kwargs['data'] = iterator._iter kwargs['directives'] = iterator.directives # Since we've already checked lines, we don't want to do it again kwargs['checklines'] = 0 if force_gff or (dialect['fmt'] == 'gff3'): cls = _GFFDBCreator id_spec = id_spec or 'ID' add_kwargs = dict( id_spec=id_spec, ) elif dialect['fmt'] == 'gtf': cls = _GTFDBCreator id_spec = id_spec or {'gene': 'gene_id', 'transcript': 'transcript_id'} add_kwargs = dict( transcript_key=gtf_transcript_key, gene_key=gtf_gene_key, subfeature=gtf_subfeature, id_spec=id_spec, ) kwargs.update(**add_kwargs) kwargs['dialect'] = dialect c = cls(**kwargs) c.create() if dbfn == ':memory:': db = interface.FeatureDB(c.conn, keep_order=keep_order, pragmas=pragmas, sort_attribute_values=sort_attribute_values, text_factory=text_factory) else: db = interface.FeatureDB(c, keep_order=keep_order, pragmas=pragmas, sort_attribute_values=sort_attribute_values, text_factory=text_factory) return db
python
def create_db(data, dbfn, id_spec=None, force=False, verbose=False, checklines=10, merge_strategy='error', transform=None, gtf_transcript_key='transcript_id', gtf_gene_key='gene_id', gtf_subfeature='exon', force_gff=False, force_dialect_check=False, from_string=False, keep_order=False, text_factory=sqlite3.OptimizedUnicode, force_merge_fields=None, pragmas=constants.default_pragmas, sort_attribute_values=False, dialect=None, _keep_tempfiles=False, infer_gene_extent=True, disable_infer_genes=False, disable_infer_transcripts=False, **kwargs): """ Create a database from a GFF or GTF file. For more details on when and how to use the kwargs below, see the examples in the online documentation (:ref:`examples`). Parameters ---------- data : string or iterable If a string (and `from_string` is False), then `data` is the path to the original GFF or GTF file. If a string and `from_string` is True, then assume `data` is the actual data to use. Otherwise, it's an iterable of Feature objects. dbfn : string Path to the database that will be created. Can be the special string ":memory:" to create an in-memory database. id_spec : string, list, dict, callable, or None This parameter guides what will be used as the primary key for the database, which in turn determines how you will access individual features by name from the database. If `id_spec=None`, then auto-increment primary keys based on the feature type (e.g., "gene_1", "gene_2"). This is also the fallback behavior for the other values below. If `id_spec` is a string, then look for this key in the attributes. If it exists, then use its value as the primary key, otherwise autoincrement based on the feature type. For many GFF3 files, "ID" usually works well. If `id_spec` is a list or tuple of keys, then check for each one in order, using the first one found. For GFF3, this might be ["ID", "Name"], which would use the ID if it exists, otherwise the Name, otherwise autoincrement based on the feature type. 
If `id_spec` is a dictionary, then it is a mapping of feature types to what should be used as the ID. For example, for GTF files, `{'gene': 'gene_id', 'transcript': 'transcript_id'}` may be useful. The values of this dictionary can also be a list, e.g., `{'gene': ['gene_id', 'geneID']}` If `id_spec` is a callable object, then it accepts a dictionary from the iterator and returns one of the following: * None (in which case the feature type will be auto-incremented) * string (which will be used as the primary key) * special string starting with "autoincrement:X", where "X" is a string that will be used for auto-incrementing. For example, if "autoincrement:chr10", then the first feature will be "chr10_1", the second "chr10_2", and so on. force : bool If `False` (default), then raise an exception if `dbfn` already exists. Use `force=True` to overwrite any existing databases. verbose : bool Report percent complete and other feedback on how the db creation is progressing. In order to report percent complete, the entire file needs to be read once to see how many items there are; for large files you may want to use `verbose=False` to avoid this. checklines : int Number of lines to check the dialect. merge_strategy : str One of {merge, create_unique, error, warning, replace}. This parameter specifies the behavior when two items have an identical primary key. Using `merge_strategy="merge"`, then there will be a single entry in the database, but the attributes of all features with the same primary key will be merged. Using `merge_strategy="create_unique"`, then the first entry will use the original primary key, but the second entry will have a unique, autoincremented primary key assigned to it Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID` exception will be raised. This means you will have to edit the file yourself to fix the duplicated IDs. Using `merge_strategy="warning"`, a warning will be printed to the logger, and the duplicate feature will be skipped. 
Using `merge_strategy="replace"` will replace the entire existing feature with the new feature. transform : callable Function (or other callable object) that accepts a `Feature` object and returns a (possibly modified) `Feature` object. gtf_transcript_key, gtf_gene_key : string Which attribute to use as the transcript ID and gene ID respectively for GTF files. Default is `transcript_id` and `gene_id` according to the GTF spec. gtf_subfeature : string Feature type to use as a "gene component" when inferring gene and transcript extents for GTF files. Default is `exon` according to the GTF spec. force_gff : bool If True, do not do automatic format detection -- only use GFF. force_dialect_check : bool If True, the dialect will be checkef for every feature (instead of just `checklines` features). This can be slow, but may be necessary for inconsistently-formatted input files. from_string : bool If True, then treat `data` as actual data (rather than the path to a file). keep_order : bool If True, all features returned from this instance will have the order of their attributes maintained. This can be turned on or off database-wide by setting the `keep_order` attribute or with this kwarg, or on a feature-by-feature basis by setting the `keep_order` attribute of an individual feature. Note that a single order of attributes will be used for all features. Specifically, the order will be determined by the order of attribute keys in the first `checklines` of the input data. See helpers._choose_dialect for more information on this. Default is False, since this includes a sorting step that can get time-consuming for many features. infer_gene_extent : bool DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and `disable_infer_genes` for more granular control. disable_infer_transcripts, disable_infer_genes : bool Only used for GTF files. By default -- and according to the GTF spec -- we assume that there are no transcript or gene features in the file. 
gffutils then infers the extent of each transcript based on its constituent exons and infers the extent of each gene bases on its constituent transcripts. This default behavior is problematic if the input file already contains transcript or gene features (like recent GENCODE GTF files for human), since 1) the work to infer extents is unnecessary, and 2) trying to insert an inferred feature back into the database triggers gffutils' feature-merging routines, which can get time consuming. The solution is to use `disable_infer_transcripts=True` if your GTF already has transcripts in it, and/or `disable_infer_genes=True` if it already has genes in it. This can result in dramatic (100x) speedup. Prior to version 0.8.4, setting `infer_gene_extents=False` would disable both transcript and gene inference simultaneously. As of version 0.8.4, these argument allow more granular control. force_merge_fields : list If merge_strategy="merge", then features will only be merged if their non-attribute values are identical (same chrom, source, start, stop, score, strand, phase). Using `force_merge_fields`, you can override this behavior to allow merges even when fields are different. This list can contain one or more of ['seqid', 'source', 'featuretype', 'score', 'strand', 'frame']. The resulting merged fields will be strings of comma-separated values. Note that 'start' and 'end' are not available, since these fields need to be integers. text_factory : callable Text factory to use for the sqlite3 database. See https://docs.python.org/2/library/\ sqlite3.html#sqlite3.Connection.text_factory for details. The default sqlite3.OptimizedUnicode will return Unicode objects only for non-ASCII data, and bytestrings otherwise. pragmas : dict Dictionary of pragmas used when creating the sqlite3 database. See http://www.sqlite.org/pragma.html for a list of available pragmas. The defaults are stored in constants.default_pragmas, which can be used as a template for supplying a custom dictionary. 
sort_attribute_values : bool All features returned from the database will have their attribute values sorted. Typically this is only useful for testing, since this can get time-consuming for large numbers of features. _keep_tempfiles : bool or string False by default to clean up intermediate tempfiles created during GTF import. If True, then keep these tempfile for testing or debugging. If string, then keep the tempfile for testing, but also use the string as the suffix fo the tempfile. This can be useful for testing in parallel environments. Returns ------- New :class:`FeatureDB` object. """ _locals = locals() # Check if any older kwargs made it in deprecation_handler(kwargs) kwargs = dict((i, _locals[i]) for i in constants._iterator_kwargs) # First construct an iterator so that we can identify the file format. # DataIterator figures out what kind of data was provided (string of lines, # filename, or iterable of Features) and checks `checklines` lines to # identify the dialect. iterator = iterators.DataIterator(**kwargs) kwargs.update(**_locals) if dialect is None: dialect = iterator.dialect # However, a side-effect of this is that if `data` was a generator, then # we've just consumed `checklines` items (see # iterators.BaseIterator.__init__, which calls iterators.peek). # # But it also chains those consumed items back onto the beginning, and the # result is available as as iterator._iter. 
# # That's what we should be using now for `data: kwargs['data'] = iterator._iter kwargs['directives'] = iterator.directives # Since we've already checked lines, we don't want to do it again kwargs['checklines'] = 0 if force_gff or (dialect['fmt'] == 'gff3'): cls = _GFFDBCreator id_spec = id_spec or 'ID' add_kwargs = dict( id_spec=id_spec, ) elif dialect['fmt'] == 'gtf': cls = _GTFDBCreator id_spec = id_spec or {'gene': 'gene_id', 'transcript': 'transcript_id'} add_kwargs = dict( transcript_key=gtf_transcript_key, gene_key=gtf_gene_key, subfeature=gtf_subfeature, id_spec=id_spec, ) kwargs.update(**add_kwargs) kwargs['dialect'] = dialect c = cls(**kwargs) c.create() if dbfn == ':memory:': db = interface.FeatureDB(c.conn, keep_order=keep_order, pragmas=pragmas, sort_attribute_values=sort_attribute_values, text_factory=text_factory) else: db = interface.FeatureDB(c, keep_order=keep_order, pragmas=pragmas, sort_attribute_values=sort_attribute_values, text_factory=text_factory) return db
[ "def", "create_db", "(", "data", ",", "dbfn", ",", "id_spec", "=", "None", ",", "force", "=", "False", ",", "verbose", "=", "False", ",", "checklines", "=", "10", ",", "merge_strategy", "=", "'error'", ",", "transform", "=", "None", ",", "gtf_transcript_key", "=", "'transcript_id'", ",", "gtf_gene_key", "=", "'gene_id'", ",", "gtf_subfeature", "=", "'exon'", ",", "force_gff", "=", "False", ",", "force_dialect_check", "=", "False", ",", "from_string", "=", "False", ",", "keep_order", "=", "False", ",", "text_factory", "=", "sqlite3", ".", "OptimizedUnicode", ",", "force_merge_fields", "=", "None", ",", "pragmas", "=", "constants", ".", "default_pragmas", ",", "sort_attribute_values", "=", "False", ",", "dialect", "=", "None", ",", "_keep_tempfiles", "=", "False", ",", "infer_gene_extent", "=", "True", ",", "disable_infer_genes", "=", "False", ",", "disable_infer_transcripts", "=", "False", ",", "*", "*", "kwargs", ")", ":", "_locals", "=", "locals", "(", ")", "# Check if any older kwargs made it in", "deprecation_handler", "(", "kwargs", ")", "kwargs", "=", "dict", "(", "(", "i", ",", "_locals", "[", "i", "]", ")", "for", "i", "in", "constants", ".", "_iterator_kwargs", ")", "# First construct an iterator so that we can identify the file format.", "# DataIterator figures out what kind of data was provided (string of lines,", "# filename, or iterable of Features) and checks `checklines` lines to", "# identify the dialect.", "iterator", "=", "iterators", ".", "DataIterator", "(", "*", "*", "kwargs", ")", "kwargs", ".", "update", "(", "*", "*", "_locals", ")", "if", "dialect", "is", "None", ":", "dialect", "=", "iterator", ".", "dialect", "# However, a side-effect of this is that if `data` was a generator, then", "# we've just consumed `checklines` items (see", "# iterators.BaseIterator.__init__, which calls iterators.peek).", "#", "# But it also chains those consumed items back onto the beginning, and the", "# result is available as as iterator._iter.", "#", "# 
That's what we should be using now for `data:", "kwargs", "[", "'data'", "]", "=", "iterator", ".", "_iter", "kwargs", "[", "'directives'", "]", "=", "iterator", ".", "directives", "# Since we've already checked lines, we don't want to do it again", "kwargs", "[", "'checklines'", "]", "=", "0", "if", "force_gff", "or", "(", "dialect", "[", "'fmt'", "]", "==", "'gff3'", ")", ":", "cls", "=", "_GFFDBCreator", "id_spec", "=", "id_spec", "or", "'ID'", "add_kwargs", "=", "dict", "(", "id_spec", "=", "id_spec", ",", ")", "elif", "dialect", "[", "'fmt'", "]", "==", "'gtf'", ":", "cls", "=", "_GTFDBCreator", "id_spec", "=", "id_spec", "or", "{", "'gene'", ":", "'gene_id'", ",", "'transcript'", ":", "'transcript_id'", "}", "add_kwargs", "=", "dict", "(", "transcript_key", "=", "gtf_transcript_key", ",", "gene_key", "=", "gtf_gene_key", ",", "subfeature", "=", "gtf_subfeature", ",", "id_spec", "=", "id_spec", ",", ")", "kwargs", ".", "update", "(", "*", "*", "add_kwargs", ")", "kwargs", "[", "'dialect'", "]", "=", "dialect", "c", "=", "cls", "(", "*", "*", "kwargs", ")", "c", ".", "create", "(", ")", "if", "dbfn", "==", "':memory:'", ":", "db", "=", "interface", ".", "FeatureDB", "(", "c", ".", "conn", ",", "keep_order", "=", "keep_order", ",", "pragmas", "=", "pragmas", ",", "sort_attribute_values", "=", "sort_attribute_values", ",", "text_factory", "=", "text_factory", ")", "else", ":", "db", "=", "interface", ".", "FeatureDB", "(", "c", ",", "keep_order", "=", "keep_order", ",", "pragmas", "=", "pragmas", ",", "sort_attribute_values", "=", "sort_attribute_values", ",", "text_factory", "=", "text_factory", ")", "return", "db" ]
Create a database from a GFF or GTF file. For more details on when and how to use the kwargs below, see the examples in the online documentation (:ref:`examples`). Parameters ---------- data : string or iterable If a string (and `from_string` is False), then `data` is the path to the original GFF or GTF file. If a string and `from_string` is True, then assume `data` is the actual data to use. Otherwise, it's an iterable of Feature objects. dbfn : string Path to the database that will be created. Can be the special string ":memory:" to create an in-memory database. id_spec : string, list, dict, callable, or None This parameter guides what will be used as the primary key for the database, which in turn determines how you will access individual features by name from the database. If `id_spec=None`, then auto-increment primary keys based on the feature type (e.g., "gene_1", "gene_2"). This is also the fallback behavior for the other values below. If `id_spec` is a string, then look for this key in the attributes. If it exists, then use its value as the primary key, otherwise autoincrement based on the feature type. For many GFF3 files, "ID" usually works well. If `id_spec` is a list or tuple of keys, then check for each one in order, using the first one found. For GFF3, this might be ["ID", "Name"], which would use the ID if it exists, otherwise the Name, otherwise autoincrement based on the feature type. If `id_spec` is a dictionary, then it is a mapping of feature types to what should be used as the ID. For example, for GTF files, `{'gene': 'gene_id', 'transcript': 'transcript_id'}` may be useful. 
The values of this dictionary can also be a list, e.g., `{'gene': ['gene_id', 'geneID']}` If `id_spec` is a callable object, then it accepts a dictionary from the iterator and returns one of the following: * None (in which case the feature type will be auto-incremented) * string (which will be used as the primary key) * special string starting with "autoincrement:X", where "X" is a string that will be used for auto-incrementing. For example, if "autoincrement:chr10", then the first feature will be "chr10_1", the second "chr10_2", and so on. force : bool If `False` (default), then raise an exception if `dbfn` already exists. Use `force=True` to overwrite any existing databases. verbose : bool Report percent complete and other feedback on how the db creation is progressing. In order to report percent complete, the entire file needs to be read once to see how many items there are; for large files you may want to use `verbose=False` to avoid this. checklines : int Number of lines to check the dialect. merge_strategy : str One of {merge, create_unique, error, warning, replace}. This parameter specifies the behavior when two items have an identical primary key. Using `merge_strategy="merge"`, then there will be a single entry in the database, but the attributes of all features with the same primary key will be merged. Using `merge_strategy="create_unique"`, then the first entry will use the original primary key, but the second entry will have a unique, autoincremented primary key assigned to it Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID` exception will be raised. This means you will have to edit the file yourself to fix the duplicated IDs. Using `merge_strategy="warning"`, a warning will be printed to the logger, and the duplicate feature will be skipped. Using `merge_strategy="replace"` will replace the entire existing feature with the new feature. 
transform : callable Function (or other callable object) that accepts a `Feature` object and returns a (possibly modified) `Feature` object. gtf_transcript_key, gtf_gene_key : string Which attribute to use as the transcript ID and gene ID respectively for GTF files. Default is `transcript_id` and `gene_id` according to the GTF spec. gtf_subfeature : string Feature type to use as a "gene component" when inferring gene and transcript extents for GTF files. Default is `exon` according to the GTF spec. force_gff : bool If True, do not do automatic format detection -- only use GFF. force_dialect_check : bool If True, the dialect will be checkef for every feature (instead of just `checklines` features). This can be slow, but may be necessary for inconsistently-formatted input files. from_string : bool If True, then treat `data` as actual data (rather than the path to a file). keep_order : bool If True, all features returned from this instance will have the order of their attributes maintained. This can be turned on or off database-wide by setting the `keep_order` attribute or with this kwarg, or on a feature-by-feature basis by setting the `keep_order` attribute of an individual feature. Note that a single order of attributes will be used for all features. Specifically, the order will be determined by the order of attribute keys in the first `checklines` of the input data. See helpers._choose_dialect for more information on this. Default is False, since this includes a sorting step that can get time-consuming for many features. infer_gene_extent : bool DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and `disable_infer_genes` for more granular control. disable_infer_transcripts, disable_infer_genes : bool Only used for GTF files. By default -- and according to the GTF spec -- we assume that there are no transcript or gene features in the file. 
gffutils then infers the extent of each transcript based on its constituent exons and infers the extent of each gene bases on its constituent transcripts. This default behavior is problematic if the input file already contains transcript or gene features (like recent GENCODE GTF files for human), since 1) the work to infer extents is unnecessary, and 2) trying to insert an inferred feature back into the database triggers gffutils' feature-merging routines, which can get time consuming. The solution is to use `disable_infer_transcripts=True` if your GTF already has transcripts in it, and/or `disable_infer_genes=True` if it already has genes in it. This can result in dramatic (100x) speedup. Prior to version 0.8.4, setting `infer_gene_extents=False` would disable both transcript and gene inference simultaneously. As of version 0.8.4, these argument allow more granular control. force_merge_fields : list If merge_strategy="merge", then features will only be merged if their non-attribute values are identical (same chrom, source, start, stop, score, strand, phase). Using `force_merge_fields`, you can override this behavior to allow merges even when fields are different. This list can contain one or more of ['seqid', 'source', 'featuretype', 'score', 'strand', 'frame']. The resulting merged fields will be strings of comma-separated values. Note that 'start' and 'end' are not available, since these fields need to be integers. text_factory : callable Text factory to use for the sqlite3 database. See https://docs.python.org/2/library/\ sqlite3.html#sqlite3.Connection.text_factory for details. The default sqlite3.OptimizedUnicode will return Unicode objects only for non-ASCII data, and bytestrings otherwise. pragmas : dict Dictionary of pragmas used when creating the sqlite3 database. See http://www.sqlite.org/pragma.html for a list of available pragmas. The defaults are stored in constants.default_pragmas, which can be used as a template for supplying a custom dictionary. 
sort_attribute_values : bool All features returned from the database will have their attribute values sorted. Typically this is only useful for testing, since this can get time-consuming for large numbers of features. _keep_tempfiles : bool or string False by default to clean up intermediate tempfiles created during GTF import. If True, then keep these tempfile for testing or debugging. If string, then keep the tempfile for testing, but also use the string as the suffix fo the tempfile. This can be useful for testing in parallel environments. Returns ------- New :class:`FeatureDB` object.
[ "Create", "a", "database", "from", "a", "GFF", "or", "GTF", "file", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/create.py#L1025-L1312
train
16,961
daler/gffutils
gffutils/create.py
_DBCreator._id_handler
def _id_handler(self, f): """ Given a Feature from self.iterator, figure out what the ID should be. This uses `self.id_spec` identify the ID. """ # If id_spec is a string, convert to iterable for later if isinstance(self.id_spec, six.string_types): id_key = [self.id_spec] elif hasattr(self.id_spec, '__call__'): id_key = [self.id_spec] # If dict, then assume it's a feature -> attribute mapping, e.g., # {'gene': 'gene_id'} for GTF elif isinstance(self.id_spec, dict): try: id_key = self.id_spec[f.featuretype] if isinstance(id_key, six.string_types): id_key = [id_key] # Otherwise, use default auto-increment. except KeyError: return self._increment_featuretype_autoid(f.featuretype) # Otherwise assume it's an iterable. else: id_key = self.id_spec # Then try them in order, returning the first one that works: for k in id_key: if hasattr(k, '__call__'): _id = k(f) if _id: if _id.startswith('autoincrement:'): return self._increment_featuretype_autoid(_id[14:]) return _id else: # use GFF fields rather than attributes for cases like :seqid: # or :strand: if (len(k) > 3) and (k[0] == ':') and (k[-1] == ':'): # No [0] here -- only attributes key/vals are forced into # lists, not standard GFF fields. return getattr(f, k[1:-1]) else: try: return f.attributes[k][0] except (KeyError, IndexError): pass # If we get here, then default autoincrement return self._increment_featuretype_autoid(f.featuretype)
python
def _id_handler(self, f): """ Given a Feature from self.iterator, figure out what the ID should be. This uses `self.id_spec` identify the ID. """ # If id_spec is a string, convert to iterable for later if isinstance(self.id_spec, six.string_types): id_key = [self.id_spec] elif hasattr(self.id_spec, '__call__'): id_key = [self.id_spec] # If dict, then assume it's a feature -> attribute mapping, e.g., # {'gene': 'gene_id'} for GTF elif isinstance(self.id_spec, dict): try: id_key = self.id_spec[f.featuretype] if isinstance(id_key, six.string_types): id_key = [id_key] # Otherwise, use default auto-increment. except KeyError: return self._increment_featuretype_autoid(f.featuretype) # Otherwise assume it's an iterable. else: id_key = self.id_spec # Then try them in order, returning the first one that works: for k in id_key: if hasattr(k, '__call__'): _id = k(f) if _id: if _id.startswith('autoincrement:'): return self._increment_featuretype_autoid(_id[14:]) return _id else: # use GFF fields rather than attributes for cases like :seqid: # or :strand: if (len(k) > 3) and (k[0] == ':') and (k[-1] == ':'): # No [0] here -- only attributes key/vals are forced into # lists, not standard GFF fields. return getattr(f, k[1:-1]) else: try: return f.attributes[k][0] except (KeyError, IndexError): pass # If we get here, then default autoincrement return self._increment_featuretype_autoid(f.featuretype)
[ "def", "_id_handler", "(", "self", ",", "f", ")", ":", "# If id_spec is a string, convert to iterable for later", "if", "isinstance", "(", "self", ".", "id_spec", ",", "six", ".", "string_types", ")", ":", "id_key", "=", "[", "self", ".", "id_spec", "]", "elif", "hasattr", "(", "self", ".", "id_spec", ",", "'__call__'", ")", ":", "id_key", "=", "[", "self", ".", "id_spec", "]", "# If dict, then assume it's a feature -> attribute mapping, e.g.,", "# {'gene': 'gene_id'} for GTF", "elif", "isinstance", "(", "self", ".", "id_spec", ",", "dict", ")", ":", "try", ":", "id_key", "=", "self", ".", "id_spec", "[", "f", ".", "featuretype", "]", "if", "isinstance", "(", "id_key", ",", "six", ".", "string_types", ")", ":", "id_key", "=", "[", "id_key", "]", "# Otherwise, use default auto-increment.", "except", "KeyError", ":", "return", "self", ".", "_increment_featuretype_autoid", "(", "f", ".", "featuretype", ")", "# Otherwise assume it's an iterable.", "else", ":", "id_key", "=", "self", ".", "id_spec", "# Then try them in order, returning the first one that works:", "for", "k", "in", "id_key", ":", "if", "hasattr", "(", "k", ",", "'__call__'", ")", ":", "_id", "=", "k", "(", "f", ")", "if", "_id", ":", "if", "_id", ".", "startswith", "(", "'autoincrement:'", ")", ":", "return", "self", ".", "_increment_featuretype_autoid", "(", "_id", "[", "14", ":", "]", ")", "return", "_id", "else", ":", "# use GFF fields rather than attributes for cases like :seqid:", "# or :strand:", "if", "(", "len", "(", "k", ")", ">", "3", ")", "and", "(", "k", "[", "0", "]", "==", "':'", ")", "and", "(", "k", "[", "-", "1", "]", "==", "':'", ")", ":", "# No [0] here -- only attributes key/vals are forced into", "# lists, not standard GFF fields.", "return", "getattr", "(", "f", ",", "k", "[", "1", ":", "-", "1", "]", ")", "else", ":", "try", ":", "return", "f", ".", "attributes", "[", "k", "]", "[", "0", "]", "except", "(", "KeyError", ",", "IndexError", ")", ":", "pass", "# If we get here, then 
default autoincrement", "return", "self", ".", "_increment_featuretype_autoid", "(", "f", ".", "featuretype", ")" ]
Given a Feature from self.iterator, figure out what the ID should be. This uses `self.id_spec` identify the ID.
[ "Given", "a", "Feature", "from", "self", ".", "iterator", "figure", "out", "what", "the", "ID", "should", "be", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/create.py#L153-L205
train
16,962
daler/gffutils
gffutils/create.py
_DBCreator.create
def create(self): """ Calls various methods sequentially in order to fully build the database. """ # Calls each of these methods in order. _populate_from_lines and # _update_relations must be implemented in subclasses. self._init_tables() self._populate_from_lines(self.iterator) self._update_relations() self._finalize()
python
def create(self): """ Calls various methods sequentially in order to fully build the database. """ # Calls each of these methods in order. _populate_from_lines and # _update_relations must be implemented in subclasses. self._init_tables() self._populate_from_lines(self.iterator) self._update_relations() self._finalize()
[ "def", "create", "(", "self", ")", ":", "# Calls each of these methods in order. _populate_from_lines and", "# _update_relations must be implemented in subclasses.", "self", ".", "_init_tables", "(", ")", "self", ".", "_populate_from_lines", "(", "self", ".", "iterator", ")", "self", ".", "_update_relations", "(", ")", "self", ".", "_finalize", "(", ")" ]
Calls various methods sequentially in order to fully build the database.
[ "Calls", "various", "methods", "sequentially", "in", "order", "to", "fully", "build", "the", "database", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/create.py#L507-L517
train
16,963
daler/gffutils
gffutils/create.py
_DBCreator.execute
def execute(self, query): """ Execute a query directly on the database. """ c = self.conn.cursor() result = c.execute(query) for i in result: yield i
python
def execute(self, query): """ Execute a query directly on the database. """ c = self.conn.cursor() result = c.execute(query) for i in result: yield i
[ "def", "execute", "(", "self", ",", "query", ")", ":", "c", "=", "self", ".", "conn", ".", "cursor", "(", ")", "result", "=", "c", ".", "execute", "(", "query", ")", "for", "i", "in", "result", ":", "yield", "i" ]
Execute a query directly on the database.
[ "Execute", "a", "query", "directly", "on", "the", "database", "." ]
6f7f547cad898738a1bd0a999fd68ba68db2c524
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/create.py#L523-L530
train
16,964
edx/bok-choy
bok_choy/javascript.py
wait_for_js
def wait_for_js(function): """ Method decorator that waits for JavaScript dependencies before executing `function`. If the function is not a method, the decorator has no effect. Args: function (callable): Method to decorate. Returns: Decorated method """ @functools.wraps(function) def wrapper(*args, **kwargs): # pylint: disable=missing-docstring # If not a method, then just call the function if len(args) < 1: return function(*args, **kwargs) # Otherwise, retrieve `self` as the first arg else: self = args[0] # If the class has been decorated by one of the # JavaScript dependency decorators, it should have # a `wait_for_js` method if hasattr(self, 'wait_for_js'): self.wait_for_js() # Call the function return function(*args, **kwargs) return wrapper
python
def wait_for_js(function): """ Method decorator that waits for JavaScript dependencies before executing `function`. If the function is not a method, the decorator has no effect. Args: function (callable): Method to decorate. Returns: Decorated method """ @functools.wraps(function) def wrapper(*args, **kwargs): # pylint: disable=missing-docstring # If not a method, then just call the function if len(args) < 1: return function(*args, **kwargs) # Otherwise, retrieve `self` as the first arg else: self = args[0] # If the class has been decorated by one of the # JavaScript dependency decorators, it should have # a `wait_for_js` method if hasattr(self, 'wait_for_js'): self.wait_for_js() # Call the function return function(*args, **kwargs) return wrapper
[ "def", "wait_for_js", "(", "function", ")", ":", "@", "functools", ".", "wraps", "(", "function", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=missing-docstring", "# If not a method, then just call the function", "if", "len", "(", "args", ")", "<", "1", ":", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# Otherwise, retrieve `self` as the first arg", "else", ":", "self", "=", "args", "[", "0", "]", "# If the class has been decorated by one of the", "# JavaScript dependency decorators, it should have", "# a `wait_for_js` method", "if", "hasattr", "(", "self", ",", "'wait_for_js'", ")", ":", "self", ".", "wait_for_js", "(", ")", "# Call the function", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Method decorator that waits for JavaScript dependencies before executing `function`. If the function is not a method, the decorator has no effect. Args: function (callable): Method to decorate. Returns: Decorated method
[ "Method", "decorator", "that", "waits", "for", "JavaScript", "dependencies", "before", "executing", "function", ".", "If", "the", "function", "is", "not", "a", "method", "the", "decorator", "has", "no", "effect", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/javascript.py#L45-L77
train
16,965
edx/bok-choy
bok_choy/javascript.py
_wait_for_js
def _wait_for_js(self): """ Class method added by the decorators to allow decorated classes to manually re-check JavaScript dependencies. Expect that `self` is a class that: 1) Has been decorated with either `js_defined` or `requirejs` 2) Has a `browser` property If either (1) or (2) is not satisfied, then do nothing. """ # No Selenium browser available, so return without doing anything if not hasattr(self, 'browser'): return # pylint: disable=protected-access # Wait for JavaScript variables to be defined if hasattr(self, '_js_vars') and self._js_vars: EmptyPromise( lambda: _are_js_vars_defined(self.browser, self._js_vars), u"JavaScript variables defined: {0}".format(", ".join(self._js_vars)) ).fulfill() # Wait for RequireJS dependencies to load if hasattr(self, '_requirejs_deps') and self._requirejs_deps: EmptyPromise( lambda: _are_requirejs_deps_loaded(self.browser, self._requirejs_deps), u"RequireJS dependencies loaded: {0}".format(", ".join(self._requirejs_deps)), try_limit=5 ).fulfill()
python
def _wait_for_js(self): """ Class method added by the decorators to allow decorated classes to manually re-check JavaScript dependencies. Expect that `self` is a class that: 1) Has been decorated with either `js_defined` or `requirejs` 2) Has a `browser` property If either (1) or (2) is not satisfied, then do nothing. """ # No Selenium browser available, so return without doing anything if not hasattr(self, 'browser'): return # pylint: disable=protected-access # Wait for JavaScript variables to be defined if hasattr(self, '_js_vars') and self._js_vars: EmptyPromise( lambda: _are_js_vars_defined(self.browser, self._js_vars), u"JavaScript variables defined: {0}".format(", ".join(self._js_vars)) ).fulfill() # Wait for RequireJS dependencies to load if hasattr(self, '_requirejs_deps') and self._requirejs_deps: EmptyPromise( lambda: _are_requirejs_deps_loaded(self.browser, self._requirejs_deps), u"RequireJS dependencies loaded: {0}".format(", ".join(self._requirejs_deps)), try_limit=5 ).fulfill()
[ "def", "_wait_for_js", "(", "self", ")", ":", "# No Selenium browser available, so return without doing anything", "if", "not", "hasattr", "(", "self", ",", "'browser'", ")", ":", "return", "# pylint: disable=protected-access", "# Wait for JavaScript variables to be defined", "if", "hasattr", "(", "self", ",", "'_js_vars'", ")", "and", "self", ".", "_js_vars", ":", "EmptyPromise", "(", "lambda", ":", "_are_js_vars_defined", "(", "self", ".", "browser", ",", "self", ".", "_js_vars", ")", ",", "u\"JavaScript variables defined: {0}\"", ".", "format", "(", "\", \"", ".", "join", "(", "self", ".", "_js_vars", ")", ")", ")", ".", "fulfill", "(", ")", "# Wait for RequireJS dependencies to load", "if", "hasattr", "(", "self", ",", "'_requirejs_deps'", ")", "and", "self", ".", "_requirejs_deps", ":", "EmptyPromise", "(", "lambda", ":", "_are_requirejs_deps_loaded", "(", "self", ".", "browser", ",", "self", ".", "_requirejs_deps", ")", ",", "u\"RequireJS dependencies loaded: {0}\"", ".", "format", "(", "\", \"", ".", "join", "(", "self", ".", "_requirejs_deps", ")", ")", ",", "try_limit", "=", "5", ")", ".", "fulfill", "(", ")" ]
Class method added by the decorators to allow decorated classes to manually re-check JavaScript dependencies. Expect that `self` is a class that: 1) Has been decorated with either `js_defined` or `requirejs` 2) Has a `browser` property If either (1) or (2) is not satisfied, then do nothing.
[ "Class", "method", "added", "by", "the", "decorators", "to", "allow", "decorated", "classes", "to", "manually", "re", "-", "check", "JavaScript", "dependencies", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/javascript.py#L104-L135
train
16,966
edx/bok-choy
bok_choy/javascript.py
_are_js_vars_defined
def _are_js_vars_defined(browser, js_vars): """ Return a boolean indicating whether all the JavaScript variables `js_vars` are defined on the current page. `browser` is a Selenium webdriver instance. """ # This script will evaluate to True iff all of # the required vars are defined. script = u" && ".join([ u"!(typeof {0} === 'undefined')".format(var) for var in js_vars ]) try: return browser.execute_script(u"return {}".format(script)) except WebDriverException as exc: if "is not defined" in exc.msg or "is undefined" in exc.msg: return False else: raise
python
def _are_js_vars_defined(browser, js_vars): """ Return a boolean indicating whether all the JavaScript variables `js_vars` are defined on the current page. `browser` is a Selenium webdriver instance. """ # This script will evaluate to True iff all of # the required vars are defined. script = u" && ".join([ u"!(typeof {0} === 'undefined')".format(var) for var in js_vars ]) try: return browser.execute_script(u"return {}".format(script)) except WebDriverException as exc: if "is not defined" in exc.msg or "is undefined" in exc.msg: return False else: raise
[ "def", "_are_js_vars_defined", "(", "browser", ",", "js_vars", ")", ":", "# This script will evaluate to True iff all of", "# the required vars are defined.", "script", "=", "u\" && \"", ".", "join", "(", "[", "u\"!(typeof {0} === 'undefined')\"", ".", "format", "(", "var", ")", "for", "var", "in", "js_vars", "]", ")", "try", ":", "return", "browser", ".", "execute_script", "(", "u\"return {}\"", ".", "format", "(", "script", ")", ")", "except", "WebDriverException", "as", "exc", ":", "if", "\"is not defined\"", "in", "exc", ".", "msg", "or", "\"is undefined\"", "in", "exc", ".", "msg", ":", "return", "False", "else", ":", "raise" ]
Return a boolean indicating whether all the JavaScript variables `js_vars` are defined on the current page. `browser` is a Selenium webdriver instance.
[ "Return", "a", "boolean", "indicating", "whether", "all", "the", "JavaScript", "variables", "js_vars", "are", "defined", "on", "the", "current", "page", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/javascript.py#L138-L158
train
16,967
edx/bok-choy
bok_choy/javascript.py
_are_requirejs_deps_loaded
def _are_requirejs_deps_loaded(browser, deps): """ Return a boolean indicating whether all the RequireJS dependencies `deps` have loaded on the current page. `browser` is a WebDriver instance. """ # This is a little complicated # # We're going to use `execute_async_script` to give control to # the browser. The browser indicates that it wants to return # control to us by calling `callback`, which is the last item # in the global `arguments` array. # # We install a RequireJS module with the dependencies we want # to ensure are loaded. When our module loads, we return # control to the test suite. script = dedent(u""" // Retrieve the callback function used to return control to the test suite var callback = arguments[arguments.length - 1]; // If RequireJS isn't defined, then return immediately if (!window.require) {{ callback("RequireJS not defined"); }} // Otherwise, install a RequireJS module that depends on the modules // we're waiting for. else {{ // Catch errors reported by RequireJS requirejs.onError = callback; // Install our module require({deps}, function() {{ callback('Success'); }}); }} """).format(deps=json.dumps(list(deps))) # Set a timeout to ensure we get control back browser.set_script_timeout(30) # Give control to the browser # `result` will be the argument passed to the callback function try: result = browser.execute_async_script(script) return result == 'Success' except TimeoutException: return False
python
def _are_requirejs_deps_loaded(browser, deps): """ Return a boolean indicating whether all the RequireJS dependencies `deps` have loaded on the current page. `browser` is a WebDriver instance. """ # This is a little complicated # # We're going to use `execute_async_script` to give control to # the browser. The browser indicates that it wants to return # control to us by calling `callback`, which is the last item # in the global `arguments` array. # # We install a RequireJS module with the dependencies we want # to ensure are loaded. When our module loads, we return # control to the test suite. script = dedent(u""" // Retrieve the callback function used to return control to the test suite var callback = arguments[arguments.length - 1]; // If RequireJS isn't defined, then return immediately if (!window.require) {{ callback("RequireJS not defined"); }} // Otherwise, install a RequireJS module that depends on the modules // we're waiting for. else {{ // Catch errors reported by RequireJS requirejs.onError = callback; // Install our module require({deps}, function() {{ callback('Success'); }}); }} """).format(deps=json.dumps(list(deps))) # Set a timeout to ensure we get control back browser.set_script_timeout(30) # Give control to the browser # `result` will be the argument passed to the callback function try: result = browser.execute_async_script(script) return result == 'Success' except TimeoutException: return False
[ "def", "_are_requirejs_deps_loaded", "(", "browser", ",", "deps", ")", ":", "# This is a little complicated", "#", "# We're going to use `execute_async_script` to give control to", "# the browser. The browser indicates that it wants to return", "# control to us by calling `callback`, which is the last item", "# in the global `arguments` array.", "#", "# We install a RequireJS module with the dependencies we want", "# to ensure are loaded. When our module loads, we return", "# control to the test suite.", "script", "=", "dedent", "(", "u\"\"\"\n // Retrieve the callback function used to return control to the test suite\n var callback = arguments[arguments.length - 1];\n\n // If RequireJS isn't defined, then return immediately\n if (!window.require) {{\n callback(\"RequireJS not defined\");\n }}\n\n // Otherwise, install a RequireJS module that depends on the modules\n // we're waiting for.\n else {{\n\n // Catch errors reported by RequireJS\n requirejs.onError = callback;\n\n // Install our module\n require({deps}, function() {{\n callback('Success');\n }});\n }}\n \"\"\"", ")", ".", "format", "(", "deps", "=", "json", ".", "dumps", "(", "list", "(", "deps", ")", ")", ")", "# Set a timeout to ensure we get control back", "browser", ".", "set_script_timeout", "(", "30", ")", "# Give control to the browser", "# `result` will be the argument passed to the callback function", "try", ":", "result", "=", "browser", ".", "execute_async_script", "(", "script", ")", "return", "result", "==", "'Success'", "except", "TimeoutException", ":", "return", "False" ]
Return a boolean indicating whether all the RequireJS dependencies `deps` have loaded on the current page. `browser` is a WebDriver instance.
[ "Return", "a", "boolean", "indicating", "whether", "all", "the", "RequireJS", "dependencies", "deps", "have", "loaded", "on", "the", "current", "page", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/javascript.py#L161-L212
train
16,968
edx/bok-choy
bok_choy/page_object.py
no_selenium_errors
def no_selenium_errors(func): """ Decorator to create an `EmptyPromise` check function that is satisfied only when `func` executes without a Selenium error. This protects against many common test failures due to timing issues. For example, accessing an element after it has been modified by JavaScript ordinarily results in a `StaleElementException`. Methods decorated with `no_selenium_errors` will simply retry if that happens, which makes tests more robust. Args: func (callable): The function to execute, with retries if an error occurs. Returns: Decorated function """ def _inner(*args, **kwargs): # pylint: disable=missing-docstring try: return_val = func(*args, **kwargs) except WebDriverException: LOGGER.warning(u'Exception ignored during retry loop:', exc_info=True) return False else: return return_val return _inner
python
def no_selenium_errors(func): """ Decorator to create an `EmptyPromise` check function that is satisfied only when `func` executes without a Selenium error. This protects against many common test failures due to timing issues. For example, accessing an element after it has been modified by JavaScript ordinarily results in a `StaleElementException`. Methods decorated with `no_selenium_errors` will simply retry if that happens, which makes tests more robust. Args: func (callable): The function to execute, with retries if an error occurs. Returns: Decorated function """ def _inner(*args, **kwargs): # pylint: disable=missing-docstring try: return_val = func(*args, **kwargs) except WebDriverException: LOGGER.warning(u'Exception ignored during retry loop:', exc_info=True) return False else: return return_val return _inner
[ "def", "no_selenium_errors", "(", "func", ")", ":", "def", "_inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=missing-docstring", "try", ":", "return_val", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "WebDriverException", ":", "LOGGER", ".", "warning", "(", "u'Exception ignored during retry loop:'", ",", "exc_info", "=", "True", ")", "return", "False", "else", ":", "return", "return_val", "return", "_inner" ]
Decorator to create an `EmptyPromise` check function that is satisfied only when `func` executes without a Selenium error. This protects against many common test failures due to timing issues. For example, accessing an element after it has been modified by JavaScript ordinarily results in a `StaleElementException`. Methods decorated with `no_selenium_errors` will simply retry if that happens, which makes tests more robust. Args: func (callable): The function to execute, with retries if an error occurs. Returns: Decorated function
[ "Decorator", "to", "create", "an", "EmptyPromise", "check", "function", "that", "is", "satisfied", "only", "when", "func", "executes", "without", "a", "Selenium", "error", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/page_object.py#L64-L90
train
16,969
edx/bok-choy
bok_choy/a11y/axs_ruleset.py
AxsAuditConfig.set_rules
def set_rules(self, rules): """ Sets the rules to be run or ignored for the audit. Args: rules: a dictionary of the format `{"ignore": [], "apply": []}`. See https://github.com/GoogleChrome/accessibility-developer-tools/tree/master/src/audits Passing `{"apply": []}` or `{}` means to check for all available rules. Passing `{"apply": None}` means that no audit should be done for this page. Passing `{"ignore": []}` means to run all otherwise enabled rules. Any rules in the "ignore" list will be ignored even if they were also specified in the "apply". Examples: To check only `badAriaAttributeValue`:: page.a11y_audit.config.set_rules({ "apply": ['badAriaAttributeValue'] }) To check all rules except `badAriaAttributeValue`:: page.a11y_audit.config.set_rules({ "ignore": ['badAriaAttributeValue'], }) """ self.rules_to_ignore = rules.get("ignore", []) self.rules_to_run = rules.get("apply", [])
python
def set_rules(self, rules): """ Sets the rules to be run or ignored for the audit. Args: rules: a dictionary of the format `{"ignore": [], "apply": []}`. See https://github.com/GoogleChrome/accessibility-developer-tools/tree/master/src/audits Passing `{"apply": []}` or `{}` means to check for all available rules. Passing `{"apply": None}` means that no audit should be done for this page. Passing `{"ignore": []}` means to run all otherwise enabled rules. Any rules in the "ignore" list will be ignored even if they were also specified in the "apply". Examples: To check only `badAriaAttributeValue`:: page.a11y_audit.config.set_rules({ "apply": ['badAriaAttributeValue'] }) To check all rules except `badAriaAttributeValue`:: page.a11y_audit.config.set_rules({ "ignore": ['badAriaAttributeValue'], }) """ self.rules_to_ignore = rules.get("ignore", []) self.rules_to_run = rules.get("apply", [])
[ "def", "set_rules", "(", "self", ",", "rules", ")", ":", "self", ".", "rules_to_ignore", "=", "rules", ".", "get", "(", "\"ignore\"", ",", "[", "]", ")", "self", ".", "rules_to_run", "=", "rules", ".", "get", "(", "\"apply\"", ",", "[", "]", ")" ]
Sets the rules to be run or ignored for the audit. Args: rules: a dictionary of the format `{"ignore": [], "apply": []}`. See https://github.com/GoogleChrome/accessibility-developer-tools/tree/master/src/audits Passing `{"apply": []}` or `{}` means to check for all available rules. Passing `{"apply": None}` means that no audit should be done for this page. Passing `{"ignore": []}` means to run all otherwise enabled rules. Any rules in the "ignore" list will be ignored even if they were also specified in the "apply". Examples: To check only `badAriaAttributeValue`:: page.a11y_audit.config.set_rules({ "apply": ['badAriaAttributeValue'] }) To check all rules except `badAriaAttributeValue`:: page.a11y_audit.config.set_rules({ "ignore": ['badAriaAttributeValue'], })
[ "Sets", "the", "rules", "to", "be", "run", "or", "ignored", "for", "the", "audit", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axs_ruleset.py#L36-L69
train
16,970
edx/bok-choy
bok_choy/a11y/axs_ruleset.py
AxsAuditConfig.set_scope
def set_scope(self, include=None, exclude=None): """ Sets `scope`, the "start point" for the audit. Args: include: A list of css selectors specifying the elements that contain the portion of the page that should be audited. Defaults to auditing the entire document. exclude: This arg is not implemented in this ruleset. Examples: To check only the `div` with id `foo`:: page.a11y_audit.config.set_scope(["div#foo"]) To reset the scope to check the whole document:: page.a11y_audit.config.set_scope() """ if include: self.scope = u"document.querySelector(\"{}\")".format( u', '.join(include) ) else: self.scope = "null" if exclude is not None: raise NotImplementedError( "The argument `exclude` has not been implemented in " "AxsAuditConfig.set_scope method." )
python
def set_scope(self, include=None, exclude=None): """ Sets `scope`, the "start point" for the audit. Args: include: A list of css selectors specifying the elements that contain the portion of the page that should be audited. Defaults to auditing the entire document. exclude: This arg is not implemented in this ruleset. Examples: To check only the `div` with id `foo`:: page.a11y_audit.config.set_scope(["div#foo"]) To reset the scope to check the whole document:: page.a11y_audit.config.set_scope() """ if include: self.scope = u"document.querySelector(\"{}\")".format( u', '.join(include) ) else: self.scope = "null" if exclude is not None: raise NotImplementedError( "The argument `exclude` has not been implemented in " "AxsAuditConfig.set_scope method." )
[ "def", "set_scope", "(", "self", ",", "include", "=", "None", ",", "exclude", "=", "None", ")", ":", "if", "include", ":", "self", ".", "scope", "=", "u\"document.querySelector(\\\"{}\\\")\"", ".", "format", "(", "u', '", ".", "join", "(", "include", ")", ")", "else", ":", "self", ".", "scope", "=", "\"null\"", "if", "exclude", "is", "not", "None", ":", "raise", "NotImplementedError", "(", "\"The argument `exclude` has not been implemented in \"", "\"AxsAuditConfig.set_scope method.\"", ")" ]
Sets `scope`, the "start point" for the audit. Args: include: A list of css selectors specifying the elements that contain the portion of the page that should be audited. Defaults to auditing the entire document. exclude: This arg is not implemented in this ruleset. Examples: To check only the `div` with id `foo`:: page.a11y_audit.config.set_scope(["div#foo"]) To reset the scope to check the whole document:: page.a11y_audit.config.set_scope()
[ "Sets", "scope", "the", "start", "point", "for", "the", "audit", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axs_ruleset.py#L71-L103
train
16,971
edx/bok-choy
bok_choy/a11y/axs_ruleset.py
AxsAudit._check_rules
def _check_rules(browser, rules_js, config): """ Check the page for violations of the configured rules. By default, all rules in the ruleset will be checked. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A namedtuple with 'errors' and 'warnings' fields whose values are the errors and warnings returned from the audit. None if config has rules_to_run set to None. __Caution__: You probably don't really want to call this method directly! It will be used by `A11yAudit.do_audit` if using this ruleset. """ if config.rules_to_run is None: msg = 'No accessibility rules were specified to check.' log.warning(msg) return None # This line will only be included in the script if rules to check on # this page are specified, as the default behavior of the js is to # run all rules. rules = config.rules_to_run if rules: rules_config = u"auditConfig.auditRulesToRun = {rules};".format( rules=rules) else: rules_config = "" ignored_rules = config.rules_to_ignore if ignored_rules: rules_config += ( u"\nauditConfig.auditRulesToIgnore = {rules};".format( rules=ignored_rules ) ) script = dedent(u""" {rules_js} var auditConfig = new axs.AuditConfiguration(); {rules_config} auditConfig.scope = {scope}; var run_results = axs.Audit.run(auditConfig); var audit_results = axs.Audit.auditResults(run_results) return audit_results; """.format(rules_js=rules_js, rules_config=rules_config, scope=config.scope)) result = browser.execute_script(script) # audit_results is report of accessibility errors for that session audit_results = AuditResults( errors=result.get('errors_'), warnings=result.get('warnings_') ) return audit_results
python
def _check_rules(browser, rules_js, config): """ Check the page for violations of the configured rules. By default, all rules in the ruleset will be checked. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A namedtuple with 'errors' and 'warnings' fields whose values are the errors and warnings returned from the audit. None if config has rules_to_run set to None. __Caution__: You probably don't really want to call this method directly! It will be used by `A11yAudit.do_audit` if using this ruleset. """ if config.rules_to_run is None: msg = 'No accessibility rules were specified to check.' log.warning(msg) return None # This line will only be included in the script if rules to check on # this page are specified, as the default behavior of the js is to # run all rules. rules = config.rules_to_run if rules: rules_config = u"auditConfig.auditRulesToRun = {rules};".format( rules=rules) else: rules_config = "" ignored_rules = config.rules_to_ignore if ignored_rules: rules_config += ( u"\nauditConfig.auditRulesToIgnore = {rules};".format( rules=ignored_rules ) ) script = dedent(u""" {rules_js} var auditConfig = new axs.AuditConfiguration(); {rules_config} auditConfig.scope = {scope}; var run_results = axs.Audit.run(auditConfig); var audit_results = axs.Audit.auditResults(run_results) return audit_results; """.format(rules_js=rules_js, rules_config=rules_config, scope=config.scope)) result = browser.execute_script(script) # audit_results is report of accessibility errors for that session audit_results = AuditResults( errors=result.get('errors_'), warnings=result.get('warnings_') ) return audit_results
[ "def", "_check_rules", "(", "browser", ",", "rules_js", ",", "config", ")", ":", "if", "config", ".", "rules_to_run", "is", "None", ":", "msg", "=", "'No accessibility rules were specified to check.'", "log", ".", "warning", "(", "msg", ")", "return", "None", "# This line will only be included in the script if rules to check on", "# this page are specified, as the default behavior of the js is to", "# run all rules.", "rules", "=", "config", ".", "rules_to_run", "if", "rules", ":", "rules_config", "=", "u\"auditConfig.auditRulesToRun = {rules};\"", ".", "format", "(", "rules", "=", "rules", ")", "else", ":", "rules_config", "=", "\"\"", "ignored_rules", "=", "config", ".", "rules_to_ignore", "if", "ignored_rules", ":", "rules_config", "+=", "(", "u\"\\nauditConfig.auditRulesToIgnore = {rules};\"", ".", "format", "(", "rules", "=", "ignored_rules", ")", ")", "script", "=", "dedent", "(", "u\"\"\"\n {rules_js}\n var auditConfig = new axs.AuditConfiguration();\n {rules_config}\n auditConfig.scope = {scope};\n var run_results = axs.Audit.run(auditConfig);\n var audit_results = axs.Audit.auditResults(run_results)\n return audit_results;\n \"\"\"", ".", "format", "(", "rules_js", "=", "rules_js", ",", "rules_config", "=", "rules_config", ",", "scope", "=", "config", ".", "scope", ")", ")", "result", "=", "browser", ".", "execute_script", "(", "script", ")", "# audit_results is report of accessibility errors for that session", "audit_results", "=", "AuditResults", "(", "errors", "=", "result", ".", "get", "(", "'errors_'", ")", ",", "warnings", "=", "result", ".", "get", "(", "'warnings_'", ")", ")", "return", "audit_results" ]
Check the page for violations of the configured rules. By default, all rules in the ruleset will be checked. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A namedtuple with 'errors' and 'warnings' fields whose values are the errors and warnings returned from the audit. None if config has rules_to_run set to None. __Caution__: You probably don't really want to call this method directly! It will be used by `A11yAudit.do_audit` if using this ruleset.
[ "Check", "the", "page", "for", "violations", "of", "the", "configured", "rules", ".", "By", "default", "all", "rules", "in", "the", "ruleset", "will", "be", "checked", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axs_ruleset.py#L134-L193
train
16,972
edx/bok-choy
bok_choy/promise.py
Promise.fulfill
def fulfill(self): """ Evaluate the promise and return the result. Returns: The result of the `Promise` (second return value from the `check_func`) Raises: BrokenPromise: the `Promise` was not satisfied within the time or attempt limits. """ is_fulfilled, result = self._check_fulfilled() if is_fulfilled: return result else: raise BrokenPromise(self)
python
def fulfill(self): """ Evaluate the promise and return the result. Returns: The result of the `Promise` (second return value from the `check_func`) Raises: BrokenPromise: the `Promise` was not satisfied within the time or attempt limits. """ is_fulfilled, result = self._check_fulfilled() if is_fulfilled: return result else: raise BrokenPromise(self)
[ "def", "fulfill", "(", "self", ")", ":", "is_fulfilled", ",", "result", "=", "self", ".", "_check_fulfilled", "(", ")", "if", "is_fulfilled", ":", "return", "result", "else", ":", "raise", "BrokenPromise", "(", "self", ")" ]
Evaluate the promise and return the result. Returns: The result of the `Promise` (second return value from the `check_func`) Raises: BrokenPromise: the `Promise` was not satisfied within the time or attempt limits.
[ "Evaluate", "the", "promise", "and", "return", "the", "result", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/promise.py#L91-L106
train
16,973
edx/bok-choy
docs/code/round_3/pages.py
GitHubSearchPage.search
def search(self): """ Click on the Search button and wait for the results page to be displayed """ self.q(css='button.btn').click() GitHubSearchResultsPage(self.browser).wait_for_page()
python
def search(self): """ Click on the Search button and wait for the results page to be displayed """ self.q(css='button.btn').click() GitHubSearchResultsPage(self.browser).wait_for_page()
[ "def", "search", "(", "self", ")", ":", "self", ".", "q", "(", "css", "=", "'button.btn'", ")", ".", "click", "(", ")", "GitHubSearchResultsPage", "(", "self", ".", "browser", ")", ".", "wait_for_page", "(", ")" ]
Click on the Search button and wait for the results page to be displayed
[ "Click", "on", "the", "Search", "button", "and", "wait", "for", "the", "results", "page", "to", "be", "displayed" ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/docs/code/round_3/pages.py#L43-L49
train
16,974
edx/bok-choy
bok_choy/a11y/axe_core_ruleset.py
AxeCoreAuditConfig.set_rules
def set_rules(self, rules): """ Set rules to ignore XOR limit to when checking for accessibility errors on the page. Args: rules: a dictionary one of the following formats. If you want to run all of the rules except for some:: {"ignore": []} If you want to run only a specific set of rules:: {"apply": []} If you want to run only rules of a specific standard:: {"tags": []} Examples: To run only "bad-link" and "color-contrast" rules:: page.a11y_audit.config.set_rules({ "apply": ["bad-link", "color-contrast"], }) To run all rules except for "bad-link" and "color-contrast":: page.a11y_audit.config.set_rules({ "ignore": ["bad-link", "color-contrast"], }) To run only WCAG 2.0 Level A rules:: page.a11y_audit.config.set_rules({ "tags": ["wcag2a"], }) To run all rules: page.a11y_audit.config.set_rules({}) Related documentation: * https://github.com/dequelabs/axe-core/blob/master/doc/API.md#options-parameter-examples * https://github.com/dequelabs/axe-core/doc/rule-descriptions.md """ options = {} if rules: if rules.get("ignore"): options["rules"] = {} for rule in rules.get("ignore"): options["rules"][rule] = {"enabled": False} elif rules.get("apply"): options["runOnly"] = { "type": "rule", "values": rules.get("apply"), } elif rules.get("tags"): options["runOnly"] = { "type": "tag", "values": rules.get("tags"), } self.rules = json.dumps(options)
python
def set_rules(self, rules): """ Set rules to ignore XOR limit to when checking for accessibility errors on the page. Args: rules: a dictionary one of the following formats. If you want to run all of the rules except for some:: {"ignore": []} If you want to run only a specific set of rules:: {"apply": []} If you want to run only rules of a specific standard:: {"tags": []} Examples: To run only "bad-link" and "color-contrast" rules:: page.a11y_audit.config.set_rules({ "apply": ["bad-link", "color-contrast"], }) To run all rules except for "bad-link" and "color-contrast":: page.a11y_audit.config.set_rules({ "ignore": ["bad-link", "color-contrast"], }) To run only WCAG 2.0 Level A rules:: page.a11y_audit.config.set_rules({ "tags": ["wcag2a"], }) To run all rules: page.a11y_audit.config.set_rules({}) Related documentation: * https://github.com/dequelabs/axe-core/blob/master/doc/API.md#options-parameter-examples * https://github.com/dequelabs/axe-core/doc/rule-descriptions.md """ options = {} if rules: if rules.get("ignore"): options["rules"] = {} for rule in rules.get("ignore"): options["rules"][rule] = {"enabled": False} elif rules.get("apply"): options["runOnly"] = { "type": "rule", "values": rules.get("apply"), } elif rules.get("tags"): options["runOnly"] = { "type": "tag", "values": rules.get("tags"), } self.rules = json.dumps(options)
[ "def", "set_rules", "(", "self", ",", "rules", ")", ":", "options", "=", "{", "}", "if", "rules", ":", "if", "rules", ".", "get", "(", "\"ignore\"", ")", ":", "options", "[", "\"rules\"", "]", "=", "{", "}", "for", "rule", "in", "rules", ".", "get", "(", "\"ignore\"", ")", ":", "options", "[", "\"rules\"", "]", "[", "rule", "]", "=", "{", "\"enabled\"", ":", "False", "}", "elif", "rules", ".", "get", "(", "\"apply\"", ")", ":", "options", "[", "\"runOnly\"", "]", "=", "{", "\"type\"", ":", "\"rule\"", ",", "\"values\"", ":", "rules", ".", "get", "(", "\"apply\"", ")", ",", "}", "elif", "rules", ".", "get", "(", "\"tags\"", ")", ":", "options", "[", "\"runOnly\"", "]", "=", "{", "\"type\"", ":", "\"tag\"", ",", "\"values\"", ":", "rules", ".", "get", "(", "\"tags\"", ")", ",", "}", "self", ".", "rules", "=", "json", ".", "dumps", "(", "options", ")" ]
Set rules to ignore XOR limit to when checking for accessibility errors on the page. Args: rules: a dictionary one of the following formats. If you want to run all of the rules except for some:: {"ignore": []} If you want to run only a specific set of rules:: {"apply": []} If you want to run only rules of a specific standard:: {"tags": []} Examples: To run only "bad-link" and "color-contrast" rules:: page.a11y_audit.config.set_rules({ "apply": ["bad-link", "color-contrast"], }) To run all rules except for "bad-link" and "color-contrast":: page.a11y_audit.config.set_rules({ "ignore": ["bad-link", "color-contrast"], }) To run only WCAG 2.0 Level A rules:: page.a11y_audit.config.set_rules({ "tags": ["wcag2a"], }) To run all rules: page.a11y_audit.config.set_rules({}) Related documentation: * https://github.com/dequelabs/axe-core/blob/master/doc/API.md#options-parameter-examples * https://github.com/dequelabs/axe-core/doc/rule-descriptions.md
[ "Set", "rules", "to", "ignore", "XOR", "limit", "to", "when", "checking", "for", "accessibility", "errors", "on", "the", "page", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axe_core_ruleset.py#L37-L101
train
16,975
edx/bok-choy
bok_choy/a11y/axe_core_ruleset.py
AxeCoreAuditConfig.customize_ruleset
def customize_ruleset(self, custom_ruleset_file=None): """ Updates the ruleset to include a set of custom rules. These rules will be _added_ to the existing ruleset or replace the existing rule with the same ID. Args: custom_ruleset_file (optional): The filepath to the custom rules. Defaults to `None`. If `custom_ruleset_file` isn't passed, the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be checked. If a filepath isn't specified by either of these methods, the ruleset will not be updated. Raises: `IOError` if the specified file does not exist. Examples: To include the rules defined in `axe-core-custom-rules.js`:: page.a11y_audit.config.customize_ruleset( "axe-core-custom-rules.js" ) Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` to specify the path to the file containing the custom rules. Documentation for how to write rules: https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md An example of a custom rules file can be found at https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js """ custom_file = custom_ruleset_file or os.environ.get( "BOKCHOY_A11Y_CUSTOM_RULES_FILE" ) if not custom_file: return with open(custom_file, "r") as additional_rules: custom_rules = additional_rules.read() if "var customRules" not in custom_rules: raise A11yAuditConfigError( "Custom rules file must include \"var customRules\"" ) self.custom_rules = custom_rules
python
def customize_ruleset(self, custom_ruleset_file=None): """ Updates the ruleset to include a set of custom rules. These rules will be _added_ to the existing ruleset or replace the existing rule with the same ID. Args: custom_ruleset_file (optional): The filepath to the custom rules. Defaults to `None`. If `custom_ruleset_file` isn't passed, the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be checked. If a filepath isn't specified by either of these methods, the ruleset will not be updated. Raises: `IOError` if the specified file does not exist. Examples: To include the rules defined in `axe-core-custom-rules.js`:: page.a11y_audit.config.customize_ruleset( "axe-core-custom-rules.js" ) Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` to specify the path to the file containing the custom rules. Documentation for how to write rules: https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md An example of a custom rules file can be found at https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js """ custom_file = custom_ruleset_file or os.environ.get( "BOKCHOY_A11Y_CUSTOM_RULES_FILE" ) if not custom_file: return with open(custom_file, "r") as additional_rules: custom_rules = additional_rules.read() if "var customRules" not in custom_rules: raise A11yAuditConfigError( "Custom rules file must include \"var customRules\"" ) self.custom_rules = custom_rules
[ "def", "customize_ruleset", "(", "self", ",", "custom_ruleset_file", "=", "None", ")", ":", "custom_file", "=", "custom_ruleset_file", "or", "os", ".", "environ", ".", "get", "(", "\"BOKCHOY_A11Y_CUSTOM_RULES_FILE\"", ")", "if", "not", "custom_file", ":", "return", "with", "open", "(", "custom_file", ",", "\"r\"", ")", "as", "additional_rules", ":", "custom_rules", "=", "additional_rules", ".", "read", "(", ")", "if", "\"var customRules\"", "not", "in", "custom_rules", ":", "raise", "A11yAuditConfigError", "(", "\"Custom rules file must include \\\"var customRules\\\"\"", ")", "self", ".", "custom_rules", "=", "custom_rules" ]
Updates the ruleset to include a set of custom rules. These rules will be _added_ to the existing ruleset or replace the existing rule with the same ID. Args: custom_ruleset_file (optional): The filepath to the custom rules. Defaults to `None`. If `custom_ruleset_file` isn't passed, the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be checked. If a filepath isn't specified by either of these methods, the ruleset will not be updated. Raises: `IOError` if the specified file does not exist. Examples: To include the rules defined in `axe-core-custom-rules.js`:: page.a11y_audit.config.customize_ruleset( "axe-core-custom-rules.js" ) Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` to specify the path to the file containing the custom rules. Documentation for how to write rules: https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md An example of a custom rules file can be found at https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js
[ "Updates", "the", "ruleset", "to", "include", "a", "set", "of", "custom", "rules", ".", "These", "rules", "will", "be", "_added_", "to", "the", "existing", "ruleset", "or", "replace", "the", "existing", "rule", "with", "the", "same", "ID", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axe_core_ruleset.py#L156-L207
train
16,976
edx/bok-choy
bok_choy/a11y/axe_core_ruleset.py
AxeCoreAudit._check_rules
def _check_rules(browser, rules_js, config): """ Run an accessibility audit on the page using the axe-core ruleset. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A list of violations. Related documentation: https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object __Caution__: You probably don't really want to call this method directly! It will be used by `AxeCoreAudit.do_audit`. """ audit_run_script = dedent(u""" {rules_js} {custom_rules} axe.configure(customRules); var callback = function(err, results) {{ if (err) throw err; window.a11yAuditResults = JSON.stringify(results); window.console.log(window.a11yAuditResults); }} axe.run({context}, {options}, callback); """).format( rules_js=rules_js, custom_rules=config.custom_rules, context=config.context, options=config.rules ) audit_results_script = dedent(u""" window.console.log(window.a11yAuditResults); return window.a11yAuditResults; """) browser.execute_script(audit_run_script) def audit_results_check_func(): """ A method to check that the audit has completed. Returns: (True, results) if the results are available. (False, None) if the results aren't available. """ unicode_results = browser.execute_script(audit_results_script) try: results = json.loads(unicode_results) except (TypeError, ValueError): results = None if results: return True, results return False, None result = Promise( audit_results_check_func, "Timed out waiting for a11y audit results.", timeout=5, ).fulfill() # audit_results is report of accessibility violations for that session # Note that this ruleset doesn't have distinct error/warning levels. audit_results = result.get('violations') return audit_results
python
def _check_rules(browser, rules_js, config): """ Run an accessibility audit on the page using the axe-core ruleset. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A list of violations. Related documentation: https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object __Caution__: You probably don't really want to call this method directly! It will be used by `AxeCoreAudit.do_audit`. """ audit_run_script = dedent(u""" {rules_js} {custom_rules} axe.configure(customRules); var callback = function(err, results) {{ if (err) throw err; window.a11yAuditResults = JSON.stringify(results); window.console.log(window.a11yAuditResults); }} axe.run({context}, {options}, callback); """).format( rules_js=rules_js, custom_rules=config.custom_rules, context=config.context, options=config.rules ) audit_results_script = dedent(u""" window.console.log(window.a11yAuditResults); return window.a11yAuditResults; """) browser.execute_script(audit_run_script) def audit_results_check_func(): """ A method to check that the audit has completed. Returns: (True, results) if the results are available. (False, None) if the results aren't available. """ unicode_results = browser.execute_script(audit_results_script) try: results = json.loads(unicode_results) except (TypeError, ValueError): results = None if results: return True, results return False, None result = Promise( audit_results_check_func, "Timed out waiting for a11y audit results.", timeout=5, ).fulfill() # audit_results is report of accessibility violations for that session # Note that this ruleset doesn't have distinct error/warning levels. audit_results = result.get('violations') return audit_results
[ "def", "_check_rules", "(", "browser", ",", "rules_js", ",", "config", ")", ":", "audit_run_script", "=", "dedent", "(", "u\"\"\"\n {rules_js}\n {custom_rules}\n axe.configure(customRules);\n var callback = function(err, results) {{\n if (err) throw err;\n window.a11yAuditResults = JSON.stringify(results);\n window.console.log(window.a11yAuditResults);\n }}\n axe.run({context}, {options}, callback);\n \"\"\"", ")", ".", "format", "(", "rules_js", "=", "rules_js", ",", "custom_rules", "=", "config", ".", "custom_rules", ",", "context", "=", "config", ".", "context", ",", "options", "=", "config", ".", "rules", ")", "audit_results_script", "=", "dedent", "(", "u\"\"\"\n window.console.log(window.a11yAuditResults);\n return window.a11yAuditResults;\n \"\"\"", ")", "browser", ".", "execute_script", "(", "audit_run_script", ")", "def", "audit_results_check_func", "(", ")", ":", "\"\"\"\n A method to check that the audit has completed.\n\n Returns:\n\n (True, results) if the results are available.\n (False, None) if the results aren't available.\n \"\"\"", "unicode_results", "=", "browser", ".", "execute_script", "(", "audit_results_script", ")", "try", ":", "results", "=", "json", ".", "loads", "(", "unicode_results", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "results", "=", "None", "if", "results", ":", "return", "True", ",", "results", "return", "False", ",", "None", "result", "=", "Promise", "(", "audit_results_check_func", ",", "\"Timed out waiting for a11y audit results.\"", ",", "timeout", "=", "5", ",", ")", ".", "fulfill", "(", ")", "# audit_results is report of accessibility violations for that session", "# Note that this ruleset doesn't have distinct error/warning levels.", "audit_results", "=", "result", ".", "get", "(", "'violations'", ")", "return", "audit_results" ]
Run an accessibility audit on the page using the axe-core ruleset. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A list of violations. Related documentation: https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object __Caution__: You probably don't really want to call this method directly! It will be used by `AxeCoreAudit.do_audit`.
[ "Run", "an", "accessibility", "audit", "on", "the", "page", "using", "the", "axe", "-", "core", "ruleset", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axe_core_ruleset.py#L227-L300
train
16,977
edx/bok-choy
bok_choy/browser.py
save_source
def save_source(driver, name): """ Save the rendered HTML of the browser. The location of the source can be configured by the environment variable `SAVED_SOURCE_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name to use in the output file name. Note that ".html" is appended automatically Returns: None """ source = driver.page_source file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR'), '{name}.html'.format(name=name)) try: with open(file_name, 'wb') as output_file: output_file.write(source.encode('utf-8')) except Exception: # pylint: disable=broad-except msg = u"Could not save the browser page source to {}.".format(file_name) LOGGER.warning(msg)
python
def save_source(driver, name): """ Save the rendered HTML of the browser. The location of the source can be configured by the environment variable `SAVED_SOURCE_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name to use in the output file name. Note that ".html" is appended automatically Returns: None """ source = driver.page_source file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR'), '{name}.html'.format(name=name)) try: with open(file_name, 'wb') as output_file: output_file.write(source.encode('utf-8')) except Exception: # pylint: disable=broad-except msg = u"Could not save the browser page source to {}.".format(file_name) LOGGER.warning(msg)
[ "def", "save_source", "(", "driver", ",", "name", ")", ":", "source", "=", "driver", ".", "page_source", "file_name", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", ".", "get", "(", "'SAVED_SOURCE_DIR'", ")", ",", "'{name}.html'", ".", "format", "(", "name", "=", "name", ")", ")", "try", ":", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "output_file", ":", "output_file", ".", "write", "(", "source", ".", "encode", "(", "'utf-8'", ")", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "msg", "=", "u\"Could not save the browser page source to {}.\"", ".", "format", "(", "file_name", ")", "LOGGER", ".", "warning", "(", "msg", ")" ]
Save the rendered HTML of the browser. The location of the source can be configured by the environment variable `SAVED_SOURCE_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name to use in the output file name. Note that ".html" is appended automatically Returns: None
[ "Save", "the", "rendered", "HTML", "of", "the", "browser", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L79-L104
train
16,978
edx/bok-choy
bok_choy/browser.py
save_screenshot
def save_screenshot(driver, name): """ Save a screenshot of the browser. The location of the screenshot can be configured by the environment variable `SCREENSHOT_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name for the screenshot, which will be used in the output file name. Returns: None """ if hasattr(driver, 'save_screenshot'): screenshot_dir = os.environ.get('SCREENSHOT_DIR') if not screenshot_dir: LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot') return elif not os.path.exists(screenshot_dir): os.makedirs(screenshot_dir) image_name = os.path.join(screenshot_dir, name + '.png') driver.save_screenshot(image_name) else: msg = ( u"Browser does not support screenshots. " u"Could not save screenshot '{name}'" ).format(name=name) LOGGER.warning(msg)
python
def save_screenshot(driver, name): """ Save a screenshot of the browser. The location of the screenshot can be configured by the environment variable `SCREENSHOT_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name for the screenshot, which will be used in the output file name. Returns: None """ if hasattr(driver, 'save_screenshot'): screenshot_dir = os.environ.get('SCREENSHOT_DIR') if not screenshot_dir: LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot') return elif not os.path.exists(screenshot_dir): os.makedirs(screenshot_dir) image_name = os.path.join(screenshot_dir, name + '.png') driver.save_screenshot(image_name) else: msg = ( u"Browser does not support screenshots. " u"Could not save screenshot '{name}'" ).format(name=name) LOGGER.warning(msg)
[ "def", "save_screenshot", "(", "driver", ",", "name", ")", ":", "if", "hasattr", "(", "driver", ",", "'save_screenshot'", ")", ":", "screenshot_dir", "=", "os", ".", "environ", ".", "get", "(", "'SCREENSHOT_DIR'", ")", "if", "not", "screenshot_dir", ":", "LOGGER", ".", "warning", "(", "'The SCREENSHOT_DIR environment variable was not set; not saving a screenshot'", ")", "return", "elif", "not", "os", ".", "path", ".", "exists", "(", "screenshot_dir", ")", ":", "os", ".", "makedirs", "(", "screenshot_dir", ")", "image_name", "=", "os", ".", "path", ".", "join", "(", "screenshot_dir", ",", "name", "+", "'.png'", ")", "driver", ".", "save_screenshot", "(", "image_name", ")", "else", ":", "msg", "=", "(", "u\"Browser does not support screenshots. \"", "u\"Could not save screenshot '{name}'\"", ")", ".", "format", "(", "name", "=", "name", ")", "LOGGER", ".", "warning", "(", "msg", ")" ]
Save a screenshot of the browser. The location of the screenshot can be configured by the environment variable `SCREENSHOT_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name for the screenshot, which will be used in the output file name. Returns: None
[ "Save", "a", "screenshot", "of", "the", "browser", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L107-L138
train
16,979
edx/bok-choy
bok_choy/browser.py
save_driver_logs
def save_driver_logs(driver, prefix): """ Save the selenium driver logs. The location of the driver log files can be configured by the environment variable `SELENIUM_DRIVER_LOG_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. prefix (str): A prefix which will be used in the output file names for the logs. Returns: None """ browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox') log_dir = os.environ.get('SELENIUM_DRIVER_LOG_DIR') if not log_dir: LOGGER.warning('The SELENIUM_DRIVER_LOG_DIR environment variable was not set; not saving logs') return elif not os.path.exists(log_dir): os.makedirs(log_dir) if browser_name == 'firefox': # Firefox doesn't yet provide logs to Selenium, but does log to a separate file # https://github.com/mozilla/geckodriver/issues/284 # https://firefox-source-docs.mozilla.org/testing/geckodriver/geckodriver/TraceLogs.html log_path = os.path.join(os.getcwd(), 'geckodriver.log') if os.path.exists(log_path): dest_path = os.path.join(log_dir, '{}_geckodriver.log'.format(prefix)) copyfile(log_path, dest_path) return log_types = driver.log_types for log_type in log_types: try: log = driver.get_log(log_type) file_name = os.path.join( log_dir, '{}_{}.log'.format(prefix, log_type) ) with open(file_name, 'w') as output_file: for line in log: output_file.write("{}{}".format(dumps(line), '\n')) except: # pylint: disable=bare-except msg = ( u"Could not save browser log of type '{log_type}'. " u"It may be that the browser does not support it." ).format(log_type=log_type) LOGGER.warning(msg, exc_info=True)
python
def save_driver_logs(driver, prefix): """ Save the selenium driver logs. The location of the driver log files can be configured by the environment variable `SELENIUM_DRIVER_LOG_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. prefix (str): A prefix which will be used in the output file names for the logs. Returns: None """ browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox') log_dir = os.environ.get('SELENIUM_DRIVER_LOG_DIR') if not log_dir: LOGGER.warning('The SELENIUM_DRIVER_LOG_DIR environment variable was not set; not saving logs') return elif not os.path.exists(log_dir): os.makedirs(log_dir) if browser_name == 'firefox': # Firefox doesn't yet provide logs to Selenium, but does log to a separate file # https://github.com/mozilla/geckodriver/issues/284 # https://firefox-source-docs.mozilla.org/testing/geckodriver/geckodriver/TraceLogs.html log_path = os.path.join(os.getcwd(), 'geckodriver.log') if os.path.exists(log_path): dest_path = os.path.join(log_dir, '{}_geckodriver.log'.format(prefix)) copyfile(log_path, dest_path) return log_types = driver.log_types for log_type in log_types: try: log = driver.get_log(log_type) file_name = os.path.join( log_dir, '{}_{}.log'.format(prefix, log_type) ) with open(file_name, 'w') as output_file: for line in log: output_file.write("{}{}".format(dumps(line), '\n')) except: # pylint: disable=bare-except msg = ( u"Could not save browser log of type '{log_type}'. " u"It may be that the browser does not support it." ).format(log_type=log_type) LOGGER.warning(msg, exc_info=True)
[ "def", "save_driver_logs", "(", "driver", ",", "prefix", ")", ":", "browser_name", "=", "os", ".", "environ", ".", "get", "(", "'SELENIUM_BROWSER'", ",", "'firefox'", ")", "log_dir", "=", "os", ".", "environ", ".", "get", "(", "'SELENIUM_DRIVER_LOG_DIR'", ")", "if", "not", "log_dir", ":", "LOGGER", ".", "warning", "(", "'The SELENIUM_DRIVER_LOG_DIR environment variable was not set; not saving logs'", ")", "return", "elif", "not", "os", ".", "path", ".", "exists", "(", "log_dir", ")", ":", "os", ".", "makedirs", "(", "log_dir", ")", "if", "browser_name", "==", "'firefox'", ":", "# Firefox doesn't yet provide logs to Selenium, but does log to a separate file", "# https://github.com/mozilla/geckodriver/issues/284", "# https://firefox-source-docs.mozilla.org/testing/geckodriver/geckodriver/TraceLogs.html", "log_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'geckodriver.log'", ")", "if", "os", ".", "path", ".", "exists", "(", "log_path", ")", ":", "dest_path", "=", "os", ".", "path", ".", "join", "(", "log_dir", ",", "'{}_geckodriver.log'", ".", "format", "(", "prefix", ")", ")", "copyfile", "(", "log_path", ",", "dest_path", ")", "return", "log_types", "=", "driver", ".", "log_types", "for", "log_type", "in", "log_types", ":", "try", ":", "log", "=", "driver", ".", "get_log", "(", "log_type", ")", "file_name", "=", "os", ".", "path", ".", "join", "(", "log_dir", ",", "'{}_{}.log'", ".", "format", "(", "prefix", ",", "log_type", ")", ")", "with", "open", "(", "file_name", ",", "'w'", ")", "as", "output_file", ":", "for", "line", "in", "log", ":", "output_file", ".", "write", "(", "\"{}{}\"", ".", "format", "(", "dumps", "(", "line", ")", ",", "'\\n'", ")", ")", "except", ":", "# pylint: disable=bare-except", "msg", "=", "(", "u\"Could not save browser log of type '{log_type}'. 
\"", "u\"It may be that the browser does not support it.\"", ")", ".", "format", "(", "log_type", "=", "log_type", ")", "LOGGER", ".", "warning", "(", "msg", ",", "exc_info", "=", "True", ")" ]
Save the selenium driver logs. The location of the driver log files can be configured by the environment variable `SELENIUM_DRIVER_LOG_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. prefix (str): A prefix which will be used in the output file names for the logs. Returns: None
[ "Save", "the", "selenium", "driver", "logs", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L141-L189
train
16,980
edx/bok-choy
bok_choy/browser.py
browser
def browser(tags=None, proxy=None, other_caps=None): """ Interpret environment variables to configure Selenium. Performs validation, logging, and sensible defaults. There are three cases: 1. Local browsers: If the proper environment variables are not all set for the second case, then we use a local browser. * The environment variable `SELENIUM_BROWSER` can be set to specify which local browser to use. The default is \ Firefox. * Additionally, if a proxy instance is passed and the browser choice is either Chrome or Firefox, then the \ browser will be initialized with the proxy server set. * The environment variable `SELENIUM_FIREFOX_PATH` can be used for specifying a path to the Firefox binary. \ Default behavior is to use the system location. * The environment variable `FIREFOX_PROFILE_PATH` can be used for specifying a path to the Firefox profile. \ Default behavior is to use a barebones default profile with a few useful preferences set. 2. Remote browser (not SauceLabs): Set all of the following environment variables, but not all of the ones needed for SauceLabs: * SELENIUM_BROWSER * SELENIUM_HOST * SELENIUM_PORT 3. SauceLabs: Set all of the following environment variables: * SELENIUM_BROWSER * SELENIUM_VERSION * SELENIUM_PLATFORM * SELENIUM_HOST * SELENIUM_PORT * SAUCE_USER_NAME * SAUCE_API_KEY **NOTE:** these are the environment variables set by the SauceLabs Jenkins plugin. Optionally provide Jenkins info, used to identify jobs to Sauce: * JOB_NAME * BUILD_NUMBER `tags` is a list of string tags to apply to the SauceLabs job. If not using SauceLabs, these will be ignored. Keyword Args: tags (list of str): Tags to apply to the SauceLabs job. If not using SauceLabs, these will be ignored. proxy: A proxy instance. other_caps (dict of str): Additional desired capabilities to provide to remote WebDriver instances. Note that these values will be overwritten by environment variables described above. 
This is only used for remote driver instances, where such info is usually used by services for additional configuration and metadata. Returns: selenium.webdriver: The configured browser object used to drive tests Raises: BrowserConfigError: The environment variables are not correctly specified. """ browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox') def browser_check_func(): """ Instantiate the browser and return the browser instance """ # See https://openedx.atlassian.net/browse/TE-701 try: # Get the class and kwargs required to instantiate the browser based on # whether we are using a local or remote one. if _use_remote_browser(SAUCE_ENV_VARS): browser_class, browser_args, browser_kwargs = _remote_browser_class( SAUCE_ENV_VARS, tags) elif _use_remote_browser(REMOTE_ENV_VARS): browser_class, browser_args, browser_kwargs = _remote_browser_class( REMOTE_ENV_VARS, tags) else: browser_class, browser_args, browser_kwargs = _local_browser_class( browser_name) # If we are using a proxy, we need extra kwargs passed on intantiation. if proxy: browser_kwargs = _proxy_kwargs(browser_name, proxy, browser_kwargs) # Load in user given desired caps but override with derived caps from above. This is to retain existing # behavior. Only for remote drivers, where various testing services use this info for configuration. if browser_class == webdriver.Remote: desired_caps = other_caps or {} desired_caps.update(browser_kwargs.get('desired_capabilities', {})) browser_kwargs['desired_capabilities'] = desired_caps return True, browser_class(*browser_args, **browser_kwargs) except (socket.error, WebDriverException) as err: msg = str(err) LOGGER.debug('Failed to instantiate browser: ' + msg) return False, None browser_instance = Promise( # There are cases where selenium takes 30s to return with a failure, so in order to try 3 # times, we set a long timeout. If there is a hang on the first try, the timeout will # be enforced. 
browser_check_func, "Browser is instantiated successfully.", try_limit=3, timeout=95).fulfill() return browser_instance
python
def browser(tags=None, proxy=None, other_caps=None): """ Interpret environment variables to configure Selenium. Performs validation, logging, and sensible defaults. There are three cases: 1. Local browsers: If the proper environment variables are not all set for the second case, then we use a local browser. * The environment variable `SELENIUM_BROWSER` can be set to specify which local browser to use. The default is \ Firefox. * Additionally, if a proxy instance is passed and the browser choice is either Chrome or Firefox, then the \ browser will be initialized with the proxy server set. * The environment variable `SELENIUM_FIREFOX_PATH` can be used for specifying a path to the Firefox binary. \ Default behavior is to use the system location. * The environment variable `FIREFOX_PROFILE_PATH` can be used for specifying a path to the Firefox profile. \ Default behavior is to use a barebones default profile with a few useful preferences set. 2. Remote browser (not SauceLabs): Set all of the following environment variables, but not all of the ones needed for SauceLabs: * SELENIUM_BROWSER * SELENIUM_HOST * SELENIUM_PORT 3. SauceLabs: Set all of the following environment variables: * SELENIUM_BROWSER * SELENIUM_VERSION * SELENIUM_PLATFORM * SELENIUM_HOST * SELENIUM_PORT * SAUCE_USER_NAME * SAUCE_API_KEY **NOTE:** these are the environment variables set by the SauceLabs Jenkins plugin. Optionally provide Jenkins info, used to identify jobs to Sauce: * JOB_NAME * BUILD_NUMBER `tags` is a list of string tags to apply to the SauceLabs job. If not using SauceLabs, these will be ignored. Keyword Args: tags (list of str): Tags to apply to the SauceLabs job. If not using SauceLabs, these will be ignored. proxy: A proxy instance. other_caps (dict of str): Additional desired capabilities to provide to remote WebDriver instances. Note that these values will be overwritten by environment variables described above. 
This is only used for remote driver instances, where such info is usually used by services for additional configuration and metadata. Returns: selenium.webdriver: The configured browser object used to drive tests Raises: BrowserConfigError: The environment variables are not correctly specified. """ browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox') def browser_check_func(): """ Instantiate the browser and return the browser instance """ # See https://openedx.atlassian.net/browse/TE-701 try: # Get the class and kwargs required to instantiate the browser based on # whether we are using a local or remote one. if _use_remote_browser(SAUCE_ENV_VARS): browser_class, browser_args, browser_kwargs = _remote_browser_class( SAUCE_ENV_VARS, tags) elif _use_remote_browser(REMOTE_ENV_VARS): browser_class, browser_args, browser_kwargs = _remote_browser_class( REMOTE_ENV_VARS, tags) else: browser_class, browser_args, browser_kwargs = _local_browser_class( browser_name) # If we are using a proxy, we need extra kwargs passed on intantiation. if proxy: browser_kwargs = _proxy_kwargs(browser_name, proxy, browser_kwargs) # Load in user given desired caps but override with derived caps from above. This is to retain existing # behavior. Only for remote drivers, where various testing services use this info for configuration. if browser_class == webdriver.Remote: desired_caps = other_caps or {} desired_caps.update(browser_kwargs.get('desired_capabilities', {})) browser_kwargs['desired_capabilities'] = desired_caps return True, browser_class(*browser_args, **browser_kwargs) except (socket.error, WebDriverException) as err: msg = str(err) LOGGER.debug('Failed to instantiate browser: ' + msg) return False, None browser_instance = Promise( # There are cases where selenium takes 30s to return with a failure, so in order to try 3 # times, we set a long timeout. If there is a hang on the first try, the timeout will # be enforced. 
browser_check_func, "Browser is instantiated successfully.", try_limit=3, timeout=95).fulfill() return browser_instance
[ "def", "browser", "(", "tags", "=", "None", ",", "proxy", "=", "None", ",", "other_caps", "=", "None", ")", ":", "browser_name", "=", "os", ".", "environ", ".", "get", "(", "'SELENIUM_BROWSER'", ",", "'firefox'", ")", "def", "browser_check_func", "(", ")", ":", "\"\"\" Instantiate the browser and return the browser instance \"\"\"", "# See https://openedx.atlassian.net/browse/TE-701", "try", ":", "# Get the class and kwargs required to instantiate the browser based on", "# whether we are using a local or remote one.", "if", "_use_remote_browser", "(", "SAUCE_ENV_VARS", ")", ":", "browser_class", ",", "browser_args", ",", "browser_kwargs", "=", "_remote_browser_class", "(", "SAUCE_ENV_VARS", ",", "tags", ")", "elif", "_use_remote_browser", "(", "REMOTE_ENV_VARS", ")", ":", "browser_class", ",", "browser_args", ",", "browser_kwargs", "=", "_remote_browser_class", "(", "REMOTE_ENV_VARS", ",", "tags", ")", "else", ":", "browser_class", ",", "browser_args", ",", "browser_kwargs", "=", "_local_browser_class", "(", "browser_name", ")", "# If we are using a proxy, we need extra kwargs passed on intantiation.", "if", "proxy", ":", "browser_kwargs", "=", "_proxy_kwargs", "(", "browser_name", ",", "proxy", ",", "browser_kwargs", ")", "# Load in user given desired caps but override with derived caps from above. This is to retain existing", "# behavior. 
Only for remote drivers, where various testing services use this info for configuration.", "if", "browser_class", "==", "webdriver", ".", "Remote", ":", "desired_caps", "=", "other_caps", "or", "{", "}", "desired_caps", ".", "update", "(", "browser_kwargs", ".", "get", "(", "'desired_capabilities'", ",", "{", "}", ")", ")", "browser_kwargs", "[", "'desired_capabilities'", "]", "=", "desired_caps", "return", "True", ",", "browser_class", "(", "*", "browser_args", ",", "*", "*", "browser_kwargs", ")", "except", "(", "socket", ".", "error", ",", "WebDriverException", ")", "as", "err", ":", "msg", "=", "str", "(", "err", ")", "LOGGER", ".", "debug", "(", "'Failed to instantiate browser: '", "+", "msg", ")", "return", "False", ",", "None", "browser_instance", "=", "Promise", "(", "# There are cases where selenium takes 30s to return with a failure, so in order to try 3", "# times, we set a long timeout. If there is a hang on the first try, the timeout will", "# be enforced.", "browser_check_func", ",", "\"Browser is instantiated successfully.\"", ",", "try_limit", "=", "3", ",", "timeout", "=", "95", ")", ".", "fulfill", "(", ")", "return", "browser_instance" ]
Interpret environment variables to configure Selenium. Performs validation, logging, and sensible defaults. There are three cases: 1. Local browsers: If the proper environment variables are not all set for the second case, then we use a local browser. * The environment variable `SELENIUM_BROWSER` can be set to specify which local browser to use. The default is \ Firefox. * Additionally, if a proxy instance is passed and the browser choice is either Chrome or Firefox, then the \ browser will be initialized with the proxy server set. * The environment variable `SELENIUM_FIREFOX_PATH` can be used for specifying a path to the Firefox binary. \ Default behavior is to use the system location. * The environment variable `FIREFOX_PROFILE_PATH` can be used for specifying a path to the Firefox profile. \ Default behavior is to use a barebones default profile with a few useful preferences set. 2. Remote browser (not SauceLabs): Set all of the following environment variables, but not all of the ones needed for SauceLabs: * SELENIUM_BROWSER * SELENIUM_HOST * SELENIUM_PORT 3. SauceLabs: Set all of the following environment variables: * SELENIUM_BROWSER * SELENIUM_VERSION * SELENIUM_PLATFORM * SELENIUM_HOST * SELENIUM_PORT * SAUCE_USER_NAME * SAUCE_API_KEY **NOTE:** these are the environment variables set by the SauceLabs Jenkins plugin. Optionally provide Jenkins info, used to identify jobs to Sauce: * JOB_NAME * BUILD_NUMBER `tags` is a list of string tags to apply to the SauceLabs job. If not using SauceLabs, these will be ignored. Keyword Args: tags (list of str): Tags to apply to the SauceLabs job. If not using SauceLabs, these will be ignored. proxy: A proxy instance. other_caps (dict of str): Additional desired capabilities to provide to remote WebDriver instances. Note that these values will be overwritten by environment variables described above. 
This is only used for remote driver instances, where such info is usually used by services for additional configuration and metadata. Returns: selenium.webdriver: The configured browser object used to drive tests Raises: BrowserConfigError: The environment variables are not correctly specified.
[ "Interpret", "environment", "variables", "to", "configure", "Selenium", ".", "Performs", "validation", "logging", "and", "sensible", "defaults", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L192-L296
train
16,981
edx/bok-choy
bok_choy/browser.py
_firefox_profile
def _firefox_profile(): """Configure the Firefox profile, respecting FIREFOX_PROFILE_PATH if set""" profile_dir = os.environ.get(FIREFOX_PROFILE_ENV_VAR) if profile_dir: LOGGER.info(u"Using firefox profile: %s", profile_dir) try: firefox_profile = webdriver.FirefoxProfile(profile_dir) except OSError as err: if err.errno == errno.ENOENT: raise BrowserConfigError( u"Firefox profile directory {env_var}={profile_dir} does not exist".format( env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir)) elif err.errno == errno.EACCES: raise BrowserConfigError( u"Firefox profile directory {env_var}={profile_dir} has incorrect permissions. It must be \ readable and executable.".format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir)) else: # Some other OSError: raise BrowserConfigError( u"Problem with firefox profile directory {env_var}={profile_dir}: {msg}" .format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir, msg=str(err))) else: LOGGER.info("Using default firefox profile") firefox_profile = webdriver.FirefoxProfile() # Bypasses the security prompt displayed by the browser when it attempts to # access a media device (e.g., a webcam) firefox_profile.set_preference('media.navigator.permission.disabled', True) # Disable the initial url fetch to 'learn more' from mozilla (so you don't have to # be online to run bok-choy on firefox) firefox_profile.set_preference('browser.startup.homepage', 'about:blank') firefox_profile.set_preference('startup.homepage_welcome_url', 'about:blank') firefox_profile.set_preference('startup.homepage_welcome_url.additional', 'about:blank') # Disable fetching an updated version of firefox firefox_profile.set_preference('app.update.enabled', False) # Disable plugin checking firefox_profile.set_preference('plugins.hide_infobar_for_outdated_plugin', True) # Disable health reporter firefox_profile.set_preference('datareporting.healthreport.service.enabled', False) # Disable all data upload (Telemetry and FHR) 
firefox_profile.set_preference('datareporting.policy.dataSubmissionEnabled', False) # Disable crash reporter firefox_profile.set_preference('toolkit.crashreporter.enabled', False) # Disable the JSON Viewer firefox_profile.set_preference('devtools.jsonview.enabled', False) # Grant OS focus to the launched browser so focus-related tests function correctly firefox_profile.set_preference('focusmanager.testmode', True) for function in FIREFOX_PROFILE_CUSTOMIZERS: function(firefox_profile) return firefox_profile
python
def _firefox_profile(): """Configure the Firefox profile, respecting FIREFOX_PROFILE_PATH if set""" profile_dir = os.environ.get(FIREFOX_PROFILE_ENV_VAR) if profile_dir: LOGGER.info(u"Using firefox profile: %s", profile_dir) try: firefox_profile = webdriver.FirefoxProfile(profile_dir) except OSError as err: if err.errno == errno.ENOENT: raise BrowserConfigError( u"Firefox profile directory {env_var}={profile_dir} does not exist".format( env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir)) elif err.errno == errno.EACCES: raise BrowserConfigError( u"Firefox profile directory {env_var}={profile_dir} has incorrect permissions. It must be \ readable and executable.".format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir)) else: # Some other OSError: raise BrowserConfigError( u"Problem with firefox profile directory {env_var}={profile_dir}: {msg}" .format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir, msg=str(err))) else: LOGGER.info("Using default firefox profile") firefox_profile = webdriver.FirefoxProfile() # Bypasses the security prompt displayed by the browser when it attempts to # access a media device (e.g., a webcam) firefox_profile.set_preference('media.navigator.permission.disabled', True) # Disable the initial url fetch to 'learn more' from mozilla (so you don't have to # be online to run bok-choy on firefox) firefox_profile.set_preference('browser.startup.homepage', 'about:blank') firefox_profile.set_preference('startup.homepage_welcome_url', 'about:blank') firefox_profile.set_preference('startup.homepage_welcome_url.additional', 'about:blank') # Disable fetching an updated version of firefox firefox_profile.set_preference('app.update.enabled', False) # Disable plugin checking firefox_profile.set_preference('plugins.hide_infobar_for_outdated_plugin', True) # Disable health reporter firefox_profile.set_preference('datareporting.healthreport.service.enabled', False) # Disable all data upload (Telemetry and FHR) 
firefox_profile.set_preference('datareporting.policy.dataSubmissionEnabled', False) # Disable crash reporter firefox_profile.set_preference('toolkit.crashreporter.enabled', False) # Disable the JSON Viewer firefox_profile.set_preference('devtools.jsonview.enabled', False) # Grant OS focus to the launched browser so focus-related tests function correctly firefox_profile.set_preference('focusmanager.testmode', True) for function in FIREFOX_PROFILE_CUSTOMIZERS: function(firefox_profile) return firefox_profile
[ "def", "_firefox_profile", "(", ")", ":", "profile_dir", "=", "os", ".", "environ", ".", "get", "(", "FIREFOX_PROFILE_ENV_VAR", ")", "if", "profile_dir", ":", "LOGGER", ".", "info", "(", "u\"Using firefox profile: %s\"", ",", "profile_dir", ")", "try", ":", "firefox_profile", "=", "webdriver", ".", "FirefoxProfile", "(", "profile_dir", ")", "except", "OSError", "as", "err", ":", "if", "err", ".", "errno", "==", "errno", ".", "ENOENT", ":", "raise", "BrowserConfigError", "(", "u\"Firefox profile directory {env_var}={profile_dir} does not exist\"", ".", "format", "(", "env_var", "=", "FIREFOX_PROFILE_ENV_VAR", ",", "profile_dir", "=", "profile_dir", ")", ")", "elif", "err", ".", "errno", "==", "errno", ".", "EACCES", ":", "raise", "BrowserConfigError", "(", "u\"Firefox profile directory {env_var}={profile_dir} has incorrect permissions. It must be \\\n readable and executable.\"", ".", "format", "(", "env_var", "=", "FIREFOX_PROFILE_ENV_VAR", ",", "profile_dir", "=", "profile_dir", ")", ")", "else", ":", "# Some other OSError:", "raise", "BrowserConfigError", "(", "u\"Problem with firefox profile directory {env_var}={profile_dir}: {msg}\"", ".", "format", "(", "env_var", "=", "FIREFOX_PROFILE_ENV_VAR", ",", "profile_dir", "=", "profile_dir", ",", "msg", "=", "str", "(", "err", ")", ")", ")", "else", ":", "LOGGER", ".", "info", "(", "\"Using default firefox profile\"", ")", "firefox_profile", "=", "webdriver", ".", "FirefoxProfile", "(", ")", "# Bypasses the security prompt displayed by the browser when it attempts to", "# access a media device (e.g., a webcam)", "firefox_profile", ".", "set_preference", "(", "'media.navigator.permission.disabled'", ",", "True", ")", "# Disable the initial url fetch to 'learn more' from mozilla (so you don't have to", "# be online to run bok-choy on firefox)", "firefox_profile", ".", "set_preference", "(", "'browser.startup.homepage'", ",", "'about:blank'", ")", "firefox_profile", ".", "set_preference", "(", 
"'startup.homepage_welcome_url'", ",", "'about:blank'", ")", "firefox_profile", ".", "set_preference", "(", "'startup.homepage_welcome_url.additional'", ",", "'about:blank'", ")", "# Disable fetching an updated version of firefox", "firefox_profile", ".", "set_preference", "(", "'app.update.enabled'", ",", "False", ")", "# Disable plugin checking", "firefox_profile", ".", "set_preference", "(", "'plugins.hide_infobar_for_outdated_plugin'", ",", "True", ")", "# Disable health reporter", "firefox_profile", ".", "set_preference", "(", "'datareporting.healthreport.service.enabled'", ",", "False", ")", "# Disable all data upload (Telemetry and FHR)", "firefox_profile", ".", "set_preference", "(", "'datareporting.policy.dataSubmissionEnabled'", ",", "False", ")", "# Disable crash reporter", "firefox_profile", ".", "set_preference", "(", "'toolkit.crashreporter.enabled'", ",", "False", ")", "# Disable the JSON Viewer", "firefox_profile", ".", "set_preference", "(", "'devtools.jsonview.enabled'", ",", "False", ")", "# Grant OS focus to the launched browser so focus-related tests function correctly", "firefox_profile", ".", "set_preference", "(", "'focusmanager.testmode'", ",", "True", ")", "for", "function", "in", "FIREFOX_PROFILE_CUSTOMIZERS", ":", "function", "(", "firefox_profile", ")", "return", "firefox_profile" ]
Configure the Firefox profile, respecting FIREFOX_PROFILE_PATH if set
[ "Configure", "the", "Firefox", "profile", "respecting", "FIREFOX_PROFILE_PATH", "if", "set" ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L309-L367
train
16,982
edx/bok-choy
bok_choy/browser.py
_local_browser_class
def _local_browser_class(browser_name): """ Returns class, kwargs, and args needed to instantiate the local browser. """ # Log name of local browser LOGGER.info(u"Using local browser: %s [Default is firefox]", browser_name) # Get class of local browser based on name browser_class = BROWSERS.get(browser_name) headless = os.environ.get('BOKCHOY_HEADLESS', 'false').lower() == 'true' if browser_class is None: raise BrowserConfigError( u"Invalid browser name {name}. Options are: {options}".format( name=browser_name, options=", ".join(list(BROWSERS.keys())))) else: if browser_name == 'firefox': # Remove geckodriver log data from previous test cases log_path = os.path.join(os.getcwd(), 'geckodriver.log') if os.path.exists(log_path): os.remove(log_path) firefox_options = FirefoxOptions() firefox_options.log.level = 'trace' if headless: firefox_options.headless = True browser_args = [] browser_kwargs = { 'firefox_profile': _firefox_profile(), 'options': firefox_options, } firefox_path = os.environ.get('SELENIUM_FIREFOX_PATH') firefox_log = os.environ.get('SELENIUM_FIREFOX_LOG') if firefox_path and firefox_log: browser_kwargs.update({ 'firefox_binary': FirefoxBinary( firefox_path=firefox_path, log_file=firefox_log) }) elif firefox_path: browser_kwargs.update({ 'firefox_binary': FirefoxBinary(firefox_path=firefox_path) }) elif firefox_log: browser_kwargs.update({ 'firefox_binary': FirefoxBinary(log_file=firefox_log) }) elif browser_name == 'chrome': chrome_options = ChromeOptions() if headless: chrome_options.headless = True # Emulate webcam and microphone for testing purposes chrome_options.add_argument('--use-fake-device-for-media-stream') # Bypasses the security prompt displayed by the browser when it attempts to # access a media device (e.g., a webcam) chrome_options.add_argument('--use-fake-ui-for-media-stream') browser_args = [] browser_kwargs = { 'options': chrome_options, } else: browser_args, browser_kwargs = [], {} return browser_class, browser_args, browser_kwargs
python
def _local_browser_class(browser_name): """ Returns class, kwargs, and args needed to instantiate the local browser. """ # Log name of local browser LOGGER.info(u"Using local browser: %s [Default is firefox]", browser_name) # Get class of local browser based on name browser_class = BROWSERS.get(browser_name) headless = os.environ.get('BOKCHOY_HEADLESS', 'false').lower() == 'true' if browser_class is None: raise BrowserConfigError( u"Invalid browser name {name}. Options are: {options}".format( name=browser_name, options=", ".join(list(BROWSERS.keys())))) else: if browser_name == 'firefox': # Remove geckodriver log data from previous test cases log_path = os.path.join(os.getcwd(), 'geckodriver.log') if os.path.exists(log_path): os.remove(log_path) firefox_options = FirefoxOptions() firefox_options.log.level = 'trace' if headless: firefox_options.headless = True browser_args = [] browser_kwargs = { 'firefox_profile': _firefox_profile(), 'options': firefox_options, } firefox_path = os.environ.get('SELENIUM_FIREFOX_PATH') firefox_log = os.environ.get('SELENIUM_FIREFOX_LOG') if firefox_path and firefox_log: browser_kwargs.update({ 'firefox_binary': FirefoxBinary( firefox_path=firefox_path, log_file=firefox_log) }) elif firefox_path: browser_kwargs.update({ 'firefox_binary': FirefoxBinary(firefox_path=firefox_path) }) elif firefox_log: browser_kwargs.update({ 'firefox_binary': FirefoxBinary(log_file=firefox_log) }) elif browser_name == 'chrome': chrome_options = ChromeOptions() if headless: chrome_options.headless = True # Emulate webcam and microphone for testing purposes chrome_options.add_argument('--use-fake-device-for-media-stream') # Bypasses the security prompt displayed by the browser when it attempts to # access a media device (e.g., a webcam) chrome_options.add_argument('--use-fake-ui-for-media-stream') browser_args = [] browser_kwargs = { 'options': chrome_options, } else: browser_args, browser_kwargs = [], {} return browser_class, browser_args, browser_kwargs
[ "def", "_local_browser_class", "(", "browser_name", ")", ":", "# Log name of local browser", "LOGGER", ".", "info", "(", "u\"Using local browser: %s [Default is firefox]\"", ",", "browser_name", ")", "# Get class of local browser based on name", "browser_class", "=", "BROWSERS", ".", "get", "(", "browser_name", ")", "headless", "=", "os", ".", "environ", ".", "get", "(", "'BOKCHOY_HEADLESS'", ",", "'false'", ")", ".", "lower", "(", ")", "==", "'true'", "if", "browser_class", "is", "None", ":", "raise", "BrowserConfigError", "(", "u\"Invalid browser name {name}. Options are: {options}\"", ".", "format", "(", "name", "=", "browser_name", ",", "options", "=", "\", \"", ".", "join", "(", "list", "(", "BROWSERS", ".", "keys", "(", ")", ")", ")", ")", ")", "else", ":", "if", "browser_name", "==", "'firefox'", ":", "# Remove geckodriver log data from previous test cases", "log_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'geckodriver.log'", ")", "if", "os", ".", "path", ".", "exists", "(", "log_path", ")", ":", "os", ".", "remove", "(", "log_path", ")", "firefox_options", "=", "FirefoxOptions", "(", ")", "firefox_options", ".", "log", ".", "level", "=", "'trace'", "if", "headless", ":", "firefox_options", ".", "headless", "=", "True", "browser_args", "=", "[", "]", "browser_kwargs", "=", "{", "'firefox_profile'", ":", "_firefox_profile", "(", ")", ",", "'options'", ":", "firefox_options", ",", "}", "firefox_path", "=", "os", ".", "environ", ".", "get", "(", "'SELENIUM_FIREFOX_PATH'", ")", "firefox_log", "=", "os", ".", "environ", ".", "get", "(", "'SELENIUM_FIREFOX_LOG'", ")", "if", "firefox_path", "and", "firefox_log", ":", "browser_kwargs", ".", "update", "(", "{", "'firefox_binary'", ":", "FirefoxBinary", "(", "firefox_path", "=", "firefox_path", ",", "log_file", "=", "firefox_log", ")", "}", ")", "elif", "firefox_path", ":", "browser_kwargs", ".", "update", "(", "{", "'firefox_binary'", ":", "FirefoxBinary", "(", 
"firefox_path", "=", "firefox_path", ")", "}", ")", "elif", "firefox_log", ":", "browser_kwargs", ".", "update", "(", "{", "'firefox_binary'", ":", "FirefoxBinary", "(", "log_file", "=", "firefox_log", ")", "}", ")", "elif", "browser_name", "==", "'chrome'", ":", "chrome_options", "=", "ChromeOptions", "(", ")", "if", "headless", ":", "chrome_options", ".", "headless", "=", "True", "# Emulate webcam and microphone for testing purposes", "chrome_options", ".", "add_argument", "(", "'--use-fake-device-for-media-stream'", ")", "# Bypasses the security prompt displayed by the browser when it attempts to", "# access a media device (e.g., a webcam)", "chrome_options", ".", "add_argument", "(", "'--use-fake-ui-for-media-stream'", ")", "browser_args", "=", "[", "]", "browser_kwargs", "=", "{", "'options'", ":", "chrome_options", ",", "}", "else", ":", "browser_args", ",", "browser_kwargs", "=", "[", "]", ",", "{", "}", "return", "browser_class", ",", "browser_args", ",", "browser_kwargs" ]
Returns class, kwargs, and args needed to instantiate the local browser.
[ "Returns", "class", "kwargs", "and", "args", "needed", "to", "instantiate", "the", "local", "browser", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L370-L437
train
16,983
edx/bok-choy
bok_choy/browser.py
_remote_browser_class
def _remote_browser_class(env_vars, tags=None): """ Returns class, kwargs, and args needed to instantiate the remote browser. """ if tags is None: tags = [] # Interpret the environment variables, raising an exception if they're # invalid envs = _required_envs(env_vars) envs.update(_optional_envs()) # Turn the environment variables into a dictionary of desired capabilities caps = _capabilities_dict(envs, tags) if 'accessKey' in caps: LOGGER.info(u"Using SauceLabs: %s %s %s", caps['platform'], caps['browserName'], caps['version']) else: LOGGER.info(u"Using Remote Browser: %s", caps['browserName']) # Create and return a new Browser # We assume that the WebDriver end-point is running locally (e.g. using # SauceConnect) url = u"http://{0}:{1}/wd/hub".format( envs['SELENIUM_HOST'], envs['SELENIUM_PORT']) browser_args = [] browser_kwargs = { 'command_executor': url, 'desired_capabilities': caps, } if caps['browserName'] == 'firefox': browser_kwargs['browser_profile'] = _firefox_profile() return webdriver.Remote, browser_args, browser_kwargs
python
def _remote_browser_class(env_vars, tags=None): """ Returns class, kwargs, and args needed to instantiate the remote browser. """ if tags is None: tags = [] # Interpret the environment variables, raising an exception if they're # invalid envs = _required_envs(env_vars) envs.update(_optional_envs()) # Turn the environment variables into a dictionary of desired capabilities caps = _capabilities_dict(envs, tags) if 'accessKey' in caps: LOGGER.info(u"Using SauceLabs: %s %s %s", caps['platform'], caps['browserName'], caps['version']) else: LOGGER.info(u"Using Remote Browser: %s", caps['browserName']) # Create and return a new Browser # We assume that the WebDriver end-point is running locally (e.g. using # SauceConnect) url = u"http://{0}:{1}/wd/hub".format( envs['SELENIUM_HOST'], envs['SELENIUM_PORT']) browser_args = [] browser_kwargs = { 'command_executor': url, 'desired_capabilities': caps, } if caps['browserName'] == 'firefox': browser_kwargs['browser_profile'] = _firefox_profile() return webdriver.Remote, browser_args, browser_kwargs
[ "def", "_remote_browser_class", "(", "env_vars", ",", "tags", "=", "None", ")", ":", "if", "tags", "is", "None", ":", "tags", "=", "[", "]", "# Interpret the environment variables, raising an exception if they're", "# invalid", "envs", "=", "_required_envs", "(", "env_vars", ")", "envs", ".", "update", "(", "_optional_envs", "(", ")", ")", "# Turn the environment variables into a dictionary of desired capabilities", "caps", "=", "_capabilities_dict", "(", "envs", ",", "tags", ")", "if", "'accessKey'", "in", "caps", ":", "LOGGER", ".", "info", "(", "u\"Using SauceLabs: %s %s %s\"", ",", "caps", "[", "'platform'", "]", ",", "caps", "[", "'browserName'", "]", ",", "caps", "[", "'version'", "]", ")", "else", ":", "LOGGER", ".", "info", "(", "u\"Using Remote Browser: %s\"", ",", "caps", "[", "'browserName'", "]", ")", "# Create and return a new Browser", "# We assume that the WebDriver end-point is running locally (e.g. using", "# SauceConnect)", "url", "=", "u\"http://{0}:{1}/wd/hub\"", ".", "format", "(", "envs", "[", "'SELENIUM_HOST'", "]", ",", "envs", "[", "'SELENIUM_PORT'", "]", ")", "browser_args", "=", "[", "]", "browser_kwargs", "=", "{", "'command_executor'", ":", "url", ",", "'desired_capabilities'", ":", "caps", ",", "}", "if", "caps", "[", "'browserName'", "]", "==", "'firefox'", ":", "browser_kwargs", "[", "'browser_profile'", "]", "=", "_firefox_profile", "(", ")", "return", "webdriver", ".", "Remote", ",", "browser_args", ",", "browser_kwargs" ]
Returns class, kwargs, and args needed to instantiate the remote browser.
[ "Returns", "class", "kwargs", "and", "args", "needed", "to", "instantiate", "the", "remote", "browser", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L440-L474
train
16,984
edx/bok-choy
bok_choy/browser.py
_proxy_kwargs
def _proxy_kwargs(browser_name, proxy, browser_kwargs={}): # pylint: disable=dangerous-default-value """ Determines the kwargs needed to set up a proxy based on the browser type. Returns: a dictionary of arguments needed to pass when instantiating the WebDriver instance. """ proxy_dict = { "httpProxy": proxy.proxy, "proxyType": 'manual', } if browser_name == 'firefox' and 'desired_capabilities' not in browser_kwargs: # This one works for firefox locally wd_proxy = webdriver.common.proxy.Proxy(proxy_dict) browser_kwargs['proxy'] = wd_proxy else: # This one works with chrome, both locally and remote # This one works with firefox remote, but not locally if 'desired_capabilities' not in browser_kwargs: browser_kwargs['desired_capabilities'] = {} browser_kwargs['desired_capabilities']['proxy'] = proxy_dict return browser_kwargs
python
def _proxy_kwargs(browser_name, proxy, browser_kwargs={}): # pylint: disable=dangerous-default-value """ Determines the kwargs needed to set up a proxy based on the browser type. Returns: a dictionary of arguments needed to pass when instantiating the WebDriver instance. """ proxy_dict = { "httpProxy": proxy.proxy, "proxyType": 'manual', } if browser_name == 'firefox' and 'desired_capabilities' not in browser_kwargs: # This one works for firefox locally wd_proxy = webdriver.common.proxy.Proxy(proxy_dict) browser_kwargs['proxy'] = wd_proxy else: # This one works with chrome, both locally and remote # This one works with firefox remote, but not locally if 'desired_capabilities' not in browser_kwargs: browser_kwargs['desired_capabilities'] = {} browser_kwargs['desired_capabilities']['proxy'] = proxy_dict return browser_kwargs
[ "def", "_proxy_kwargs", "(", "browser_name", ",", "proxy", ",", "browser_kwargs", "=", "{", "}", ")", ":", "# pylint: disable=dangerous-default-value", "proxy_dict", "=", "{", "\"httpProxy\"", ":", "proxy", ".", "proxy", ",", "\"proxyType\"", ":", "'manual'", ",", "}", "if", "browser_name", "==", "'firefox'", "and", "'desired_capabilities'", "not", "in", "browser_kwargs", ":", "# This one works for firefox locally", "wd_proxy", "=", "webdriver", ".", "common", ".", "proxy", ".", "Proxy", "(", "proxy_dict", ")", "browser_kwargs", "[", "'proxy'", "]", "=", "wd_proxy", "else", ":", "# This one works with chrome, both locally and remote", "# This one works with firefox remote, but not locally", "if", "'desired_capabilities'", "not", "in", "browser_kwargs", ":", "browser_kwargs", "[", "'desired_capabilities'", "]", "=", "{", "}", "browser_kwargs", "[", "'desired_capabilities'", "]", "[", "'proxy'", "]", "=", "proxy_dict", "return", "browser_kwargs" ]
Determines the kwargs needed to set up a proxy based on the browser type. Returns: a dictionary of arguments needed to pass when instantiating the WebDriver instance.
[ "Determines", "the", "kwargs", "needed", "to", "set", "up", "a", "proxy", "based", "on", "the", "browser", "type", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L477-L503
train
16,985
edx/bok-choy
bok_choy/browser.py
_required_envs
def _required_envs(env_vars): """ Parse environment variables for required values, raising a `BrowserConfig` error if they are not found. Returns a `dict` of environment variables. """ envs = { key: os.environ.get(key) for key in env_vars } # Check for missing keys missing = [key for key, val in list(envs.items()) if val is None] if missing: msg = ( u"These environment variables must be set: " + u", ".join(missing) ) raise BrowserConfigError(msg) # Check that we support this browser if envs['SELENIUM_BROWSER'] not in BROWSERS: msg = u"Unsuppported browser: {0}".format(envs['SELENIUM_BROWSER']) raise BrowserConfigError(msg) return envs
python
def _required_envs(env_vars): """ Parse environment variables for required values, raising a `BrowserConfig` error if they are not found. Returns a `dict` of environment variables. """ envs = { key: os.environ.get(key) for key in env_vars } # Check for missing keys missing = [key for key, val in list(envs.items()) if val is None] if missing: msg = ( u"These environment variables must be set: " + u", ".join(missing) ) raise BrowserConfigError(msg) # Check that we support this browser if envs['SELENIUM_BROWSER'] not in BROWSERS: msg = u"Unsuppported browser: {0}".format(envs['SELENIUM_BROWSER']) raise BrowserConfigError(msg) return envs
[ "def", "_required_envs", "(", "env_vars", ")", ":", "envs", "=", "{", "key", ":", "os", ".", "environ", ".", "get", "(", "key", ")", "for", "key", "in", "env_vars", "}", "# Check for missing keys", "missing", "=", "[", "key", "for", "key", ",", "val", "in", "list", "(", "envs", ".", "items", "(", ")", ")", "if", "val", "is", "None", "]", "if", "missing", ":", "msg", "=", "(", "u\"These environment variables must be set: \"", "+", "u\", \"", ".", "join", "(", "missing", ")", ")", "raise", "BrowserConfigError", "(", "msg", ")", "# Check that we support this browser", "if", "envs", "[", "'SELENIUM_BROWSER'", "]", "not", "in", "BROWSERS", ":", "msg", "=", "u\"Unsuppported browser: {0}\"", ".", "format", "(", "envs", "[", "'SELENIUM_BROWSER'", "]", ")", "raise", "BrowserConfigError", "(", "msg", ")", "return", "envs" ]
Parse environment variables for required values, raising a `BrowserConfig` error if they are not found. Returns a `dict` of environment variables.
[ "Parse", "environment", "variables", "for", "required", "values", "raising", "a", "BrowserConfig", "error", "if", "they", "are", "not", "found", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L519-L544
train
16,986
edx/bok-choy
bok_choy/browser.py
_optional_envs
def _optional_envs(): """ Parse environment variables for optional values, raising a `BrowserConfig` error if they are insufficiently specified. Returns a `dict` of environment variables. """ envs = { key: os.environ.get(key) for key in OPTIONAL_ENV_VARS if key in os.environ } # If we're using Jenkins, check that we have all the required info if 'JOB_NAME' in envs and 'BUILD_NUMBER' not in envs: raise BrowserConfigError("Missing BUILD_NUMBER environment var") if 'BUILD_NUMBER' in envs and 'JOB_NAME' not in envs: raise BrowserConfigError("Missing JOB_NAME environment var") return envs
python
def _optional_envs(): """ Parse environment variables for optional values, raising a `BrowserConfig` error if they are insufficiently specified. Returns a `dict` of environment variables. """ envs = { key: os.environ.get(key) for key in OPTIONAL_ENV_VARS if key in os.environ } # If we're using Jenkins, check that we have all the required info if 'JOB_NAME' in envs and 'BUILD_NUMBER' not in envs: raise BrowserConfigError("Missing BUILD_NUMBER environment var") if 'BUILD_NUMBER' in envs and 'JOB_NAME' not in envs: raise BrowserConfigError("Missing JOB_NAME environment var") return envs
[ "def", "_optional_envs", "(", ")", ":", "envs", "=", "{", "key", ":", "os", ".", "environ", ".", "get", "(", "key", ")", "for", "key", "in", "OPTIONAL_ENV_VARS", "if", "key", "in", "os", ".", "environ", "}", "# If we're using Jenkins, check that we have all the required info", "if", "'JOB_NAME'", "in", "envs", "and", "'BUILD_NUMBER'", "not", "in", "envs", ":", "raise", "BrowserConfigError", "(", "\"Missing BUILD_NUMBER environment var\"", ")", "if", "'BUILD_NUMBER'", "in", "envs", "and", "'JOB_NAME'", "not", "in", "envs", ":", "raise", "BrowserConfigError", "(", "\"Missing JOB_NAME environment var\"", ")", "return", "envs" ]
Parse environment variables for optional values, raising a `BrowserConfig` error if they are insufficiently specified. Returns a `dict` of environment variables.
[ "Parse", "environment", "variables", "for", "optional", "values", "raising", "a", "BrowserConfig", "error", "if", "they", "are", "insufficiently", "specified", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L547-L567
train
16,987
edx/bok-choy
bok_choy/browser.py
_capabilities_dict
def _capabilities_dict(envs, tags): """ Convert the dictionary of environment variables to a dictionary of desired capabilities to send to the Remote WebDriver. `tags` is a list of string tags to apply to the SauceLabs job. """ capabilities = { 'browserName': envs['SELENIUM_BROWSER'], 'acceptInsecureCerts': bool(envs.get('SELENIUM_INSECURE_CERTS', False)), 'video-upload-on-pass': False, 'sauce-advisor': False, 'capture-html': True, 'record-screenshots': True, 'max-duration': 600, 'public': 'public restricted', 'tags': tags, } # Add SauceLabs specific environment vars if they are set. if _use_remote_browser(SAUCE_ENV_VARS): sauce_capabilities = { 'platform': envs['SELENIUM_PLATFORM'], 'version': envs['SELENIUM_VERSION'], 'username': envs['SAUCE_USER_NAME'], 'accessKey': envs['SAUCE_API_KEY'], } capabilities.update(sauce_capabilities) # Optional: Add in Jenkins-specific environment variables # to link Sauce output with the Jenkins job if 'JOB_NAME' in envs: jenkins_vars = { 'build': envs['BUILD_NUMBER'], 'name': envs['JOB_NAME'], } capabilities.update(jenkins_vars) return capabilities
python
def _capabilities_dict(envs, tags): """ Convert the dictionary of environment variables to a dictionary of desired capabilities to send to the Remote WebDriver. `tags` is a list of string tags to apply to the SauceLabs job. """ capabilities = { 'browserName': envs['SELENIUM_BROWSER'], 'acceptInsecureCerts': bool(envs.get('SELENIUM_INSECURE_CERTS', False)), 'video-upload-on-pass': False, 'sauce-advisor': False, 'capture-html': True, 'record-screenshots': True, 'max-duration': 600, 'public': 'public restricted', 'tags': tags, } # Add SauceLabs specific environment vars if they are set. if _use_remote_browser(SAUCE_ENV_VARS): sauce_capabilities = { 'platform': envs['SELENIUM_PLATFORM'], 'version': envs['SELENIUM_VERSION'], 'username': envs['SAUCE_USER_NAME'], 'accessKey': envs['SAUCE_API_KEY'], } capabilities.update(sauce_capabilities) # Optional: Add in Jenkins-specific environment variables # to link Sauce output with the Jenkins job if 'JOB_NAME' in envs: jenkins_vars = { 'build': envs['BUILD_NUMBER'], 'name': envs['JOB_NAME'], } capabilities.update(jenkins_vars) return capabilities
[ "def", "_capabilities_dict", "(", "envs", ",", "tags", ")", ":", "capabilities", "=", "{", "'browserName'", ":", "envs", "[", "'SELENIUM_BROWSER'", "]", ",", "'acceptInsecureCerts'", ":", "bool", "(", "envs", ".", "get", "(", "'SELENIUM_INSECURE_CERTS'", ",", "False", ")", ")", ",", "'video-upload-on-pass'", ":", "False", ",", "'sauce-advisor'", ":", "False", ",", "'capture-html'", ":", "True", ",", "'record-screenshots'", ":", "True", ",", "'max-duration'", ":", "600", ",", "'public'", ":", "'public restricted'", ",", "'tags'", ":", "tags", ",", "}", "# Add SauceLabs specific environment vars if they are set.", "if", "_use_remote_browser", "(", "SAUCE_ENV_VARS", ")", ":", "sauce_capabilities", "=", "{", "'platform'", ":", "envs", "[", "'SELENIUM_PLATFORM'", "]", ",", "'version'", ":", "envs", "[", "'SELENIUM_VERSION'", "]", ",", "'username'", ":", "envs", "[", "'SAUCE_USER_NAME'", "]", ",", "'accessKey'", ":", "envs", "[", "'SAUCE_API_KEY'", "]", ",", "}", "capabilities", ".", "update", "(", "sauce_capabilities", ")", "# Optional: Add in Jenkins-specific environment variables", "# to link Sauce output with the Jenkins job", "if", "'JOB_NAME'", "in", "envs", ":", "jenkins_vars", "=", "{", "'build'", ":", "envs", "[", "'BUILD_NUMBER'", "]", ",", "'name'", ":", "envs", "[", "'JOB_NAME'", "]", ",", "}", "capabilities", ".", "update", "(", "jenkins_vars", ")", "return", "capabilities" ]
Convert the dictionary of environment variables to a dictionary of desired capabilities to send to the Remote WebDriver. `tags` is a list of string tags to apply to the SauceLabs job.
[ "Convert", "the", "dictionary", "of", "environment", "variables", "to", "a", "dictionary", "of", "desired", "capabilities", "to", "send", "to", "the", "Remote", "WebDriver", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L570-L611
train
16,988
edx/bok-choy
bok_choy/query.py
Query.replace
def replace(self, **kwargs): """ Return a copy of this `Query`, but with attributes specified as keyword arguments replaced by the keyword values. Keyword Args: Attributes/values to replace in the copy. Returns: A copy of the query that has its attributes updated with the specified values. Raises: TypeError: The `Query` does not have the specified attribute. """ clone = copy(self) clone.transforms = list(clone.transforms) for key, value in kwargs.items(): if not hasattr(clone, key): raise TypeError(u'replace() got an unexpected keyword argument {!r}'.format(key)) setattr(clone, key, value) return clone
python
def replace(self, **kwargs): """ Return a copy of this `Query`, but with attributes specified as keyword arguments replaced by the keyword values. Keyword Args: Attributes/values to replace in the copy. Returns: A copy of the query that has its attributes updated with the specified values. Raises: TypeError: The `Query` does not have the specified attribute. """ clone = copy(self) clone.transforms = list(clone.transforms) for key, value in kwargs.items(): if not hasattr(clone, key): raise TypeError(u'replace() got an unexpected keyword argument {!r}'.format(key)) setattr(clone, key, value) return clone
[ "def", "replace", "(", "self", ",", "*", "*", "kwargs", ")", ":", "clone", "=", "copy", "(", "self", ")", "clone", ".", "transforms", "=", "list", "(", "clone", ".", "transforms", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "not", "hasattr", "(", "clone", ",", "key", ")", ":", "raise", "TypeError", "(", "u'replace() got an unexpected keyword argument {!r}'", ".", "format", "(", "key", ")", ")", "setattr", "(", "clone", ",", "key", ",", "value", ")", "return", "clone" ]
Return a copy of this `Query`, but with attributes specified as keyword arguments replaced by the keyword values. Keyword Args: Attributes/values to replace in the copy. Returns: A copy of the query that has its attributes updated with the specified values. Raises: TypeError: The `Query` does not have the specified attribute.
[ "Return", "a", "copy", "of", "this", "Query", "but", "with", "attributes", "specified", "as", "keyword", "arguments", "replaced", "by", "the", "keyword", "values", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L81-L103
train
16,989
edx/bok-choy
bok_choy/query.py
Query.transform
def transform(self, transform, desc=None): """ Create a copy of this query, transformed by `transform`. Args: transform (callable): Callable that takes an iterable of values and returns an iterable of transformed values. Keyword Args: desc (str): A description of the transform, to use in log messages. Defaults to the name of the `transform` function. Returns: Query """ if desc is None: desc = u'transform({})'.format(getattr(transform, '__name__', '')) return self.replace( transforms=self.transforms + [transform], desc_stack=self.desc_stack + [desc] )
python
def transform(self, transform, desc=None): """ Create a copy of this query, transformed by `transform`. Args: transform (callable): Callable that takes an iterable of values and returns an iterable of transformed values. Keyword Args: desc (str): A description of the transform, to use in log messages. Defaults to the name of the `transform` function. Returns: Query """ if desc is None: desc = u'transform({})'.format(getattr(transform, '__name__', '')) return self.replace( transforms=self.transforms + [transform], desc_stack=self.desc_stack + [desc] )
[ "def", "transform", "(", "self", ",", "transform", ",", "desc", "=", "None", ")", ":", "if", "desc", "is", "None", ":", "desc", "=", "u'transform({})'", ".", "format", "(", "getattr", "(", "transform", ",", "'__name__'", ",", "''", ")", ")", "return", "self", ".", "replace", "(", "transforms", "=", "self", ".", "transforms", "+", "[", "transform", "]", ",", "desc_stack", "=", "self", ".", "desc_stack", "+", "[", "desc", "]", ")" ]
Create a copy of this query, transformed by `transform`. Args: transform (callable): Callable that takes an iterable of values and returns an iterable of transformed values. Keyword Args: desc (str): A description of the transform, to use in log messages. Defaults to the name of the `transform` function. Returns: Query
[ "Create", "a", "copy", "of", "this", "query", "transformed", "by", "transform", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L105-L126
train
16,990
edx/bok-choy
bok_choy/query.py
Query.map
def map(self, map_fn, desc=None): """ Return a copy of this query, with the values mapped through `map_fn`. Args: map_fn (callable): A callable that takes a single argument and returns a new value. Keyword Args: desc (str): A description of the mapping transform, for use in log message. Defaults to the name of the map function. Returns: Query """ if desc is None: desc = getattr(map_fn, '__name__', '') desc = u'map({})'.format(desc) return self.transform(lambda xs: (map_fn(x) for x in xs), desc=desc)
python
def map(self, map_fn, desc=None): """ Return a copy of this query, with the values mapped through `map_fn`. Args: map_fn (callable): A callable that takes a single argument and returns a new value. Keyword Args: desc (str): A description of the mapping transform, for use in log message. Defaults to the name of the map function. Returns: Query """ if desc is None: desc = getattr(map_fn, '__name__', '') desc = u'map({})'.format(desc) return self.transform(lambda xs: (map_fn(x) for x in xs), desc=desc)
[ "def", "map", "(", "self", ",", "map_fn", ",", "desc", "=", "None", ")", ":", "if", "desc", "is", "None", ":", "desc", "=", "getattr", "(", "map_fn", ",", "'__name__'", ",", "''", ")", "desc", "=", "u'map({})'", ".", "format", "(", "desc", ")", "return", "self", ".", "transform", "(", "lambda", "xs", ":", "(", "map_fn", "(", "x", ")", "for", "x", "in", "xs", ")", ",", "desc", "=", "desc", ")" ]
Return a copy of this query, with the values mapped through `map_fn`. Args: map_fn (callable): A callable that takes a single argument and returns a new value. Keyword Args: desc (str): A description of the mapping transform, for use in log message. Defaults to the name of the map function. Returns: Query
[ "Return", "a", "copy", "of", "this", "query", "with", "the", "values", "mapped", "through", "map_fn", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L128-L146
train
16,991
edx/bok-choy
bok_choy/query.py
Query.filter
def filter(self, filter_fn=None, desc=None, **kwargs): """ Return a copy of this query, with some values removed. Example usages: .. code:: python # Returns a query that matches even numbers q.filter(filter_fn=lambda x: x % 2) # Returns a query that matches elements with el.description == "foo" q.filter(description="foo") Keyword Args: filter_fn (callable): If specified, a function that accepts one argument (the element) and returns a boolean indicating whether to include that element in the results. kwargs: Specify attribute values that an element must have to be included in the results. desc (str): A description of the filter, for use in log messages. Defaults to the name of the filter function or attribute. Raises: TypeError: neither or both of `filter_fn` and `kwargs` are provided. """ if filter_fn is not None and kwargs: raise TypeError('Must supply either a filter_fn or attribute filter parameters to filter(), but not both.') if filter_fn is None and not kwargs: raise TypeError('Must supply one of filter_fn or one or more attribute filter parameters to filter().') if desc is None: if filter_fn is not None: desc = getattr(filter_fn, '__name__', '') elif kwargs: desc = u", ".join([u"{}={!r}".format(key, value) for key, value in kwargs.items()]) desc = u"filter({})".format(desc) if kwargs: def filter_fn(elem): # pylint: disable=function-redefined, missing-docstring return all( getattr(elem, filter_key) == filter_value for filter_key, filter_value in kwargs.items() ) return self.transform(lambda xs: (x for x in xs if filter_fn(x)), desc=desc)
python
def filter(self, filter_fn=None, desc=None, **kwargs): """ Return a copy of this query, with some values removed. Example usages: .. code:: python # Returns a query that matches even numbers q.filter(filter_fn=lambda x: x % 2) # Returns a query that matches elements with el.description == "foo" q.filter(description="foo") Keyword Args: filter_fn (callable): If specified, a function that accepts one argument (the element) and returns a boolean indicating whether to include that element in the results. kwargs: Specify attribute values that an element must have to be included in the results. desc (str): A description of the filter, for use in log messages. Defaults to the name of the filter function or attribute. Raises: TypeError: neither or both of `filter_fn` and `kwargs` are provided. """ if filter_fn is not None and kwargs: raise TypeError('Must supply either a filter_fn or attribute filter parameters to filter(), but not both.') if filter_fn is None and not kwargs: raise TypeError('Must supply one of filter_fn or one or more attribute filter parameters to filter().') if desc is None: if filter_fn is not None: desc = getattr(filter_fn, '__name__', '') elif kwargs: desc = u", ".join([u"{}={!r}".format(key, value) for key, value in kwargs.items()]) desc = u"filter({})".format(desc) if kwargs: def filter_fn(elem): # pylint: disable=function-redefined, missing-docstring return all( getattr(elem, filter_key) == filter_value for filter_key, filter_value in kwargs.items() ) return self.transform(lambda xs: (x for x in xs if filter_fn(x)), desc=desc)
[ "def", "filter", "(", "self", ",", "filter_fn", "=", "None", ",", "desc", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "filter_fn", "is", "not", "None", "and", "kwargs", ":", "raise", "TypeError", "(", "'Must supply either a filter_fn or attribute filter parameters to filter(), but not both.'", ")", "if", "filter_fn", "is", "None", "and", "not", "kwargs", ":", "raise", "TypeError", "(", "'Must supply one of filter_fn or one or more attribute filter parameters to filter().'", ")", "if", "desc", "is", "None", ":", "if", "filter_fn", "is", "not", "None", ":", "desc", "=", "getattr", "(", "filter_fn", ",", "'__name__'", ",", "''", ")", "elif", "kwargs", ":", "desc", "=", "u\", \"", ".", "join", "(", "[", "u\"{}={!r}\"", ".", "format", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", "]", ")", "desc", "=", "u\"filter({})\"", ".", "format", "(", "desc", ")", "if", "kwargs", ":", "def", "filter_fn", "(", "elem", ")", ":", "# pylint: disable=function-redefined, missing-docstring", "return", "all", "(", "getattr", "(", "elem", ",", "filter_key", ")", "==", "filter_value", "for", "filter_key", ",", "filter_value", "in", "kwargs", ".", "items", "(", ")", ")", "return", "self", ".", "transform", "(", "lambda", "xs", ":", "(", "x", "for", "x", "in", "xs", "if", "filter_fn", "(", "x", ")", ")", ",", "desc", "=", "desc", ")" ]
Return a copy of this query, with some values removed. Example usages: .. code:: python # Returns a query that matches even numbers q.filter(filter_fn=lambda x: x % 2) # Returns a query that matches elements with el.description == "foo" q.filter(description="foo") Keyword Args: filter_fn (callable): If specified, a function that accepts one argument (the element) and returns a boolean indicating whether to include that element in the results. kwargs: Specify attribute values that an element must have to be included in the results. desc (str): A description of the filter, for use in log messages. Defaults to the name of the filter function or attribute. Raises: TypeError: neither or both of `filter_fn` and `kwargs` are provided.
[ "Return", "a", "copy", "of", "this", "query", "with", "some", "values", "removed", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L148-L194
train
16,992
edx/bok-choy
bok_choy/query.py
Query._execute
def _execute(self): """ Run the query, generating data from the `seed_fn` and performing transforms on the results. """ data = self.seed_fn() for transform in self.transforms: data = transform(data) return list(data)
python
def _execute(self): """ Run the query, generating data from the `seed_fn` and performing transforms on the results. """ data = self.seed_fn() for transform in self.transforms: data = transform(data) return list(data)
[ "def", "_execute", "(", "self", ")", ":", "data", "=", "self", ".", "seed_fn", "(", ")", "for", "transform", "in", "self", ".", "transforms", ":", "data", "=", "transform", "(", "data", ")", "return", "list", "(", "data", ")" ]
Run the query, generating data from the `seed_fn` and performing transforms on the results.
[ "Run", "the", "query", "generating", "data", "from", "the", "seed_fn", "and", "performing", "transforms", "on", "the", "results", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L196-L203
train
16,993
edx/bok-choy
bok_choy/query.py
Query.execute
def execute(self, try_limit=5, try_interval=0.5, timeout=30): """ Execute this query, retrying based on the supplied parameters. Keyword Args: try_limit (int): The number of times to retry the query. try_interval (float): The number of seconds to wait between each try (float). timeout (float): The maximum number of seconds to spend retrying (float). Returns: The transformed results of the query. Raises: BrokenPromise: The query did not execute without a Selenium error after one or more attempts. """ return Promise( no_error(self._execute), u"Executing {!r}".format(self), try_limit=try_limit, try_interval=try_interval, timeout=timeout, ).fulfill()
python
def execute(self, try_limit=5, try_interval=0.5, timeout=30): """ Execute this query, retrying based on the supplied parameters. Keyword Args: try_limit (int): The number of times to retry the query. try_interval (float): The number of seconds to wait between each try (float). timeout (float): The maximum number of seconds to spend retrying (float). Returns: The transformed results of the query. Raises: BrokenPromise: The query did not execute without a Selenium error after one or more attempts. """ return Promise( no_error(self._execute), u"Executing {!r}".format(self), try_limit=try_limit, try_interval=try_interval, timeout=timeout, ).fulfill()
[ "def", "execute", "(", "self", ",", "try_limit", "=", "5", ",", "try_interval", "=", "0.5", ",", "timeout", "=", "30", ")", ":", "return", "Promise", "(", "no_error", "(", "self", ".", "_execute", ")", ",", "u\"Executing {!r}\"", ".", "format", "(", "self", ")", ",", "try_limit", "=", "try_limit", ",", "try_interval", "=", "try_interval", ",", "timeout", "=", "timeout", ",", ")", ".", "fulfill", "(", ")" ]
Execute this query, retrying based on the supplied parameters. Keyword Args: try_limit (int): The number of times to retry the query. try_interval (float): The number of seconds to wait between each try (float). timeout (float): The maximum number of seconds to spend retrying (float). Returns: The transformed results of the query. Raises: BrokenPromise: The query did not execute without a Selenium error after one or more attempts.
[ "Execute", "this", "query", "retrying", "based", "on", "the", "supplied", "parameters", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L205-L226
train
16,994
edx/bok-choy
bok_choy/query.py
Query.first
def first(self): """ Return a Query that selects only the first element of this Query. If no elements are available, returns a query with no results. Example usage: .. code:: python >> q = Query(lambda: list(range(5))) >> q.first.results [0] Returns: Query """ def _transform(xs): # pylint: disable=missing-docstring, invalid-name try: return [six.next(iter(xs))] except StopIteration: return [] return self.transform(_transform, 'first')
python
def first(self):
    """
    Return a Query that selects only the first element of this Query.
    If no elements are available, returns a query with no results.

    Example usage:

    .. code:: python

        >> q = Query(lambda: list(range(5)))
        >> q.first.results
        [0]

    Returns:
        Query
    """
    def _transform(xs):  # pylint: disable=missing-docstring, invalid-name
        # The builtin `next` on a fresh iterator yields the first element
        # without requiring `xs` to support indexing or len(); it replaces
        # the equivalent `six.next`, which is just an alias for it.
        try:
            return [next(iter(xs))]
        except StopIteration:
            # Empty input: a query with no results.
            return []

    return self.transform(_transform, 'first')
[ "def", "first", "(", "self", ")", ":", "def", "_transform", "(", "xs", ")", ":", "# pylint: disable=missing-docstring, invalid-name", "try", ":", "return", "[", "six", ".", "next", "(", "iter", "(", "xs", ")", ")", "]", "except", "StopIteration", ":", "return", "[", "]", "return", "self", ".", "transform", "(", "_transform", ",", "'first'", ")" ]
Return a Query that selects only the first element of this Query. If no elements are available, returns a query with no results. Example usage: .. code:: python >> q = Query(lambda: list(range(5))) >> q.first.results [0] Returns: Query
[ "Return", "a", "Query", "that", "selects", "only", "the", "first", "element", "of", "this", "Query", ".", "If", "no", "elements", "are", "available", "returns", "a", "query", "with", "no", "results", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L258-L280
train
16,995
edx/bok-choy
bok_choy/query.py
BrowserQuery.attrs
def attrs(self, attribute_name): """ Retrieve HTML attribute values from the elements matched by the query. Example usage: .. code:: python # Assume that the query matches html elements: # <div class="foo"> and <div class="bar"> >> q.attrs('class') ['foo', 'bar'] Args: attribute_name (str): The name of the attribute values to retrieve. Returns: A list of attribute values for `attribute_name`. """ desc = u'attrs({!r})'.format(attribute_name) return self.map(lambda el: el.get_attribute(attribute_name), desc).results
python
def attrs(self, attribute_name):
    """
    Retrieve HTML attribute values from the elements matched by the query.

    Example usage:

    .. code:: python

        # Assume that the query matches html elements:
        # <div class="foo"> and <div class="bar">

        >> q.attrs('class')
        ['foo', 'bar']

    Args:
        attribute_name (str): The name of the attribute values to retrieve.

    Returns:
        A list of attribute values for `attribute_name`.
    """
    def _get_attr(elem):  # pylint: disable=missing-docstring
        return elem.get_attribute(attribute_name)

    return self.map(_get_attr, u'attrs({!r})'.format(attribute_name)).results
[ "def", "attrs", "(", "self", ",", "attribute_name", ")", ":", "desc", "=", "u'attrs({!r})'", ".", "format", "(", "attribute_name", ")", "return", "self", ".", "map", "(", "lambda", "el", ":", "el", ".", "get_attribute", "(", "attribute_name", ")", ",", "desc", ")", ".", "results" ]
Retrieve HTML attribute values from the elements matched by the query. Example usage: .. code:: python # Assume that the query matches html elements: # <div class="foo"> and <div class="bar"> >> q.attrs('class') ['foo', 'bar'] Args: attribute_name (str): The name of the attribute values to retrieve. Returns: A list of attribute values for `attribute_name`.
[ "Retrieve", "HTML", "attribute", "values", "from", "the", "elements", "matched", "by", "the", "query", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L356-L376
train
16,996
edx/bok-choy
bok_choy/query.py
BrowserQuery.selected
def selected(self): """ Check whether all the matched elements are selected. Returns: bool """ query_results = self.map(lambda el: el.is_selected(), 'selected').results if query_results: return all(query_results) return False
python
def selected(self):
    """
    Check whether all the matched elements are selected.

    Returns:
        bool
    """
    results = self.map(lambda el: el.is_selected(), 'selected').results
    # An empty match set is reported as not selected.
    return bool(results) and all(results)
[ "def", "selected", "(", "self", ")", ":", "query_results", "=", "self", ".", "map", "(", "lambda", "el", ":", "el", ".", "is_selected", "(", ")", ",", "'selected'", ")", ".", "results", "if", "query_results", ":", "return", "all", "(", "query_results", ")", "return", "False" ]
Check whether all the matched elements are selected. Returns: bool
[ "Check", "whether", "all", "the", "matched", "elements", "are", "selected", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L417-L427
train
16,997
edx/bok-choy
bok_choy/query.py
BrowserQuery.visible
def visible(self): """ Check whether all matched elements are visible. Returns: bool """ query_results = self.map(lambda el: el.is_displayed(), 'visible').results if query_results: return all(query_results) return False
python
def visible(self):
    """
    Check whether all matched elements are visible.

    Returns:
        bool
    """
    displayed = self.map(lambda el: el.is_displayed(), 'visible').results
    # No matched elements at all counts as not visible.
    return bool(displayed) and all(displayed)
[ "def", "visible", "(", "self", ")", ":", "query_results", "=", "self", ".", "map", "(", "lambda", "el", ":", "el", ".", "is_displayed", "(", ")", ",", "'visible'", ")", ".", "results", "if", "query_results", ":", "return", "all", "(", "query_results", ")", "return", "False" ]
Check whether all matched elements are visible. Returns: bool
[ "Check", "whether", "all", "matched", "elements", "are", "visible", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L430-L440
train
16,998
edx/bok-choy
bok_choy/query.py
BrowserQuery.fill
def fill(self, text): """ Set the text value of each matched element to `text`. Example usage: .. code:: python # Set the text of the first element matched by the query to "Foo" q.first.fill('Foo') Args: text (str): The text used to fill the element (usually a text field or text area). Returns: None """ def _fill(elem): # pylint: disable=missing-docstring elem.clear() elem.send_keys(text) self.map(_fill, u'fill({!r})'.format(text)).execute()
python
def fill(self, text):
    """
    Set the text value of each matched element to `text`.

    Example usage:

    .. code:: python

        # Set the text of the first element matched by the query to "Foo"
        q.first.fill('Foo')

    Args:
        text (str): The text used to fill the element (usually a text field
            or text area).

    Returns:
        None
    """
    desc = u'fill({!r})'.format(text)

    def _set_text(elem):  # pylint: disable=missing-docstring
        # Clear any existing value before typing the replacement text.
        elem.clear()
        elem.send_keys(text)

    self.map(_set_text, desc).execute()
[ "def", "fill", "(", "self", ",", "text", ")", ":", "def", "_fill", "(", "elem", ")", ":", "# pylint: disable=missing-docstring", "elem", ".", "clear", "(", ")", "elem", ".", "send_keys", "(", "text", ")", "self", ".", "map", "(", "_fill", ",", "u'fill({!r})'", ".", "format", "(", "text", ")", ")", ".", "execute", "(", ")" ]
Set the text value of each matched element to `text`. Example usage: .. code:: python # Set the text of the first element matched by the query to "Foo" q.first.fill('Foo') Args: text (str): The text used to fill the element (usually a text field or text area). Returns: None
[ "Set", "the", "text", "value", "of", "each", "matched", "element", "to", "text", "." ]
cdd0d423419fc0c49d56a9226533aa1490b60afc
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L486-L507
train
16,999