query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Counts the number of lines in a file.
def count_lines_in_file(self, fname=''):
    """
    Count the number of lines in a text file.

    fname -- path of the file to count; when empty, falls back to
             self.fullname (the file this object wraps).
    Returns the line count (0 for an empty file), or 0 if the file
    cannot be read.
    """
    if fname == '':
        fname = self.fullname
    try:
        # errors='ignore' keeps counting past undecodable bytes;
        # builtin open() replaces the old codecs.open() call.
        with open(fname, 'r', encoding='utf8', errors='ignore') as f:
            # sum over the iterator so an empty file counts as 0 lines
            # (the old enumerate/i+1 version reported 1 for empty files)
            return sum(1 for _ in f)
    except Exception as ex:
        # best-effort: report the problem and return 0 rather than raising
        print('cant count lines in file in "', fname, '":', str(ex))
        return 0
2,600
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L109-L122
[ "def", "load_from_package", "(", ")", ":", "try", ":", "import", "pkg_resources", "f", "=", "pkg_resources", ".", "resource_stream", "(", "meta", ".", "__app__", ",", "'cache/unicategories.cache'", ")", "dversion", ",", "mversion", ",", "data", "=", "pickle", ".", "load", "(", "f", ")", "if", "dversion", "==", "data_version", "and", "mversion", "==", "module_version", ":", "return", "data", "warnings", ".", "warn", "(", "'Unicode unicategories database is outdated. '", "'Please reinstall unicategories module to regenerate it.'", "if", "dversion", "<", "data_version", "else", "'Incompatible unicategories database. '", "'Please reinstall unicategories module to regenerate it.'", ")", "except", "(", "ValueError", ",", "EOFError", ")", ":", "warnings", ".", "warn", "(", "'Incompatible unicategories database. '", "'Please reinstall unicategories module to regenerate it.'", ")", "except", "(", "ImportError", ",", "FileNotFoundError", ")", ":", "pass" ]
counts non blank lines
def count_lines_of_code(self, fname=''):
    """Count the non-blank lines (lines of code) in a file.

    fname -- path to count; defaults to self.fullname when empty.
    Returns 0 if the file cannot be read.
    """
    if fname == '':
        fname = self.fullname
    try:
        with open(fname) as f:
            # a line counts only when it has non-whitespace content
            return sum(1 for row in f if row.strip() != '')
    except Exception as ex:
        print('cant count lines of code in "', fname, '":', str(ex))
        return 0
2,601
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L124-L137
[ "def", "transform_data", "(", "self", ",", "data", ")", ":", "def", "type_check", "(", "value", ")", ":", "'''Type check values for JSON serialization. Native Python JSON\n serialization will not recognize some Numpy data types properly,\n so they must be explictly converted.'''", "if", "pd", ".", "isnull", "(", "value", ")", ":", "return", "None", "elif", "(", "isinstance", "(", "value", ",", "pd", ".", "tslib", ".", "Timestamp", ")", "or", "isinstance", "(", "value", ",", "pd", ".", "Period", ")", ")", ":", "return", "time", ".", "mktime", "(", "value", ".", "timetuple", "(", ")", ")", "elif", "isinstance", "(", "value", ",", "(", "int", ",", "np", ".", "integer", ")", ")", ":", "return", "int", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "(", "float", ",", "np", ".", "float_", ")", ")", ":", "return", "float", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "return", "str", "(", "value", ")", "else", ":", "return", "value", "objectify", "=", "lambda", "dat", ":", "[", "{", "\"x\"", ":", "type_check", "(", "x", ")", ",", "\"y\"", ":", "type_check", "(", "y", ")", "}", "for", "x", ",", "y", "in", "dat", ".", "iteritems", "(", ")", "]", "self", ".", "raw_data", "=", "data", "if", "isinstance", "(", "data", ",", "pd", ".", "Series", ")", ":", "data", ".", "name", "=", "data", ".", "name", "or", "'data'", "self", ".", "json_data", "=", "[", "{", "'name'", ":", "data", ".", "name", ",", "'data'", ":", "objectify", "(", "data", ")", "}", "]", "elif", "isinstance", "(", "data", ",", "pd", ".", "DataFrame", ")", ":", "self", ".", "json_data", "=", "[", "{", "'name'", ":", "x", "[", "0", "]", ",", "'data'", ":", "objectify", "(", "x", "[", "1", "]", ")", "}", "for", "x", "in", "data", ".", "iteritems", "(", ")", "]" ]
retrieve a sample of the file
def get_file_sample(self, numLines=10):
    """Return the first numLines lines of self.fullname, each prefixed
    with its zero-padded line number.

    On any error the lines collected so far (possibly '') are returned.
    """
    res = ''
    try:
        with open(self.fullname, 'r') as f:
            for num, row in enumerate(f):
                res += str(num).zfill(5) + ' ' + row
                if num >= numLines - 1:
                    break
        return res
    except Exception as ex:
        print('cant get_file_sample in "', self.fullname, '":', str(ex))
        return res
2,602
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L140-L152
[ "def", "mangle_volume", "(", "citation_elements", ")", ":", "volume_re", "=", "re", ".", "compile", "(", "ur\"(\\d+)([A-Z])\"", ",", "re", ".", "U", "|", "re", ".", "I", ")", "for", "el", "in", "citation_elements", ":", "if", "el", "[", "'type'", "]", "==", "'JOURNAL'", ":", "matches", "=", "volume_re", ".", "match", "(", "el", "[", "'volume'", "]", ")", "if", "matches", ":", "el", "[", "'volume'", "]", "=", "matches", ".", "group", "(", "2", ")", "+", "matches", ".", "group", "(", "1", ")", "return", "citation_elements" ]
adds a line of text to a file
def append_text(self, txt):
    """Append txt to the end of self.fullname (no newline is added)."""
    with open(self.fullname, "a") as handle:
        handle.write(txt)
2,603
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L155-L158
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
load a file to a string
def load_file_to_string(self):
    """Return the entire contents of self.fullname as a string.

    Returns '' when the file cannot be opened (IOError).
    """
    try:
        with open(self.fullname, 'r') as f:
            return f.read()
    except IOError:
        return ''
2,604
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L172-L179
[ "def", "get_user_last_submissions", "(", "self", ",", "limit", "=", "5", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "request", "=", "{", "}", "request", ".", "update", "(", "{", "\"username\"", ":", "self", ".", "_user_manager", ".", "session_username", "(", ")", "}", ")", "# Before, submissions were first sorted by submission date, then grouped", "# and then resorted by submission date before limiting. Actually, grouping", "# and pushing, keeping the max date, followed by result filtering is much more", "# efficient", "data", "=", "self", ".", "_database", ".", "submissions", ".", "aggregate", "(", "[", "{", "\"$match\"", ":", "request", "}", ",", "{", "\"$group\"", ":", "{", "\"_id\"", ":", "{", "\"courseid\"", ":", "\"$courseid\"", ",", "\"taskid\"", ":", "\"$taskid\"", "}", ",", "\"submitted_on\"", ":", "{", "\"$max\"", ":", "\"$submitted_on\"", "}", ",", "\"submissions\"", ":", "{", "\"$push\"", ":", "{", "\"_id\"", ":", "\"$_id\"", ",", "\"result\"", ":", "\"$result\"", ",", "\"status\"", ":", "\"$status\"", ",", "\"courseid\"", ":", "\"$courseid\"", ",", "\"taskid\"", ":", "\"$taskid\"", ",", "\"submitted_on\"", ":", "\"$submitted_on\"", "}", "}", ",", "}", "}", ",", "{", "\"$project\"", ":", "{", "\"submitted_on\"", ":", "1", ",", "\"submissions\"", ":", "{", "# This could be replaced by $filter if mongo v3.2 is set as dependency", "\"$setDifference\"", ":", "[", "{", "\"$map\"", ":", "{", "\"input\"", ":", "\"$submissions\"", ",", "\"as\"", ":", "\"submission\"", ",", "\"in\"", ":", "{", "\"$cond\"", ":", "[", "{", "\"$eq\"", ":", "[", "\"$submitted_on\"", ",", "\"$$submission.submitted_on\"", "]", "}", ",", "\"$$submission\"", ",", "False", "]", "}", "}", "}", ",", "[", "False", "]", "]", "}", "}", "}", ",", "{", "\"$sort\"", ":", "{", "\"submitted_on\"", ":", "pymongo", ".", "DESCENDING", "}", "}", ",", "{", "\"$limit\"", ":", "limit", "}", "]", ")", "return", "[", "item", "[", "\"submissions\"", "]", "[", "0", "]", 
"for", "item", "in", "data", "]" ]
load a file to a list
def load_file_to_list(self):
    """Return the contents of self.fullname as a list of lines.

    Returns an empty list when the file cannot be opened (IOError).
    """
    rows = []
    try:
        with open(self.fullname, 'r') as f:
            rows = [row for row in f]
        return rows
    except IOError:
        return rows
2,605
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L181-L190
[ "def", "_validate_checksum", "(", "self", ",", "buffer", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"Validating the buffer\"", ")", "if", "len", "(", "buffer", ")", "==", "0", ":", "self", ".", "_log", ".", "debug", "(", "\"Buffer was empty\"", ")", "if", "self", ".", "_conn", ".", "isOpen", "(", ")", ":", "self", ".", "_log", ".", "debug", "(", "'Closing connection'", ")", "self", ".", "_conn", ".", "close", "(", ")", "return", "False", "p0", "=", "hex2int", "(", "buffer", "[", "0", "]", ")", "p1", "=", "hex2int", "(", "buffer", "[", "1", "]", ")", "checksum", "=", "sum", "(", "[", "hex2int", "(", "c", ")", "for", "c", "in", "buffer", "[", ":", "35", "]", "]", ")", "&", "0xFF", "p35", "=", "hex2int", "(", "buffer", "[", "35", "]", ")", "if", "p0", "!=", "165", "or", "p1", "!=", "150", "or", "p35", "!=", "checksum", ":", "self", ".", "_log", ".", "debug", "(", "\"Buffer checksum was not valid\"", ")", "return", "False", "return", "True" ]
get a HTML formatted view of all Python programs in all subfolders of AIKIF including imports and lists of functions and classes
def get_program_list():
    """Return an HTML table listing all Python programs in all subfolders
    of AIKIF, with a subfolder heading row emitted for each package
    (__init__.py) encountered and a detail row for every other file."""
    cols = ['FileName', 'FileSize', 'Functions', 'Imports']
    txt = '<TABLE width=90% border=0>'
    txt += format_file_table_header(cols)
    for fname in web.GetFileList(aikif_folder, ['*.py'], 'N'):
        if '__init__.py' in fname:
            txt += '<TR><TD colspan=4><HR><H3>' + get_subfolder(fname) + '</h3></td></tr>\n'
        else:
            txt += format_file_to_html_row(fname, cols)
    txt += '</TABLE>\n\n'
    return txt
2,606
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L32-L49
[ "def", "loadJSON", "(", "self", ",", "jdata", ")", ":", "super", "(", "StringColumn", ",", "self", ")", ".", "loadJSON", "(", "jdata", ")", "# load additional info", "self", ".", "__maxLength", "=", "jdata", ".", "get", "(", "'maxLength'", ")", "or", "self", ".", "__maxLength" ]
extracts a displayable subfolder name from full filename
def get_subfolder(txt):
    """
    Extract a displayable subfolder name from a full filename.

    Returns the part of txt from the '<sep>aikif<sep>' marker onward,
    with any trailing '__init__.py' removed. If the marker is absent,
    the whole path (minus '__init__.py') is returned instead of the
    meaningless 1-character slice txt[-1:] the old code produced when
    find() returned -1.
    """
    root_folder = os.sep + 'aikif' + os.sep
    ndx = txt.find(root_folder, 1)
    if ndx == -1:
        # marker not found - fall back to the full path
        return txt.replace('__init__.py', '')
    return txt[ndx:].replace('__init__.py', '')
2,607
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L51-L57
[ "def", "count_signatures", "(", "self", ",", "conditions", "=", "{", "}", ")", ":", "url", "=", "self", ".", "SIGNS_COUNT_URL", "+", "'?'", "for", "key", ",", "value", "in", "conditions", ".", "items", "(", ")", ":", "if", "key", "is", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
get a list of functions from a Python program
def get_functions(fname):
    """Return an HTML fragment listing the function and class definitions
    found in the Python file fname, one <PRE> element per definition."""
    txt = ''
    with open(fname, 'r') as f:
        for line in f:
            if line.strip()[0:4] == 'def ':
                cleaned = strip_text_after_string(strip_text_after_string(line, '#')[4:], ':')
                txt += '<PRE>' + cleaned + '</PRE>\n'
            if line[0:5] == 'class':
                cleaned = strip_text_after_string(strip_text_after_string(line, '#'), ':')
                txt += '<PRE>' + cleaned + '</PRE>\n'
    return txt + '<BR>'
2,608
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L95-L104
[ "def", "get_partition_scores", "(", "image", ",", "min_w", "=", "1", ",", "min_h", "=", "1", ")", ":", "h", ",", "w", "=", "image", ".", "shape", "[", ":", "2", "]", "if", "w", "==", "0", "or", "h", "==", "0", ":", "return", "[", "]", "area", "=", "h", "*", "w", "cnz", "=", "numpy", ".", "count_nonzero", "total", "=", "cnz", "(", "image", ")", "if", "total", "==", "0", "or", "area", "==", "total", ":", "return", "[", "]", "if", "h", "<", "min_h", "*", "2", ":", "y_c", "=", "[", "]", "else", ":", "y_c", "=", "[", "(", "-", "abs", "(", "(", "count", "/", "(", "(", "h", "-", "y", ")", "*", "w", ")", ")", "-", "(", "(", "total", "-", "count", ")", "/", "(", "y", "*", "w", ")", ")", ")", ",", "y", ",", "0", ")", "for", "count", ",", "y", "in", "(", "(", "cnz", "(", "image", "[", "y", ":", "]", ")", ",", "y", ")", "for", "y", "in", "range", "(", "min_h", ",", "image", ".", "shape", "[", "0", "]", "-", "min_h", ")", ")", "]", "if", "w", "<", "min_w", "*", "2", ":", "x_c", "=", "[", "]", "else", ":", "x_c", "=", "[", "(", "-", "abs", "(", "(", "count", "/", "(", "h", "*", "(", "w", "-", "x", ")", ")", ")", "-", "(", "(", "total", "-", "count", ")", "/", "(", "h", "*", "x", ")", ")", ")", ",", "x", ",", "1", ")", "for", "count", ",", "x", "in", "(", "(", "cnz", "(", "image", "[", ":", ",", "x", ":", "]", ")", ",", "x", ")", "for", "x", "in", "range", "(", "min_w", ",", "image", ".", "shape", "[", "1", "]", "-", "min_w", ")", ")", "]", "return", "sorted", "(", "x_c", "+", "y_c", ")" ]
used to strip any poorly documented comments at the end of function defs
def strip_text_after_string(txt, junk):
    """Return txt truncated just before the first occurrence of junk.

    txt is returned unchanged when junk does not occur in it.
    """
    if junk not in txt:
        return txt
    return txt[:txt.index(junk)]
2,609
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L106-L111
[ "def", "format_log_context", "(", "msg", ",", "connection", "=", "None", ",", "keyspace", "=", "None", ")", ":", "connection_info", "=", "connection", "or", "'DEFAULT_CONNECTION'", "if", "keyspace", ":", "msg", "=", "'[Connection: {0}, Keyspace: {1}] {2}'", ".", "format", "(", "connection_info", ",", "keyspace", ",", "msg", ")", "else", ":", "msg", "=", "'[Connection: {0}] {1}'", ".", "format", "(", "connection_info", ",", "msg", ")", "return", "msg" ]
get a list of imports from a Python program
def get_imports(fname):
    """Return an HTML fragment listing the top-level 'import X' lines of
    the Python file fname (any ' as ' alias suffix is stripped)."""
    txt = ''
    with open(fname, 'r') as f:
        for line in f:
            if line[0:6] != 'import':
                continue
            txt += '<PRE>' + strip_text_after_string(line[7:], ' as ') + '</PRE>\n'
    return txt + '<BR>'
2,610
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L113-L120
[ "def", "get_partition_scores", "(", "image", ",", "min_w", "=", "1", ",", "min_h", "=", "1", ")", ":", "h", ",", "w", "=", "image", ".", "shape", "[", ":", "2", "]", "if", "w", "==", "0", "or", "h", "==", "0", ":", "return", "[", "]", "area", "=", "h", "*", "w", "cnz", "=", "numpy", ".", "count_nonzero", "total", "=", "cnz", "(", "image", ")", "if", "total", "==", "0", "or", "area", "==", "total", ":", "return", "[", "]", "if", "h", "<", "min_h", "*", "2", ":", "y_c", "=", "[", "]", "else", ":", "y_c", "=", "[", "(", "-", "abs", "(", "(", "count", "/", "(", "(", "h", "-", "y", ")", "*", "w", ")", ")", "-", "(", "(", "total", "-", "count", ")", "/", "(", "y", "*", "w", ")", ")", ")", ",", "y", ",", "0", ")", "for", "count", ",", "y", "in", "(", "(", "cnz", "(", "image", "[", "y", ":", "]", ")", ",", "y", ")", "for", "y", "in", "range", "(", "min_h", ",", "image", ".", "shape", "[", "0", "]", "-", "min_h", ")", ")", "]", "if", "w", "<", "min_w", "*", "2", ":", "x_c", "=", "[", "]", "else", ":", "x_c", "=", "[", "(", "-", "abs", "(", "(", "count", "/", "(", "h", "*", "(", "w", "-", "x", ")", ")", ")", "-", "(", "(", "total", "-", "count", ")", "/", "(", "h", "*", "x", ")", ")", ")", ",", "x", ",", "1", ")", "for", "count", ",", "x", "in", "(", "(", "cnz", "(", "image", "[", ":", ",", "x", ":", "]", ")", ",", "x", ")", "for", "x", "in", "range", "(", "min_w", ",", "image", ".", "shape", "[", "1", "]", "-", "min_w", ")", ")", "]", "return", "sorted", "(", "x_c", "+", "y_c", ")" ]
This is a sample program showing how a learning agent can be logged using AIKIF. The idea is that this main function is your algorithm, which runs until it finds a successful result. The result is returned and the time taken is logged. Optionally, additional functions can be provided to allow easy logging access.
def main(arg1=55, arg2='test', arg3=None):
    """Dummy AI algorithm used to demonstrate AIKIF agent logging.

    Computes a fake result from arg1 and the first element of arg3
    (a default list is used when arg3 is None) and returns it.
    """
    print('Starting dummy AI algorithm with :', arg1, arg2, arg3)
    arg3 = [5, 6, 7, 5, 4, ] if arg3 is None else arg3
    result = arg1 + arg3[0] * 7566.545  # dummy result
    print('Done - returning ', result)
    return result
2,611
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/learn/dummy_learn_1.py#L5-L23
[ "def", "vrel", "(", "v1", ",", "v2", ")", ":", "v1", "=", "stypes", ".", "toDoubleVector", "(", "v1", ")", "v2", "=", "stypes", ".", "toDoubleVector", "(", "v2", ")", "return", "libspice", ".", "vrel_c", "(", "v1", ",", "v2", ")" ]
get the value stored for a key in redis
def get(self, key):
    """Fetch the value stored under key via the redis connection,
    echoing it to stdout before returning it."""
    value = self.connection.get(key)
    print(value)
    return value
2,612
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/if_redis.py#L108-L112
[ "def", "ValidateEndConfig", "(", "self", ",", "config_obj", ",", "errors_fatal", "=", "True", ")", ":", "errors", "=", "super", "(", "WindowsClientRepacker", ",", "self", ")", ".", "ValidateEndConfig", "(", "config_obj", ",", "errors_fatal", "=", "errors_fatal", ")", "install_dir", "=", "config_obj", "[", "\"Client.install_path\"", "]", "for", "path", "in", "config_obj", "[", "\"Client.tempdir_roots\"", "]", ":", "if", "path", ".", "startswith", "(", "\"/\"", ")", ":", "errors", ".", "append", "(", "\"Client.tempdir_root %s starts with /, probably has Unix path.\"", "%", "path", ")", "if", "not", "path", ".", "startswith", "(", "install_dir", ")", ":", "errors", ".", "append", "(", "\"Client.tempdir_root %s is not inside the install_dir %s, this is \"", "\"a security risk\"", "%", "(", "(", "path", ",", "install_dir", ")", ")", ")", "if", "config_obj", ".", "Get", "(", "\"Logging.path\"", ")", ".", "startswith", "(", "\"/\"", ")", ":", "errors", ".", "append", "(", "\"Logging.path starts with /, probably has Unix path. %s\"", "%", "config_obj", "[", "\"Logging.path\"", "]", ")", "if", "\"Windows\\\\\"", "in", "config_obj", ".", "GetRaw", "(", "\"Logging.path\"", ")", ":", "errors", ".", "append", "(", "\"Windows in Logging.path, you probably want \"", "\"%(WINDIR|env) instead\"", ")", "if", "not", "config_obj", "[", "\"Client.binary_name\"", "]", ".", "endswith", "(", "\".exe\"", ")", ":", "errors", ".", "append", "(", "\"Missing .exe extension on binary_name %s\"", "%", "config_obj", "[", "\"Client.binary_name\"", "]", ")", "if", "not", "config_obj", "[", "\"Nanny.binary\"", "]", ".", "endswith", "(", "\".exe\"", ")", ":", "errors", ".", "append", "(", "\"Missing .exe extension on nanny_binary\"", ")", "if", "errors_fatal", "and", "errors", ":", "for", "error", "in", "errors", ":", "logging", ".", "error", "(", "\"Build Config Error: %s\"", ",", "error", ")", "raise", "RuntimeError", "(", "\"Bad configuration generated. 
Terminating.\"", ")", "else", ":", "return", "errors" ]
Creator function for creating an instance of a Packer image script .
def creator(_, config):
    """Create a Packer instance for building a Packer image.

    Renders the Packer JSON script from the configured values, writes it
    to a temporary file (skipped on a dry run) and renders the Bash
    wrapper script that invokes Packer on it into config.script.
    """
    rendered = render(config.script, model=config.model, env=config.env,
                      variables=config.variables, item=config.item)
    if config.dry_run:
        json_filename = "packer.dry.run.see.comment"
    else:
        # persist the rendered Packer JSON; the wrapper references it by name
        json_filename = write_temporary_file(rendered, 'packer-', '.json')
        rendered = ''
    # render the Bash script that actually generates the Packer image
    template_path = os.path.join(os.path.dirname(__file__), 'templates/packer-image.sh.j2')
    with open(template_path) as stream:
        wrapper_template = stream.read()
    config.script = render(wrapper_template, debug=config.debug,
                           packer_content=rendered,
                           packer_filename=json_filename)
    return Packer(config)
2,613
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/packer.py#L37-L57
[ "def", "console_wait_for_keypress", "(", "flush", ":", "bool", ")", "->", "Key", ":", "key", "=", "Key", "(", ")", "lib", ".", "TCOD_console_wait_for_keypress_wrapper", "(", "key", ".", "key_p", ",", "flush", ")", "return", "key" ]
Generates an optimized JPEG from JPEG - encoded bytes .
def process_jpeg_bytes(bytes_in, quality=DEFAULT_JPEG_QUALITY):
    """Generate an optimized JPEG from JPEG-encoded bytes via Guetzli.

    Raises ValueError when Guetzli cannot decode the input image.
    """
    out_ptr = ffi.new("char**")
    # attach the Guetzli deallocator so the output buffer is freed with the pointer
    out_ptr_gc = ffi.gc(out_ptr, lib.guetzli_free_bytes)
    length = lib.guetzli_process_jpeg_bytes(bytes_in, len(bytes_in), out_ptr_gc, quality)
    if length == 0:
        raise ValueError("Invalid JPEG: Guetzli was not able to decode the image")  # noqa
    return ffi.unpack(ffi.cast("char*", out_ptr_gc[0]), length)
2,614
https://github.com/wanadev/pyguetzli/blob/4e0c221f5e8f23adb38505c3c1c5a09294b7ee98/pyguetzli/guetzli.py#L13-L46
[ "def", "kelvin_to_celsius", "(", "kelvintemp", ")", ":", "if", "kelvintemp", "<", "0", ":", "raise", "ValueError", "(", "__name__", "+", "\": negative temperature values not allowed\"", ")", "celsiustemp", "=", "kelvintemp", "-", "KELVIN_OFFSET", "return", "float", "(", "\"{0:.2f}\"", ".", "format", "(", "celsiustemp", ")", ")" ]
Generates an optimized JPEG from RGB bytes .
def process_rgb_bytes(bytes_in, width, height, quality=DEFAULT_JPEG_QUALITY):
    """Generate an optimized JPEG from raw RGB bytes via Guetzli.

    Raises ValueError when len(bytes_in) is not width * height * 3
    (3 bytes per pixel).
    """
    if len(bytes_in) != width * height * 3:
        raise ValueError("bytes_in length is not coherent with given width and height")  # noqa
    out_ptr = ffi.new("char**")
    # attach the Guetzli deallocator so the output buffer is freed with the pointer
    out_ptr_gc = ffi.gc(out_ptr, lib.guetzli_free_bytes)
    length = lib.guetzli_process_rgb_bytes(bytes_in, width, height, out_ptr_gc, quality)
    return ffi.unpack(ffi.cast("char*", out_ptr_gc[0]), length)
2,615
https://github.com/wanadev/pyguetzli/blob/4e0c221f5e8f23adb38505c3c1c5a09294b7ee98/pyguetzli/guetzli.py#L49-L90
[ "def", "publish_topology_opened", "(", "self", ",", "topology_id", ")", ":", "event", "=", "TopologyOpenedEvent", "(", "topology_id", ")", "for", "subscriber", "in", "self", ".", "__topology_listeners", ":", "try", ":", "subscriber", ".", "opened", "(", "event", ")", "except", "Exception", ":", "_handle_exception", "(", ")" ]
Decorator for a class to make a singleton out of it .
def singleton(the_class):
    """Decorator for a class to make a singleton out of it.

    One instance is kept per distinct set of constructor arguments;
    calling the decorated class again with the same arguments returns
    the already-created instance.
    """
    class_instances = {}

    def get_instance(*args, **kwargs):
        """Create or just return the one and only class instance.

        The singleton depends on the parameters used in __init__.

        @type args: list
        @param args: positional arguments of the constructor.
        @type kwargs: dict
        @param kwargs: named parameters of the constructor.
        @rtype: decorated class type
        @return: singleton instance of decorated class.
        """
        # sort the kwargs so that f(a=1, b=2) and f(b=2, a=1) map to the
        # same cache key; the previous str(kwargs) key depended on the
        # keyword order at the call site and created spurious instances.
        key = (the_class, args, str(sorted(kwargs.items())))
        if key not in class_instances:
            class_instances[key] = the_class(*args, **kwargs)
        return class_instances[key]

    return get_instance
2,616
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/decorators.py#L25-L53
[ "def", "_write_image_description", "(", "self", ")", ":", "if", "(", "not", "self", ".", "_datashape", "or", "self", ".", "_datashape", "[", "0", "]", "==", "1", "or", "self", ".", "_descriptionoffset", "<=", "0", ")", ":", "return", "colormapped", "=", "self", ".", "_colormap", "is", "not", "None", "if", "self", ".", "_imagej", ":", "isrgb", "=", "self", ".", "_shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", "description", "=", "imagej_description", "(", "self", ".", "_datashape", ",", "isrgb", ",", "colormapped", ",", "*", "*", "self", ".", "_metadata", ")", "else", ":", "description", "=", "json_description", "(", "self", ".", "_datashape", ",", "*", "*", "self", ".", "_metadata", ")", "# rewrite description and its length to file", "description", "=", "description", ".", "encode", "(", "'utf-8'", ")", "description", "=", "description", "[", ":", "self", ".", "_descriptionlen", "-", "1", "]", "pos", "=", "self", ".", "_fh", ".", "tell", "(", ")", "self", ".", "_fh", ".", "seek", "(", "self", ".", "_descriptionoffset", ")", "self", ".", "_fh", ".", "write", "(", "description", ")", "self", ".", "_fh", ".", "seek", "(", "self", ".", "_descriptionlenoffset", ")", "self", ".", "_fh", ".", "write", "(", "struct", ".", "pack", "(", "self", ".", "_byteorder", "+", "self", ".", "_offsetformat", ",", "len", "(", "description", ")", "+", "1", ")", ")", "self", ".", "_fh", ".", "seek", "(", "pos", ")", "self", ".", "_descriptionoffset", "=", "0", "self", ".", "_descriptionlenoffset", "=", "0", "self", ".", "_descriptionlen", "=", "0" ]
builds a 2048 starting board Printing Grid 0 0 0 2 0 0 4 0 0 0 0 0 0 0 0 0
def build_board_2048():
    """Build, print and return a 4x4 starting board for 2048 with two
    seeded tiles drawn from the values 2 and 4."""
    board = Grid(4, 4, [2, 4])
    board.new_tile()
    board.new_tile()
    print(board)
    return board
2,617
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/game_board_utils.py#L10-L23
[ "def", "units", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "if", "\"units\"", "in", "self", ".", "attrs", ".", "keys", "(", ")", ":", "self", ".", "attrs", ".", "pop", "(", "\"units\"", ")", "else", ":", "try", ":", "self", ".", "attrs", "[", "\"units\"", "]", "=", "value", "except", "AttributeError", ":", "self", ".", "attrs", "[", "\"units\"", "]", "=", "value" ]
builds a checkers starting board Printing Grid 0 B 0 B 0 B 0 B B 0 B 0 B 0 B 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 W 0 W 0 W 0 W W 0 W 0 W 0 W 0
def build_board_checkers():
    """Build, print and return an 8x8 checkers starting board.

    Rows 0-1 receive "B" pieces and rows 6-7 receive "W" pieces on
    alternating columns.
    """
    grd = Grid(8, 8, ["B", "W"])
    for c in range(4):
        # NOTE(review): at c == 0 this writes column -1; whether
        # Grid.set_tile wraps negative indices (last column) or ignores
        # them is not visible here - confirm against the Grid class
        # before relying on the row-0 layout.
        grd.set_tile(0, (c * 2) - 1, "B")
        grd.set_tile(1, (c * 2) - 0, "B")
        grd.set_tile(6, (c * 2) + 1, "W")
        grd.set_tile(7, (c * 2) - 0, "W")
    print(grd)
    return grd
2,618
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/game_board_utils.py#L25-L45
[ "def", "_forget_annot", "(", "self", ",", "annot", ")", ":", "aid", "=", "id", "(", "annot", ")", "if", "aid", "in", "self", ".", "_annot_refs", ":", "self", ".", "_annot_refs", "[", "aid", "]", "=", "None" ]
tests for this module
def TEST():
    """Smoke tests for this module: builds a 2048-style grid, a letter
    grid and a checkers board, printing each along the way."""
    grid_one = Grid(4, 4, [2, 4])
    grid_one.new_tile()
    grid_one.new_tile()
    print(grid_one)
    print("There are ", grid_one.count_blank_positions(), " blanks in grid 1\n")
    grid_two = Grid(5, 5, ['A', 'B'])
    grid_two.new_tile(26)
    print(grid_two)
    build_board_checkers()
    print("There are ", grid_two.count_blank_positions(), " blanks in grid 2")
2,619
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/game_board_utils.py#L47-L60
[ "def", "create_or_update_secret", "(", "self", ",", "path", ",", "secret", ",", "cas", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'options'", ":", "{", "}", ",", "'data'", ":", "secret", ",", "}", "if", "cas", "is", "not", "None", ":", "params", "[", "'options'", "]", "[", "'cas'", "]", "=", "cas", "api_path", "=", "'/v1/{mount_point}/data/{path}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "path", "=", "path", ")", "response", "=", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "return", "response", ".", "json", "(", ")" ]
Ask blobstore api for an url to directly serve the file
def url(self, name):
    """Ask the blobstore API for a URL that directly serves the file
    stored at the given Google Cloud Storage name."""
    gs_key = blobstore.create_gs_key('/gs' + name)
    return images.get_serving_url(gs_key)
2,620
https://github.com/masci/django-appengine-toolkit/blob/9ffe8b05a263889787fb34a3e28ebc66b1f0a1d2/appengine_toolkit/storage.py#L74-L79
[ "def", "is_collapsed", "(", "self", ",", "id_user", ")", ":", "return", "CmtCOLLAPSED", ".", "query", ".", "filter", "(", "db", ".", "and_", "(", "CmtCOLLAPSED", ".", "id_bibrec", "==", "self", ".", "id_bibrec", ",", "CmtCOLLAPSED", ".", "id_cmtRECORDCOMMENT", "==", "self", ".", "id", ",", "CmtCOLLAPSED", ".", "id_user", "==", "id_user", ")", ")", ".", "count", "(", ")", ">", "0" ]
Processing one stage .
def process(self, stage):
    """Process one pipeline stage.

    Each entry is either an "env" block, merged into the level-1
    environment, or a tasks block which is executed; processing stops at
    the first failing tasks block.

    Returns a dict {'success': bool, 'output': list of captured lines}.
    """
    self.logger.info("Processing pipeline stage '%s'", self.title)
    collected = []
    for entry in stage:
        key = next(iter(entry.keys()))
        if key == "env":
            self.pipeline.data.env_list[1].update(entry[key])
            self.logger.debug("Updating environment at level 1 with %s",
                              self.pipeline.data.env_list[1])
            continue
        # if not "env" then it must be "tasks" (schema):
        parallel = re.match(r"tasks\(parallel\)", key) is not None
        result = Tasks(self.pipeline, parallel).process(entry[key])
        collected.extend(result['output'])
        if not result['success']:
            self.event.failed()
            return {'success': False, 'output': collected}
    self.event.succeeded()
    return {'success': True, 'output': collected}
2,621
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/stage.py#L47-L69
[ "def", "deleteObserver", "(", "self", ",", "observer", ")", ":", "Observable", ".", "deleteObserver", "(", "self", ",", "observer", ")", "# If self.startOnDemand is True, the reader monitoring", "# thread is stopped when there are no more observers.", "if", "self", ".", "startOnDemand", ":", "if", "0", "==", "self", ".", "countObservers", "(", ")", ":", "self", ".", "rmthread", ".", "stop", "(", ")", "del", "self", ".", "rmthread", "self", ".", "rmthread", "=", "None" ]
Fetch trading fees .
def trading_fees(self) -> TradingFees:
    """Fetch the trading fees for this client's market."""
    fetcher = self._fetch('trading fees', self.market.code)
    return fetcher(self._trading_fees)()
2,622
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L202-L204
[ "def", "__reconnect", "(", "self", ")", ":", "# Cancel the timer, if any", "self", ".", "__stop_timer", "(", ")", "try", ":", "# Try to reconnect the server", "result_code", "=", "self", ".", "__mqtt", ".", "reconnect", "(", ")", "if", "result_code", ":", "# Something wrong happened", "message", "=", "\"Error connecting the MQTT server: {0} ({1})\"", ".", "format", "(", "result_code", ",", "paho", ".", "error_string", "(", "result_code", ")", ")", "_logger", ".", "error", "(", "message", ")", "raise", "ValueError", "(", "message", ")", "except", "Exception", "as", "ex", ":", "# Something went wrong: log it", "_logger", ".", "error", "(", "\"Exception connecting server: %s\"", ",", "ex", ")", "finally", ":", "# Prepare a reconnection timer. It will be cancelled by the", "# on_connect callback", "self", ".", "__start_timer", "(", "10", ")" ]
Fetch the market ticker .
def fetch_ticker(self) -> Ticker:
    """Fetch the current ticker for this client's market."""
    fetcher = self._fetch('ticker', self.market.code)
    return fetcher(self._ticker)()
2,623
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L210-L212
[ "def", "dump", "(", "self", ")", ":", "assert", "self", ".", "database", "is", "not", "None", "cmd", "=", "\"SELECT count from {} WHERE rowid={}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "STATE_INFO_ROW", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "self", ".", "_from_sqlite", "(", "ret", "[", "0", "]", "[", "0", "]", ")", "+", "self", ".", "inserts", "if", "count", ">", "self", ".", "row_limit", ":", "msg", "=", "\"cleaning up state, this might take a while.\"", "logger", ".", "warning", "(", "msg", ")", "delete", "=", "count", "-", "self", ".", "row_limit", "delete", "+=", "int", "(", "self", ".", "row_limit", "*", "(", "self", ".", "row_cleanup_quota", "/", "100.0", ")", ")", "cmd", "=", "(", "\"DELETE FROM {} WHERE timestamp IN (\"", "\"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});\"", ")", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ",", "self", ".", "STATE_TABLE", ",", "delete", ")", ")", "self", ".", "_vacuum", "(", ")", "cmd", "=", "\"SELECT COUNT(*) FROM {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "ret", "[", "0", "]", "[", "0", "]", "cmd", "=", "\"UPDATE {} SET count = {} WHERE rowid = {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "_to_sqlite", "(", "count", ")", ",", "self", ".", "STATE_INFO_ROW", ",", ")", ")", "self", ".", "_update_cache_directory_state", "(", ")", "self", ".", "database", ".", "commit", "(", ")", "self", ".", "cursor", ".", "close", "(", ")", "self", ".", "database", ".", "close", "(", ")", 
"self", ".", "database", "=", "None", "self", ".", "cursor", "=", "None", "self", ".", "inserts", "=", "0" ]
Fetch the order book .
def fetch_order_book ( self ) -> OrderBook : return self . _fetch ( 'order book' , self . market . code ) ( self . _order_book ) ( )
2,624
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L232-L234
[ "def", "lock_resource_for_update", "(", "cls", ",", "resource_id", ",", "db_session", ")", ":", "db_session", "=", "get_db_session", "(", "db_session", ")", "query", "=", "db_session", ".", "query", "(", "cls", ".", "model", ")", "query", "=", "query", ".", "filter", "(", "cls", ".", "model", ".", "resource_id", "==", "resource_id", ")", "query", "=", "query", ".", "with_for_update", "(", ")", "return", "query", ".", "first", "(", ")" ]
Fetch trades since given timestamp .
def fetch_trades_since ( self , since : int ) -> List [ Trade ] : return self . _fetch_since ( 'trades' , self . market . code ) ( self . _trades_since ) ( since )
2,625
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L241-L243
[ "def", "load_writer_configs", "(", "writer_configs", ",", "ppp_config_dir", ",", "*", "*", "writer_kwargs", ")", ":", "try", ":", "writer_info", "=", "read_writer_config", "(", "writer_configs", ")", "writer_class", "=", "writer_info", "[", "'writer'", "]", "except", "(", "ValueError", ",", "KeyError", ",", "yaml", ".", "YAMLError", ")", ":", "raise", "ValueError", "(", "\"Invalid writer configs: \"", "\"'{}'\"", ".", "format", "(", "writer_configs", ")", ")", "init_kwargs", ",", "kwargs", "=", "writer_class", ".", "separate_init_kwargs", "(", "writer_kwargs", ")", "writer", "=", "writer_class", "(", "ppp_config_dir", "=", "ppp_config_dir", ",", "config_files", "=", "writer_configs", ",", "*", "*", "init_kwargs", ")", "return", "writer", ",", "kwargs" ]
Fetch latest deposits must provide a limit .
def fetch_deposits ( self , limit : int ) -> List [ Deposit ] : return self . _transactions ( self . _deposits , 'deposits' , limit )
2,626
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L292-L294
[ "def", "_init_cycle_dict", "(", "self", ")", ":", "dict_arr", "=", "np", ".", "zeros", "(", "self", ".", "epochs", ",", "dtype", "=", "int", ")", "length_arr", "=", "np", ".", "zeros", "(", "self", ".", "epochs", ",", "dtype", "=", "int", ")", "start_arr", "=", "np", ".", "zeros", "(", "self", ".", "epochs", ",", "dtype", "=", "int", ")", "c_len", "=", "self", ".", "cycle_len", "idx", "=", "0", "for", "i", "in", "range", "(", "self", ".", "cycles", ")", ":", "current_start", "=", "idx", "for", "j", "in", "range", "(", "c_len", ")", ":", "dict_arr", "[", "idx", "]", "=", "i", "length_arr", "[", "idx", "]", "=", "c_len", "start_arr", "[", "idx", "]", "=", "current_start", "idx", "+=", "1", "c_len", "*=", "self", ".", "cycle_mult", "return", "dict_arr", ",", "length_arr", ",", "start_arr" ]
Fetch all deposits since the given timestamp .
def fetch_deposits_since ( self , since : int ) -> List [ Deposit ] : return self . _transactions_since ( self . _deposits_since , 'deposits' , since )
2,627
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L304-L306
[ "def", "only_owner", "(", "func", ")", ":", "def", "decorated", "(", "*", "_", ",", "*", "*", "kwargs", ")", ":", "id", "=", "kwargs", "[", "'id'", "]", "if", "not", "current_user", ".", "is_authenticated", ":", "abort", "(", "401", ")", "elif", "current_user", ".", "id", "!=", "id", ":", "abort", "(", "403", ")", "return", "func", "(", "*", "*", "kwargs", ")", "return", "decorated" ]
Fetch latest withdrawals must provide a limit .
def fetch_withdrawals ( self , limit : int ) -> List [ Withdrawal ] : return self . _transactions ( self . _withdrawals , 'withdrawals' , limit )
2,628
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L313-L315
[ "def", "AddFile", "(", "self", ",", "fd", ")", ":", "hashes", "=", "self", ".", "_HashFile", "(", "fd", ")", "# The empty file is very common, we don't keep the back references for it", "# in the DB since it just takes up too much space.", "empty_hash", "=", "(", "\"e3b0c44298fc1c149afbf4c8996fb924\"", "\"27ae41e4649b934ca495991b7852b855\"", ")", "if", "hashes", ".", "sha256", "==", "empty_hash", ":", "return", "# Update the hashes field now that we have calculated them all.", "fd", ".", "Set", "(", "fd", ".", "Schema", ".", "HASH", ",", "hashes", ")", "fd", ".", "Flush", "(", ")", "# sha256 is the canonical location.", "canonical_urn", "=", "self", ".", "PATH", ".", "Add", "(", "\"generic/sha256\"", ")", ".", "Add", "(", "str", "(", "hashes", ".", "sha256", ")", ")", "if", "not", "list", "(", "aff4", ".", "FACTORY", ".", "Stat", "(", "[", "canonical_urn", "]", ")", ")", ":", "aff4", ".", "FACTORY", ".", "Copy", "(", "fd", ".", "urn", ",", "canonical_urn", ")", "# Remove the STAT entry, it makes no sense to copy it between clients.", "with", "aff4", ".", "FACTORY", ".", "Open", "(", "canonical_urn", ",", "mode", "=", "\"rw\"", ",", "token", "=", "self", ".", "token", ")", "as", "new_fd", ":", "new_fd", ".", "Set", "(", "new_fd", ".", "Schema", ".", "STAT", "(", "None", ")", ")", "self", ".", "_AddToIndex", "(", "canonical_urn", ",", "fd", ".", "urn", ")", "for", "hash_type", ",", "hash_digest", "in", "hashes", ".", "ListSetFields", "(", ")", ":", "# Determine fingerprint type.", "hash_type", "=", "hash_type", ".", "name", "# No need to create a symlink for sha256, it's the canonical location.", "if", "hash_type", "==", "\"sha256\"", ":", "continue", "hash_digest", "=", "str", "(", "hash_digest", ")", "fingerprint_type", "=", "\"generic\"", "if", "hash_type", ".", "startswith", "(", "\"pecoff_\"", ")", ":", "fingerprint_type", "=", "\"pecoff\"", "hash_type", "=", "hash_type", "[", "len", "(", "\"pecoff_\"", ")", ":", "]", "if", "hash_type", "not", "in", "self", 
".", "HASH_TYPES", "[", "fingerprint_type", "]", ":", "continue", "file_store_urn", "=", "self", ".", "PATH", ".", "Add", "(", "fingerprint_type", ")", ".", "Add", "(", "hash_type", ")", ".", "Add", "(", "hash_digest", ")", "with", "aff4", ".", "FACTORY", ".", "Create", "(", "file_store_urn", ",", "aff4", ".", "AFF4Symlink", ",", "token", "=", "self", ".", "token", ")", "as", "symlink", ":", "symlink", ".", "Set", "(", "symlink", ".", "Schema", ".", "SYMLINK_TARGET", ",", "canonical_urn", ")", "# We do not want to be externally written here.", "return", "None" ]
Fetch all withdrawals since the given timestamp .
def fetch_withdrawals_since ( self , since : int ) -> List [ Withdrawal ] : return self . _transactions_since ( self . _withdrawals_since , 'withdrawals' , since )
2,629
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L325-L327
[ "def", "save_to_file", "(", "self", ",", "filename", ",", "remap_dim0", "=", "None", ",", "remap_dim1", "=", "None", ")", ":", "# rows - first index", "# columns - second index", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fobj", ":", "columns", "=", "list", "(", "sorted", "(", "self", ".", "_dim1", ")", ")", "for", "col", "in", "columns", ":", "fobj", ".", "write", "(", "','", ")", "fobj", ".", "write", "(", "str", "(", "remap_dim1", "[", "col", "]", "if", "remap_dim1", "else", "col", ")", ")", "fobj", ".", "write", "(", "'\\n'", ")", "for", "row", "in", "sorted", "(", "self", ".", "_dim0", ")", ":", "fobj", ".", "write", "(", "str", "(", "remap_dim0", "[", "row", "]", "if", "remap_dim0", "else", "row", ")", ")", "for", "col", "in", "columns", ":", "fobj", ".", "write", "(", "','", ")", "fobj", ".", "write", "(", "str", "(", "self", "[", "row", ",", "col", "]", ")", ")", "fobj", ".", "write", "(", "'\\n'", ")" ]
Request a withdrawal .
def request_withdrawal ( self , amount : Number , address : str , subtract_fee : bool = False , * * params ) -> Withdrawal : self . log . debug ( f'Requesting {self.currency} withdrawal from {self.name} to {address}' ) amount = self . _parse_money ( amount ) if self . dry_run : withdrawal = Withdrawal . create_default ( TxType . WITHDRAWAL , self . currency , amount , address ) self . log . warning ( f'DRY RUN: Withdrawal requested on {self.name}: {withdrawal}' ) return withdrawal try : withdrawal = self . _withdraw ( amount , address , subtract_fee , * * params ) except Exception as e : msg = f'Failed requesting withdrawal on {self.name}!: amount={amount}, address={address}' raise self . exception ( InvalidWithdrawal , msg , e ) from e self . log . info ( f'Withdrawal requested on {self.name}: {withdrawal}' ) return withdrawal
2,630
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L346-L363
[ "def", "list_blobs", "(", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "# pylint: disable=unused-argument", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "if", "'container'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A container must be specified'", ")", "storageservice", "=", "_get_block_blob_service", "(", "kwargs", ")", "ret", "=", "{", "}", "try", ":", "for", "blob", "in", "storageservice", ".", "list_blobs", "(", "kwargs", "[", "'container'", "]", ")", ".", "items", ":", "ret", "[", "blob", ".", "name", "]", "=", "{", "'blob_type'", ":", "blob", ".", "properties", ".", "blob_type", ",", "'last_modified'", ":", "blob", ".", "properties", ".", "last_modified", ".", "isoformat", "(", ")", ",", "'server_encrypted'", ":", "blob", ".", "properties", ".", "server_encrypted", ",", "}", "except", "Exception", "as", "exc", ":", "log", ".", "warning", "(", "six", ".", "text_type", "(", "exc", ")", ")", "return", "ret" ]
Fetch an order by ID .
def fetch_order ( self , order_id : str ) -> Order : return self . _fetch ( f'order id={order_id}' , exc = OrderNotFound ) ( self . _order ) ( order_id )
2,631
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L410-L412
[ "def", "secure", "(", "self", ")", ":", "log", ".", "debug", "(", "'ConCache securing sockets'", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "cache_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "cache_sock", ",", "0o600", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "update_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "update_sock", ",", "0o600", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "upd_t_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "upd_t_sock", ",", "0o600", ")" ]
Fetch latest open orders must provide a limit .
def fetch_open_orders ( self , limit : int ) -> List [ Order ] : return self . _fetch_orders_limit ( self . _open_orders , limit )
2,632
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L424-L426
[ "def", "do_OPTIONS", "(", "self", ")", ":", "thread_local", ".", "clock_start", "=", "get_time", "(", ")", "thread_local", ".", "status_code", "=", "200", "thread_local", ".", "message", "=", "None", "thread_local", ".", "headers", "=", "[", "]", "thread_local", ".", "end_headers", "=", "[", "]", "thread_local", ".", "size", "=", "-", "1", "thread_local", ".", "method", "=", "'OPTIONS'", "self", ".", "send_response", "(", "200", ")", "if", "self", ".", "is_cross_origin", "(", ")", ":", "no_caching", "=", "self", ".", "cross_origin_headers", "(", ")", "# ten minutes if no custom headers requested", "self", ".", "send_header", "(", "\"Access-Control-Max-Age\"", ",", "0", "if", "no_caching", "else", "10", "*", "60", ")", "self", ".", "send_header", "(", "\"Content-Length\"", ",", "0", ")", "self", ".", "end_headers", "(", ")", "thread_local", ".", "size", "=", "0" ]
Fetch latest closed orders must provide a limit .
def fetch_closed_orders ( self , limit : int ) -> List [ Order ] : return self . _fetch_orders_limit ( self . _closed_orders , limit )
2,633
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L436-L438
[ "def", "_repack_archive", "(", "archive1", ",", "archive2", ",", "verbosity", "=", "0", ",", "interactive", "=", "True", ")", ":", "format1", ",", "compression1", "=", "get_archive_format", "(", "archive1", ")", "format2", ",", "compression2", "=", "get_archive_format", "(", "archive2", ")", "if", "format1", "==", "format2", "and", "compression1", "==", "compression2", ":", "# same format and compression allows to copy the file", "util", ".", "link_or_copy", "(", "archive1", ",", "archive2", ",", "verbosity", "=", "verbosity", ")", "return", "tmpdir", "=", "util", ".", "tmpdir", "(", ")", "try", ":", "kwargs", "=", "dict", "(", "verbosity", "=", "verbosity", ",", "outdir", "=", "tmpdir", ")", "same_format", "=", "(", "format1", "==", "format2", "and", "compression1", "and", "compression2", ")", "if", "same_format", ":", "# only decompress since the format is the same", "kwargs", "[", "'format'", "]", "=", "compression1", "path", "=", "_extract_archive", "(", "archive1", ",", "*", "*", "kwargs", ")", "archive", "=", "os", ".", "path", ".", "abspath", "(", "archive2", ")", "files", "=", "tuple", "(", "os", ".", "listdir", "(", "path", ")", ")", "olddir", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "path", ")", "try", ":", "kwargs", "=", "dict", "(", "verbosity", "=", "verbosity", ",", "interactive", "=", "interactive", ")", "if", "same_format", ":", "# only compress since the format is the same", "kwargs", "[", "'format'", "]", "=", "compression2", "_create_archive", "(", "archive", ",", "files", ",", "*", "*", "kwargs", ")", "finally", ":", "os", ".", "chdir", "(", "olddir", ")", "finally", ":", "shutil", ".", "rmtree", "(", "tmpdir", ",", "onerror", "=", "rmtree_log_error", ")" ]
Fetch closed orders since the given timestamp .
def fetch_closed_orders_since ( self , since : int ) -> List [ Order ] : return self . _fetch_orders_since ( self . _closed_orders_since , since )
2,634
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L448-L450
[ "def", "connect", "(", "self", ",", "db_uri", ",", "debug", "=", "False", ")", ":", "kwargs", "=", "{", "'echo'", ":", "debug", ",", "'convert_unicode'", ":", "True", "}", "# connect to the SQL database", "if", "'mysql'", "in", "db_uri", ":", "kwargs", "[", "'pool_recycle'", "]", "=", "3600", "elif", "'://'", "not", "in", "db_uri", ":", "logger", ".", "debug", "(", "\"detected sqlite path URI: {}\"", ".", "format", "(", "db_uri", ")", ")", "db_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "db_uri", ")", ")", "db_uri", "=", "\"sqlite:///{}\"", ".", "format", "(", "db_path", ")", "self", ".", "engine", "=", "create_engine", "(", "db_uri", ",", "*", "*", "kwargs", ")", "logger", ".", "debug", "(", "'connection established successfully'", ")", "# make sure the same engine is propagated to the BASE classes", "BASE", ".", "metadata", ".", "bind", "=", "self", ".", "engine", "# start a session", "self", ".", "session", "=", "scoped_session", "(", "sessionmaker", "(", "bind", "=", "self", ".", "engine", ")", ")", "# shortcut to query method", "self", ".", "query", "=", "self", ".", "session", ".", "query", "return", "self" ]
Cancel an order by ID .
def cancel_order ( self , order_id : str ) -> str : self . log . debug ( f'Canceling order id={order_id} on {self.name}' ) if self . dry_run : # Don't cancel if dry run self . log . warning ( f'DRY RUN: Order cancelled on {self.name}: id={order_id}' ) return order_id try : # Cancel order self . _cancel_order ( order_id ) except Exception as e : raise self . exception ( OrderNotFound , f'Failed to cancel order: id={order_id}' , e ) from e self . log . info ( f'Order cancelled on {self.name}: id={order_id}' ) return order_id
2,635
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L456-L470
[ "def", "secure", "(", "self", ")", ":", "log", ".", "debug", "(", "'ConCache securing sockets'", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "cache_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "cache_sock", ",", "0o600", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "update_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "update_sock", ",", "0o600", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "upd_t_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "upd_t_sock", ",", "0o600", ")" ]
Cancel multiple orders by a list of IDs .
def cancel_orders ( self , order_ids : List [ str ] ) -> List [ str ] : orders_to_cancel = order_ids self . log . debug ( f'Canceling orders on {self.name}: ids={orders_to_cancel}' ) cancelled_orders = [ ] if self . dry_run : # Don't cancel if dry run self . log . warning ( f'DRY RUN: Orders cancelled on {self.name}: {orders_to_cancel}' ) return orders_to_cancel try : # Iterate and cancel orders if self . has_batch_cancel : self . _cancel_orders ( orders_to_cancel ) cancelled_orders . append ( orders_to_cancel ) orders_to_cancel . clear ( ) else : for i , order_id in enumerate ( orders_to_cancel ) : self . _cancel_order ( order_id ) cancelled_orders . append ( order_id ) orders_to_cancel . pop ( i ) except Exception as e : msg = f'Failed to cancel {len(orders_to_cancel)} orders on {self.name}: ids={orders_to_cancel}' raise self . exception ( OrderNotFound , msg , e ) from e self . log . info ( f'Orders cancelled on {self.name}: ids={cancelled_orders}' ) return cancelled_orders
2,636
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L476-L501
[ "def", "_connected", "(", "self", ",", "transport", ",", "conn", ")", ":", "LOG", ".", "info", "(", "\"Connected to ElkM1\"", ")", "self", ".", "_conn", "=", "conn", "self", ".", "_transport", "=", "transport", "self", ".", "_connection_retry_timer", "=", "1", "if", "url_scheme_is_secure", "(", "self", ".", "_config", "[", "'url'", "]", ")", ":", "self", ".", "_conn", ".", "write_data", "(", "self", ".", "_config", "[", "'userid'", "]", ",", "raw", "=", "True", ")", "self", ".", "_conn", ".", "write_data", "(", "self", ".", "_config", "[", "'password'", "]", ",", "raw", "=", "True", ")", "self", ".", "call_sync_handlers", "(", ")", "if", "not", "self", ".", "_config", "[", "'url'", "]", ".", "startswith", "(", "'serial://'", ")", ":", "self", ".", "_heartbeat", "=", "self", ".", "loop", ".", "call_later", "(", "120", ",", "self", ".", "_reset_connection", ")" ]
Cancel all open orders .
def cancel_all_orders ( self ) -> List [ str ] : order_ids = [ o . id for o in self . fetch_all_open_orders ( ) ] return self . cancel_orders ( order_ids )
2,637
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L503-L506
[ "def", "do_OPTIONS", "(", "self", ")", ":", "thread_local", ".", "clock_start", "=", "get_time", "(", ")", "thread_local", ".", "status_code", "=", "200", "thread_local", ".", "message", "=", "None", "thread_local", ".", "headers", "=", "[", "]", "thread_local", ".", "end_headers", "=", "[", "]", "thread_local", ".", "size", "=", "-", "1", "thread_local", ".", "method", "=", "'OPTIONS'", "self", ".", "send_response", "(", "200", ")", "if", "self", ".", "is_cross_origin", "(", ")", ":", "no_caching", "=", "self", ".", "cross_origin_headers", "(", ")", "# ten minutes if no custom headers requested", "self", ".", "send_header", "(", "\"Access-Control-Max-Age\"", ",", "0", "if", "no_caching", "else", "10", "*", "60", ")", "self", ".", "send_header", "(", "\"Content-Length\"", ",", "0", ")", "self", ".", "end_headers", "(", ")", "thread_local", ".", "size", "=", "0" ]
Minimum amount to place an order .
def min_order_amount ( self ) -> Money : return self . _fetch ( 'minimum order amount' , self . market . code ) ( self . _min_order_amount ) ( )
2,638
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L517-L519
[ "def", "set_column_sizes", "(", "self", ",", "values", ")", ":", "self", ".", "style", "[", "'grid-template-columns'", "]", "=", "' '", ".", "join", "(", "map", "(", "lambda", "value", ":", "(", "str", "(", "value", ")", "if", "str", "(", "value", ")", ".", "endswith", "(", "'%'", ")", "else", "str", "(", "value", ")", "+", "'%'", ")", ",", "values", ")", ")" ]
Place a market order .
def place_market_order ( self , side : Side , amount : Number ) -> Order : return self . place_order ( side , OrderType . MARKET , amount )
2,639
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L552-L554
[ "def", "create_pgroup_snapshot", "(", "self", ",", "source", ",", "*", "*", "kwargs", ")", ":", "# In REST 1.4, support was added for snapshotting multiple pgroups. As a", "# result, the endpoint response changed from an object to an array of", "# objects. To keep the response type consistent between REST versions,", "# we unbox the response when creating a single snapshot.", "result", "=", "self", ".", "create_pgroup_snapshots", "(", "[", "source", "]", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_rest_version", ">=", "LooseVersion", "(", "\"1.4\"", ")", ":", "headers", "=", "result", ".", "headers", "result", "=", "ResponseDict", "(", "result", "[", "0", "]", ")", "result", ".", "headers", "=", "headers", "return", "result" ]
This is the main module for the script . The script will accept a file or a directory and then encrypt it with a provided key before pushing it to S3 into a specified bucket .
def main ( ) : parser = argparse . ArgumentParser ( description = main . __doc__ , add_help = True ) parser . add_argument ( '-M' , '--master_key' , dest = 'master_key' , help = 'Path to the master key ' + 'used for the encryption. Data is transferred without encryption if this' + 'is not provided.' , type = str , required = False , default = None ) parser . add_argument ( '-B' , '--bucket' , dest = 'bucket' , help = 'S3 bucket.' , type = str , required = True ) parser . add_argument ( '-R' , '--remote_dir' , dest = 'remote_dir' , help = 'Pseudo directory within ' + 'the bucket to store the file(s). NOTE: Folder structure below ' + 'REMOTE_DIR will be retained.' , type = str , required = False , default = '' ) parser . add_argument ( 'data' , help = 'File(s) or folder(s) to transfer to S3.' , type = str , nargs = '+' ) params = parser . parse_args ( ) # Input handling if params . master_key and not os . path . exists ( params . master_key ) : raise InputParameterError ( 'The master key was not found at ' + params . master_key ) # If the user doesn't have ~/.boto , it doesn't even make sense to go ahead if not os . path . exists ( os . path . expanduser ( '~/.boto' ) ) : raise RuntimeError ( '~/.boto not found' ) # Ensure that the remote directory doesn't start with a / if params . remote_dir . startswith ( '/' ) : raise InputParameterError ( 'The remote dir cannot start with a \'/\'' ) # Process each of the input arguments. for datum in params . data : datum = os . path . abspath ( datum ) if not os . path . exists ( datum ) : print ( 'ERROR: %s could not be found.' % datum , file = sys . stderr ) continue write_to_s3 ( datum , params . master_key , params . bucket , params . remote_dir ) return None
2,640
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/encrypt_files_in_dir_to_s3.py#L157-L190
[ "def", "get_column_metadata", "(", "conn", ",", "table", ":", "str", ",", "schema", "=", "'public'", ")", ":", "query", "=", "\"\"\"\\\nSELECT\n attname as name,\n format_type(atttypid, atttypmod) AS data_type,\n NOT attnotnull AS nullable\nFROM pg_catalog.pg_attribute\nWHERE attrelid=%s::regclass\n AND attnum > 0 AND NOT attisdropped\nORDER BY attnum;\"\"\"", "qualified_name", "=", "compile_qualified_name", "(", "table", ",", "schema", "=", "schema", ")", "for", "record", "in", "select_dict", "(", "conn", ",", "query", ",", "params", "=", "(", "qualified_name", ",", ")", ")", ":", "yield", "record" ]
Queries S3 to identify the region hosting the provided bucket .
def _get_bucket_endpoint ( self ) : conn = S3Connection ( ) bucket = conn . lookup ( self . bucket_name ) if not bucket : # TODO: Make the bucket here? raise InputParameterError ( 'The provided bucket %s doesn\'t exist' % self . bucket_name ) endpoint = str ( bucket . get_location ( ) ) return endpoint
2,641
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/encrypt_files_in_dir_to_s3.py#L82-L92
[ "async", "def", "timeout", "(", "source", ",", "timeout", ")", ":", "async", "with", "streamcontext", "(", "source", ")", "as", "streamer", ":", "while", "True", ":", "try", ":", "item", "=", "await", "wait_for", "(", "anext", "(", "streamer", ")", ",", "timeout", ")", "except", "StopAsyncIteration", ":", "break", "else", ":", "yield", "item" ]
A wrapper for the entire rna alignment subgraph .
def align_rna ( job , fastqs , univ_options , star_options ) : star = job . wrapJobFn ( run_star , fastqs , univ_options , star_options , cores = star_options [ 'n' ] , memory = PromisedRequirement ( lambda x : int ( 1.85 * x . size ) , star_options [ 'index' ] ) , disk = PromisedRequirement ( star_disk , fastqs , star_options [ 'index' ] ) ) s_and_i = job . wrapJobFn ( sort_and_index_star , star . rv ( ) , univ_options , star_options ) . encapsulate ( ) job . addChild ( star ) star . addChild ( s_and_i ) return s_and_i . rv ( )
2,642
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/rna.py#L39-L58
[ "def", "remove_users_from_organization", "(", "self", ",", "organization_id", ",", "users_list", ")", ":", "log", ".", "warning", "(", "'Removing users...'", ")", "url", "=", "'rest/servicedeskapi/organization/{}/user'", ".", "format", "(", "organization_id", ")", "data", "=", "{", "'usernames'", ":", "users_list", "}", "return", "self", ".", "delete", "(", "url", ",", "headers", "=", "self", ".", "experimental_headers", ",", "data", "=", "data", ")" ]
Align a pair of fastqs with STAR .
def run_star ( job , fastqs , univ_options , star_options ) : assert star_options [ 'type' ] in ( 'star' , 'starlong' ) work_dir = os . getcwd ( ) input_files = { 'rna_cutadapt_1.fastq' : fastqs [ 0 ] , 'rna_cutadapt_2.fastq' : fastqs [ 1 ] , 'star_index.tar.gz' : star_options [ 'index' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) # Handle gzipped file gz = '.gz' if is_gzipfile ( input_files [ 'rna_cutadapt_1.fastq' ] ) else '' if gz : for read_file in 'rna_cutadapt_1.fastq' , 'rna_cutadapt_2.fastq' : os . symlink ( read_file , read_file + gz ) input_files [ read_file + gz ] = input_files [ read_file ] + gz # Untar the index input_files [ 'star_index' ] = untargz ( input_files [ 'star_index.tar.gz' ] , work_dir ) # Check to see if user is using a STAR-Fusion index star_fusion_idx = os . path . join ( input_files [ 'star_index' ] , 'ref_genome.fa.star.idx' ) if os . path . exists ( star_fusion_idx ) : input_files [ 'star_index' ] = star_fusion_idx input_files = { key : docker_path ( path , work_dir = work_dir ) for key , path in input_files . items ( ) } # Using recommended STAR-Fusion parameters: # https://github.com/STAR-Fusion/STAR-Fusion/wiki parameters = [ '--runThreadN' , str ( star_options [ 'n' ] ) , '--genomeDir' , input_files [ 'star_index' ] , '--twopassMode' , 'Basic' , '--outReadsUnmapped' , 'None' , '--chimSegmentMin' , '12' , '--chimJunctionOverhangMin' , '12' , '--alignSJDBoverhangMin' , '10' , '--alignMatesGapMax' , '200000' , '--alignIntronMax' , '200000' , '--chimSegmentReadGapMax' , 'parameter' , '3' , '--alignSJstitchMismatchNmax' , '5' , '-1' , '5' , '5' , '--outFileNamePrefix' , 'rna' , '--readFilesIn' , input_files [ 'rna_cutadapt_1.fastq' + gz ] , input_files [ 'rna_cutadapt_2.fastq' + gz ] , '--outSAMattributes' , 'NH' , 'HI' , 'AS' , 'NM' , 'MD' , '--outSAMtype' , 'BAM' , 'Unsorted' , '--quantMode' , 'TranscriptomeSAM' ] if gz : parameters . 
extend ( [ '--readFilesCommand' , 'zcat' ] ) if star_options [ 'type' ] == 'star' : docker_call ( tool = 'star' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = star_options [ 'version' ] ) else : docker_call ( tool = 'starlong' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = star_options [ 'version' ] ) output_files = defaultdict ( ) for output_file in [ 'rnaAligned.toTranscriptome.out.bam' , 'rnaAligned.out.bam' , 'rnaChimeric.out.junction' ] : output_files [ output_file ] = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , output_file ] ) ) export_results ( job , output_files [ 'rnaAligned.toTranscriptome.out.bam' ] , 'rna_transcriptome.bam' , univ_options , subfolder = 'alignments' ) export_results ( job , output_files [ 'rnaChimeric.out.junction' ] , 'rna_chimeric.junction' , univ_options , subfolder = 'mutations/fusions' ) job . fileStore . logToMaster ( 'Ran STAR on %s successfully' % univ_options [ 'patient' ] ) return output_files
2,643
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/rna.py#L61-L138
[ "def", "get_next_revision", "(", "self", ",", "session_id", ",", "revision", ",", "delta", ")", ":", "session", "=", "self", ".", "sessions", "[", "session_id", "]", "session", ".", "state", "=", "State", ".", "connected", "if", "delta", "==", "revision", ":", "# Increment revision. Never decrement.", "session", ".", "revision", "=", "max", "(", "session", ".", "revision", ",", "revision", ")", "# Wait for next revision to become ready.", "self", ".", "next_revision_available", ".", "wait", "(", ")", "return", "self", ".", "revision" ]
A wrapper for sorting and indexing the genomic star bam generated by run_star . It is required since run_star returns a dict of 2 bams
def sort_and_index_star ( job , star_bams , univ_options , star_options ) : star_options [ 'samtools' ] [ 'n' ] = star_options [ 'n' ] sort = job . wrapJobFn ( sort_bamfile , star_bams [ 'rnaAligned.out.bam' ] , 'rna' , univ_options , samtools_options = star_options [ 'samtools' ] , disk = PromisedRequirement ( sort_disk , star_bams [ 'rnaAligned.out.bam' ] ) ) index = job . wrapJobFn ( index_bamfile , sort . rv ( ) , 'rna' , univ_options , samtools_options = star_options [ 'samtools' ] , sample_info = 'genome_sorted' , disk = PromisedRequirement ( index_disk , sort . rv ( ) ) ) job . addChild ( sort ) sort . addChild ( index ) return { 'rna_genome' : index . rv ( ) , 'rna_transcriptome.bam' : star_bams [ 'rnaAligned.toTranscriptome.out.bam' ] , 'rnaChimeric.out.junction' : star_bams [ 'rnaChimeric.out.junction' ] }
2,644
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/rna.py#L141-L169
[ "def", "remove_punctuation", "(", "text", ",", "exceptions", "=", "[", "]", ")", ":", "all_but", "=", "[", "r'\\w'", ",", "r'\\s'", "]", "all_but", ".", "extend", "(", "exceptions", ")", "pattern", "=", "'[^{}]'", ".", "format", "(", "''", ".", "join", "(", "all_but", ")", ")", "return", "re", ".", "sub", "(", "pattern", ",", "''", ",", "text", ")" ]
Resets the state of the expression
def reset ( self ) : self . expr = [ ] self . matcher = None self . last_matcher = None self . description = None
2,645
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L44-L49
[ "def", "url_to_resource", "(", "url", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "request", "=", "get_current_request", "(", ")", "# cnv = request.registry.getAdapter(request, IResourceUrlConverter)", "reg", "=", "get_current_registry", "(", ")", "cnv", "=", "reg", ".", "getAdapter", "(", "request", ",", "IResourceUrlConverter", ")", "return", "cnv", ".", "url_to_resource", "(", "url", ")" ]
Clone this expression
def clone(self):
    """Return a shallow copy of this expression.

    The copy owns an independent ``expr`` list and is never flagged as a
    factory instance.
    """
    from copy import copy
    duplicate = copy(self)
    # Detach the expression list so appends on the copy don't leak back.
    duplicate.expr = copy(self.expr)
    duplicate.factory = False
    return duplicate
2,646
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L51-L57
[ "def", "tofile", "(", "self", ",", "filename", ",", "format", "=", "'ascii'", ")", ":", "if", "not", "common", ".", "is_string", "(", "filename", ")", ":", "raise", "TypeError", "(", "'argument filename must be string but got %s'", "%", "(", "type", "(", "filename", ")", ")", ")", "if", "format", "not", "in", "[", "'ascii'", ",", "'binary'", "]", ":", "raise", "TypeError", "(", "'argument format must be ascii | binary'", ")", "filename", "=", "filename", ".", "strip", "(", ")", "if", "not", "filename", ":", "raise", "ValueError", "(", "'filename must be non-empty string'", ")", "if", "filename", "[", "-", "4", ":", "]", "!=", "'.vtk'", ":", "filename", "+=", "'.vtk'", "f", "=", "open", "(", "filename", ",", "'wb'", ")", "f", ".", "write", "(", "self", ".", "to_string", "(", "format", ")", ")", "f", ".", "close", "(", ")" ]
Resolve the current expression against the supplied value
def resolve(self, value=None):
    """
    Resolve the current expression against the supplied value.

    :param value: the value the evaluated matcher is asserted against.
    :raises AssertionError: when the assertion (or the transformation) fails.
    """
    # If we still have an uninitialized matcher init it now
    if self.matcher:
        self._init_matcher()
    # Evaluate the current set of matchers forming the expression
    matcher = self.evaluate()
    try:
        value = self._transform(value)
        self._assertion(matcher, value)
    except AssertionError as ex:
        # By re-raising here the exception we reset the traceback
        raise ex
    finally:
        # Reset the state of the object so we can use it again
        if self.deferred:
            self.reset()
2,647
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L81-L100
[ "def", "FromData", "(", "cls", ",", "stream", ",", "json_data", ",", "http", ",", "auto_transfer", "=", "None", ",", "gzip_encoded", "=", "False", ",", "*", "*", "kwds", ")", ":", "info", "=", "json", ".", "loads", "(", "json_data", ")", "missing_keys", "=", "cls", ".", "_REQUIRED_SERIALIZATION_KEYS", "-", "set", "(", "info", ".", "keys", "(", ")", ")", "if", "missing_keys", ":", "raise", "exceptions", ".", "InvalidDataError", "(", "'Invalid serialization data, missing keys: %s'", "%", "(", "', '", ".", "join", "(", "missing_keys", ")", ")", ")", "if", "'total_size'", "in", "kwds", ":", "raise", "exceptions", ".", "InvalidUserInputError", "(", "'Cannot override total_size on serialized Upload'", ")", "upload", "=", "cls", ".", "FromStream", "(", "stream", ",", "info", "[", "'mime_type'", "]", ",", "total_size", "=", "info", ".", "get", "(", "'total_size'", ")", ",", "gzip_encoded", "=", "gzip_encoded", ",", "*", "*", "kwds", ")", "if", "isinstance", "(", "stream", ",", "io", ".", "IOBase", ")", "and", "not", "stream", ".", "seekable", "(", ")", ":", "raise", "exceptions", ".", "InvalidUserInputError", "(", "'Cannot restart resumable upload on non-seekable stream'", ")", "if", "auto_transfer", "is", "not", "None", ":", "upload", ".", "auto_transfer", "=", "auto_transfer", "else", ":", "upload", ".", "auto_transfer", "=", "info", "[", "'auto_transfer'", "]", "upload", ".", "strategy", "=", "RESUMABLE_UPLOAD", "upload", ".", "_Initialize", "(", "# pylint: disable=protected-access", "http", ",", "info", "[", "'url'", "]", ")", "upload", ".", "RefreshResumableUploadState", "(", ")", "upload", ".", "EnsureInitialized", "(", ")", "if", "upload", ".", "auto_transfer", ":", "upload", ".", "StreamInChunks", "(", ")", "return", "upload" ]
Perform the actual assertion for the given matcher and value . Override this method to apply a special configuration when performing the assertion . If the assertion fails it should raise an AssertionError .
def _assertion(self, matcher, value):
    """
    Perform the actual assertion for the given matcher and value.

    Override this method to apply a special configuration when performing
    the assertion. If the assertion fails it should raise an AssertionError.
    """
    # To support the syntax `any_of(subject) | should ...` we check if the
    # value to check is an Expectation object and if it is we use the descriptor
    # protocol to bind the value's assertion logic to this expectation.
    if isinstance(value, Expectation):
        assertion = value._assertion.__get__(self, Expectation)
        assertion(matcher, value.value)
    else:
        hc.assert_that(value, matcher)
2,648
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L102-L114
[ "async", "def", "copy_to_table", "(", "self", ",", "table_name", ",", "*", ",", "source", ",", "columns", "=", "None", ",", "schema_name", "=", "None", ",", "timeout", "=", "None", ",", "format", "=", "None", ",", "oids", "=", "None", ",", "freeze", "=", "None", ",", "delimiter", "=", "None", ",", "null", "=", "None", ",", "header", "=", "None", ",", "quote", "=", "None", ",", "escape", "=", "None", ",", "force_quote", "=", "None", ",", "force_not_null", "=", "None", ",", "force_null", "=", "None", ",", "encoding", "=", "None", ")", ":", "tabname", "=", "utils", ".", "_quote_ident", "(", "table_name", ")", "if", "schema_name", ":", "tabname", "=", "utils", ".", "_quote_ident", "(", "schema_name", ")", "+", "'.'", "+", "tabname", "if", "columns", ":", "cols", "=", "'({})'", ".", "format", "(", "', '", ".", "join", "(", "utils", ".", "_quote_ident", "(", "c", ")", "for", "c", "in", "columns", ")", ")", "else", ":", "cols", "=", "''", "opts", "=", "self", ".", "_format_copy_opts", "(", "format", "=", "format", ",", "oids", "=", "oids", ",", "freeze", "=", "freeze", ",", "delimiter", "=", "delimiter", ",", "null", "=", "null", ",", "header", "=", "header", ",", "quote", "=", "quote", ",", "escape", "=", "escape", ",", "force_not_null", "=", "force_not_null", ",", "force_null", "=", "force_null", ",", "encoding", "=", "encoding", ")", "copy_stmt", "=", "'COPY {tab}{cols} FROM STDIN {opts}'", ".", "format", "(", "tab", "=", "tabname", ",", "cols", "=", "cols", ",", "opts", "=", "opts", ")", "return", "await", "self", ".", "_copy_in", "(", "copy_stmt", ",", "source", ",", "timeout", ")" ]
Applies any defined transformation to the given value
def _transform(self, value):
    """
    Apply the configured transformation callable (if any) to *value* and
    return the result.

    :raises AssertionError: if the transformation itself raises; the original
        exception type and message are embedded in the assertion message.
    """
    if self.transform:
        try:
            value = self.transform(value)
        except Exception as ex:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; the rendered message is unchanged.
            raise AssertionError(
                'Error applying transformation <{0}>: {1}: {2}'.format(
                    self.transform.__name__, type(ex).__name__, ex))
    return value
2,649
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L116-L128
[ "def", "write", "(", "self", ")", ":", "data", "=", "[", "struct", ".", "pack", "(", "\"<4sBBqIIi\"", ",", "b\"OggS\"", ",", "self", ".", "version", ",", "self", ".", "__type_flags", ",", "self", ".", "position", ",", "self", ".", "serial", ",", "self", ".", "sequence", ",", "0", ")", "]", "lacing_data", "=", "[", "]", "for", "datum", "in", "self", ".", "packets", ":", "quot", ",", "rem", "=", "divmod", "(", "len", "(", "datum", ")", ",", "255", ")", "lacing_data", ".", "append", "(", "b\"\\xff\"", "*", "quot", "+", "chr_", "(", "rem", ")", ")", "lacing_data", "=", "b\"\"", ".", "join", "(", "lacing_data", ")", "if", "not", "self", ".", "complete", "and", "lacing_data", ".", "endswith", "(", "b\"\\x00\"", ")", ":", "lacing_data", "=", "lacing_data", "[", ":", "-", "1", "]", "data", ".", "append", "(", "chr_", "(", "len", "(", "lacing_data", ")", ")", ")", "data", ".", "append", "(", "lacing_data", ")", "data", ".", "extend", "(", "self", ".", "packets", ")", "data", "=", "b\"\"", ".", "join", "(", "data", ")", "# Python's CRC is swapped relative to Ogg's needs.", "# crc32 returns uint prior to py2.6 on some platforms, so force uint", "crc", "=", "(", "~", "zlib", ".", "crc32", "(", "data", ".", "translate", "(", "cdata", ".", "bitswap", ")", ",", "-", "1", ")", ")", "&", "0xffffffff", "# Although we're using to_uint_be, this actually makes the CRC", "# a proper le integer, since Python's CRC is byteswapped.", "crc", "=", "cdata", ".", "to_uint_be", "(", "crc", ")", ".", "translate", "(", "cdata", ".", "bitswap", ")", "data", "=", "data", "[", ":", "22", "]", "+", "crc", "+", "data", "[", "26", ":", "]", "return", "data" ]
Converts the current expression into a single matcher applying coordination operators to operands according to their binding rules
def evaluate(self):
    """
    Converts the current expression into a single matcher, applying
    coordination operators to operands according to their binding rules.

    :return: a single (possibly composite) hamcrest matcher.
    :raises RuntimeError: when the expression is malformed.
    """
    # Apply Shunting Yard algorithm to convert the infix expression
    # into Reverse Polish Notation. Since we have a very limited
    # set of operators and binding rules, the implementation becomes
    # really simple. The expression is formed of hamcrest matcher instances
    # and operators identifiers (ints).
    ops = []
    rpn = []
    for token in self.expr:
        if isinstance(token, int):
            # Pop operators of higher-or-equal precedence before pushing.
            while len(ops) and token <= ops[-1]:
                rpn.append(ops.pop())
            ops.append(token)
        else:
            rpn.append(token)
    # Append the remaining operators
    while len(ops):
        rpn.append(ops.pop())
    # Walk the RPN expression to create AllOf/AnyOf matchers
    stack = []
    for token in rpn:
        if isinstance(token, int):
            # Handle the NOT case in a special way since it's unary
            if token == OPERATOR.NOT:
                stack[-1] = IsNot(stack[-1])
                continue
            # Our operators always need two operands
            if len(stack) < 2:
                raise RuntimeError('Unable to build a valid expression. Not enough operands available.')
            # Check what kind of matcher we need to create
            if token == OPERATOR.OR:
                matcher = hc.any_of(*stack[-2:])
            else:  # AND, BUT
                matcher = hc.all_of(*stack[-2:])
            stack[-2:] = [matcher]
        else:
            stack.append(token)
    if len(stack) != 1:
        raise RuntimeError('Unable to build a valid expression. The RPN stack should have just one item.')
    matcher = stack.pop()
    # If a description has been given include it in the matcher
    if self.description:
        matcher = hc.described_as(self.description, matcher)
    return matcher
2,650
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L130-L186
[ "def", "get_longest_table", "(", "url", "=", "'https://www.openoffice.org/dev_docs/source/file_extensions.html'", ",", "header", "=", "0", ")", ":", "dfs", "=", "pd", ".", "read_html", "(", "url", ",", "header", "=", "header", ")", "return", "longest_table", "(", "dfs", ")" ]
Finds a matcher based on the given alias or raises an error if no matcher could be found .
def _find_matcher(self, alias):
    """Look up a matcher by *alias*.

    :raises KeyError: when no matcher is registered under the alias; the
        error message suggests similarly named matchers when available.
    """
    matcher = lookup(alias)
    if matcher:
        return matcher

    msg = 'Matcher "%s" not found' % alias
    # Offer similarly named matchers to help the user
    similar = suggest(alias, max=3, cutoff=0.5)
    if len(similar) > 1:
        last = similar.pop()
        msg += '. Perhaps you meant to use %s or %s?' % (', '.join(similar), last)
    elif len(similar) > 0:
        msg += '. Perhaps you meant to use %s?' % similar.pop()
    raise KeyError(msg)
2,651
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L188-L206
[ "def", "end_headers", "(", "self", ")", ":", "if", "self", ".", "request_version", "!=", "'HTTP/0.9'", ":", "self", ".", "_headers_buffer", ".", "append", "(", "b\"\\r\\n\"", ")", "self", ".", "flush_headers", "(", ")" ]
Executes the current matcher appending it to the expression
def _init_matcher(self, *args, **kwargs):
    """Instantiate the pending matcher with the given arguments, append it
    to the expression and clear the pending slot.

    :return: the instantiated matcher.
    """
    def as_matcher(candidate):
        # Subject-less expectations are collapsed into plain Hamcrest
        # matchers so they can take part in complex compositions.
        if isinstance(candidate, Expectation):
            return candidate.evaluate()
        return candidate

    positional = [as_matcher(item) for item in args]
    named = dict((key, as_matcher(val)) for key, val in kwargs.items())
    matcher = self.matcher(*positional, **named)
    self.expr.append(matcher)
    self.matcher = None
    return matcher
2,652
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L208-L220
[ "def", "update_syslog", "(", "self", ",", "service_id", ",", "version_number", ",", "name_key", ",", "*", "*", "kwargs", ")", ":", "body", "=", "self", ".", "_formdata", "(", "kwargs", ",", "FastlySyslog", ".", "FIELDS", ")", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/syslog/%s\"", "%", "(", "service_id", ",", "version_number", ",", "name_key", ")", ",", "method", "=", "\"PUT\"", ",", "body", "=", "body", ")", "return", "FastlySyslog", "(", "self", ",", "content", ")" ]
Specify a custom message for the matcher
def described_as(self, description, *args):
    """Attach a custom failure message to the matcher.

    Positional arguments, when given, are interpolated into the message via
    ``str.format``. Returns ``self`` to allow chaining.
    """
    self.description = description.format(*args) if args else description
    return self
2,653
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L222-L227
[ "def", "_serve_forever_wrapper", "(", "self", ",", "_srv", ",", "poll_interval", "=", "0.1", ")", ":", "self", ".", "logger", ".", "info", "(", "'Opening tunnel: {0} <> {1}'", ".", "format", "(", "address_to_str", "(", "_srv", ".", "local_address", ")", ",", "address_to_str", "(", "_srv", ".", "remote_address", ")", ")", ")", "_srv", ".", "serve_forever", "(", "poll_interval", ")", "# blocks until finished", "self", ".", "logger", ".", "info", "(", "'Tunnel: {0} <> {1} released'", ".", "format", "(", "address_to_str", "(", "_srv", ".", "local_address", ")", ",", "address_to_str", "(", "_srv", ".", "remote_address", ")", ")", ")" ]
Returns a mapnik PostGIS or SQLite Datasource .
def make_dbsource(**kwargs):
    """
    Returns a mapnik PostGIS or SQLite Datasource built from the Django
    database connection settings; explicit kwargs take precedence over the
    values derived from settings.
    """
    if 'spatialite' in connection.settings_dict.get('ENGINE'):
        kwargs.setdefault('file', connection.settings_dict['NAME'])
        return mapnik.SQLite(wkb_format='spatialite', **kwargs)
    # Map mapnik datasource option names to Django DB setting keys.
    names = (('dbname', 'NAME'), ('user', 'USER'),
             ('password', 'PASSWORD'), ('host', 'HOST'), ('port', 'PORT'))
    for mopt, dopt in names:
        val = connection.settings_dict.get(dopt)
        # Only pass settings that are actually configured.
        if val:
            kwargs.setdefault(mopt, val)
    return mapnik.PostGIS(**kwargs)
2,654
https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/carto.py#L10-L21
[ "def", "acme_renew_certificates", "(", ")", ":", "for", "csr", "in", "glob", "(", "os", ".", "path", ".", "join", "(", "CERTIFICATES_PATH", ",", "'*.csr'", ")", ")", ":", "common_name", "=", "os", ".", "path", ".", "basename", "(", "csr", ")", "common_name", "=", "os", ".", "path", ".", "splitext", "(", "common_name", ")", "[", "0", "]", "certificate_path", "=", "\"{}.crt\"", ".", "format", "(", "common_name", ")", "certificate_path", "=", "os", ".", "path", ".", "join", "(", "CERTIFICATES_PATH", ",", "certificate_path", ")", "with", "open", "(", "certificate_path", ")", "as", "file", ":", "crt", "=", "OpenSSL", ".", "crypto", ".", "load_certificate", "(", "OpenSSL", ".", "crypto", ".", "FILETYPE_PEM", ",", "file", ".", "read", "(", ")", ")", "expiration", "=", "crt", ".", "get_notAfter", "(", ")", "expiration", "=", "_parse_asn1_generalized_date", "(", "expiration", ")", "remaining", "=", "expiration", "-", "datetime", ".", "utcnow", "(", ")", "if", "remaining", ">", "timedelta", "(", "days", "=", "30", ")", ":", "print", "\"No need to renew {} ({})\"", ".", "format", "(", "certificate_path", ",", "remaining", ")", "continue", "print", "\"Renewing {} ({})\"", ".", "format", "(", "certificate_path", ",", "remaining", ")", "certificate_request_path", "=", "\"{}.csr\"", ".", "format", "(", "common_name", ")", "certificate_request_path", "=", "os", ".", "path", ".", "join", "(", "CERTIFICATES_PATH", ",", "certificate_request_path", ")", "signed_cert", "=", "\"{}-signed.crt\"", ".", "format", "(", "common_name", ")", "signed_cert", "=", "os", ".", "path", ".", "join", "(", "CERTIFICATES_PATH", ",", "signed_cert", ")", "_internal_sign_certificate", "(", "certificate_path", ",", "certificate_request_path", ",", "signed_cert", ")" ]
Returns a map Layer .
def layer(self, queryset, stylename=None):
    """
    Returns a map Layer for the given queryset, registering the layer's
    default style on the map if it is not already present.

    :param queryset: data to render; querysets exposing an `image` attribute
        are treated as raster data, all others as vector.
    :param stylename: optional style name passed to the layer.
    """
    cls = RasterLayer if hasattr(queryset, 'image') else VectorLayer
    layer = cls(queryset, style=stylename)
    try:
        style = self.map.find_style(layer.stylename)
    except KeyError:
        # Style not yet registered on the map -- add the layer's default.
        self.map.append_style(layer.stylename, layer.style())
    layer.styles.append(layer.stylename)
    self.map.layers.append(layer._layer)
    return layer
2,655
https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/carto.py#L56-L72
[ "def", "loadInternalSheet", "(", "klass", ",", "p", ",", "*", "*", "kwargs", ")", ":", "vs", "=", "klass", "(", "p", ".", "name", ",", "source", "=", "p", ",", "*", "*", "kwargs", ")", "options", ".", "_set", "(", "'encoding'", ",", "'utf8'", ",", "vs", ")", "if", "p", ".", "exists", "(", ")", ":", "vd", ".", "sheets", ".", "insert", "(", "0", ",", "vs", ")", "vs", ".", "reload", ".", "__wrapped__", "(", "vs", ")", "vd", ".", "sheets", ".", "pop", "(", "0", ")", "return", "vs" ]
Zoom map to geometry extent .
def zoom_bbox(self, bbox):
    """
    Zoom the map to the extent of the given geometry.

    :param bbox: geometry whose extent defines the new map view; it is
        reprojected to the map SRS before zooming.
    """
    try:
        bbox.transform(self.map.srs)
    except gdal.GDALException:
        # Reprojection failed -- leave the current map view untouched.
        pass
    else:
        self.map.zoom_to_box(mapnik.Box2d(*bbox.extent))
2,656
https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/carto.py#L79-L90
[ "def", "_get_purecn_dx_files", "(", "paired", ",", "out", ")", ":", "out_base", "=", "\"%s-dx\"", "%", "utils", ".", "splitext_plus", "(", "out", "[", "\"rds\"", "]", ")", "[", "0", "]", "all_files", "=", "[", "]", "for", "key", ",", "ext", "in", "[", "[", "(", "\"mutation_burden\"", ",", ")", ",", "\"_mutation_burden.csv\"", "]", ",", "[", "(", "\"plot\"", ",", "\"signatures\"", ")", ",", "\"_signatures.pdf\"", "]", ",", "[", "(", "\"signatures\"", ",", ")", ",", "\"_signatures.csv\"", "]", "]", ":", "cur_file", "=", "\"%s%s\"", "%", "(", "out_base", ",", "ext", ")", "out", "=", "tz", ".", "update_in", "(", "out", ",", "key", ",", "lambda", "x", ":", "cur_file", ")", "all_files", ".", "append", "(", "os", ".", "path", ".", "basename", "(", "cur_file", ")", ")", "return", "out_base", ",", "out", ",", "all_files" ]
Returns a default Style .
def style(self):
    """Returns a default mapnik Style containing one rule with this layer's
    symbolizer."""
    style = mapnik.Style()
    rule = mapnik.Rule()
    # NOTE(review): the symbolizer is also stored on the instance --
    # presumably so callers can adjust it after the style is built; confirm.
    self._symbolizer = self.symbolizer()
    rule.symbols.append(self._symbolizer)
    style.rules.append(rule)
    return style
2,657
https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/carto.py#L99-L106
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
A wrapper for run_fusion using the results from cutadapt and star as input .
def wrap_fusion(job, fastqs, star_output, univ_options, star_fusion_options,
                fusion_inspector_options):
    """
    A wrapper for run_fusion using the results from cutadapt and star as
    input.

    :return: the encapsulated fusion job's promise, or None when fusion
        calling is disabled via star_fusion_options['run'].
    """
    # Give user option to skip fusion calling
    if not star_fusion_options['run']:
        job.fileStore.logToMaster('Skipping STAR-Fusion on %s' % univ_options['patient'])
        return
    fusion = job.wrapJobFn(run_fusion, fastqs, star_output['rnaChimeric.out.junction'],
                           univ_options, star_fusion_options, fusion_inspector_options,
                           cores=star_fusion_options['n'],
                           # Memory is scaled from the fusion index size.
                           memory=PromisedRequirement(lambda x: int(1.85 * x.size),
                                                      star_fusion_options['index']),
                           disk=PromisedRequirement(fusion_disk, fastqs,
                                                    star_fusion_options['index'])).encapsulate()
    job.addChild(fusion)
    return fusion.rv()
2,658
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L38-L70
[ "def", "verify", "(", "self", ")", ":", "try", ":", "if", "self", ".", "timestamp_mismatch", "is", "not", "None", ":", "m", "=", "_iso8601_timestamp_regex", ".", "match", "(", "self", ".", "request_timestamp", ")", "year", "=", "int", "(", "m", ".", "group", "(", "\"year\"", ")", ")", "month", "=", "int", "(", "m", ".", "group", "(", "\"month\"", ")", ")", "day", "=", "int", "(", "m", ".", "group", "(", "\"day\"", ")", ")", "hour", "=", "int", "(", "m", ".", "group", "(", "\"hour\"", ")", ")", "minute", "=", "int", "(", "m", ".", "group", "(", "\"minute\"", ")", ")", "second", "=", "int", "(", "m", ".", "group", "(", "\"second\"", ")", ")", "req_ts", "=", "datetime", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ",", "second", ")", "now", "=", "datetime", ".", "utcnow", "(", ")", "if", "abs", "(", "req_ts", "-", "now", ")", ">", "timedelta", "(", "0", ",", "self", ".", "timestamp_mismatch", ")", ":", "raise", "InvalidSignatureError", "(", "\"Timestamp mismatch\"", ")", "if", "self", ".", "expected_signature", "!=", "self", ".", "request_signature", ":", "raise", "InvalidSignatureError", "(", "\"Signature mismatch: expected %r, got %r\"", "%", "(", "self", ".", "expected_signature", ",", "self", ".", "request_signature", ")", ")", "except", "(", "AttributeError", ",", "KeyError", ",", "ValueError", ")", "as", "e", ":", "raise", "InvalidSignatureError", "(", "str", "(", "e", ")", ")", "return", "True" ]
Parses STAR - Fusion format and returns an Expando object with basic features
def parse_star_fusion(infile):
    """
    Parses STAR-Fusion tab-separated output and yields an Expando object
    with the basic features of each fusion record.

    :param file infile: open handle on the STAR-Fusion results file.
    """
    reader = csv.reader(infile, delimiter='\t')
    # `next(reader)` instead of the Python-2-only `reader.next()` keeps this
    # working on both Python 2.6+ and Python 3.
    header = next(reader)
    header = {key: index for index, key in enumerate(header)}
    features = ['LeftGene', 'LeftLocalBreakpoint', 'LeftBreakpoint',
                'RightGene', 'RightLocalBreakpoint', 'RightBreakpoint',
                'LargeAnchorSupport', 'JunctionReadCount', 'SpanningFragCount']
    for line in reader:
        yield Expando(dict((feature, line[header[feature]]) for feature in features))
2,659
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L235-L252
[ "async", "def", "invoke", "(", "self", ",", "context", ")", ":", "try", ":", "# Note: claim_work(...) might not be safely interruptible! See", "# https://bugzilla.mozilla.org/show_bug.cgi?id=1524069", "tasks", "=", "await", "self", ".", "_run_cancellable", "(", "claim_work", "(", "context", ")", ")", "if", "not", "tasks", "or", "not", "tasks", ".", "get", "(", "'tasks'", ",", "[", "]", ")", ":", "await", "self", ".", "_run_cancellable", "(", "asyncio", ".", "sleep", "(", "context", ".", "config", "[", "'poll_interval'", "]", ")", ")", "return", "None", "# Assume only a single task, but should more than one fall through,", "# run them sequentially. A side effect is our return status will", "# be the status of the final task run.", "status", "=", "None", "for", "task_defn", "in", "tasks", ".", "get", "(", "'tasks'", ",", "[", "]", ")", ":", "prepare_to_run_task", "(", "context", ",", "task_defn", ")", "reclaim_fut", "=", "context", ".", "event_loop", ".", "create_task", "(", "reclaim_task", "(", "context", ",", "context", ".", "task", ")", ")", "try", ":", "status", "=", "await", "do_run_task", "(", "context", ",", "self", ".", "_run_cancellable", ",", "self", ".", "_to_cancellable_process", ")", "artifacts_paths", "=", "filepaths_in_dir", "(", "context", ".", "config", "[", "'artifact_dir'", "]", ")", "except", "WorkerShutdownDuringTask", ":", "shutdown_artifact_paths", "=", "[", "os", ".", "path", ".", "join", "(", "'public'", ",", "'logs'", ",", "log_file", ")", "for", "log_file", "in", "[", "'chain_of_trust.log'", ",", "'live_backing.log'", "]", "]", "artifacts_paths", "=", "[", "path", "for", "path", "in", "shutdown_artifact_paths", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "context", ".", "config", "[", "'artifact_dir'", "]", ",", "path", ")", ")", "]", "status", "=", "STATUSES", "[", "'worker-shutdown'", "]", "status", "=", "worst_level", "(", "status", ",", "await", "do_upload", "(", "context", ",", "artifacts_paths", 
")", ")", "await", "complete_task", "(", "context", ",", "status", ")", "reclaim_fut", ".", "cancel", "(", ")", "cleanup", "(", "context", ")", "return", "status", "except", "asyncio", ".", "CancelledError", ":", "return", "None" ]
Parses FusionInspector transcript file and returns dictionary of sequences
def get_transcripts(transcript_file):
    """
    Parses a FusionInspector transcript FASTA file and returns a dictionary
    mapping Trinity transcript id to its sequence.

    Records whose header does not match the expected TRINITY fusion pattern
    are skipped; malformed headers (not starting with '>') print a warning.

    :param str transcript_file: path to the transcript FASTA file.
    :return: dict of transcript id -> sequence.
    """
    transcripts = {}
    regex = re.compile(
        r"(?P<ID>TRINITY.*)\s(?P<fusion>.*--.*):(?P<left_start>\d+)-(?P<right_start>\d+)")
    with open(transcript_file, 'r') as fa:
        while True:
            # Usually the transcript is on one line: header then sequence.
            try:
                # `next(fa)` instead of the Python-2-only `fa.next()` keeps
                # this working on both Python 2.6+ and Python 3.
                info = next(fa)
                seq = next(fa)
                assert info.startswith('>')
                m = regex.search(info)
                if m:
                    transcripts[m.group('ID')] = seq.strip()
            except StopIteration:
                break
            except AssertionError:
                print("WARNING: Malformed fusion transcript file")
    return transcripts
2,660
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L255-L284
[ "def", "clear_stalled_files", "(", "self", ")", ":", "# FIXME: put lock in directory?", "CLEAR_AFTER", "=", "self", ".", "config", "[", "\"DELETE_STALLED_AFTER\"", "]", "minimum_age", "=", "time", ".", "time", "(", ")", "-", "CLEAR_AFTER", "for", "user_dir", "in", "self", ".", "UPLOAD_DIR", ".", "iterdir", "(", ")", ":", "if", "not", "user_dir", ".", "is_dir", "(", ")", ":", "logger", ".", "error", "(", "\"Found non-directory in upload dir: %r\"", ",", "bytes", "(", "user_dir", ")", ")", "continue", "for", "content", "in", "user_dir", ".", "iterdir", "(", ")", ":", "if", "not", "content", ".", "is_file", "(", ")", ":", "logger", ".", "error", "(", "\"Found non-file in user upload dir: %r\"", ",", "bytes", "(", "content", ")", ")", "continue", "if", "content", ".", "stat", "(", ")", ".", "st_ctime", "<", "minimum_age", ":", "content", ".", "unlink", "(", ")" ]
Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor
def split_fusion_transcript(annotation_path, transcripts):
    """
    Finds the breakpoint in the fusion transcript and splits the 5' donor
    from the 3' acceptor.

    :param str annotation_path: path to the FusionInspector GFF annotation.
    :param dict transcripts: assembled transcript sequences keyed by id.
    :return: two defaultdicts (five_pr_splits, three_pr_splits) mapping
        fusion -> transcript id -> sequence.
    """
    annotation = collections.defaultdict(dict)
    forward = 'ACGTN'
    reverse = 'TGCAN'
    # Translation table for reverse-complementing (Python 2 string API).
    trans = string.maketrans(forward, reverse)
    # Pull in assembled transcript annotation
    five_pr_splits = collections.defaultdict(dict)
    three_pr_splits = collections.defaultdict(dict)
    regex = re.compile(r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\s(?P<start>\d+)\s(?P<stop>\d+)')
    with open(annotation_path, 'r') as gff:
        for line in gff:
            # NOTE(review): debug print of every GFF line -- likely a
            # leftover; consider removing.
            print(line)
            if line.startswith('#'):
                # Comment lines carry the fusion name and its breakpoints.
                _, eyd, fusion = line.strip().split()
                fusion, start_stop = fusion.split(':')
                left_break, right_break = start_stop.split('-')
                annotation[fusion][eyd] = {}
                annotation[fusion][eyd]['left_break'] = left_break
                annotation[fusion][eyd]['right_break'] = right_break
            else:
                line = line.strip().split('\t')
                fusion = line[0]
                strand = line[6]
                block_start = line[3]
                block_stop = line[4]
                attr = line[8]
                m = regex.search(attr)
                if m:
                    transcript_id = m.group('Name')
                    # Does this block touch the recorded right/left breakpoint?
                    rb = any([block_start == annotation[fusion][transcript_id]['right_break'],
                              block_stop == annotation[fusion][transcript_id]['right_break']])
                    lb = any([block_start == annotation[fusion][transcript_id]['left_break'],
                              block_stop == annotation[fusion][transcript_id]['left_break']])
                    if strand == '-' and rb:
                        transcript_split = int(m.group('stop')) + 1  # Off by one
                        # Take the reverse complement to orient transcripts from 5' to 3'
                        five_seq = transcripts[transcript_id][transcript_split:]
                        five_pr_splits[fusion][transcript_id] = five_seq.translate(trans)[::-1]
                        three_seq = transcripts[transcript_id][:transcript_split]
                        three_pr_splits[fusion][transcript_id] = three_seq.translate(trans)[::-1]
                    elif strand == '+' and lb:
                        transcript_split = int(m.group('stop'))
                        s1 = transcripts[transcript_id][:transcript_split]
                        five_pr_splits[fusion][transcript_id] = s1
                        s2 = transcripts[transcript_id][transcript_split:]
                        three_pr_splits[fusion][transcript_id] = s2
    return five_pr_splits, three_pr_splits
2,661
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L287-L352
[ "def", "parse_registries", "(", "filesystem", ",", "registries", ")", ":", "results", "=", "{", "}", "for", "path", "in", "registries", ":", "with", "NamedTemporaryFile", "(", "buffering", "=", "0", ")", "as", "tempfile", ":", "filesystem", ".", "download", "(", "path", ",", "tempfile", ".", "name", ")", "registry", "=", "RegistryHive", "(", "tempfile", ".", "name", ")", "registry", ".", "rootkey", "=", "registry_root", "(", "path", ")", "results", ".", "update", "(", "{", "k", ".", "path", ":", "(", "k", ".", "timestamp", ",", "k", ".", "values", ")", "for", "k", "in", "registry", ".", "keys", "(", ")", "}", ")", "return", "results" ]
Parses FusionInspector bed file to ascertain the ENSEMBL gene ids
def get_gene_ids(fusion_bed):
    """
    Parses a FusionInspector bed file to ascertain the ENSEMBL gene ids.

    :param str fusion_bed: path to the FusionInspector bed file.
    :return: dict mapping gene name -> ENSEMBL gene id.
    """
    ensembl_pattern = re.compile(r'(?P<gene>ENSG\d*)')
    name_to_ensembl = {}
    with open(fusion_bed, 'r') as bed:
        for record in bed:
            fields = record.split('\t')
            # Column 4 packs "transcript;gene_bit;name".
            transcript, gene_bit, name = fields[3].split(';')
            match = ensembl_pattern.search(gene_bit)
            if match:
                name_to_ensembl[name] = match.group('gene')
    return name_to_ensembl
2,662
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L355-L371
[ "def", "pull_session", "(", "session_id", "=", "None", ",", "url", "=", "'default'", ",", "io_loop", "=", "None", ",", "arguments", "=", "None", ")", ":", "coords", "=", "_SessionCoordinates", "(", "session_id", "=", "session_id", ",", "url", "=", "url", ")", "session", "=", "ClientSession", "(", "session_id", "=", "session_id", ",", "websocket_url", "=", "websocket_url_for_server_url", "(", "coords", ".", "url", ")", ",", "io_loop", "=", "io_loop", ",", "arguments", "=", "arguments", ")", "session", ".", "pull", "(", ")", "return", "session" ]
Writes STAR - Fusion results in Transgene BEDPE format
def reformat_star_fusion_output ( job , fusion_annot , fusion_file , transcript_file , transcript_gff_file , univ_options ) : input_files = { 'results.tsv' : fusion_file , 'fusion.bed' : fusion_annot } if transcript_file and transcript_gff_file : input_files [ 'transcripts.fa' ] = transcript_file input_files [ 'transcripts.gff' ] = transcript_gff_file work_dir = job . fileStore . getLocalTempDir ( ) input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) # Pull in assembled transcript file hugo_to_gene_ids = get_gene_ids ( input_files [ 'fusion.bed' ] ) if transcript_file and transcript_gff_file : transcripts = get_transcripts ( input_files [ 'transcripts.fa' ] ) five_pr_splits , three_pr_splits = split_fusion_transcript ( input_files [ 'transcripts.gff' ] , transcripts ) else : five_pr_splits = collections . defaultdict ( dict ) three_pr_splits = collections . defaultdict ( dict ) # Pull in assembled transcript annotation # Header for BEDPE file header = [ '# chr1' , 'start1' , 'end1' , 'chr2' , 'start2' , 'end2' , 'name' , 'score' , 'strand1' , 'strand2' , 'junctionSeq1' , 'junctionSeq2' , 'hugo1' , 'hugo2' ] output_path = os . path . join ( work_dir , 'fusion_results.bedpe' ) with open ( input_files [ 'results.tsv' ] , 'r' ) as in_f , open ( output_path , 'w' ) as out_f : writer = csv . writer ( out_f , delimiter = '\t' ) writer . writerow ( header ) for record in parse_star_fusion ( in_f ) : left_chr , left_break , left_strand = record . LeftBreakpoint . split ( ':' ) right_chr , right_break , right_strand = record . RightBreakpoint . split ( ':' ) fusion = '' . join ( [ record . LeftGene , '--' , record . RightGene ] ) name = '-' . join ( [ hugo_to_gene_ids [ record . LeftGene ] , hugo_to_gene_ids [ record . RightGene ] ] ) score = 'Junction:%s-Spanning:%s' % ( record . JunctionReadCount , record . SpanningFragCount ) # Add empty sequences in case Trinity doesn't output one if len ( five_pr_splits [ fusion ] . 
keys ( ) ) == 0 : five_pr_splits [ fusion ] [ 'N/A' ] = '.' if len ( three_pr_splits [ fusion ] . keys ( ) ) == 0 : three_pr_splits [ fusion ] [ 'N/A' ] = '.' for transcript_id in five_pr_splits [ fusion ] . keys ( ) : five_prime_seq = five_pr_splits [ fusion ] [ transcript_id ] three_prime_seq = three_pr_splits [ fusion ] [ transcript_id ] writer . writerow ( [ left_chr , '.' , # Donor start position is not necessary left_break , right_chr , right_break , '.' , # Acceptor end position is not necessary name , score , left_strand , right_strand , five_prime_seq , three_prime_seq , record . LeftGene , record . RightGene ] ) bedpe_id = job . fileStore . writeGlobalFile ( output_path ) export_results ( job , bedpe_id , 'fusion.bedpe' , univ_options , subfolder = 'mutations/fusions' ) job . fileStore . logToMaster ( 'Reformatted STAR-Fusion output for %s successfully' % univ_options [ 'patient' ] ) return bedpe_id
2,663
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L374-L467
[ "def", "reset_cooldown", "(", "self", ",", "ctx", ")", ":", "if", "self", ".", "_buckets", ".", "valid", ":", "bucket", "=", "self", ".", "_buckets", ".", "get_bucket", "(", "ctx", ".", "message", ")", "bucket", ".", "reset", "(", ")" ]
Ensure that the provided entries for the patient groups is formatted properly .
def _ensure_patient_group_is_ok ( patient_object , patient_name = None ) : from protect . addons . common import TCGAToGTEx assert isinstance ( patient_object , ( set , dict ) ) , '%s,%s' % ( patient_object , patient_name ) # set(dict) = set of keys of the dict test_set = set ( patient_object ) if 'tumor_type' not in patient_object : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does not contain a Tumor type.' ) elif patient_object [ 'tumor_type' ] not in TCGAToGTEx : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does contains an invalid Tumor type. Please use one of the ' 'valid TCGA tumor types.' ) if { 'tumor_dna_fastq_1' , 'normal_dna_fastq_1' , 'tumor_rna_fastq_1' } . issubset ( test_set ) : # Best case scenario, we get all fastqs pass else : # We have less than 3 fastqs so we have to have a haplotype. if 'hla_haplotype_files' not in test_set : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does not contain a hla_haplotype_files entry.\nCannot haplotype ' 'patient if all the input sequence files are not fastqs.' ) # Either we have a fastq and/or bam for the tumor and normal, or we need to be given a vcf if ( ( { re . search ( 'tumor_dna_((bam)|(fastq_1)).*' , x ) for x in test_set } == { None } or { re . search ( 'normal_dna_((bam)|(fastq_1)).*' , x ) for x in test_set } == { None } ) and ( 'mutation_vcf' not in test_set and 'fusion_bedpe' not in test_set ) ) : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does not contain a mutation_vcf or fusion_bedpe entry. If both ' 'tumor and normal DNA sequences (fastqs or bam) are not provided, ' 'a pre-computed vcf and/or bedpe must be provided.' ) # We have to be given a tumor rna fastq or bam unless we are processing ONLY fusions if { re . 
search ( 'tumor_rna_((bam)|(fastq_1)).*' , x ) for x in test_set } == { None } : if 'mutation_vcf' not in test_set and 'fusion_bedpe' in test_set : # The only case where it is ok to not have the genome mapped rna. pass else : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does not contain a tumor rna sequence data entry. We require ' 'either tumor_rna_fastq_1 or tumor_rna_bam.' ) # If we are given an RNA bam then it needs to have a corresponding transcriptome bam unless # we have also been provided expression values. if 'tumor_rna_bam' in test_set and 'tumor_rna_transcriptome_bam' not in test_set : if 'expression_files' not in test_set : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name + 'was provided a tumor rna bam with sequences mapped to the ' 'genome but was not provided a matching rna bam for the ' 'transcriptome or a tar containing expression values. ' 'We require either a matching transcriptome bam to estimate' 'expression, or the precomputed expression values.' ) )
2,664
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L94-L148
[ "def", "SetConsoleTextAttribute", "(", "stream_id", ",", "attrs", ")", ":", "handle", "=", "handles", "[", "stream_id", "]", "return", "windll", ".", "kernel32", ".", "SetConsoleTextAttribute", "(", "handle", ",", "attrs", ")" ]
Add the entries in defaults dict into input_dict if they don t exist in input_dict
def _add_default_entries ( input_dict , defaults_dict ) : for key , value in defaults_dict . iteritems ( ) : if key == 'patients' : print ( 'Cannot default `patients`.' ) continue if isinstance ( value , dict ) : if key not in input_dict or input_dict [ key ] is None : # User didn't specify anython for the tool, but the entry was still in there so we # just copy over the whole defaults dict input_dict [ key ] = value else : r = _add_default_entries ( input_dict . get ( key , { } ) , value ) input_dict [ key ] = r else : # Only write if not in input_dict if key not in input_dict or input_dict [ key ] is None : # Either the user didn't have the entry, or had it without a value input_dict [ key ] = value return input_dict
2,665
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L151-L180
[ "def", "_orthogonalize", "(", "X", ")", ":", "if", "X", ".", "size", "==", "X", ".", "shape", "[", "0", "]", ":", "return", "X", "from", "scipy", ".", "linalg", "import", "pinv", ",", "norm", "for", "i", "in", "range", "(", "1", ",", "X", ".", "shape", "[", "1", "]", ")", ":", "X", "[", ":", ",", "i", "]", "-=", "np", ".", "dot", "(", "np", ".", "dot", "(", "X", "[", ":", ",", "i", "]", ",", "X", "[", ":", ",", ":", "i", "]", ")", ",", "pinv", "(", "X", "[", ":", ",", ":", "i", "]", ")", ")", "# X[:, i] /= norm(X[:, i])", "return", "X" ]
Process one group from the input yaml . Ensure it has the required entries . If there is a subgroup that should be processed and then appended to the rest of the subgroups in that group handle it accordingly .
def _process_group ( input_group , required_group , groupname , append_subgroups = None ) : if append_subgroups is None : append_subgroups = [ ] tool_options = { } for key in input_group : _ensure_set_contains ( input_group [ key ] , required_group . get ( key , { } ) , groupname + '::' + key ) if key in append_subgroups : continue else : tool_options [ key ] = input_group [ key ] for key in input_group : if key in append_subgroups : continue else : for yek in append_subgroups : tool_options [ key ] . update ( input_group [ yek ] ) return tool_options
2,666
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L183-L211
[ "def", "clear_cache", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "clear_cache", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
For a path to a fastq_1 file return a fastq_2 file with the same prefix and naming scheme .
def get_fastq_2 ( job , patient_id , sample_type , fastq_1 ) : prefix , extn = fastq_1 , 'temp' final_extn = '' while extn : prefix , extn = os . path . splitext ( prefix ) final_extn = extn + final_extn if prefix . endswith ( '1' ) : prefix = prefix [ : - 1 ] job . fileStore . logToMaster ( '"%s" prefix for "%s" determined to be %s' % ( sample_type , patient_id , prefix ) ) break else : raise ParameterError ( 'Could not determine prefix from provided fastq (%s). Is it ' 'of the form <fastq_prefix>1.[fq/fastq][.gz]?' % fastq_1 ) if final_extn not in [ '.fastq' , '.fastq.gz' , '.fq' , '.fq.gz' ] : raise ParameterError ( 'If and _2 fastq path is not specified, only .fastq, .fq or ' 'their gzippped extensions are accepted. Could not process ' '%s:%s.' % ( patient_id , sample_type + '_fastq_1' ) ) return '' . join ( [ prefix , '2' , final_extn ] )
2,667
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L214-L241
[ "def", "eval", "(", "self", ",", "packet", ")", ":", "result", "=", "None", "terms", "=", "None", "if", "self", ".", "_when", "is", "None", "or", "self", ".", "_when", ".", "eval", "(", "packet", ")", ":", "result", "=", "self", ".", "_equation", ".", "eval", "(", "packet", ")", "return", "result" ]
Parse the config file and spawn a ProTECT job for every input sample .
def parse_config_file ( job , config_file , max_cores = None ) : sample_set , univ_options , processed_tool_inputs = _parse_config_file ( job , config_file , max_cores ) # Start a job for each sample in the sample set for patient_id in sample_set . keys ( ) : job . addFollowOnJobFn ( launch_protect , sample_set [ patient_id ] , univ_options , processed_tool_inputs ) return None
2,668
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L436-L449
[ "def", "filter_on_demands", "(", "ava", ",", "required", "=", "None", ",", "optional", "=", "None", ")", ":", "# Is all what's required there:", "if", "required", "is", "None", ":", "required", "=", "{", "}", "lava", "=", "dict", "(", "[", "(", "k", ".", "lower", "(", ")", ",", "k", ")", "for", "k", "in", "ava", ".", "keys", "(", ")", "]", ")", "for", "attr", ",", "vals", "in", "required", ".", "items", "(", ")", ":", "attr", "=", "attr", ".", "lower", "(", ")", "if", "attr", "in", "lava", ":", "if", "vals", ":", "for", "val", "in", "vals", ":", "if", "val", "not", "in", "ava", "[", "lava", "[", "attr", "]", "]", ":", "raise", "MissingValue", "(", "\"Required attribute value missing: %s,%s\"", "%", "(", "attr", ",", "val", ")", ")", "else", ":", "raise", "MissingValue", "(", "\"Required attribute missing: %s\"", "%", "(", "attr", ",", ")", ")", "if", "optional", "is", "None", ":", "optional", "=", "{", "}", "oka", "=", "[", "k", ".", "lower", "(", ")", "for", "k", "in", "required", ".", "keys", "(", ")", "]", "oka", ".", "extend", "(", "[", "k", ".", "lower", "(", ")", "for", "k", "in", "optional", ".", "keys", "(", ")", "]", ")", "# OK, so I can imaging releasing values that are not absolutely necessary", "# but not attributes that are not asked for.", "for", "attr", "in", "lava", ".", "keys", "(", ")", ":", "if", "attr", "not", "in", "oka", ":", "del", "ava", "[", "lava", "[", "attr", "]", "]", "return", "ava" ]
Iterate through all the tool options and download required files from their remote locations .
def get_all_tool_inputs ( job , tools , outer_key = '' , mutation_caller_list = None ) : for tool in tools : for option in tools [ tool ] : if isinstance ( tools [ tool ] [ option ] , dict ) : tools [ tool ] [ option ] = get_all_tool_inputs ( job , { option : tools [ tool ] [ option ] } , outer_key = ':' . join ( [ outer_key , tool ] ) . lstrip ( ':' ) ) [ option ] else : # If a file is of the type file, vcf, tar or fasta, it needs to be downloaded from # S3 if reqd, then written to job store. if option . split ( '_' ) [ - 1 ] in [ 'file' , 'vcf' , 'index' , 'fasta' , 'fai' , 'idx' , 'dict' , 'tbi' , 'beds' , 'gtf' , 'config' ] : tools [ tool ] [ option ] = job . addChildJobFn ( get_pipeline_inputs , ':' . join ( [ outer_key , tool , option ] ) . lstrip ( ':' ) , tools [ tool ] [ option ] ) . rv ( ) elif option == 'version' : tools [ tool ] [ option ] = str ( tools [ tool ] [ option ] ) if mutation_caller_list is not None : # Guaranteed to occur only in the outermost loop indexes = tools . pop ( 'indexes' ) indexes [ 'chromosomes' ] = parse_chromosome_string ( job , indexes [ 'chromosomes' ] ) for mutation_caller in mutation_caller_list : if mutation_caller == 'indexes' : continue tools [ mutation_caller ] . update ( indexes ) return tools
2,669
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L740-L774
[ "def", "convert_seeded_answers", "(", "answers", ")", ":", "converted", "=", "{", "}", "for", "index", ",", "answer", "in", "enumerate", "(", "answers", ")", ":", "converted", ".", "setdefault", "(", "answer", "[", "'answer'", "]", ",", "{", "}", ")", "converted", "[", "answer", "[", "'answer'", "]", "]", "[", "'seeded'", "+", "str", "(", "index", ")", "]", "=", "answer", "[", "'rationale'", "]", "return", "converted" ]
Get the input file from s3 or disk and write to file store .
def get_pipeline_inputs ( job , input_flag , input_file , encryption_key = None , per_file_encryption = False , gdc_download_token = None ) : work_dir = os . getcwd ( ) job . fileStore . logToMaster ( 'Obtaining file (%s) to the file job store' % input_flag ) if input_file . startswith ( ( 'http' , 'https' , 'ftp' ) ) : input_file = get_file_from_url ( job , input_file , encryption_key = encryption_key , per_file_encryption = per_file_encryption , write_to_jobstore = True ) elif input_file . startswith ( ( 'S3' , 's3' ) ) : input_file = get_file_from_s3 ( job , input_file , encryption_key = encryption_key , per_file_encryption = per_file_encryption , write_to_jobstore = True ) elif input_file . startswith ( ( 'GDC' , 'gdc' ) ) : input_file = get_file_from_gdc ( job , input_file , gdc_download_token = gdc_download_token , write_to_jobstore = True ) else : assert os . path . exists ( input_file ) , 'Bogus Input : ' + input_file input_file = job . fileStore . writeGlobalFile ( input_file ) return input_file
2,670
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L777-L806
[ "def", "parse_recommendations", "(", "self", ",", "recommendations_page", ")", ":", "user_info", "=", "self", ".", "parse_sidebar", "(", "recommendations_page", ")", "second_col", "=", "recommendations_page", ".", "find", "(", "u'div'", ",", "{", "u'id'", ":", "u'content'", "}", ")", ".", "find", "(", "u'table'", ")", ".", "find", "(", "u'tr'", ")", ".", "find_all", "(", "u'td'", ",", "recursive", "=", "False", ")", "[", "1", "]", "try", ":", "recommendations", "=", "second_col", ".", "find_all", "(", "u\"div\"", ",", "{", "u\"class\"", ":", "u\"spaceit borderClass\"", "}", ")", "if", "recommendations", ":", "user_info", "[", "u'recommendations'", "]", "=", "{", "}", "for", "row", "in", "recommendations", "[", "1", ":", "]", ":", "anime_table", "=", "row", ".", "find", "(", "u'table'", ")", "animes", "=", "anime_table", ".", "find_all", "(", "u'td'", ")", "liked_media_link", "=", "animes", "[", "0", "]", ".", "find", "(", "u'a'", ",", "recursive", "=", "False", ")", "link_parts", "=", "liked_media_link", ".", "get", "(", "u'href'", ")", ".", "split", "(", "u'/'", ")", "# of the form /anime|manga/64/Rozen_Maiden", "liked_media", "=", "getattr", "(", "self", ".", "session", ",", "link_parts", "[", "1", "]", ")", "(", "int", "(", "link_parts", "[", "2", "]", ")", ")", ".", "set", "(", "{", "u'title'", ":", "liked_media_link", ".", "text", "}", ")", "recommended_media_link", "=", "animes", "[", "1", "]", ".", "find", "(", "u'a'", ",", "recursive", "=", "False", ")", "link_parts", "=", "recommended_media_link", ".", "get", "(", "u'href'", ")", ".", "split", "(", "u'/'", ")", "# of the form /anime|manga/64/Rozen_Maiden", "recommended_media", "=", "getattr", "(", "self", ".", "session", ",", "link_parts", "[", "1", "]", ")", "(", "int", "(", "link_parts", "[", "2", "]", ")", ")", ".", "set", "(", "{", "u'title'", ":", "recommended_media_link", ".", "text", "}", ")", "recommendation_text", "=", "row", ".", "find", "(", "u'p'", ")", ".", "text", "recommendation_menu", 
"=", "row", ".", "find", "(", "u'div'", ",", "recursive", "=", "False", ")", "utilities", ".", "extract_tags", "(", "recommendation_menu", ")", "recommendation_date", "=", "utilities", ".", "parse_profile_date", "(", "recommendation_menu", ".", "text", ".", "split", "(", "u' - '", ")", "[", "1", "]", ")", "user_info", "[", "u'recommendations'", "]", "[", "liked_media", "]", "=", "{", "link_parts", "[", "1", "]", ":", "recommended_media", ",", "'text'", ":", "recommendation_text", ",", "'date'", ":", "recommendation_date", "}", "except", ":", "if", "not", "self", ".", "session", ".", "suppress_parse_exceptions", ":", "raise", "return", "user_info" ]
Obtain the input files for the patient and write them to the file store .
def prepare_samples ( job , patient_dict , univ_options ) : job . fileStore . logToMaster ( 'Downloading Inputs for %s' % univ_options [ 'patient' ] ) # For each sample type, check if the prefix is an S3 link or a regular file # Download S3 files. output_dict = { } for input_file in patient_dict : if not input_file . endswith ( ( 'bam' , 'bai' , '_1' , '_2' , 'files' , 'vcf' , 'bedpe' ) ) : output_dict [ input_file ] = patient_dict [ input_file ] continue output_dict [ input_file ] = get_pipeline_inputs ( job , ':' . join ( [ univ_options [ 'patient' ] , input_file ] ) , patient_dict [ input_file ] , encryption_key = ( univ_options [ 'sse_key' ] if patient_dict [ 'ssec_encrypted' ] else None ) , per_file_encryption = univ_options [ 'sse_key_is_master' ] , gdc_download_token = univ_options [ 'gdc_download_token' ] ) return output_dict
2,671
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L809-L845
[ "def", "_get_services", "(", ")", ":", "handle_scm", "=", "win32service", ".", "OpenSCManager", "(", "None", ",", "None", ",", "win32service", ".", "SC_MANAGER_ENUMERATE_SERVICE", ")", "try", ":", "services", "=", "win32service", ".", "EnumServicesStatusEx", "(", "handle_scm", ")", "except", "AttributeError", ":", "services", "=", "win32service", ".", "EnumServicesStatus", "(", "handle_scm", ")", "finally", ":", "win32service", ".", "CloseServiceHandle", "(", "handle_scm", ")", "return", "services" ]
Convenience function to return the bam and its index in the correct format for a sample type .
def get_patient_bams ( job , patient_dict , sample_type , univ_options , bwa_options , mutect_options ) : output_dict = { } if 'dna' in sample_type : sample_info = 'fix_pg_sorted' prefix = sample_type + '_' + sample_info else : sample_info = 'genome_sorted' prefix = 'rna_' + sample_info if sample_type + '_bam' in patient_dict [ 'gdc_inputs' ] : output_dict [ prefix + '.bam' ] = patient_dict [ sample_type + '_bam' ] [ 0 ] output_dict [ prefix + '.bam.bai' ] = patient_dict [ sample_type + '_bam' ] [ 1 ] elif sample_type + '_bai' in patient_dict : output_dict [ prefix + '.bam' ] = patient_dict [ sample_type + '_bam' ] output_dict [ prefix + '.bam.bai' ] = patient_dict [ sample_type + '_bai' ] else : from protect . alignment . dna import index_bamfile , index_disk output_job = job . wrapJobFn ( index_bamfile , patient_dict [ sample_type + '_bam' ] , 'rna' if sample_type == 'tumor_rna' else sample_type , univ_options , bwa_options [ 'samtools' ] , sample_info = sample_info , export = False , disk = PromisedRequirement ( index_disk , patient_dict [ sample_type + '_bam' ] ) ) job . addChild ( output_job ) output_dict = output_job . rv ( ) if sample_type == 'tumor_rna' : if 'tumor_rna_transcriptome_bam' not in patient_dict : patient_dict [ 'tumor_rna_transcriptome_bam' ] = None return { 'rna_genome' : output_dict , 'rna_transcriptome.bam' : patient_dict [ 'tumor_rna_transcriptome_bam' ] } else : return output_dict
2,672
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L860-L901
[ "def", "check_internet_on", "(", "secrets_file_path", ")", ":", "while", "True", ":", "if", "internet_on", "(", ")", "is", "True", "and", "not", "os", ".", "path", ".", "exists", "(", "secrets_file_path", ")", ":", "break", "else", ":", "print", "(", "\"Turn on your internet and unplug your USB to continue...\"", ")", "time", ".", "sleep", "(", "10", ")", "return", "True" ]
Convenience function to get the vcf from the patient dict
def get_patient_vcf ( job , patient_dict ) : temp = job . fileStore . readGlobalFile ( patient_dict [ 'mutation_vcf' ] , os . path . join ( os . getcwd ( ) , 'temp.gz' ) ) if is_gzipfile ( temp ) : outfile = job . fileStore . writeGlobalFile ( gunzip ( temp ) ) job . fileStore . deleteGlobalFile ( patient_dict [ 'mutation_vcf' ] ) else : outfile = patient_dict [ 'mutation_vcf' ] return outfile
2,673
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L904-L919
[ "def", "bucket", "(", "arg", ",", "buckets", ",", "closed", "=", "'left'", ",", "close_extreme", "=", "True", ",", "include_under", "=", "False", ",", "include_over", "=", "False", ",", ")", ":", "op", "=", "Bucket", "(", "arg", ",", "buckets", ",", "closed", "=", "closed", ",", "close_extreme", "=", "close_extreme", ",", "include_under", "=", "include_under", ",", "include_over", "=", "include_over", ",", ")", "return", "op", ".", "to_expr", "(", ")" ]
Convenience function to get the mhc haplotype from the patient dict
def get_patient_mhc_haplotype ( job , patient_dict ) : haplotype_archive = job . fileStore . readGlobalFile ( patient_dict [ 'hla_haplotype_files' ] ) haplotype_archive = untargz ( haplotype_archive , os . getcwd ( ) ) output_dict = { } for filename in 'mhci_alleles.list' , 'mhcii_alleles.list' : output_dict [ filename ] = job . fileStore . writeGlobalFile ( os . path . join ( haplotype_archive , filename ) ) return output_dict
2,674
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L940-L954
[ "def", "match_color_index", "(", "self", ",", "color", ")", ":", "from", "jcvi", ".", "utils", ".", "webcolors", "import", "color_diff", "if", "isinstance", "(", "color", ",", "int", ")", ":", "return", "color", "if", "color", ":", "if", "isinstance", "(", "color", ",", "six", ".", "string_types", ")", ":", "rgb", "=", "map", "(", "int", ",", "color", ".", "split", "(", "','", ")", ")", "else", ":", "rgb", "=", "color", ".", "Get", "(", ")", "logging", ".", "disable", "(", "logging", ".", "DEBUG", ")", "distances", "=", "[", "color_diff", "(", "rgb", ",", "x", ")", "for", "x", "in", "self", ".", "xlwt_colors", "]", "logging", ".", "disable", "(", "logging", ".", "NOTSET", ")", "result", "=", "distances", ".", "index", "(", "min", "(", "distances", ")", ")", "self", ".", "unused_colors", ".", "discard", "(", "self", ".", "xlwt_colors", "[", "result", "]", ")", "return", "result" ]
Convenience function to get the expression from the patient dict
def get_patient_expression ( job , patient_dict ) : expression_archive = job . fileStore . readGlobalFile ( patient_dict [ 'expression_files' ] ) expression_archive = untargz ( expression_archive , os . getcwd ( ) ) output_dict = { } for filename in 'rsem.genes.results' , 'rsem.isoforms.results' : output_dict [ filename ] = job . fileStore . writeGlobalFile ( os . path . join ( expression_archive , filename ) ) return output_dict
2,675
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L957-L971
[ "def", "append_sint32", "(", "self", ",", "value", ")", ":", "zigzag_value", "=", "wire_format", ".", "zig_zag_encode", "(", "value", ")", "self", ".", "_stream", ".", "append_var_uint32", "(", "zigzag_value", ")" ]
Generate a config file for a ProTECT run on hg19 .
def generate_config_file ( ) : shutil . copy ( os . path . join ( os . path . dirname ( __file__ ) , 'input_parameters.yaml' ) , os . path . join ( os . getcwd ( ) , 'ProTECT_config.yaml' ) )
2,676
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L974-L981
[ "def", "_getNearestMappingIndexList", "(", "fromValList", ",", "toValList", ")", ":", "indexList", "=", "[", "]", "for", "fromTimestamp", "in", "fromValList", ":", "smallestDiff", "=", "_getSmallestDifference", "(", "toValList", ",", "fromTimestamp", ")", "i", "=", "toValList", ".", "index", "(", "smallestDiff", ")", "indexList", ".", "append", "(", "i", ")", "return", "indexList" ]
This is the main function for ProTECT .
def main ( ) : parser = argparse . ArgumentParser ( prog = 'ProTECT' , description = 'Prediction of T-Cell Epitopes for Cancer Therapy' , epilog = 'Contact Arjun Rao (aarao@ucsc.edu) if you encounter ' 'any problems while running ProTECT' ) inputs = parser . add_mutually_exclusive_group ( required = True ) inputs . add_argument ( '--config_file' , dest = 'config_file' , help = 'Config file to be used in the ' 'run.' , type = str , default = None ) inputs . add_argument ( '--generate_config' , dest = 'generate_config' , help = 'Generate a config file ' 'in the current directory that is pre-filled with references and flags for ' 'an hg19 run.' , action = 'store_true' , default = False ) parser . add_argument ( '--max-cores-per-job' , dest = 'max_cores' , help = 'Maximum cores to use per ' 'job. Aligners and Haplotypers ask for cores dependent on the machine that ' 'the launchpad gets assigned to -- In a heterogeneous cluster, this can ' 'lead to problems. This value should be set to the number of cpus on the ' 'smallest node in a cluster.' , type = int , required = False , default = None ) # We parse the args once to see if the user has asked for a config file to be generated. In # this case, we don't need a jobstore. To handle the case where Toil arguments are passed to # ProTECT, we parse known args, and if the used specified config_file instead of generate_config # we re-parse the arguments with the added Toil parser. params , others = parser . parse_known_args ( ) if params . generate_config : generate_config_file ( ) else : Job . Runner . addToilOptions ( parser ) params = parser . parse_args ( ) params . config_file = os . path . abspath ( params . config_file ) if params . maxCores : if not params . max_cores : params . max_cores = int ( params . maxCores ) else : if params . max_cores > int ( params . maxCores ) : print ( "The value provided to max-cores-per-job (%s) was greater than that " "provided to maxCores (%s). Setting max-cores-per-job = maxCores." 
% ( params . max_cores , params . maxCores ) , file = sys . stderr ) params . max_cores = int ( params . maxCores ) start = Job . wrapJobFn ( parse_config_file , params . config_file , params . max_cores ) Job . Runner . startToil ( start , params ) return None
2,677
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L984-L1026
[ "def", "_weights_callback", "(", "self", ",", "msg", ")", ":", "# Read weights", "weights", "=", "np", ".", "array", "(", "msg", ".", "data", ")", "# If needed, initialize indiv_weight_buffers", "if", "len", "(", "self", ".", "_weight_buffers", ")", "==", "0", ":", "self", ".", "_weight_buffers", "=", "[", "[", "]", "for", "i", "in", "range", "(", "len", "(", "weights", ")", ")", "]", "# Record individual weights", "for", "i", ",", "w", "in", "enumerate", "(", "weights", ")", ":", "if", "len", "(", "self", ".", "_weight_buffers", "[", "i", "]", ")", "==", "self", ".", "_ntaps", ":", "self", ".", "_weight_buffers", "[", "i", "]", ".", "pop", "(", "0", ")", "self", ".", "_weight_buffers", "[", "i", "]", ".", "append", "(", "w", ")" ]
Poll Check for a non - response string generated by LCDd and return any string read . LCDd generates strings for key presses menu events & screen visibility changes .
def poll ( self ) : if select . select ( [ self . tn ] , [ ] , [ ] , 0 ) == ( [ self . tn ] , [ ] , [ ] ) : response = urllib . unquote ( self . tn . read_until ( b"\n" ) . decode ( ) ) if self . debug : print "Telnet Poll: %s" % ( response [ : - 1 ] ) # TODO Keep track of which screen is displayed return response else : return None
2,678
https://github.com/jinglemansweep/lcdproc/blob/973628fc326177c9deaf3f2e1a435159eb565ae0/lcdproc/server.py#L61-L74
[ "def", "data", "(", "self", ",", "data", ")", ":", "self", ".", "_data", "=", "self", ".", "_enforceDataType", "(", "data", ")", "# Enforce self._data to be a QFont", "self", ".", "familyCti", ".", "data", "=", "fontFamilyIndex", "(", "self", ".", "data", ",", "list", "(", "self", ".", "familyCti", ".", "iterConfigValues", ")", ")", "self", ".", "pointSizeCti", ".", "data", "=", "self", ".", "data", ".", "pointSize", "(", ")", "self", ".", "weightCti", ".", "data", "=", "fontWeightIndex", "(", "self", ".", "data", ",", "list", "(", "self", ".", "weightCti", ".", "iterConfigValues", ")", ")", "self", ".", "italicCti", ".", "data", "=", "self", ".", "data", ".", "italic", "(", ")" ]
Converts a module namespace to a Python dictionary . Used by get_settings_diff .
def module_to_dict ( module , omittable = lambda k : k . startswith ( '_' ) ) : return dict ( [ ( k , repr ( v ) ) for k , v in module . __dict__ . items ( ) if not omittable ( k ) ] )
2,679
https://github.com/APSL/django-kaio/blob/b74b109bcfba31d973723bc419e2c95d190b80b7/kaio/management/commands/generate_ini.py#L15-L19
[ "def", "info_gain", "(", "current_impurity", ",", "true_branch", ",", "false_branch", ",", "criterion", ")", ":", "measure_impurity", "=", "gini_impurity", "if", "criterion", "==", "\"gini\"", "else", "entropy", "p", "=", "float", "(", "len", "(", "true_branch", ")", ")", "/", "(", "len", "(", "true_branch", ")", "+", "len", "(", "false_branch", ")", ")", "return", "current_impurity", "-", "p", "*", "measure_impurity", "(", "true_branch", ")", "-", "(", "1", "-", "p", ")", "*", "measure_impurity", "(", "false_branch", ")" ]
Run snpeff on an input vcf .
def run_snpeff ( job , merged_mutation_file , univ_options , snpeff_options ) : work_dir = os . getcwd ( ) input_files = { 'merged_mutations.vcf' : merged_mutation_file , 'snpeff_index.tar.gz' : snpeff_options [ 'index' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) input_files [ 'snpeff_index' ] = untargz ( input_files [ 'snpeff_index.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'eff' , '-dataDir' , input_files [ 'snpeff_index' ] , '-c' , '/' . join ( [ input_files [ 'snpeff_index' ] , 'snpEff_' + univ_options [ 'ref' ] + '_gencode.config' ] ) , '-no-intergenic' , '-no-downstream' , '-no-upstream' , # '-canon', '-noStats' , univ_options [ 'ref' ] + '_gencode' , input_files [ 'merged_mutations.vcf' ] ] xmx = snpeff_options [ 'java_Xmx' ] if snpeff_options [ 'java_Xmx' ] else univ_options [ 'java_Xmx' ] with open ( '/' . join ( [ work_dir , 'mutations.vcf' ] ) , 'w' ) as snpeff_file : docker_call ( tool = 'snpeff' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_xmx = xmx , outfile = snpeff_file , tool_version = snpeff_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( snpeff_file . name ) export_results ( job , output_file , snpeff_file . name , univ_options , subfolder = 'mutations/snpeffed' ) job . fileStore . logToMaster ( 'Ran snpeff on %s successfully' % univ_options [ 'patient' ] ) return output_file
2,680
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_annotation/snpeff.py#L32-L69
[ "def", "active_repositories", "(", "doc", ")", ":", "if", "doc", ".", "get", "(", "'state'", ")", "!=", "'deactivated'", ":", "for", "repository_id", ",", "repo", "in", "doc", ".", "get", "(", "'repositories'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "repo", ".", "get", "(", "'state'", ")", "!=", "'deactivated'", ":", "repo", "[", "'id'", "]", "=", "repository_id", "repo", "[", "'organisation_id'", "]", "=", "doc", "[", "'_id'", "]", "yield", "repository_id", ",", "repo" ]
Generate a list of all files in input_directory each as a list containing path components .
def paths_in_directory ( input_directory ) : paths = [ ] for base_path , directories , filenames in os . walk ( input_directory ) : relative_path = os . path . relpath ( base_path , input_directory ) path_components = relative_path . split ( os . sep ) if path_components [ 0 ] == "." : path_components = path_components [ 1 : ] if path_components and path_components [ 0 ] . startswith ( "." ) : # hidden dir continue path_components = filter ( bool , path_components ) # remove empty components for filename in filenames : if filename . startswith ( "." ) : # hidden file continue paths . append ( path_components + [ filename ] ) return paths
2,681
https://github.com/paypal/baler/blob/db4f09dd2c7729b2df5268c87ad3b4cb43396abf/baler/baler.py#L22-L41
[ "def", "_retryable_write", "(", "self", ",", "retryable", ",", "func", ",", "session", ")", ":", "with", "self", ".", "_tmp_session", "(", "session", ")", "as", "s", ":", "return", "self", ".", "_retry_with_session", "(", "retryable", ",", "func", ",", "s", ",", "None", ")" ]
A wrapper for assess_car_t_validity .
def run_car_t_validity_assessment ( job , rsem_files , univ_options , reports_options ) : return job . addChildJobFn ( assess_car_t_validity , rsem_files [ 'rsem.genes.results' ] , univ_options , reports_options ) . rv ( )
2,682
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/addons/assess_car_t_validity.py#L25-L36
[ "def", "update_domain_base_path_mapping", "(", "self", ",", "domain_name", ",", "lambda_name", ",", "stage", ",", "base_path", ")", ":", "api_id", "=", "self", ".", "get_api_id", "(", "lambda_name", ")", "if", "not", "api_id", ":", "print", "(", "\"Warning! Can't update base path mapping!\"", ")", "return", "base_path_mappings", "=", "self", ".", "apigateway_client", ".", "get_base_path_mappings", "(", "domainName", "=", "domain_name", ")", "found", "=", "False", "for", "base_path_mapping", "in", "base_path_mappings", ".", "get", "(", "'items'", ",", "[", "]", ")", ":", "if", "base_path_mapping", "[", "'restApiId'", "]", "==", "api_id", "and", "base_path_mapping", "[", "'stage'", "]", "==", "stage", ":", "found", "=", "True", "if", "base_path_mapping", "[", "'basePath'", "]", "!=", "base_path", ":", "self", ".", "apigateway_client", ".", "update_base_path_mapping", "(", "domainName", "=", "domain_name", ",", "basePath", "=", "base_path_mapping", "[", "'basePath'", "]", ",", "patchOperations", "=", "[", "{", "\"op\"", ":", "\"replace\"", ",", "\"path\"", ":", "\"/basePath\"", ",", "\"value\"", ":", "''", "if", "base_path", "is", "None", "else", "base_path", "}", "]", ")", "if", "not", "found", ":", "self", ".", "apigateway_client", ".", "create_base_path_mapping", "(", "domainName", "=", "domain_name", ",", "basePath", "=", "''", "if", "base_path", "is", "None", "else", "base_path", ",", "restApiId", "=", "api_id", ",", "stage", "=", "stage", ")" ]
A wrapper for the entire dna alignment subgraph .
def align_dna ( job , fastqs , sample_type , univ_options , bwa_options ) : # The mkdup and regroup steps use picard that allots heap space using the Xmx key in the # univ_options dictionary. This should reflect in the job allotment. Since We want all these # jobs to occur on the same node, we ened to give them all the same memory requirements. bwa = job . wrapJobFn ( run_bwa , fastqs , sample_type , univ_options , bwa_options , disk = PromisedRequirement ( bwa_disk , fastqs , bwa_options [ 'index' ] ) , memory = univ_options [ 'java_Xmx' ] , cores = bwa_options [ 'n' ] ) sam2bam = job . wrapJobFn ( bam_conversion , bwa . rv ( ) , sample_type , univ_options , bwa_options [ 'samtools' ] , disk = PromisedRequirement ( sam2bam_disk , bwa . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) # reheader takes the same disk as sam2bam so we can serialize this on the same worker. reheader = job . wrapJobFn ( fix_bam_header , sam2bam . rv ( ) , sample_type , univ_options , bwa_options [ 'samtools' ] , disk = PromisedRequirement ( sam2bam_disk , bwa . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) regroup = job . wrapJobFn ( add_readgroups , reheader . rv ( ) , sample_type , univ_options , bwa_options [ 'picard' ] , disk = PromisedRequirement ( regroup_disk , reheader . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) mkdup = job . wrapJobFn ( mark_duplicates , regroup . rv ( ) , sample_type , univ_options , bwa_options [ 'picard' ] , disk = PromisedRequirement ( mkdup_disk , regroup . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) index = job . wrapJobFn ( index_bamfile , mkdup . rv ( ) , sample_type , univ_options , bwa_options [ 'samtools' ] , sample_info = 'fix_pg_sorted' , disk = PromisedRequirement ( index_disk , mkdup . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) job . addChild ( bwa ) bwa . addChild ( sam2bam ) sam2bam . addChild ( reheader ) reheader . addChild ( regroup ) regroup . addChild ( mkdup ) mkdup . addChild ( index ) return index . rv ( )
2,683
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L54-L103
[ "def", "remove_users_from_organization", "(", "self", ",", "organization_id", ",", "users_list", ")", ":", "log", ".", "warning", "(", "'Removing users...'", ")", "url", "=", "'rest/servicedeskapi/organization/{}/user'", ".", "format", "(", "organization_id", ")", "data", "=", "{", "'usernames'", ":", "users_list", "}", "return", "self", ".", "delete", "(", "url", ",", "headers", "=", "self", ".", "experimental_headers", ",", "data", "=", "data", ")" ]
Align a pair of fastqs with bwa .
def run_bwa ( job , fastqs , sample_type , univ_options , bwa_options ) : work_dir = os . getcwd ( ) input_files = { 'dna_1.fastq' : fastqs [ 0 ] , 'dna_2.fastq' : fastqs [ 1 ] , 'bwa_index.tar.gz' : bwa_options [ 'index' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) # Handle gzipped file gz = '.gz' if is_gzipfile ( input_files [ 'dna_1.fastq' ] ) else '' if gz : for read_file in 'dna_1.fastq' , 'dna_2.fastq' : os . symlink ( read_file , read_file + gz ) input_files [ read_file + gz ] = input_files [ read_file ] + gz # Untar the index input_files [ 'bwa_index' ] = untargz ( input_files [ 'bwa_index.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'mem' , '-t' , str ( bwa_options [ 'n' ] ) , '-v' , '1' , # Don't print INFO messages to the stderr '/' . join ( [ input_files [ 'bwa_index' ] , univ_options [ 'ref' ] ] ) , input_files [ 'dna_1.fastq' + gz ] , input_files [ 'dna_2.fastq' + gz ] ] with open ( '' . join ( [ work_dir , '/' , sample_type , '.sam' ] ) , 'w' ) as samfile : docker_call ( tool = 'bwa' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = samfile , tool_version = bwa_options [ 'version' ] ) # samfile.name retains the path info output_file = job . fileStore . writeGlobalFile ( samfile . name ) job . fileStore . logToMaster ( 'Ran bwa on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file
2,684
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L106-L147
[ "def", "_get_entity_id_if_container_metric", "(", "self", ",", "labels", ")", ":", "if", "CadvisorPrometheusScraperMixin", ".", "_is_container_metric", "(", "labels", ")", ":", "pod", "=", "self", ".", "_get_pod_by_metric_label", "(", "labels", ")", "if", "is_static_pending_pod", "(", "pod", ")", ":", "# If the pod is static, ContainerStatus is unavailable.", "# Return the pod UID so that we can collect metrics from it later on.", "return", "self", ".", "_get_pod_uid", "(", "labels", ")", "return", "self", ".", "_get_container_id", "(", "labels", ")" ]
Convert a sam to a bam .
def bam_conversion ( job , samfile , sample_type , univ_options , samtools_options ) : work_dir = os . getcwd ( ) input_files = { sample_type + '.sam' : samfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) bamfile = '/' . join ( [ work_dir , sample_type + '.bam' ] ) parameters = [ 'view' , '-bS' , '-o' , docker_path ( bamfile ) , input_files [ sample_type + '.sam' ] ] docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = samtools_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( bamfile ) # The samfile is no longer useful so delete it job . fileStore . deleteGlobalFile ( samfile ) job . fileStore . logToMaster ( 'Ran sam2bam on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file
2,685
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L150-L179
[ "def", "get_station_by_name", "(", "self", ",", "station_name", ",", "num_minutes", "=", "None", ",", "direction", "=", "None", ",", "destination", "=", "None", ",", "stops_at", "=", "None", ")", ":", "url", "=", "self", ".", "api_base_url", "+", "'getStationDataByNameXML'", "params", "=", "{", "'StationDesc'", ":", "station_name", "}", "if", "num_minutes", ":", "url", "=", "url", "+", "'_withNumMins'", "params", "[", "'NumMins'", "]", "=", "num_minutes", "response", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "timeout", "=", "10", ")", "if", "response", ".", "status_code", "!=", "200", ":", "return", "[", "]", "trains", "=", "self", ".", "_parse_station_data", "(", "response", ".", "content", ")", "if", "direction", "is", "not", "None", "or", "destination", "is", "not", "None", ":", "return", "self", ".", "_prune_trains", "(", "trains", ",", "direction", "=", "direction", ",", "destination", "=", "destination", ",", "stops_at", "=", "stops_at", ")", "return", "trains" ]
Fix the bam header to remove the command line call . Failing to do this causes Picard to reject the bam .
def fix_bam_header ( job , bamfile , sample_type , univ_options , samtools_options , retained_chroms = None ) : if retained_chroms is None : retained_chroms = [ ] work_dir = os . getcwd ( ) input_files = { sample_type + '.bam' : bamfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'view' , '-H' , input_files [ sample_type + '.bam' ] ] with open ( '/' . join ( [ work_dir , sample_type + '_input_bam.header' ] ) , 'w' ) as headerfile : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = headerfile , tool_version = samtools_options [ 'version' ] ) with open ( headerfile . name , 'r' ) as headerfile , open ( '/' . join ( [ work_dir , sample_type + '_output_bam.header' ] ) , 'w' ) as outheaderfile : for line in headerfile : if line . startswith ( '@PG' ) : line = '\t' . join ( [ x for x in line . strip ( ) . split ( '\t' ) if not x . startswith ( 'CL' ) ] ) if retained_chroms and line . startswith ( '@SQ' ) : if line . strip ( ) . split ( ) [ 1 ] . lstrip ( 'SN:' ) not in retained_chroms : continue print ( line . strip ( ) , file = outheaderfile ) parameters = [ 'reheader' , docker_path ( outheaderfile . name ) , input_files [ sample_type + '.bam' ] ] with open ( '/' . join ( [ work_dir , sample_type + '_fixPG.bam' ] ) , 'w' ) as fixpg_bamfile : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = fixpg_bamfile , tool_version = samtools_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( fixpg_bamfile . name ) # The old bam file is now useless. job . fileStore . deleteGlobalFile ( bamfile ) job . fileStore . logToMaster ( 'Ran reheader on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file
2,686
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L182-L230
[ "def", "lv_grid_generators_bus_bar", "(", "nd", ")", ":", "lv_stats", "=", "{", "}", "for", "la", "in", "nd", ".", "_mv_grid_districts", "[", "0", "]", ".", "lv_load_areas", "(", ")", ":", "for", "lvgd", "in", "la", ".", "lv_grid_districts", "(", ")", ":", "station_neighbors", "=", "list", "(", "lvgd", ".", "lv_grid", ".", "_graph", "[", "lvgd", ".", "lv_grid", ".", "_station", "]", ".", "keys", "(", ")", ")", "# check if nodes of a statio are members of list generators", "station_generators", "=", "[", "x", "for", "x", "in", "station_neighbors", "if", "x", "in", "lvgd", ".", "lv_grid", ".", "generators", "(", ")", "]", "lv_stats", "[", "repr", "(", "lvgd", ".", "lv_grid", ".", "_station", ")", "]", "=", "station_generators", "return", "lv_stats" ]
Add read groups to the bam .
def add_readgroups ( job , bamfile , sample_type , univ_options , picard_options ) : work_dir = os . getcwd ( ) input_files = { sample_type + '.bam' : bamfile } get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'AddOrReplaceReadGroups' , 'CREATE_INDEX=false' , 'I=/data/' + sample_type + '.bam' , 'O=/data/' + sample_type + '_reheader.bam' , 'SO=coordinate' , 'ID=1' , '' . join ( [ 'LB=' , univ_options [ 'patient' ] ] ) , 'PL=ILLUMINA' , 'PU=12345' , '' . join ( [ 'SM=' , sample_type . rstrip ( '_dna' ) ] ) ] docker_call ( tool = 'picard' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_xmx = univ_options [ 'java_Xmx' ] , tool_version = picard_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , sample_type + '_reheader.bam' ] ) ) # Delete the old bam file job . fileStore . deleteGlobalFile ( bamfile ) job . fileStore . logToMaster ( 'Ran add_read_groups on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file
2,687
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L233-L267
[ "def", "genemania_force_directed", "(", "self", ",", "curveSteepness", "=", "None", ",", "defaultEdgeWeight", "=", "None", ",", "defaultSpringCoefficient", "=", "None", ",", "defaultSpringLength", "=", "None", ",", "EdgeAttribute", "=", "None", ",", "ignoreHiddenElements", "=", "None", ",", "isDeterministic", "=", "None", ",", "maxNodeMass", "=", "None", ",", "maxWeightCutoff", "=", "None", ",", "midpointEdges", "=", "None", ",", "minNodeMass", "=", "None", ",", "minWeightCutoff", "=", "None", ",", "network", "=", "None", ",", "NodeAttribute", "=", "None", ",", "nodeList", "=", "None", ",", "numIterations", "=", "None", ",", "singlePartition", "=", "None", ",", "Type", "=", "None", ",", "verbose", "=", "None", ")", ":", "network", "=", "check_network", "(", "self", ",", "network", ",", "verbose", "=", "verbose", ")", "PARAMS", "=", "set_param", "(", "[", "'curveSteepness'", ",", "'defaultEdgeWeight'", ",", "'defaultSpringCoefficient'", ",", "'defaultSpringLength'", ",", "'EdgeAttribute'", ",", "'ignoreHiddenElements'", ",", "'isDeterministic'", ",", "'maxNodeMass'", ",", "'maxWeightCutoff'", ",", "'midpointEdges'", ",", "'minNodeMass'", ",", "'minWeightCutoff'", ",", "'network'", ",", "'NodeAttribute'", ",", "'nodeList'", ",", "'numIterations'", ",", "'singlePartition'", ",", "'Type'", "]", ",", "[", "curveSteepness", ",", "defaultEdgeWeight", ",", "defaultSpringCoefficient", ",", "defaultSpringLength", ",", "EdgeAttribute", ",", "ignoreHiddenElements", ",", "isDeterministic", ",", "maxNodeMass", ",", "maxWeightCutoff", ",", "midpointEdges", ",", "minNodeMass", ",", "minWeightCutoff", ",", "network", ",", "NodeAttribute", ",", "nodeList", ",", "numIterations", ",", "singlePartition", ",", "Type", "]", ")", "response", "=", "api", "(", "url", "=", "self", ".", "__url", "+", "\"/genemania-force-directed\"", ",", "PARAMS", "=", "PARAMS", ",", "method", "=", "\"POST\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
Returns the weekday of the date . 0 = aaitabar
def weekday ( cls , year , month , day ) : return NepDate . from_bs_date ( year , month , day ) . weekday ( )
2,688
https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L24-L26
[ "def", "get_connection_details", "(", "session", ",", "vcenter_resource_model", ",", "resource_context", ")", ":", "session", "=", "session", "resource_context", "=", "resource_context", "# get vCenter connection details from vCenter resource", "user", "=", "vcenter_resource_model", ".", "user", "vcenter_url", "=", "resource_context", ".", "address", "password", "=", "session", ".", "DecryptPassword", "(", "vcenter_resource_model", ".", "password", ")", ".", "Value", "return", "VCenterConnectionDetails", "(", "vcenter_url", ",", "user", ",", "password", ")" ]
Returns the number of days in a month
def monthrange ( cls , year , month ) : functions . check_valid_bs_range ( NepDate ( year , month , 1 ) ) return values . NEPALI_MONTH_DAY_DATA [ year ] [ month - 1 ]
2,689
https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L29-L32
[ "def", "convert_complexFaultSource", "(", "self", ",", "node", ")", ":", "geom", "=", "node", ".", "complexFaultGeometry", "edges", "=", "self", ".", "geo_lines", "(", "geom", ")", "mfd", "=", "self", ".", "convert_mfdist", "(", "node", ")", "msr", "=", "valid", ".", "SCALEREL", "[", "~", "node", ".", "magScaleRel", "]", "(", ")", "with", "context", "(", "self", ".", "fname", ",", "node", ")", ":", "cmplx", "=", "source", ".", "ComplexFaultSource", "(", "source_id", "=", "node", "[", "'id'", "]", ",", "name", "=", "node", "[", "'name'", "]", ",", "tectonic_region_type", "=", "node", ".", "attrib", ".", "get", "(", "'tectonicRegion'", ")", ",", "mfd", "=", "mfd", ",", "rupture_mesh_spacing", "=", "self", ".", "complex_fault_mesh_spacing", ",", "magnitude_scaling_relationship", "=", "msr", ",", "rupture_aspect_ratio", "=", "~", "node", ".", "ruptAspectRatio", ",", "edges", "=", "edges", ",", "rake", "=", "~", "node", ".", "rake", ",", "temporal_occurrence_model", "=", "self", ".", "get_tom", "(", "node", ")", ")", "return", "cmplx" ]
Similar to itermonthdates but returns day number instead of NepDate object
def itermonthdays ( cls , year , month ) : for day in NepCal . itermonthdates ( year , month ) : if day . month == month : yield day . day else : yield 0
2,690
https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L65-L72
[ "def", "get_cluster_request_parameters", "(", "cluster_info", ",", "cloud_config", ",", "engine_config", ")", ":", "cluster_request", "=", "{", "}", "cloud_config", "=", "util", ".", "_make_minimal", "(", "cloud_config", ".", "__dict__", ")", "if", "bool", "(", "cloud_config", ")", ":", "cluster_request", "[", "'cloud_config'", "]", "=", "cloud_config", "engine_config", "=", "util", ".", "_make_minimal", "(", "engine_config", ".", "__dict__", ")", "if", "bool", "(", "engine_config", ")", ":", "cluster_request", "[", "'engine_config'", "]", "=", "engine_config", "cluster_request", ".", "update", "(", "util", ".", "_make_minimal", "(", "cluster_info", ".", "__dict__", ")", ")", "return", "cluster_request" ]
Similar to itermonthdays2 but returns tuples of day and weekday .
def itermonthdays2 ( cls , year , month ) : for day in NepCal . itermonthdates ( year , month ) : if day . month == month : yield ( day . day , day . weekday ( ) ) else : yield ( 0 , day . weekday ( ) )
2,691
https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L75-L82
[ "def", "reset_rammbock", "(", "self", ")", ":", "for", "client", "in", "self", ".", "_clients", ":", "client", ".", "close", "(", ")", "for", "server", "in", "self", ".", "_servers", ":", "server", ".", "close", "(", ")", "self", ".", "_init_caches", "(", ")" ]
Returns a list of week in a month . A week is a list of NepDate objects
def monthdatescalendar ( cls , year , month ) : weeks = [ ] week = [ ] for day in NepCal . itermonthdates ( year , month ) : week . append ( day ) if len ( week ) == 7 : weeks . append ( week ) week = [ ] if len ( week ) > 0 : weeks . append ( week ) return weeks
2,692
https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L85-L96
[ "def", "_bind_topics", "(", "self", ",", "topics", ")", ":", "# FIXME: Allow for these subscriptions to fail and clean up the previous ones", "# so that this function is atomic", "self", ".", "client", ".", "subscribe", "(", "topics", ".", "status", ",", "self", ".", "_on_status_message", ")", "self", ".", "client", ".", "subscribe", "(", "topics", ".", "tracing", ",", "self", ".", "_on_trace", ")", "self", ".", "client", ".", "subscribe", "(", "topics", ".", "streaming", ",", "self", ".", "_on_report", ")", "self", ".", "client", ".", "subscribe", "(", "topics", ".", "response", ",", "self", ".", "_on_response_message", ")" ]
Return a list of the weeks in the month month of the year as full weeks . Weeks are lists of seven day numbers .
def monthdayscalendar ( cls , year , month ) : weeks = [ ] week = [ ] for day in NepCal . itermonthdays ( year , month ) : week . append ( day ) if len ( week ) == 7 : weeks . append ( week ) week = [ ] if len ( week ) > 0 : weeks . append ( week ) return weeks
2,693
https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L99-L111
[ "def", "aggregate", "(", "self", ",", "val1", ",", "val2", ")", ":", "assert", "val1", "is", "not", "None", "assert", "val2", "is", "not", "None", "return", "self", ".", "_aggregator", "(", "val1", ",", "val2", ")" ]
Return a list of the weeks in the month month of the year as full weeks . Weeks are lists of seven tuples of day numbers and weekday numbers .
def monthdays2calendar ( cls , year , month ) : weeks = [ ] week = [ ] for day in NepCal . itermonthdays2 ( year , month ) : week . append ( day ) if len ( week ) == 7 : weeks . append ( week ) week = [ ] if len ( week ) > 0 : weeks . append ( week ) return weeks
2,694
https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L114-L126
[ "def", "aggregate", "(", "self", ",", "val1", ",", "val2", ")", ":", "assert", "val1", "is", "not", "None", "assert", "val2", "is", "not", "None", "return", "self", ".", "_aggregator", "(", "val1", ",", "val2", ")" ]
A wrapper for the the entire SomaticSniper sub - graph .
def run_somaticsniper_with_merge ( job , tumor_bam , normal_bam , univ_options , somaticsniper_options ) : spawn = job . wrapJobFn ( run_somaticsniper , tumor_bam , normal_bam , univ_options , somaticsniper_options , split = False ) . encapsulate ( ) job . addChild ( spawn ) return spawn . rv ( )
2,695
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L50-L64
[ "def", "add_item", "(", "self", ",", "item_url", ",", "item_metadata", ")", ":", "c", "=", "self", ".", "conn", ".", "cursor", "(", ")", "c", ".", "execute", "(", "\"DELETE FROM items WHERE url=?\"", ",", "(", "str", "(", "item_url", ")", ",", ")", ")", "self", ".", "conn", ".", "commit", "(", ")", "c", ".", "execute", "(", "\"INSERT INTO items VALUES (?, ?, ?)\"", ",", "(", "str", "(", "item_url", ")", ",", "item_metadata", ",", "self", ".", "__now_iso_8601", "(", ")", ")", ")", "self", ".", "conn", ".", "commit", "(", ")", "c", ".", "close", "(", ")" ]
Run the SomaticSniper subgraph on the DNA bams . Optionally split the results into per - chromosome vcfs .
def run_somaticsniper ( job , tumor_bam , normal_bam , univ_options , somaticsniper_options , split = True ) : # Get a list of chromosomes to handle if somaticsniper_options [ 'chromosomes' ] : chromosomes = somaticsniper_options [ 'chromosomes' ] else : chromosomes = sample_chromosomes ( job , somaticsniper_options [ 'genome_fai' ] ) perchrom_somaticsniper = defaultdict ( ) snipe = job . wrapJobFn ( run_somaticsniper_full , tumor_bam , normal_bam , univ_options , somaticsniper_options , disk = PromisedRequirement ( sniper_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , somaticsniper_options [ 'genome_fasta' ] ) , memory = '6G' ) pileup = job . wrapJobFn ( run_pileup , tumor_bam , univ_options , somaticsniper_options , disk = PromisedRequirement ( pileup_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , somaticsniper_options [ 'genome_fasta' ] ) , memory = '6G' ) filtersnipes = job . wrapJobFn ( filter_somaticsniper , tumor_bam , snipe . rv ( ) , pileup . rv ( ) , univ_options , somaticsniper_options , disk = PromisedRequirement ( sniper_filter_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , somaticsniper_options [ 'genome_fasta' ] ) , memory = '6G' ) job . addChild ( snipe ) job . addChild ( pileup ) snipe . addChild ( filtersnipes ) pileup . addChild ( filtersnipes ) if split : unmerge_snipes = job . wrapJobFn ( unmerge , filtersnipes . rv ( ) , 'somaticsniper' , chromosomes , somaticsniper_options , univ_options ) filtersnipes . addChild ( unmerge_snipes ) return unmerge_snipes . rv ( ) else : return filtersnipes . rv ( )
2,696
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L67-L123
[ "def", "_GetDelayImportTimestamps", "(", "self", ",", "pefile_object", ")", ":", "delay_import_timestamps", "=", "[", "]", "if", "not", "hasattr", "(", "pefile_object", ",", "'DIRECTORY_ENTRY_DELAY_IMPORT'", ")", ":", "return", "delay_import_timestamps", "for", "importdata", "in", "pefile_object", ".", "DIRECTORY_ENTRY_DELAY_IMPORT", ":", "dll_name", "=", "importdata", ".", "dll", "try", ":", "dll_name", "=", "dll_name", ".", "decode", "(", "'ascii'", ")", "except", "UnicodeDecodeError", ":", "dll_name", "=", "dll_name", ".", "decode", "(", "'ascii'", ",", "errors", "=", "'replace'", ")", "timestamp", "=", "getattr", "(", "importdata", ".", "struct", ",", "'dwTimeStamp'", ",", "0", ")", "delay_import_timestamps", ".", "append", "(", "[", "dll_name", ",", "timestamp", "]", ")", "return", "delay_import_timestamps" ]
Run SomaticSniper on the DNA bams .
def run_somaticsniper_full ( job , tumor_bam , normal_bam , univ_options , somaticsniper_options ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'normal.bam' : normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , 'normal.bam.bai' : normal_bam [ 'normal_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa.tar.gz' : somaticsniper_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : somaticsniper_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } output_file = os . path . join ( work_dir , 'somatic-sniper_full.vcf' ) parameters = [ '-f' , input_files [ 'genome.fa' ] , '-F' , 'vcf' , '-G' , '-L' , '-q' , '1' , '-Q' , '15' , input_files [ 'tumor.bam' ] , input_files [ 'normal.bam' ] , docker_path ( output_file ) ] docker_call ( tool = 'somaticsniper' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( output_file ) job . fileStore . logToMaster ( 'Ran SomaticSniper on %s successfully' % univ_options [ 'patient' ] ) return outfile
2,697
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L126-L165
[ "def", "_create_download_failed_message", "(", "exception", ",", "url", ")", ":", "message", "=", "'Failed to download from:\\n{}\\nwith {}:\\n{}'", ".", "format", "(", "url", ",", "exception", ".", "__class__", ".", "__name__", ",", "exception", ")", "if", "_is_temporal_problem", "(", "exception", ")", ":", "if", "isinstance", "(", "exception", ",", "requests", ".", "ConnectionError", ")", ":", "message", "+=", "'\\nPlease check your internet connection and try again.'", "else", ":", "message", "+=", "'\\nThere might be a problem in connection or the server failed to process '", "'your request. Please try again.'", "elif", "isinstance", "(", "exception", ",", "requests", ".", "HTTPError", ")", ":", "try", ":", "server_message", "=", "''", "for", "elem", "in", "decode_data", "(", "exception", ".", "response", ".", "content", ",", "MimeType", ".", "XML", ")", ":", "if", "'ServiceException'", "in", "elem", ".", "tag", "or", "'Message'", "in", "elem", ".", "tag", ":", "server_message", "+=", "elem", ".", "text", ".", "strip", "(", "'\\n\\t '", ")", "except", "ElementTree", ".", "ParseError", ":", "server_message", "=", "exception", ".", "response", ".", "text", "message", "+=", "'\\nServer response: \"{}\"'", ".", "format", "(", "server_message", ")", "return", "message" ]
Filter SomaticSniper calls .
def filter_somaticsniper ( job , tumor_bam , somaticsniper_output , tumor_pileup , univ_options , somaticsniper_options ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'input.vcf' : somaticsniper_output , 'pileup.txt' : tumor_pileup , 'genome.fa.tar.gz' : somaticsniper_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : somaticsniper_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } # Run snpfilter.pl parameters = [ 'snpfilter.pl' , '--snp-file' , input_files [ 'input.vcf' ] , '--indel-file' , input_files [ 'pileup.txt' ] ] # Creates /data/input.vcf.SNPfilter docker_call ( tool = 'somaticsniper-addons' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) # Run prepare_for_readcount.pl parameters = [ 'prepare_for_readcount.pl' , '--snp-file' , input_files [ 'input.vcf' ] + '.SNPfilter' ] # Creates /data/input.vcf.SNPfilter.pos docker_call ( tool = 'somaticsniper-addons' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) # Run bam-readcount parameters = [ '-b' , '15' , '-f' , input_files [ 'genome.fa' ] , '-l' , input_files [ 'input.vcf' ] + '.SNPfilter.pos' , '-w' , '1' , input_files [ 'tumor.bam' ] ] # Creates the read counts file with open ( os . path . 
join ( work_dir , 'readcounts.txt' ) , 'w' ) as readcounts_file : docker_call ( tool = 'bam-readcount' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = readcounts_file , tool_version = somaticsniper_options [ 'bam_readcount' ] [ 'version' ] ) # Run fpfilter.pl parameters = [ 'fpfilter.pl' , '--snp-file' , input_files [ 'input.vcf' ] + '.SNPfilter' , '--readcount-file' , docker_path ( readcounts_file . name ) ] # Creates input.vcf.SNPfilter.fp_pass and input.vcf.SNPfilter.fp_fail docker_call ( tool = 'somaticsniper-addons' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) # Run highconfidence.pl parameters = [ 'highconfidence.pl' , '--snp-file' , input_files [ 'input.vcf' ] + '.SNPfilter.fp_pass' ] # Creates input.vcf.SNPfilter.fp_pass.hc docker_call ( tool = 'somaticsniper-addons' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( os . path . join ( os . getcwd ( ) , 'input.vcf.SNPfilter.fp_pass.hc' ) ) job . fileStore . logToMaster ( 'Filtered SomaticSniper for %s successfully' % univ_options [ 'patient' ] ) return outfile
2,698
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L168-L243
[ "def", "handle_not_found", "(", "exception", ",", "*", "*", "extra", ")", ":", "assert", "isinstance", "(", "exception", ",", "NotFound", ")", "page", "=", "Page", ".", "query", ".", "filter", "(", "db", ".", "or_", "(", "Page", ".", "url", "==", "request", ".", "path", ",", "Page", ".", "url", "==", "request", ".", "path", "+", "\"/\"", ")", ")", ".", "first", "(", ")", "if", "page", ":", "_add_url_rule", "(", "page", ".", "url", ")", "return", "render_template", "(", "[", "page", ".", "template_name", ",", "current_app", ".", "config", "[", "'PAGES_DEFAULT_TEMPLATE'", "]", "]", ",", "page", "=", "page", ")", "elif", "'wrapped'", "in", "extra", ":", "return", "extra", "[", "'wrapped'", "]", "(", "exception", ")", "else", ":", "return", "exception" ]
Runs a samtools pileup on the tumor bam .
def run_pileup ( job , tumor_bam , univ_options , somaticsniper_options ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa.tar.gz' : somaticsniper_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : somaticsniper_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'pileup' , '-cvi' , '-f' , docker_path ( input_files [ 'genome.fa' ] ) , docker_path ( input_files [ 'tumor.bam' ] ) ] with open ( os . path . join ( work_dir , 'pileup.txt' ) , 'w' ) as pileup_file : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = pileup_file , tool_version = somaticsniper_options [ 'samtools' ] [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( pileup_file . name ) job . fileStore . logToMaster ( 'Ran samtools pileup on %s successfully' % univ_options [ 'patient' ] ) return outfile
2,699
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L261-L294
[ "def", "get_cluster_nodes", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_cluster_nodes_cache'", ")", ":", "server", ",", "port", "=", "self", ".", "_servers", "[", "0", "]", ".", "split", "(", "':'", ")", "try", ":", "self", ".", "_cluster_nodes_cache", "=", "(", "get_cluster_info", "(", "server", ",", "port", ",", "self", ".", "_ignore_cluster_errors", ")", "[", "'nodes'", "]", ")", "except", "(", "socket", ".", "gaierror", ",", "socket", ".", "timeout", ")", "as", "err", ":", "raise", "Exception", "(", "'Cannot connect to cluster {0} ({1})'", ".", "format", "(", "self", ".", "_servers", "[", "0", "]", ",", "err", ")", ")", "return", "self", ".", "_cluster_nodes_cache" ]