idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
38,900
def space_lock(args):
    """Lock a workspace."""
    resp = fapi.lock_workspace(args.project, args.workspace)
    fapi._check_response_code(resp, 204)
    if fcconfig.verbosity:
        eprint('Locked workspace {0}/{1}'.format(args.project, args.workspace))
    return 0
Lock a workspace
38,901
def space_unlock(args):
    """Unlock a workspace."""
    resp = fapi.unlock_workspace(args.project, args.workspace)
    fapi._check_response_code(resp, 204)
    if fcconfig.verbosity:
        eprint('Unlocked workspace {0}/{1}'.format(args.project, args.workspace))
    return 0
Unlock a workspace
38,902
def space_new(args):
    """Create a new workspace."""
    resp = fapi.create_workspace(args.project, args.workspace,
                                 args.authdomain, dict())
    fapi._check_response_code(resp, 201)
    if fcconfig.verbosity:
        eprint(resp.content)
    return 0
Create a new workspace .
38,903
def space_info(args):
    """Get metadata for a workspace."""
    resp = fapi.get_workspace(args.project, args.workspace)
    fapi._check_response_code(resp, 200)
    return resp.text
Get metadata for a workspace .
38,904
def space_delete(args):
    """Delete a workspace (prompts for confirmation unless --yes given)."""
    message = "WARNING: this will delete workspace: \n\t{0}/{1}".format(
        args.project, args.workspace)
    if not args.yes and not _confirm_prompt(message):
        return 0
    resp = fapi.delete_workspace(args.project, args.workspace)
    # 404 is tolerated so deleting an already-absent workspace succeeds.
    fapi._check_response_code(resp, [200, 202, 204, 404])
    if fcconfig.verbosity:
        print('Deleted workspace {0}/{1}'.format(args.project, args.workspace))
    return 0
Delete a workspace .
38,905
def space_clone(args):
    """Replicate a workspace."""
    # Default the destination coordinates to the source ones when unset.
    args.to_workspace = args.to_workspace or args.workspace
    args.to_project = args.to_project or args.project
    if (args.project == args.to_project
            and args.workspace == args.to_workspace):
        eprint("Error: destination project and namespace must differ from"
               " cloned workspace")
        return 1
    resp = fapi.clone_workspace(args.project, args.workspace,
                                args.to_project, args.to_workspace)
    fapi._check_response_code(resp, 201)
    if fcconfig.verbosity:
        print("{}/{} successfully cloned to {}/{}".format(
            args.project, args.workspace, args.to_project, args.to_workspace))
    return 0
Replicate a workspace
38,906
def space_acl(args):
    """Retrieve the access control list for a workspace.

    Returns a dict mapping each user to their access level.
    """
    resp = fapi.get_workspace_acl(args.project, args.workspace)
    fapi._check_response_code(resp, 200)
    return {user: info['accessLevel']
            for user, info in sorted(resp.json()['acl'].items())}
Retrieve access control list for a workspace
38,907
def space_set_acl(args):
    """Assign an ACL role to a list of users for a workspace.

    Returns 0 on success, 1 if any user was not recognized.
    """
    acl_updates = [{"email": user, "accessLevel": args.role}
                   for user in args.users]
    resp = fapi.update_workspace_acl(args.project, args.workspace, acl_updates)
    fapi._check_response_code(resp, 200)
    errors = resp.json()['usersNotFound']
    if len(errors):
        eprint("Unable to assign role for unrecognized users:")
        for user in errors:
            eprint("\t{0}".format(user['email']))
        return 1
    if fcconfig.verbosity:
        print("Successfully updated {0} role(s)".format(len(acl_updates)))
    return 0
Assign an ACL role to list of users for a workspace
38,908
def space_search(args):
    """Search for workspaces matching certain criteria.

    Currently filters on the bucket-name regex when given.  Returns a
    sorted list of "namespace/name<TAB>bucketName" strings.
    """
    r = fapi.list_workspaces()
    fapi._check_response_code(r, 200)
    workspaces = r.json()
    # Keep only workspaces whose bucket name matches the given regex.
    # (A dead, never-read 'extra_terms' accumulator was removed here.)
    if args.bucket:
        workspaces = [w for w in workspaces
                      if re.search(args.bucket, w['workspace']['bucketName'])]
    pretty_spaces = []
    for space in workspaces:
        ws = space['workspace']
        pretty_spaces.append('{0}/{1}\t{2}'.format(
            ws['namespace'], ws['name'], ws['bucketName']))
    return sorted(pretty_spaces, key=lambda s: s.lower())
Search for workspaces matching certain criteria
38,909
def entity_import(args):
    """Upload an entity loadfile in batches."""
    with open(args.tsvfile) as tsvf:
        headerline = tsvf.readline().strip()
        entity_data = [line.rstrip('\n') for line in tsvf]
    return _batch_load(args.project, args.workspace, headerline,
                       entity_data, args.chunk_size)
Upload an entity loadfile .
38,910
def entity_types(args):
    """List entity types in a workspace."""
    resp = fapi.list_entity_types(args.project, args.workspace)
    fapi._check_response_code(resp, 200)
    return resp.json().keys()
List entity types in a workspace
38,911
def entity_list(args):
    """List entities in a workspace, one "type<TAB>name" line each."""
    resp = fapi.get_entities_with_type(args.project, args.workspace)
    fapi._check_response_code(resp, 200)
    return ['{0}\t{1}'.format(entity['entityType'], entity['name'])
            for entity in resp.json()]
List entities in a workspace .
38,912
def participant_list(args):
    """List participants within a container entity (or the workspace)."""
    if args.entity_type and args.entity:
        # A participant "contains" exactly itself.
        if args.entity_type == 'participant':
            return [args.entity.strip()]
        resp = fapi.get_entity(args.project, args.workspace,
                               args.entity_type, args.entity)
        fapi._check_response_code(resp, 200)
        members = resp.json()['attributes']["participants"]['items']
        return [member['entityName'] for member in members]
    return __get_entities(args, "participant", page_size=2000)
List participants within a container
38,913
def pair_list(args):
    """List pairs within a container entity (or the workspace)."""
    if args.entity_type and args.entity:
        if args.entity_type == 'pair':
            return [args.entity.strip()]
        if args.entity_type == 'participant':
            # Gather all pairs, then keep those owned by the participant.
            entities = _entity_paginator(args.project, args.workspace,
                                         'pair', page_size=2000)
            return [e['name'] for e in entities
                    if e['attributes']['participant']['entityName'] == args.entity]
        resp = fapi.get_entity(args.project, args.workspace,
                               args.entity_type, args.entity)
        fapi._check_response_code(resp, 200)
        pairs = resp.json()['attributes']["pairs"]['items']
        return [pair['entityName'] for pair in pairs]
    return __get_entities(args, "pair", page_size=2000)
List pairs within a container .
38,914
def sample_list(args):
    """List samples within a container entity (or the workspace)."""
    if args.entity_type and args.entity:
        if args.entity_type == 'sample':
            return [args.entity.strip()]
        if args.entity_type == 'participant':
            # Gather all samples, then keep those owned by the participant.
            samples = _entity_paginator(args.project, args.workspace,
                                        'sample', page_size=2000)
            return [e['name'] for e in samples
                    if e['attributes']['participant']['entityName'] == args.entity]
        resp = fapi.get_entity(args.project, args.workspace,
                               args.entity_type, args.entity)
        fapi._check_response_code(resp, 200)
        if args.entity_type == 'pair':
            # A pair consists of exactly its case and control samples.
            pair = resp.json()['attributes']
            samples = [pair['case_sample'], pair['control_sample']]
        else:
            samples = resp.json()['attributes']["samples"]['items']
        return [sample['entityName'] for sample in samples]
    return __get_entities(args, "sample", page_size=2000)
List samples within a container .
38,915
def entity_delete(args):
    """Delete an entity in a workspace (prompts unless --yes given)."""
    msg = "WARNING: this will delete {0} {1} in {2}/{3}".format(
        args.entity_type, args.entity, args.project, args.workspace)
    if not (args.yes or _confirm_prompt(msg)):
        return
    json_body = [{"entityType": args.entity_type,
                  "entityName": args.entity}]
    r = fapi.delete_entities(args.project, args.workspace, json_body)
    fapi._check_response_code(r, 204)
    if fcconfig.verbosity:
        # BUG FIX: this previously read 'args.type', an attribute that is
        # never set (everywhere else uses 'args.entity_type'), which would
        # raise AttributeError in verbose mode.  Typo in message also fixed.
        print("Successfully deleted " + args.entity_type + " " + args.entity)
Delete entity in a workspace .
38,916
def meth_wdl(args):
    """Retrieve WDL for the given version of a repository method."""
    resp = fapi.get_repository_method(args.namespace, args.method,
                                      args.snapshot_id, True)
    fapi._check_response_code(resp, 200)
    return resp.text
Retrieve WDL for given version of a repository method
38,917
def meth_acl(args):
    """Retrieve the ACL for the given version of a repository method."""
    resp = fapi.get_repository_method_acl(args.namespace, args.method,
                                          args.snapshot_id)
    fapi._check_response_code(resp, 200)
    acls = sorted(resp.json(), key=lambda k: k['user'])
    return map(lambda acl: '{0}\t{1}'.format(acl['user'], acl['role']), acls)
Retrieve access control list for given version of a repository method
38,918
def meth_set_acl(args):
    """Assign an ACL role to a list of users for a workflow.

    If no snapshot id is given, the latest snapshot of the method is used.
    Returns 0 on success, 1 if the method could not be found.
    """
    acl_updates = [{"user": user, "role": args.role}
                   for user in set(expand_fc_groups(args.users))
                   if user != fapi.whoami()]
    # Renamed local from 'id' to avoid shadowing the builtin.
    snapshot_id = args.snapshot_id
    if not snapshot_id:
        # No snapshot given: look up the latest one.
        r = fapi.list_repository_methods(namespace=args.namespace,
                                         name=args.method)
        fapi._check_response_code(r, 200)
        versions = r.json()
        if len(versions) == 0:
            if fcconfig.verbosity:
                eprint("method {0}/{1} not found".format(args.namespace,
                                                         args.method))
            return 1
        latest = sorted(versions, key=lambda m: m['snapshotId'])[-1]
        snapshot_id = latest['snapshotId']
    r = fapi.update_repository_method_acl(args.namespace, args.method,
                                          snapshot_id, acl_updates)
    fapi._check_response_code(r, 200)
    if fcconfig.verbosity:
        print("Updated ACL for {0}/{1}:{2}".format(args.namespace,
                                                   args.method, snapshot_id))
    return 0
Assign an ACL role to a list of users for a workflow .
38,919
def expand_fc_groups(users):
    """If a user is a FireCloud group, yield all members of the group.

    Caveat: only group admins may expand a group; for groups the caller
    does not administer, the group address itself is yielded.  Ordinary
    email addresses are yielded unchanged.
    """
    groups = None
    for user in users:
        if '@' in user and not user.lower().endswith('@firecloud.org'):
            # Plain email address: pass through untouched.
            yield user
            continue
        if '@' not in user:
            fcgroup = user
        else:
            # Lazily fetch the groups we administer, keyed by group email.
            if groups is None:
                r = fapi.get_groups()
                fapi._check_response_code(r, 200)
                groups = {group['groupEmail'].lower(): group['groupName']
                          for group in r.json() if group['role'] == 'Admin'}
            if user.lower() not in groups:
                if fcconfig.verbosity:
                    eprint("You do not have access to the members of {}".format(user))
                yield user
                continue
            fcgroup = groups[user.lower()]
        r = fapi.get_group(fcgroup)
        fapi._check_response_code(r, 200)
        fcgroup_data = r.json()
        for admin in fcgroup_data['adminsEmails']:
            yield admin
        for member in fcgroup_data['membersEmails']:
            yield member
If user is a firecloud group return all members of the group . Caveat is that only group admins may do this .
38,920
def meth_list(args):
    """List workflows in the methods repository."""
    resp = fapi.list_repository_methods(namespace=args.namespace,
                                        name=args.method,
                                        snapshotId=args.snapshot_id)
    fapi._check_response_code(resp, 200)
    results = ['{0}\t{1}\t{2}'.format(m['namespace'], m['name'],
                                      m['snapshotId'])
               for m in resp.json()]
    return sorted(results, key=lambda s: s.lower())
List workflows in the methods repository
38,921
def config_acl(args):
    """Retrieve the access control list for a method configuration."""
    resp = fapi.get_repository_config_acl(args.namespace, args.config,
                                          args.snapshot_id)
    fapi._check_response_code(resp, 200)
    acls = sorted(resp.json(), key=lambda k: k['user'])
    return map(lambda acl: '{0}\t{1}'.format(acl['user'], acl['role']), acls)
Retrieve access control list for a method configuration
38,922
def config_set_acl(args):
    """Assign an ACL role to a list of users for a config.

    If no snapshot id is given, the latest snapshot of the configuration
    is used.  Returns 0 on success, 1 if the config could not be found.
    """
    acl_updates = [{"user": user, "role": args.role}
                   for user in set(expand_fc_groups(args.users))
                   if user != fapi.whoami()]
    # Renamed local from 'id' to avoid shadowing the builtin.
    snapshot_id = args.snapshot_id
    if not snapshot_id:
        # No snapshot given: look up the latest one.
        r = fapi.list_repository_configs(namespace=args.namespace,
                                         name=args.config)
        fapi._check_response_code(r, 200)
        versions = r.json()
        if len(versions) == 0:
            if fcconfig.verbosity:
                eprint("Configuration {0}/{1} not found".format(
                    args.namespace, args.config))
            return 1
        latest = sorted(versions, key=lambda c: c['snapshotId'])[-1]
        snapshot_id = latest['snapshotId']
    r = fapi.update_repository_config_acl(args.namespace, args.config,
                                          snapshot_id, acl_updates)
    fapi._check_response_code(r, 200)
    if fcconfig.verbosity:
        print("Updated ACL for {0}/{1}:{2}".format(args.namespace,
                                                   args.config, snapshot_id))
    return 0
Assign an ACL role to a list of users for a config .
38,923
def config_get(args):
    """Retrieve a method config from a workspace as pretty-printed JSON."""
    resp = fapi.get_workspace_config(args.project, args.workspace,
                                     args.namespace, args.config)
    fapi._check_response_code(resp, 200)
    # Sorted keys and fixed separators keep the output diff-friendly.
    return json.dumps(resp.json(), indent=4, separators=(',', ': '),
                      sort_keys=True, ensure_ascii=False)
Retrieve a method config from a workspace and send it to stdout.
38,924
def config_wdl(args):
    """Retrieve the WDL for a method config in a workspace."""
    resp = fapi.get_workspace_config(args.project, args.workspace,
                                     args.namespace, args.config)
    fapi._check_response_code(resp, 200)
    # Re-point args at the underlying repository method, then delegate.
    method = resp.json()["methodRepoMethod"]
    args.namespace = method["methodNamespace"]
    args.method = method["methodName"]
    args.snapshot_id = method["methodVersion"]
    return meth_wdl(args)
Retrieve the WDL for a method config in a workspace and send it to stdout.
38,925
def config_diff(args):
    """Compare method configuration definitions across workspaces.

    Ignores methodConfigVersion unless the verbose argument is set.
    """
    config_1 = config_get(args).splitlines()
    cfg_1_name = args.config
    # Re-point args at the second workspace/config and fetch again.
    args.project = args.Project
    args.workspace = args.Workspace
    if args.Config is not None:
        args.config = args.Config
    if args.Namespace is not None:
        args.namespace = args.Namespace
    config_2 = config_get(args).splitlines()
    if not args.verbose:
        config_1 = skip_cfg_ver(config_1)
        config_2 = skip_cfg_ver(config_2)
    return list(unified_diff(config_1, config_2, cfg_1_name, args.config,
                             lineterm=''))
Compare method configuration definitions across workspaces . Ignores methodConfigVersion if the verbose argument is not set
38,926
def config_delete(args):
    """Remove a method config from a workspace."""
    resp = fapi.delete_workspace_config(args.project, args.workspace,
                                        args.namespace, args.config)
    fapi._check_response_code(resp, [200, 204])
    return resp.text if resp.text else None
Remove a method config from a workspace
38,927
def attr_copy(args):
    """Copy workspace attributes between workspaces."""
    # Default the destination coordinates to the source ones when unset.
    args.to_workspace = args.to_workspace or args.workspace
    args.to_project = args.to_project or args.project
    if (args.project == args.to_project
            and args.workspace == args.to_workspace):
        eprint("destination project and namespace must differ from"
               " source workspace")
        return 1

    resp = fapi.get_workspace(args.project, args.workspace)
    fapi._check_response_code(resp, 200)
    workspace_attrs = resp.json()['workspace']['attributes']

    # Optionally restrict to an explicit subset of attribute names.
    if args.attributes:
        workspace_attrs = {k: v for k, v in iteritems(workspace_attrs)
                           if k in args.attributes}

    if len(workspace_attrs) == 0:
        print("No workspace attributes defined in {0}/{1}".format(
            args.project, args.workspace))
        return 1

    message = "This will copy the following workspace attributes to {0}/{1}\n"
    message = message.format(args.to_project, args.to_workspace)
    for k, v in sorted(iteritems(workspace_attrs)):
        message += '\t{0}\t{1}\n'.format(k, v)
    if not args.yes and not _confirm_prompt(message):
        return 0

    updates = [fapi._attr_set(k, v) for k, v in iteritems(workspace_attrs)]
    resp = fapi.update_workspace_attributes(args.to_project,
                                            args.to_workspace, updates)
    fapi._check_response_code(resp, 200)
    return 0
Copy workspace attributes between workspaces .
38,928
def health(args):
    """Health-check the FireCloud server."""
    resp = fapi.health()
    fapi._check_response_code(resp, 200)
    return resp.content
Health FireCloud Server
38,929
def sset_loop(args):
    """Loop over all sample sets in a workspace, performing a func."""
    fiss_func = __cmd_to_func(args.action)
    if not fiss_func:
        eprint("invalid FISS cmd '" + args.action + "'")
        return 1
    resp = fapi.get_entities(args.project, args.workspace, "sample_set")
    fapi._check_response_code(resp, 200)
    sample_sets = [entity['name'] for entity in resp.json()]

    args.entity_type = "sample_set"
    for sset in sample_sets:
        print('\n# {0}::{1}/{2} {3}'.format(args.project, args.workspace,
                                            sset, args.action))
        args.entity = sset
        try:
            result = fiss_func(args)
        except Exception as e:
            status = __pretty_print_fc_exception(e)
            # Unless told to keep going, abort with the failure status.
            if not args.keep_going:
                return status
        printToCLI(result)
    return 0
Loop over all sample sets in a workspace performing a func
38,930
def monitor(args):
    """Retrieve status of jobs submitted from a given workspace, as a list
    of TSV lines sorted by descending order of job submission date."""
    r = fapi.list_submissions(args.project, args.workspace)
    fapi._check_response_code(r, 200)
    statuses = sorted(r.json(), key=lambda k: k['submissionDate'],
                      reverse=True)
    header = '\t'.join(list(statuses[0].keys()))

    # Flatten a field value for TSV output; dict fields that carry an
    # entityType are rendered as "type:name".
    # (A dead lambda binding of the same name, immediately shadowed by
    # this def, was removed.)
    def expander(thing):
        if isinstance(thing, dict):
            entityType = thing.get("entityType", None)
            if entityType:
                return "{0}:{1}".format(entityType, thing['entityName'])
        return "{0}".format(thing)

    return [header] + ['\t'.join(map(expander, v.values()))
                       for v in statuses]
Retrieve status of jobs submitted from a given workspace as a list of TSV lines sorted by descending order of job submission date
38,931
def supervise(args):
    """Run legacy, Firehose-style workflow of workflows."""
    sample_sets = args.sample_sets
    # Default to every sample set in the workspace.
    if not sample_sets:
        resp = fapi.get_entities(args.project, args.workspace, "sample_set")
        fapi._check_response_code(resp, 200)
        sample_sets = [s['name'] for s in resp.json()]

    message = ("Sample Sets ({}):\n\t".format(len(sample_sets))
               + "\n\t".join(sample_sets))
    prompt = ("\nLaunch workflow in " + args.project + "/" + args.workspace
              + " on these sample sets? [Y\\n]: ")
    if not args.yes and not _confirm_prompt(message, prompt):
        return
    return supervisor.supervise(args.project, args.workspace, args.namespace,
                                args.workflow, sample_sets,
                                args.json_checkpoint)
Run legacy Firehose - style workflow of workflows
38,932
def entity_copy(args):
    """Copy entities from one workspace to another."""
    # Default the destination coordinates to the source ones when unset.
    args.to_workspace = args.to_workspace or args.workspace
    args.to_project = args.to_project or args.project
    if (args.project == args.to_project
            and args.workspace == args.to_workspace):
        eprint("destination project and namespace must differ from"
               " source workspace")
        return 1

    # With no explicit entities, copy every entity of the given type.
    if not args.entities:
        ents = _entity_paginator(args.project, args.workspace,
                                 args.entity_type, page_size=500,
                                 filter_terms=None, sort_direction='asc')
        args.entities = [e['name'] for e in ents]

    prompt = "Copy {0} {1}(s) from {2}/{3} to {4}/{5}?\n[Y\\n]: ".format(
        len(args.entities), args.entity_type, args.project, args.workspace,
        args.to_project, args.to_workspace)
    if not args.yes and not _confirm_prompt("", prompt):
        return

    resp = fapi.copy_entities(args.project, args.workspace, args.to_project,
                              args.to_workspace, args.entity_type,
                              args.entities,
                              link_existing_entities=args.link)
    fapi._check_response_code(resp, 201)
    return 0
Copy entities from one workspace to another .
38,933
def _validate_helper(args, config_d, workspace_d, entity_d=None):
    """Return FISSFC validation information on a config, optionally for
    a given entity.

    Returns a 4-tuple of lists of (name, value) pairs:
    (invalid_inputs, invalid_outputs, missing_attrs, missing_wksp_attrs).
    The last two are only populated when entity_d is given.
    """
    method_cfg = config_d['methodConfiguration']
    invalid_inputs = [(i, method_cfg['inputs'][i])
                      for i in sorted(config_d["invalidInputs"])]
    invalid_outputs = [(i, method_cfg['outputs'][i])
                       for i in sorted(config_d["invalidOutputs"])]

    missing_attrs = []
    missing_wksp_attrs = []
    if entity_d:
        # (An unused 'entity_type' local, assigned from rootEntityType but
        # never read, was removed.)
        entity_attrs = set(entity_d['attributes'])
        workspace_attrs = workspace_d['workspace']['attributes']
        for inp, val in iteritems(method_cfg['inputs']):
            if val.startswith("this."):
                expected_attr = val.split('.')[1]
                # 'name' is an implicit attribute of every entity.
                if expected_attr == "name":
                    continue
                if expected_attr not in entity_attrs:
                    missing_attrs.append((inp, val))
            if val.startswith("workspace."):
                expected_attr = val.split('.')[1]
                if expected_attr not in workspace_attrs:
                    missing_wksp_attrs.append((inp, val))
    return invalid_inputs, invalid_outputs, missing_attrs, missing_wksp_attrs
Return FISSFC validation information on config for a certain entity
38,934
def _confirm_prompt(message, prompt="\nAre you sure? [y/yes (default: no)]: ",
                    affirmations=("Y", "Yes", "yes", "y")):
    """Display a message, then a confirmation prompt; return True if the
    user responds with one of the affirmations."""
    return input(message + prompt) in affirmations
Display a message then confirmation prompt and return true if the user responds with one of the affirmations .
38,935
def _nonempty_project(string):
    """Argparse validator ensuring a project value is provided."""
    value = str(string)
    if not value:
        raise argparse.ArgumentTypeError(
            "No project provided and no default project configured")
    return value
Argparse validator for ensuring a workspace is provided
38,936
def _entity_paginator(namespace, workspace, etype, page_size=500,
                      filter_terms=None, sort_direction="asc"):
    """Page through the get_entities_query endpoint to fetch all entities
    in the workspace without crashing."""
    all_entities = []
    # The first page's metadata tells us how many filtered pages exist.
    r = fapi.get_entities_query(namespace, workspace, etype, page=1,
                                page_size=page_size,
                                sort_direction=sort_direction,
                                filter_terms=filter_terms)
    fapi._check_response_code(r, 200)
    body = r.json()
    total_pages = body['resultMetadata']['filteredPageCount']
    all_entities.extend(body['results'])

    for page in range(2, total_pages + 1):
        r = fapi.get_entities_query(namespace, workspace, etype, page=page,
                                    page_size=page_size,
                                    sort_direction=sort_direction,
                                    filter_terms=filter_terms)
        fapi._check_response_code(r, 200)
        all_entities.extend(r.json()['results'])
    return all_entities
Pages through the get_entities_query endpoint to get all entities in the workspace without crashing .
38,937
def __cmd_to_func(cmd):
    """Return the function object in this module matching cmd, or None."""
    func = getattr(sys.modules[__name__], cmd, None)
    # Only callables explicitly tagged with 'fiss_cmd' are dispatchable.
    if func and not hasattr(func, 'fiss_cmd'):
        func = None
    return func
Returns the function object in this module matching cmd .
38,938
def _valid_headerline(l):
    """Return True if the given string is a valid loadfile header."""
    if not l:
        return False
    headers = l.split('\t')
    tsplit = headers[0].split(':')
    if len(tsplit) != 2:
        return False
    kind, etype = tsplit
    if kind in ('entity', 'update'):
        return etype in ('participant_id', 'participant_set_id',
                         'sample_id', 'sample_set_id',
                         'pair_id', 'pair_set_id')
    if kind == 'membership':
        if len(headers) < 2:
            return False
        # Membership loadfiles name the set id, then the member id column.
        return etype.replace('set_', '') == headers[1]
    return False
return true if the given string is a valid loadfile header
38,939
def _batch_load(project, workspace, headerline, entity_data, chunk_size=500):
    """Submit a large number of entity updates in batches of chunk_size.

    Returns 0 on success, 1 if the loadfile header is invalid.
    """
    if fcconfig.verbosity:
        print("Batching " + str(len(entity_data)) + " updates to Firecloud...")

    if not _valid_headerline(headerline):
        eprint("Invalid loadfile header:\n" + headerline)
        return 1

    update_type = ("membership" if headerline.startswith("membership")
                   else "entitie")
    etype = headerline.split('\t')[0].split(':')[1].replace("_id", "")

    # BUG FIX: ceiling division; int(len/chunk)+1 reported one batch too
    # many whenever len(entity_data) was an exact multiple of chunk_size.
    total = (len(entity_data) + chunk_size - 1) // chunk_size

    batch = 0
    for i in range(0, len(entity_data), chunk_size):
        batch += 1
        if fcconfig.verbosity:
            print("Updating {0} {1}s {2}-{3}, batch {4}/{5}".format(
                etype, update_type, i + 1,
                min(i + chunk_size, len(entity_data)), batch, total))
        this_data = headerline + '\n' + '\n'.join(entity_data[i:i + chunk_size])
        r = fapi.upload_entities(project, workspace, this_data)
        fapi._check_response_code(r, 200)
    return 0
Submit a large number of entity updates in batches of chunk_size
38,940
def create_payload(entities):
    """Create a TSV payload describing entities.

    All entities must share one type; raises ValueError otherwise.
    """
    types = {e.etype for e in entities}
    if len(types) != 1:
        raise ValueError("Can't create payload with " + str(len(types))
                         + " types")

    # Union of every attribute name seen across the entities.
    all_attrs = set()
    for e in entities:
        all_attrs.update(set(e.attrs.keys()))
    all_attrs = list(all_attrs)

    header = "entity:" + entities[0].etype + "_id"
    lines = ['\t'.join([header] + all_attrs)]
    for e in entities:
        # Missing attributes are emitted as empty cells.
        row = [e.entity_id] + [e.attrs.get(a, "") for a in all_attrs]
        lines.append('\t'.join(row))
    return '\n'.join(lines) + '\n'
Create a tsv payload describing entities .
38,941
def create_loadfile(entities, f):
    """Create a payload for the entities and save it to file f."""
    with open(f, 'w') as out:
        out.write(Entity.create_payload(entities))
Create payload and save to file .
38,942
def needs_gcloud(self):
    """Return True if gcloud is unavailable and needed for authentication."""
    suffix = ['google-cloud-sdk', 'bin']
    if platform.system() != "Windows":
        gcloud_default_path = os.path.join(os.path.expanduser('~'), *suffix)
    else:
        gcloud_default_path = os.path.join(os.environ['LOCALAPPDATA'],
                                           'Google', 'Cloud SDK', *suffix)
    # gcloud is needed unless running on App Engine, its default install
    # dir is already on PATH, or which() can locate it.
    on_appengine = os.getenv('SERVER_SOFTWARE',
                             '').startswith('Google App Engine/')
    return (not on_appengine
            and gcloud_default_path not in os.environ["PATH"].split(os.pathsep)
            and which('gcloud') is None)
Returns true if gcloud is unavailable and needed for authentication .
38,943
def action(arguments):
    """Run mogrify: transform each input file in place via a temp file.

    Most of the real work happens in convert.transform_file.
    """
    for input_file in arguments.input_files:
        logging.info(input_file)
        with common.atomic_write(input_file.name,
                                 file_factory=common.FileType('wt')) as tf:
            convert.transform_file(input_file, tf, arguments)
        if hasattr(input_file, 'close'):
            input_file.close()
Run mogrify. Most of the action is in convert; this just creates a temp file for the output.
38,944
def all_unambiguous(sequence_str):
    """Return all unambiguous versions of sequence_str.

    Each character is expanded via _AMBIGUOUS_MAP (characters without an
    entry stand for themselves).
    """
    result = [[]]
    for c in sequence_str:
        expansions = _AMBIGUOUS_MAP.get(c, c)
        result = [prefix + [a] for prefix in result for a in expansions]
    return [''.join(chars) for chars in result]
All unambiguous versions of sequence_str
38,945
def moving_average(iterable, n):
    """Yield the moving average over windows of size n.

    Adapted from the Python collections module documentation.
    """
    it = iter(iterable)
    window = collections.deque(itertools.islice(it, n - 1))
    window.appendleft(0)
    total = sum(window)
    for elem in it:
        total += elem - window.popleft()
        window.append(elem)
        yield total / float(n)
From Python collections module documentation
38,946
def parse_barcode_file(fp, primer=None, header=False):
    """Load (specimen, barcode[, primer]) records from a CSV file.

    Maps every unambiguous expansion of barcode+primer to its specimen in
    a trie.  Raises ValueError on duplicate sequences.
    """
    tr = trie.trie()
    reader = csv.reader(fp)
    if header:
        next(reader)  # skip the header row

    for record in (rec for rec in reader if rec):
        specimen, barcode = record[:2]
        # A per-record primer column is used when no global primer given.
        pr = primer if primer is not None else record[2]
        for sequence in all_unambiguous(barcode + pr):
            if sequence in tr:
                # BUG FIX: the message was never formatted -- the values
                # were passed as extra ValueError args rather than being
                # substituted into the template.
                raise ValueError(
                    "Duplicate sample: {0}, {1} both have {2}".format(
                        specimen, tr[sequence], sequence))
            logging.info('%s->%s', sequence, specimen)
            tr[sequence] = specimen
    return tr
Load label barcode primer records from a CSV file .
38,947
def action(arguments):
    """Given parsed arguments, filter input files.

    Builds a chain of sequence filters from the command-line options,
    streams records through it, and writes pass/fail statistics.
    """
    # --quality-window-mean-qual only makes sense with --quality-window.
    if arguments.quality_window_mean_qual and not arguments.quality_window:
        raise ValueError("--quality-window-mean-qual specified without "
                         "--quality-window")

    # Barcode matching relies on Biopython's trie modules.
    if trie is None or triefind is None:
        raise ValueError('Missing Bio.trie and/or Bio.triefind modules. Cannot continue')

    filters = []
    input_type = fileformat.from_handle(arguments.sequence_file)
    output_type = fileformat.from_handle(arguments.output_file)
    with arguments.sequence_file as fp:
        # Pair FASTA with a qual file if provided, else parse directly.
        if arguments.input_qual:
            sequences = QualityIO.PairedFastaQualIterator(fp,
                                                          arguments.input_qual)
        else:
            sequences = SeqIO.parse(fp, input_type)

        listener = RecordEventListener()
        if arguments.details_out:
            rh = RecordReportHandler(arguments.details_out, arguments.argv,
                                     arguments.details_comment)
            rh.register_with(listener)
        # Fire a 'read' event for every record that flows through.
        sequences = listener.iterable_hook('read', sequences)

        # Assemble the filter chain from the requested options.
        if arguments.min_mean_quality and input_type == 'fastq':
            qfilter = QualityScoreFilter(arguments.min_mean_quality)
            filters.append(qfilter)
        if arguments.max_length:
            max_length_filter = MaxLengthFilter(arguments.max_length)
            filters.append(max_length_filter)
        if arguments.min_length:
            min_length_filter = MinLengthFilter(arguments.min_length)
            filters.append(min_length_filter)
        if arguments.max_ambiguous is not None:
            max_ambig_filter = MaxAmbiguousFilter(arguments.max_ambiguous)
            filters.append(max_ambig_filter)
        if arguments.pct_ambiguous is not None:
            pct_ambig_filter = PctAmbiguousFilter(arguments.pct_ambiguous)
            filters.append(pct_ambig_filter)
        if arguments.ambiguous_action:
            ambiguous_filter = AmbiguousBaseFilter(arguments.ambiguous_action)
            filters.append(ambiguous_filter)
        if arguments.quality_window:
            min_qual = (arguments.quality_window_mean_qual or
                        arguments.min_mean_quality)
            window_filter = WindowQualityScoreFilter(arguments.quality_window,
                                                     min_qual)
            # The window filter is placed at the front of the chain.
            filters.insert(0, window_filter)
        if arguments.barcode_file:
            with arguments.barcode_file:
                tr = parse_barcode_file(arguments.barcode_file,
                                        arguments.primer,
                                        arguments.barcode_header)
            f = PrimerBarcodeFilter(tr)
            filters.append(f)
            # NOTE(review): map_out handling appears nested under
            # barcode_file in the flattened original -- confirm nesting.
            if arguments.map_out:
                barcode_writer = csv.writer(
                    arguments.map_out,
                    quoting=getattr(csv, arguments.quoting),
                    lineterminator='\n')

                def barcode_handler(record, sample, barcode=None):
                    barcode_writer.writerow((record.id, sample))
                listener.register_handler('found_barcode', barcode_handler)

        # Wire each filter to the listener and chain the generators.
        for f in filters:
            f.listener = listener
            sequences = f.filter_records(sequences)

        # Fire a 'write' event for every surviving record, then write out.
        sequences = listener.iterable_hook('write', sequences)
        with arguments.output_file:
            SeqIO.write(sequences, arguments.output_file, output_type)

    # Emit a per-filter TSV summary report.
    rpt_rows = (f.report_dict() for f in filters)
    with arguments.report_out as fp:
        writer = csv.DictWriter(fp, BaseFilter.report_fields,
                                lineterminator='\n', delimiter='\t')
        writer.writeheader()
        writer.writerows(rpt_rows)
Given parsed arguments filter input files .
38,948
def iterable_hook(self, name, iterable):
    """Fire an event named `name` for each item in iterable, yielding
    every item through unchanged."""
    for record in iterable:
        self(name, record)
        yield record
Fire an event named name with each item in iterable
38,949
def _found_barcode(self, record, sample, barcode=None):
    """Hook called when a barcode is found: annotate the in-progress
    report row with the matched sample."""
    # Sanity-check that the event refers to the record currently reported.
    assert record.id == self.current_record['sequence_name']
    self.current_record['sample'] = sample
Hook called when barcode is found
38,950
def filter_records(self, records):
    """Apply the filter to records, yielding those that pass and
    tallying pass/fail counts."""
    for record in records:
        try:
            filtered = self.filter_record(record)
            assert(filtered)
            # Track whether the record survived modified or untouched.
            if filtered.seq == record.seq:
                self.passed_unchanged += 1
            else:
                self.passed_changed += 1
            yield filtered
        except FailedFilter as e:
            self.failed += 1
            if self.listener:
                self.listener('failed_filter', record,
                              filter_name=self.name, value=e.value)
Apply the filter to records
38,951
def filter_record(self, record):
    """Filter a record, truncating at or dropping on the first 'N'."""
    nloc = record.seq.find('N')
    if nloc == -1:
        # No ambiguous base: pass through untouched.
        return record
    if self.action == 'truncate':
        return record[:nloc]
    if self.action == 'drop':
        raise FailedFilter()
    assert False
Filter a record truncating or dropping at an N
38,952
def filter_record(self, record):
    """Filter a record, dropping any shorter than the minimum length."""
    if len(record) >= self.min_length:
        return record
    raise FailedFilter(len(record))
Filter record, dropping any that don't meet the minimum length.
38,953
def filter_record(self, record):
    """Filter a record, truncating any over the maximum length."""
    if len(record) >= self.max_length:
        return record[:self.max_length]
    return record
Filter record truncating any over some maximum length
38,954
def summarize_sequence_file(source_file, file_type=None):
    """Summarize a sequence file.

    Returns a tuple: (name, 'TRUE'/'FALSE' alignment flag, minimum
    sequence length, maximum sequence length, average length, count).
    """
    is_alignment = True
    avg_length = None
    min_length = sys.maxsize
    max_length = 0
    sequence_count = 0
    with common.FileType('rt')(source_file) as fp:
        if not file_type:
            file_type = fileformat.from_handle(fp)
        for record in SeqIO.parse(fp, file_type):
            sequence_count += 1
            sequence_length = len(record)
            # Any length disagreement means this is not an alignment.
            if max_length != 0 and sequence_length != max_length:
                is_alignment = False
            if sequence_length > max_length:
                max_length = sequence_length
            if sequence_length < min_length:
                min_length = sequence_length
            # Online (running) mean of sequence lengths.
            if sequence_count == 1:
                avg_length = float(sequence_length)
            else:
                avg_length += (sequence_length - avg_length) / sequence_count

    # Empty file: zero out the stats instead of reporting maxsize/None.
    if avg_length is None:
        min_length = max_length = avg_length = 0
    if sequence_count <= 1:
        is_alignment = False
    return (source_file, str(is_alignment).upper(), min_length, max_length,
            avg_length, sequence_count)
Summarizes a sequence file, returning a tuple containing: the name, whether the file is an alignment, minimum sequence length, maximum sequence length, average length, and number of sequences.
38,955
def _record_buffer(records, buffer_size=DEFAULT_BUFFER_SIZE):
    """Buffer for transform functions that need multiple passes over records.

    Pickles every record into a spooled temporary file (spilling to disk
    past buffer_size bytes) and yields a zero-argument callable; each call
    returns a fresh iterator replaying the buffered records.
    NOTE(review): written as a single-yield generator — presumably wrapped
    with contextlib.contextmanager at the original definition site, since
    callers use ``with _record_buffer(...) as r``; confirm.
    """
    with tempfile.SpooledTemporaryFile(buffer_size, mode='wb+') as tf:
        pickler = pickle.Pickler(tf)
        for record in records:
            pickler.dump(record)

        def record_iter():
            # Rewind and replay every pickled record.
            tf.seek(0)
            unpickler = pickle.Unpickler(tf)
            while True:
                try:
                    yield unpickler.load()
                except EOFError:
                    break
        yield record_iter
Buffer for transform functions which require multiple passes through data .
38,956
def dashes_cleanup(records, prune_chars='.:?~'):
    """Convert undesirable gap-like characters (any of prune_chars) to '-'."""
    logging.info(
        "Applying _dashes_cleanup: converting any of '{}' to '-'.".format(
            prune_chars))
    # One translation table, applied in a single C-level pass per record.
    table = {ord(char): '-' for char in prune_chars}
    for record in records:
        cleaned = str(record.seq).translate(table)
        record.seq = Seq(cleaned, record.seq.alphabet)
        yield record
Take an alignment and convert any undesirable characters such as ? or ~ to - .
38,957
def deduplicate_sequences(records, out_file):
    """Drop records whose sequence duplicates one already seen.

    Sequences are compared by SEGUID checksum; the first occurrence is
    yielded, later ones are discarded.  If out_file is not None, once the
    stream is exhausted one line per unique sequence is written listing
    the IDs that shared it, and out_file is closed.
    """
    logging.info('Applying _deduplicate_sequences generator: '
                 'removing any duplicate records with identical sequences.')
    checksum_sequences = collections.defaultdict(list)
    for record in records:
        checksum = seguid(record.seq)
        sequences = checksum_sequences[checksum]
        if not sequences:
            # First time this sequence has been seen.
            yield record
        sequences.append(record.id)
    if out_file is not None:
        # 'with' ensures out_file is closed after the ID map is written.
        with out_file:
            for sequences in checksum_sequences.values():
                out_file.write('%s\n' % (' '.join(sequences),))
Remove any duplicate records with identical sequences; keep the first instance seen and discard additional occurrences .
38,958
def deduplicate_taxa(records):
    """Drop records whose ID duplicates one already seen; first wins.

    IDs of the form ``<taxid>|...`` are compared by their integer taxid
    prefix; any other ID is compared verbatim.

    BUG FIX: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; only ValueError (a non-integer prefix)
    is expected here.  Also uses logging.warning (logging.warn is
    deprecated).
    """
    logging.info('Applying _deduplicate_taxa generator: ' +
                 'removing any duplicate records with identical IDs.')
    taxa = set()
    for record in records:
        taxid = record.id
        if '|' in record.id:
            try:
                taxid = int(record.id.split("|")[0])
            except ValueError:
                # Non-integer prefix: fall back to comparing the full ID.
                logging.warning("Unable to parse integer taxid from %s", taxid)
        if taxid in taxa:
            continue
        taxa.add(taxid)
        yield record
Remove any duplicate records with identical IDs; keep the first instance seen and discard additional occurrences .
38,959
def first_name_capture(records):
    """Keep only the first whitespace-delimited word as a record's name.

    Records whose description contains whitespace are re-emitted with an
    empty description; all others pass through untouched.
    """
    logging.info('Applying _first_name_capture generator: '
                 'making sure ID only contains the first whitespace-delimited '
                 'word.')
    has_space = re.compile(r'\s+')
    for record in records:
        if not has_space.search(record.description):
            yield record
        else:
            yield SeqRecord(record.seq, id=record.id, description="")
Take only the first whitespace - delimited word as the name of the sequence . Essentially removes any extra text from the sequence s description .
38,960
def include_from_file(records, handle):
    """Yield only records whose (stripped) ID appears in handle, one ID per line."""
    wanted = {line.strip() for line in handle}
    for record in records:
        if record.id.strip() in wanted:
            yield record
Filter the records keeping only sequences whose ID is contained in the handle .
38,961
def drop_columns(records, slices):
    """Remove from every record the alignment columns covered by any slice."""
    for record in records:
        length = len(record)
        # Indices to remove, resolved against this record's own length.
        dropped = {index
                   for s in slices
                   for index in range(*s.indices(length))}
        selectors = [index not in dropped for index in range(length)]
        record.seq = Seq(''.join(itertools.compress(record.seq, selectors)),
                         record.seq.alphabet)
        yield record
Drop all columns present in slices from records
38,962
def cut_sequences_relative(records, slices, record_id):
    """Cut records to ``slices`` indexed by the non-gap positions of the
    record whose ID is record_id.

    The stream is buffered because it must be traversed twice: once to
    find the reference record, once to cut everything.  Raises ValueError
    when record_id is not present.
    """
    with _record_buffer(records) as r:
        try:
            record = next(i for i in r() if i.id == record_id)
        except StopIteration:
            raise ValueError("Record with id {0} not found.".format(record_id))
        # Translate ungapped coordinates on the reference record into
        # alignment (gapped) coordinates.
        new_slices = _update_slices(record, slices)
        for record in multi_cut_sequences(r(), new_slices):
            yield record
Cuts records to slices indexed by non - gap positions in record_id
38,963
def multi_mask_sequences(records, slices):
    """Replace the characters covered by ``slices`` with gap characters ('-')."""
    for record in records:
        positions = range(len(record))
        # Union of all indices touched by any slice.
        masked = set()
        for s in slices:
            masked.update(positions[s])
        seq = ''.join('-' if index in masked else base
                      for index, base in enumerate(str(record.seq)))
        record.seq = Seq(seq)
        yield record
Replace characters sliced by slices with gap characters .
38,964
def reverse_sequences(records):
    """Reverse the order of sites in each sequence.

    Yields a new SeqRecord per input with the sequence reversed and the
    id/name/description copied over.
    """
    logging.info('Applying _reverse_sequences generator: '
                 'reversing the order of sites in sequences.')
    for record in records:
        rev_record = SeqRecord(record.seq[::-1], id=record.id,
                               name=record.name,
                               description=record.description)
        # Copy per-site annotations across, flipped to match the new order.
        _reverse_annotations(record, rev_record)
        yield rev_record
Reverse the order of sites in sequences .
38,965
def reverse_complement_sequences(records):
    """Transform each sequence into its reverse complement.

    Yields a new SeqRecord per input with id/name/description copied over.
    """
    logging.info('Applying _reverse_complement_sequences generator: '
                 'transforming sequences into reverse complements.')
    for record in records:
        rev_record = SeqRecord(record.seq.reverse_complement(),
                               id=record.id, name=record.name,
                               description=record.description)
        # Copy per-site annotations across, flipped to match the new order.
        _reverse_annotations(record, rev_record)
        yield rev_record
Transform sequences into reverse complements .
38,966
def ungap_sequences(records, gap_chars=GAP_TABLE):
    """Strip all gap characters from every record of an alignment."""
    logging.info('Applying _ungap_sequences generator: removing all gap characters')
    yield from (ungap_all(record, gap_chars) for record in records)
Remove gaps from sequences given an alignment .
38,967
def _update_id ( record , new_id ) : old_id = record . id record . id = new_id record . description = re . sub ( '^' + re . escape ( old_id ) , new_id , record . description ) return record
Update a record id to new_id also modifying the ID in record . description
38,968
def name_append_suffix(records, suffix):
    """Append ``suffix`` to every sequence ID (description is kept in sync)."""
    logging.info('Applying _name_append_suffix generator: '
                 'Appending suffix ' + suffix + ' to all '
                 'sequence IDs.')
    for record in records:
        yield _update_id(record, record.id + suffix)
Given a set of sequences append a suffix for each sequence s name .
38,969
def name_insert_prefix(records, prefix):
    """Prepend ``prefix`` to every sequence ID (description is kept in sync)."""
    logging.info('Applying _name_insert_prefix generator: '
                 'Inserting prefix ' + prefix + ' for all '
                 'sequence IDs.')
    for record in records:
        yield _update_id(record, prefix + record.id)
Given a set of sequences insert a prefix for each sequence s name .
38,970
def name_include(records, filter_regex):
    """Yield only records whose ID or description matches filter_regex.

    NOTE(review): the match is case-sensitive here even though upstream
    docs describe it as case-insensitive — confirm intended behaviour.
    """
    logging.info('Applying _name_include generator: '
                 'including only IDs matching ' + filter_regex + ' in results.')
    pattern = re.compile(filter_regex)
    for record in records:
        if pattern.search(record.id) or pattern.search(record.description):
            yield record
Given a set of sequences filter out any sequences with names that do not match the specified regular expression . Ignore case .
38,971
def name_replace(records, search_regex, replace_pattern):
    """Apply re.sub(search_regex, replace_pattern) to every record's ID and
    description, keeping the two consistent."""
    pattern = re.compile(search_regex)
    for record in records:
        maybe_id = record.description.split(None, 1)[0]
        if maybe_id == record.id:
            # The ID is the first word of the description: rewrite the
            # description once and re-derive the ID from it.
            record.description = pattern.sub(replace_pattern,
                                             record.description)
            record.id = record.description.split(None, 1)[0]
        else:
            # ID and description diverge: rewrite each independently.
            record.id = pattern.sub(replace_pattern, record.id)
            record.description = pattern.sub(replace_pattern,
                                             record.description)
        yield record
Given a set of sequences replace all occurrences of search_regex with replace_pattern . Ignore case .
38,972
def seq_include(records, filter_regex):
    """Yield only records whose sequence matches filter_regex."""
    pattern = re.compile(filter_regex)
    for record in records:
        if pattern.search(str(record.seq)):
            yield record
Filter any sequences whose seq does not match the filter . Ignore case .
38,973
def head(records, head):
    """Yield the first N records; a leading '-' means all but the last N.

    ``head`` is the raw command-line string, e.g. "10" or "-3".
    """
    logging.info('Applying _head generator: '
                 'limiting results to top ' + head + ' records.')
    if head == '-0':
        # "-0" means drop the last zero records, i.e. keep everything.
        yield from records
    elif '-' in head:
        # Negative count: the total is needed first, so buffer the stream.
        with _record_buffer(records) as r:
            total = sum(1 for _ in r())
            stop = max(total + int(head), 0)
            yield from itertools.islice(r(), stop)
    else:
        yield from itertools.islice(records, int(head))
Limit results to the top N records . With the leading - print all but the last N records .
38,974
def tail(records, tail):
    """Yield the last N records; '+N' yields records starting with the Nth.

    ``tail`` is the raw command-line string, e.g. "10" or "+3".

    BUG FIX: the log message previously said "limiting results to top",
    copied from the head generator; this is the bottom/tail operation.
    """
    logging.info('Applying _tail generator: '
                 'limiting results to bottom ' + tail + ' records.')
    if tail == '+0':
        # "+0" would mean "start before the first record": keep everything.
        yield from records
    elif '+' in tail:
        # '+N' is 1-based: start emitting at the Nth record.
        start = int(tail) - 1
        yield from itertools.islice(records, start, None)
    else:
        # Plain N: the total is needed first, so buffer the stream.
        with _record_buffer(records) as r:
            record_count = sum(1 for _ in r())
            start_index = max(record_count - int(tail), 0)
            yield from itertools.islice(r(), start_index, None)
Limit results to the bottom N records . Use + N to output records starting with the Nth .
38,975
def gap_proportion(sequences, gap_chars='-'):
    """Return, per alignment column, the fraction of sequences gapped there.

    The first sequence fixes the expected column count; any later sequence
    of a different length raises ValueError.  An empty input yields [].
    """
    aln_len = None
    gaps = []
    sequence_count = 0
    for sequence in sequences:
        sequence_count += 1
        if aln_len is None:
            aln_len = len(sequence)
            gaps = [0] * aln_len
        elif len(sequence) != aln_len:
            raise ValueError(("Unexpected sequence length {0}. Is this "
                              "an alignment?").format(len(sequence)))
        for column, char in enumerate(sequence.seq):
            if char in gap_chars:
                gaps[column] += 1
    return [gap_count / float(sequence_count) for gap_count in gaps]
Generates a list with the proportion of gaps by index in a set of sequences .
38,976
def squeeze(records, gap_threshold=1.0):
    """Remove alignment columns whose gap proportion is >= gap_threshold.

    With the default threshold of 1.0, only columns that are gaps in every
    sequence are removed.  The stream is buffered because it is traversed
    twice: once to compute per-column gap proportions, once to emit the
    squeezed records.
    """
    with _record_buffer(records) as r:
        gap_proportions = gap_proportion(r())
        # True for columns to keep (gap fraction below the threshold).
        keep_columns = [g < gap_threshold for g in gap_proportions]
        for record in r():
            sequence = str(record.seq)
            squeezed = itertools.compress(sequence, keep_columns)
            yield SeqRecord(Seq(''.join(squeezed)), id=record.id,
                            description=record.description)
Remove any gaps that are present in the same position across all sequences in an alignment . Takes a second sequence iterator for determining gap positions .
38,977
def max_length_discard(records, max_length):
    """Discard any records longer than max_length.

    BUG FIX: the info log message was broken — it read "discarding records
    longer than " "." with no value; it now includes max_length, matching
    min_length_discard's message style.
    """
    logging.info('Applying _max_length_discard generator: '
                 'discarding records longer than %d.', max_length)
    for record in records:
        if len(record) > max_length:
            logging.debug('Discarding long sequence: %s, length=%d',
                          record.id, len(record))
        else:
            yield record
Discard any records that are longer than max_length .
38,978
def min_length_discard(records, min_length):
    """Discard any records shorter than min_length."""
    logging.info('Applying _min_length_discard generator: '
                 'discarding records shorter than %d.', min_length)
    for record in records:
        if len(record) >= min_length:
            yield record
        else:
            logging.debug('Discarding short sequence: %s, length=%d',
                          record.id, len(record))
Discard any records that are shorter than min_length .
38,979
def min_ungap_length_discard(records, min_length):
    """Discard records shorter than min_length once gap characters are removed."""
    return (record for record in records
            if len(ungap_all(record)) >= min_length)
Discard any records that are shorter than min_length after removing gaps .
38,980
def batch(iterable, chunk_size):
    """Yield successive lists of up to chunk_size items from iterable."""
    iterator = iter(iterable)
    chunk = list(itertools.islice(iterator, chunk_size))
    while chunk:
        yield chunk
        chunk = list(itertools.islice(iterator, chunk_size))
Return items from iterable in chunk_size bits .
38,981
def _validate_translation(self, aligned_prot, aligned_nucl):
    """Verify that aligned_nucl, read codon-by-codon, translates to aligned_prot.

    Gap codons ('---') must align with gap residues ('-').  Codons missing
    from the forward table (e.g. ambiguous bases) are accepted silently
    only when aligned to 'X'; otherwise behaviour follows
    self.unknown_action: 'fail' raises ValueError, 'warn' just logs.
    Returns True on success; raises ValueError on any mismatch.
    """
    codons = [''.join(i) for i in batch(str(aligned_nucl), 3)]
    for codon, aa in zip(codons, str(aligned_prot)):
        if codon == '---' and aa == '-':
            continue
        try:
            trans = self.translation_table.forward_table[codon]
            # ValueError raised here is NOT caught by the except below.
            if not trans == aa:
                raise ValueError("Codon {0} translates to {1}, not {2}".format(
                    codon, trans, aa))
        except (KeyError, CodonTable.TranslationError):
            # Codon not in the table: tolerate only when mapped to 'X'.
            if aa != 'X':
                if self.unknown_action == 'fail':
                    raise ValueError("Unknown codon: {0} mapped to {1}".format(
                        codon, aa))
                elif self.unknown_action == 'warn':
                    logging.warn('Cannot verify that unknown codon %s '
                                 'maps to %s', codon, aa)
    return True
Given a seq for protein and nucleotide ensure that the translation holds
38,982
def map_all(self, prot_alignment, nucl_sequences):
    """Map each protein record onto its nucleotide record, pairwise.

    Raises ValueError if either iterable is exhausted before the other.
    """
    for prot, nucl in itertools.zip_longest(prot_alignment, nucl_sequences):
        if prot is None:
            raise ValueError("Exhausted protein sequences")
        if nucl is None:
            raise ValueError("Exhausted nucleotide sequences")
        yield self.map_alignment(prot, nucl)
Convert protein sequences to nucleotide alignment
38,983
def from_extension(extension):
    """Map a file extension (with leading '.') to a BioPython format name.

    Raises ValueError for an extension without a leading period, and
    UnknownExtensionError for an unrecognized extension.
    """
    if not extension.startswith('.'):
        raise ValueError("Extensions must begin with a period.")
    try:
        return EXTENSION_TO_TYPE[extension.lower()]
    except KeyError:
        message = ("seqmagick does not know how to handle " +
                   "files with extensions like this: " + extension)
        raise UnknownExtensionError(message)
Look up the BioPython file type corresponding with input extension .
38,984
def from_filename(file_name):
    """Map a file name to its BioPython format, looking past compression suffixes."""
    stem, ext = os.path.splitext(file_name)
    if ext in COMPRESS_EXT:
        # e.g. "x.fasta.gz": use the extension underneath the compression one.
        ext = os.path.splitext(stem)[1]
    return from_extension(ext)
Look up the BioPython file type corresponding to an input file name .
38,985
def from_handle(fh, stream_default='fasta'):
    """Map a file-like object to a BioPython format name.

    The standard streams have no meaningful file name, so they fall back
    to stream_default.
    """
    standard_streams = (sys.stdin, sys.stdout, sys.stderr)
    if fh in standard_streams:
        return stream_default
    return from_filename(fh.name)
Look up the BioPython file type corresponding to a file - like object .
38,986
def parse_arguments(argv):
    """Build the seqmagick CLI parser, parse argv, and return
    (action_callable, parsed_arguments).

    Every subcommand module contributes its own subparser; the special
    'help' subcommand re-invokes parsing as '<action> -h' so argparse
    prints that action's help and exits.
    """
    parser = argparse.ArgumentParser(description='seqmagick - Manipulate ' +
                                     ' sequence files.', prog='seqmagick')
    parser.add_argument('-V', '--version', action='version',
                        version='seqmagick v' + version,
                        help="Print the version number and exit")
    parser.add_argument('-v', '--verbose', dest='verbosity', action='count',
                        default=1,
                        help="Be more verbose. Specify -vv or -vvv for even more")
    parser.add_argument('-q', '--quiet', action='store_const', const=0,
                        dest='verbosity', help="Suppress output")
    subparsers = parser.add_subparsers(dest='subparser_name')
    parser_help = subparsers.add_parser(
        'help', help='Detailed help for actions using help <action>')
    parser_help.add_argument('action')
    actions = {}
    # Register every subcommand module as its own subparser.
    for name, mod in subcommands.itermodules():
        subparser = subparsers.add_parser(name, help=mod.__doc__,
                                          description=mod.__doc__)
        mod.build_parser(subparser)
        actions[name] = mod.action
    arguments = parser.parse_args(argv)
    # Keep the raw argv around for downstream logging/reporting.
    arguments.argv = argv
    action = arguments.subparser_name
    if action == 'help':
        # Recurse so argparse shows the chosen action's own help text.
        return parse_arguments([str(arguments.action), '-h'])
    return actions[action], arguments
Extract command - line arguments for different actions .
38,987
def ungap_index_map(sequence, gap_chars='-'):
    """Map each index in the ungapped sequence to its index in the gapped one.

    Example: "a-b" -> {0: 0, 1: 2}.
    """
    mapping = {}
    ungapped = 0
    for gapped, char in enumerate(sequence):
        if char not in gap_chars:
            mapping[ungapped] = gapped
            ungapped += 1
    return mapping
Returns a dict mapping from an index in the ungapped sequence to an index in the gapped sequence .
38,988
def _iupac_ambiguous_equal ( ambig_base , unambig_base ) : iupac_translation = { 'A' : 'A' , 'C' : 'C' , 'G' : 'G' , 'T' : 'T' , 'U' : 'U' , 'R' : 'AG' , 'Y' : 'CT' , 'S' : 'GC' , 'W' : 'AT' , 'K' : 'GT' , 'M' : 'AC' , 'B' : 'CGT' , 'D' : 'AGT' , 'H' : 'ACT' , 'V' : 'ACG' , 'N' : 'ACGT' , '-' : '-' } for i in ( ambig_base , unambig_base ) : if not len ( i ) == 1 : raise ValueError ( "only one base may be passed." ) return unambig_base . upper ( ) in iupac_translation [ ambig_base . upper ( ) ]
Tests two bases for equality accounting for IUPAC ambiguous DNA
38,989
def hamming_distance(s1, s2, equality_function=operator.eq):
    """Count the positions at which s1 and s2 differ.

    ``equality_function`` decides per-position equality (default: ``==``).
    Raises ValueError when the inputs have different lengths.
    """
    if len(s1) != len(s2):
        raise ValueError("String lengths are not equal")
    return sum(1 for a, b in zip(s1, s2) if not equality_function(a, b))
Returns the hamming distance between two strings .
38,990
def trim(sequences, start, end):
    """Slice each input sequence to the [start, end) interval.

    Logs immediately; the slicing itself is lazy (a generator expression).
    """
    logging.info("Trimming from %d to %d", start, end)
    return (seq[start:end] for seq in sequences)
Slice the input sequences from start to end
38,991
def action(arguments):
    """Trim the alignment to the region bounded by the forward/reverse primers.

    Locates both primers (within arguments.max_hamming_distance), computes
    the trim coordinates (with or without the primers themselves per
    arguments.include_primers), re-reads the source, and writes the pruned
    alignment to the output file.
    """
    # Determine formats from explicit options or the file handles' names.
    source_format = (arguments.source_format or
                     fileformat.from_handle(arguments.source_file))
    output_format = (arguments.output_format or
                     fileformat.from_handle(arguments.output_file))
    with arguments.source_file:
        sequences = SeqIO.parse(
            arguments.source_file, source_format,
            alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
        (forward_start, forward_end), (reverse_start, reverse_end) = \
            locate_primers(sequences, arguments.forward_primer,
                           arguments.reverse_primer,
                           arguments.reverse_complement,
                           arguments.max_hamming_distance)
        if arguments.include_primers:
            start = forward_start
            end = reverse_end + 1
        else:
            start = forward_end + 1
            end = reverse_start
        # The first parse was consumed locating the primers: rewind and
        # parse again for the actual trimming pass.
        arguments.source_file.seek(0)
        sequences = SeqIO.parse(
            arguments.source_file, source_format,
            alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
        prune_action = _ACTIONS[arguments.prune_action]
        transformed_sequences = prune_action(sequences, start, end)
        with arguments.output_file:
            SeqIO.write(transformed_sequences, arguments.output_file,
                        output_format)
Trim the alignment as specified
38,992
def hash_starts_numeric(records):
    """Yield only records whose sequence's SHA-1 hex digest starts with a digit.

    BUG FIX: hashlib requires bytes; the original passed a str, which
    raises TypeError on Python 3.  The sequence is now UTF-8 encoded
    before hashing.
    """
    for record in records:
        seq_hash = hashlib.sha1(str(record.seq).encode('utf-8')).hexdigest()
        if seq_hash[0].isdigit():
            yield record
Very useful function that only accepts records with a numeric start to their sha - 1 hash .
38,993
def atomic_write(path, mode='wt', permissions=None, file_factory=None, **kwargs):
    """Open a file for atomic writing.

    Writes go to a named temporary file in the destination directory; on
    success the temp file is renamed over ``path`` (atomic on POSIX when
    source and target share a filesystem) and chmod'ed to ``permissions``
    (default: honor the umask).  On any error the temp file is removed and
    the exception re-raised.  A ``path`` of '-' writes directly to stdout.
    NOTE(review): written as a single-yield generator — presumably wrapped
    with contextlib.contextmanager at the original definition site; confirm.
    """
    if permissions is None:
        permissions = apply_umask()
    if path == '-':
        yield sys.stdout
    else:
        base_dir = os.path.dirname(path)
        # Use the final file name as the temp file's suffix so stray temp
        # files are identifiable.
        kwargs['suffix'] = os.path.basename(path)
        tf = tempfile.NamedTemporaryFile(dir=base_dir, mode=mode,
                                         delete=False, **kwargs)
        if file_factory is not None:
            # Let the caller wrap the temp path in a custom file object
            # (e.g. a compressing writer); close the raw handle first.
            tf.close()
            tf = file_factory(tf.name)
        try:
            with tf:
                yield tf
            os.rename(tf.name, path)
            os.chmod(path, permissions)
        except:
            # Clean up the temp file on ANY failure, then re-raise
            # (the bare except is deliberate: it always re-raises).
            os.remove(tf.name)
            raise
Open a file for atomic writing .
38,994
def typed_range(type_func, minimum, maximum):
    """Build an argparse type: parse with type_func, then require
    minimum <= value <= maximum.

    BUG FIX: the original condition ``not result >= minimum and
    result <= maximum`` parsed as ``(not result >= minimum) and (...)``,
    so values ABOVE maximum were silently accepted.
    """
    @functools.wraps(type_func)
    def inner(string):
        result = type_func(string)
        if not (minimum <= result <= maximum):
            raise argparse.ArgumentTypeError(
                "Please provide a value between {0} and {1}".format(
                    minimum, maximum))
        return result
    return inner
Require variables to be of the specified type between minimum and maximum
38,995
def partial_append_action(fn, argument_keys=None):
    """Build an argparse.Action that appends functools.partial(fn, **kwargs)
    to the destination list, where kwargs maps each name in argument_keys
    to the corresponding command-line value.

    nargs is forced to len(argument_keys) regardless of what the caller
    passes when registering the option.
    """
    if isinstance(argument_keys, str):
        # A single key may be given bare; normalize to a list.
        argument_keys = [argument_keys]
    argument_keys = argument_keys or []

    class PartialAppendAction(argparse.Action):
        def __init__(self, option_strings, dest, const=None, default=None,
                     required=False, help=None, type=None, metavar=None,
                     nargs=None, **kwargs):
            # nargs is overridden with the number of argument keys.
            super(PartialAppendAction, self).__init__(
                option_strings=option_strings, dest=dest,
                nargs=len(argument_keys), const=const, default=default,
                required=required, metavar=metavar, type=type, help=help,
                **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            # Copy so a shared default list is never mutated in place.
            items = copy.copy(getattr(namespace, self.dest, None)) or []
            if values is None:
                values = []
            elif not isinstance(values, list):
                values = [values]
            if len(argument_keys) != len(values):
                raise ValueError("Unexpected number of values")
            # Bind the collected values to fn by keyword and queue it up.
            kwargs = dict(list(zip(argument_keys, values)))
            f = functools.partial(fn, **kwargs)
            items.append(f)
            setattr(namespace, self.dest, items)
    return PartialAppendAction
Creates a new class extending argparse . Action which appends a partially - applied function to dest .
38,996
def positive_value(target_type):
    """Wrap target_type in an argparse type that rejects values below zero."""
    def inner(string):
        parsed = target_type(string)
        # 'not parsed >= 0' (rather than 'parsed < 0') also rejects NaN.
        if not parsed >= 0:
            raise argparse.ArgumentTypeError(
                "Invalid positive number: " + string)
        return parsed
    return inner
Wraps target_type in a function that requires the parsed argument be > = 0
38,997
def build_parser(parser):
    """Attach the shared convert/mogrify options plus the source/dest
    positional arguments; returns the parser for chaining."""
    add_options(parser)
    parser.add_argument('source_file', type=common.FileType('rt'),
                        help="Input sequence file")
    parser.add_argument('dest_file', help="Output file")
    return parser
Add shared arguments to the convert or mogrify parser .
38,998
def parse_fasta(filepath):
    """Yield (header, sequence) tuples from a FASTA file.

    Blank lines are skipped; a record's sequence may span multiple lines.

    BUG FIX: an empty (or all-blank) file previously yielded a spurious
    ('', '') tuple; nothing is yielded for such files now.
    NOTE(review): headers have EVERY '>' removed (str.replace), not just
    the leading one — preserved for compatibility, but confirm intent.
    """
    with open(filepath, 'r') as f:
        header = ''
        seqs = []
        for line in f:
            line = line.strip()
            if not line:
                continue
            if line[0] == '>':
                if header:
                    # A new header starts the next record: emit the
                    # previous one first.
                    yield header, ''.join(seqs)
                    seqs = []
                header = line.replace('>', '')
            else:
                seqs.append(line)
        # Emit the final record, but only if the file had any content.
        if header or seqs:
            yield header, ''.join(seqs)
Parse a fasta file returning a generator yielding tuples of fasta headers to sequences .
38,999
def fasta_format_check(fasta_path, logger):
    """Validate that fasta_path contains FASTA-formatted nucleotide data.

    Raises Exception (after logging through ``logger``) when the first
    non-blank line is not a '>' header, when a sequence line contains
    characters outside VALID_NUCLEOTIDES, or when the file holds no
    nucleotide data at all.  Logs a summary on success.

    BUG FIX: line_count previously only advanced on sequence lines —
    blank and header lines hit ``continue`` before the increment — so
    reported line numbers were wrong.  Every line is counted now.
    """
    header_count = 0
    line_count = 1
    nt_count = 0
    with open(fasta_path) as f:
        for l in f:
            l = l.strip()
            if l == '':
                line_count += 1
                continue
            if l[0] == '>':
                header_count += 1
                line_count += 1
                continue
            if header_count == 0 and l[0] != '>':
                error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.'.format(line_count=line_count)
                logger.error(error_msg)
                raise Exception(error_msg)
            non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES
            if len(non_nucleotide_chars_in_line) > 0:
                error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}'.format(
                    line=line_count,
                    non_nt_chars=', '.join(
                        [x for x in non_nucleotide_chars_in_line]))
                logger.error(error_msg)
                raise Exception(error_msg)
            nt_count += len(l)
            line_count += 1
    if nt_count == 0:
        error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path)
        logger.error(error_msg)
        raise Exception(error_msg)
    logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count))
Check that a file is valid FASTA format .