idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
22,100
def raise_error ( self , error ) : exc = JSONRPCError ( error ) if self . raises_errors : raise exc return exc
Recreates the exception .
22,101
def handle ( self , environ , start_response ) : request = Request ( environ ) request . max_content_length = self . max_content_length access_control_headers = { 'Access-Control-Allow-Methods' : 'POST' , 'Access-Control-Allow-Origin' : self . allow_origin , 'Access-Control-Allow-Headers' : 'Content-Type, X-Requested-With, Accept, Origin' } if request . method == 'OPTIONS' : response = Response ( headers = access_control_headers ) elif request . method == 'POST' : msg = request . stream . read ( ) context = self . _queue_class ( ) self . messages . put ( ( context , msg ) ) response = Response ( context . get ( ) , headers = access_control_headers ) else : response = Response ( 'Only POST supported' , 405 ) return response ( environ , start_response )
WSGI handler function .
22,102
def receive_one_message ( self ) : context , message = self . transport . receive_message ( ) if callable ( self . trace ) : self . trace ( ' , context , message ) def handle_message ( context , message ) : try : request = self . protocol . parse_request ( message ) except tinyrpc . exc . RPCError as e : response = e . error_respond ( ) else : response = self . dispatcher . dispatch ( request ) if response is not None : result = response . serialize ( ) if callable ( self . trace ) : self . trace ( '<--' , context , result ) self . transport . send_reply ( context , result ) self . _spawn ( handle_message , context , message )
Handle a single request .
22,103
def raise_error ( self , error ) : ex = exc . RPCError ( 'Error calling remote procedure: %s' % error . error [ 'message' ] ) if self . raises_errors : raise ex return ex
Raises the exception in the client .
22,104
def create ( cls , zmq_context , endpoint ) : socket = zmq_context . socket ( zmq . ROUTER ) socket . bind ( endpoint ) return cls ( socket )
Create new server transport .
22,105
def create ( cls , zmq_context , endpoint ) : socket = zmq_context . socket ( zmq . REQ ) socket . connect ( endpoint ) return cls ( socket )
Create new client transport .
22,106
def receive_message ( self ) : if not ( 'REQUEST_METHOD' in os . environ and os . environ [ 'REQUEST_METHOD' ] == 'POST' ) : print ( "Status: 405 Method not Allowed; only POST is accepted" ) exit ( 0 ) content_length = int ( os . environ [ 'CONTENT_LENGTH' ] ) request_json = sys . stdin . read ( content_length ) request_json = urlparse . unquote ( request_json ) return None , request_json
Receive a message from the transport .
22,107
def send_reply ( self , context , reply ) : print ( "Status: 200 OK" ) print ( "Content-Type: application/json" ) print ( "Cache-Control: no-cache" ) print ( "Pragma: no-cache" ) print ( "Content-Length: %d" % len ( reply ) ) print ( ) print ( reply . decode ( ) )
Sends a reply to a client .
22,108
def _spawn ( self , func , * args , ** kwargs ) : gevent . spawn ( func , * args , ** kwargs )
Spawn a handler function .
22,109
def call ( self , method , args , kwargs , one_way = False ) : req = self . protocol . create_request ( method , args , kwargs , one_way ) rep = self . _send_and_handle_reply ( req , one_way ) if one_way : return return rep . result
Calls the requested method and returns the result .
22,110
def call_all ( self , requests ) : threads = [ ] if 'gevent' in sys . modules : import gevent for r in requests : req = self . protocol . create_request ( r . method , r . args , r . kwargs ) tr = r . transport . transport if len ( r ) == 4 else None threads . append ( gevent . spawn ( self . _send_and_handle_reply , req , False , tr , True ) ) gevent . joinall ( threads ) return [ t . value for t in threads ] else : for r in requests : req = self . protocol . create_request ( r . method , r . args , r . kwargs ) tr = r . transport . transport if len ( r ) == 4 else None threads . append ( self . _send_and_handle_reply ( req , False , tr , True ) ) return threads
Calls the methods in the request in parallel .
22,111
def batch_call ( self , calls ) : req = self . protocol . create_batch_request ( ) for call_args in calls : req . append ( self . protocol . create_request ( * call_args ) ) return self . _send_and_handle_reply ( req )
Experimental use at your own peril .
22,112
def public ( name = None ) : if callable ( name ) : f = name f . _rpc_public_name = f . __name__ return f def _ ( f ) : f . _rpc_public_name = name or f . __name__ return f return _
Decorator . Mark a method as eligible for registration by a dispatcher .
22,113
def public ( self , name = None ) : if callable ( name ) : self . add_method ( name ) return name def _ ( f ) : self . add_method ( f , name = name ) return f return _
Convenient decorator .
22,114
def add_subdispatch ( self , dispatcher , prefix = '' ) : self . subdispatchers . setdefault ( prefix , [ ] ) . append ( dispatcher )
Adds a subdispatcher possibly in its own namespace .
22,115
def get_method ( self , name ) : if name in self . method_map : return self . method_map [ name ] for prefix , subdispatchers in self . subdispatchers . items ( ) : if name . startswith ( prefix ) : for sd in subdispatchers : try : return sd . get_method ( name [ len ( prefix ) : ] ) except exc . MethodNotFoundError : pass raise exc . MethodNotFoundError ( name )
Retrieve a previously registered method .
22,116
def register_instance ( self , obj , prefix = '' ) : dispatch = self . __class__ ( ) for name , f in inspect . getmembers ( obj , lambda f : callable ( f ) and hasattr ( f , '_rpc_public_name' ) ) : dispatch . add_method ( f , f . _rpc_public_name ) self . add_subdispatch ( dispatch , prefix )
Create new subdispatcher and register all public object methods on it .
22,117
def dispatch ( self , request ) : if hasattr ( request , 'create_batch_response' ) : results = [ self . _dispatch ( req ) for req in request ] response = request . create_batch_response ( ) if response is not None : response . extend ( results ) return response else : return self . _dispatch ( request )
Fully handle request .
22,118
def pop ( self , key , default = None ) : with self . lock : try : item = OrderedDict . __getitem__ ( self , key ) del self [ key ] return item [ 0 ] except KeyError : return default
Get item from the dict and remove it .
22,119
def get ( self , key , default = None , with_age = False ) : " Return the value for key if key is in the dictionary, else default. " try : return self . __getitem__ ( key , with_age ) except KeyError : if with_age : return default , None else : return default
Return the value for key if key is in the dictionary else default .
22,120
def reload ( self , callback = None , errback = None ) : return self . load ( reload = True , callback = callback , errback = errback )
Reload record data from the API .
22,121
def load ( self , callback = None , errback = None , reload = False ) : if not reload and self . data : raise RecordException ( 'record already loaded' ) def success ( result , * args ) : self . _parseModel ( result ) if callback : return callback ( self ) else : return self return self . _rest . retrieve ( self . parentZone . zone , self . domain , self . type , callback = success , errback = errback )
Load record data from the API .
22,122
def delete ( self , callback = None , errback = None ) : if not self . data : raise RecordException ( 'record not loaded' ) def success ( result , * args ) : if callback : return callback ( result ) else : return result return self . _rest . delete ( self . parentZone . zone , self . domain , self . type , callback = success , errback = errback )
Delete the record from the zone including all advanced configuration meta data etc .
22,123
def qps ( self , callback = None , errback = None ) : if not self . data : raise RecordException ( 'record not loaded' ) stats = Stats ( self . parentZone . config ) return stats . qps ( zone = self . parentZone . zone , domain = self . domain , type = self . type , callback = callback , errback = errback )
Return the current QPS for this record
22,124
def addAnswers ( self , answers , callback = None , errback = None , ** kwargs ) : if not self . data : raise RecordException ( 'record not loaded' ) orig_answers = self . data [ 'answers' ] new_answers = self . _rest . _getAnswersForBody ( answers ) orig_answers . extend ( new_answers ) return self . update ( answers = orig_answers , callback = callback , errback = errback , ** kwargs )
Add answers to the record .
22,125
def createFromAPIKey ( self , apikey , maybeWriteDefault = False ) : self . _data = { 'default_key' : 'default' , 'keys' : { 'default' : { 'key' : apikey , 'desc' : 'imported API key' } } } self . _keyID = 'default' self . _doDefaults ( ) if maybeWriteDefault : path = os . path . expanduser ( self . DEFAULT_CONFIG_FILE ) self . write ( path )
Create a basic config from a single API key
22,126
def loadFromFile ( self , path ) : if '~' in path : path = os . path . expanduser ( path ) f = open ( path ) body = f . read ( ) f . close ( ) self . _path = path self . loadFromString ( body )
Load JSON config file from disk at the given path
22,127
def write ( self , path = None ) : if not self . _path and not path : raise ConfigException ( 'no config path given' ) if path : self . _path = path if '~' in self . _path : self . _path = os . path . expanduser ( self . _path ) f = open ( self . _path , 'w' ) f . write ( json . dumps ( self . _data ) ) f . close ( )
Write config data to disk . If this config object already has a path it will write to it . If it doesn t one must be passed during this call .
22,128
def useKeyID ( self , keyID ) : if keyID not in self . _data [ 'keys' ] : raise ConfigException ( 'keyID does not exist: %s' % keyID ) self . _keyID = keyID
Use the given API key config specified by keyID during subsequent API calls
22,129
def getKeyConfig ( self , keyID = None ) : k = keyID if keyID is not None else self . _keyID if not k or k not in self . _data [ 'keys' ] : raise ConfigException ( 'request key does not exist: %s' % k ) return self . _data [ 'keys' ] [ k ]
Get key configuration specified by keyID or current keyID .
22,130
def isKeyWriteLocked ( self , keyID = None ) : kcfg = self . getKeyConfig ( keyID ) return 'writeLock' in kcfg and kcfg [ 'writeLock' ] is True
Determine if a key config is write locked .
22,131
def getAPIKey ( self , keyID = None ) : kcfg = self . getKeyConfig ( keyID ) if 'key' not in kcfg : raise ConfigException ( 'invalid config: missing api key' ) return kcfg [ 'key' ]
Retrieve the NS1 API Key for the given keyID
22,132
def getEndpoint ( self ) : port = '' endpoint = '' keyConfig = self . getKeyConfig ( ) if 'port' in keyConfig : port = ':' + keyConfig [ 'port' ] elif self . _data [ 'port' ] != self . PORT : port = ':' + self . _data [ 'port' ] if 'endpoint' in keyConfig : endpoint = keyConfig [ 'endpoint' ] else : endpoint = self . _data [ 'endpoint' ] return 'https://%s%s/%s/' % ( endpoint , port , self . _data [ 'api_version' ] )
Retrieve the NS1 API Endpoint URL that will be used for requests .
22,133
def load ( self , callback = None , errback = None , reload = False ) : if not reload and self . data : raise NetworkException ( 'Network already loaded' ) def success ( result , * args ) : self . data = result self . id = result [ 'id' ] self . name = result [ 'name' ] self . report = self . _rest . report ( self . id ) if callback : return callback ( self ) else : return self if self . id is None : if self . name is None : raise NetworkException ( 'Must at least specify an id or name' ) else : self . id = [ network for network in self . _rest . list ( ) if network [ 'name' ] == self . name ] [ 0 ] [ 'id' ] return self . _rest . retrieve ( self . id , callback = success , errback = errback )
Load network data from the API .
22,134
def delete ( self , callback = None , errback = None ) : return self . _rest . delete ( self . id , callback = callback , errback = errback )
Delete the Network and all associated addresses
22,135
def new_address ( self , prefix , type , callback = None , errback = None , ** kwargs ) : if not self . data : raise NetworkException ( 'Network not loaded' ) return Address ( self . config , prefix , type , self ) . create ( ** kwargs )
Create a new address space in this Network
22,136
def load ( self , callback = None , errback = None , reload = False ) : if not reload and self . data : raise AddressException ( 'Address already loaded' ) def success ( result , * args ) : self . data = result self . id = result [ 'id' ] self . prefix = result [ 'prefix' ] self . type = result [ 'type' ] self . network = Network ( self . config , id = result [ 'network_id' ] ) if self . type != 'host' : self . report = self . _rest . report ( self . id ) children = self . _rest . retrieve_children ( self . id ) self . children = [ Address ( self . config , id = child [ 'id' ] ) for child in children if len ( children ) > 0 ] try : parent = self . _rest . retrieve_parent ( self . id ) self . parent = Address ( self . config , id = parent [ 'id' ] ) except ResourceException : pass if callback : return callback ( self ) else : return self if self . id is None : if self . prefix is None or self . type is None or self . network is None : raise AddressException ( 'Must at least specify an id or prefix, type and network' ) else : network_id = self . network . id try : self . id = [ address for address in self . _rest . list ( ) if address [ 'prefix' ] == self . prefix and address [ 'type' ] == self . type and address [ 'network_id' ] == network_id ] [ 0 ] [ 'id' ] except IndexError : raise AddressException ( "Could not find address by prefix. It may not exist, or is a child address. " "Use the topmost parent prefix or specify ID" ) return self . _rest . retrieve ( self . id , callback = success , errback = errback )
Load address data from the API .
22,137
def load ( self , callback = None , errback = None , reload = False ) : if not reload and self . data : raise ScopegroupException ( 'Scope Group already loaded' ) def success ( result , * args ) : self . data = result self . id = result [ 'id' ] self . dhcp4 = result [ 'dhcp4' ] self . dhcp6 = result [ 'dhcp6' ] self . name = result [ 'name' ] self . service_group_id = result [ 'service_group_id' ] if callback : return callback ( self ) else : return self if self . id is None : if self . dhcp4 is None or self . dhcp6 is None or self . name is None or self . service_group_id is None : raise AddressException ( 'Must at least specify an id or name and service_group_id' ) else : try : self . id = [ scope_group for scope_group in self . _rest . list ( ) if scope_group [ 'name' ] == self . name and scope_group [ 'service_group_id' ] == self . service_group_id ] [ 0 ] [ 'id' ] except IndexError : raise AddressException ( "Could not find Scope Group by name and service_group_id. It may not exist" ) return self . _rest . retrieve ( self . id , callback = success , errback = errback )
Load Scopegroup data from the API .
22,138
def search ( self , q = None , has_geo = False , callback = None , errback = None ) : if not self . data : raise ZoneException ( 'zone not loaded' ) return self . _rest . search ( self . zone , q , has_geo , callback , errback )
Search within a zone for specific metadata . Zone must already be loaded .
22,139
def delete ( self , callback = None , errback = None ) : return self . _rest . delete ( self . zone , callback = callback , errback = errback )
Delete the zone and ALL records it contains .
22,140
def createLinkToSelf ( self , new_zone , callback = None , errback = None , ** kwargs ) : zone = Zone ( self . config , new_zone ) kwargs [ 'link' ] = self . data [ 'zone' ] return zone . create ( callback = callback , errback = errback , ** kwargs )
Create a new linked zone linking to ourselves . All records in this zone will then be available as linked records in the new zone .
22,141
def cloneRecord ( self , existing_domain , new_domain , rtype , zone = None , callback = None , errback = None ) : if zone is None : zone = self . zone if not new_domain . endswith ( zone ) : new_domain = new_domain + '.' + zone def onSaveNewRecord ( new_data ) : if zone != self . zone : pZone = Zone ( self . config , zone ) else : pZone = self new_rec = Record ( pZone , new_domain , rtype ) new_rec . _parseModel ( new_data ) if callback : return callback ( new_rec ) else : return new_rec def onLoadRecord ( old_rec ) : data = old_rec . data data [ 'zone' ] = zone data [ 'domain' ] = new_domain restapi = Records ( self . config ) return restapi . create_raw ( zone , new_domain , rtype , data , callback = onSaveNewRecord , errback = errback ) return self . loadRecord ( existing_domain , rtype , callback = onLoadRecord , errback = errback )
Clone the given record to a new record such that their configs are identical .
22,142
def loadRecord ( self , domain , rtype , callback = None , errback = None ) : rec = Record ( self , domain , rtype ) return rec . load ( callback = callback , errback = errback )
Load a high level Record object from a domain within this Zone .
22,143
def qps ( self , callback = None , errback = None ) : stats = Stats ( self . config ) return stats . qps ( zone = self . zone , callback = callback , errback = errback )
Return the current QPS for this zone
22,144
def usage ( self , callback = None , errback = None , ** kwargs ) : stats = Stats ( self . config ) return stats . usage ( zone = self . zone , callback = callback , errback = errback , ** kwargs )
Return the current usage information for this zone
22,145
def load ( self , callback = None , errback = None , reload = False ) : if not reload and self . data : raise MonitorException ( 'monitor already loaded' ) def success ( result , * args ) : self . data = result if callback : return callback ( self ) else : return self return self . _rest . retrieve ( self . data [ 'id' ] , callback = success , errback = errback )
Load monitor data from the API .
22,146
def delete ( self , callback = None , errback = None ) : return self . _rest . delete ( self . data [ 'id' ] , callback = callback , errback = errback )
Delete the monitor
22,147
def update ( self , callback = None , errback = None , ** kwargs ) : if not self . data : raise MonitorException ( 'monitor not loaded' ) def success ( result , * args ) : self . data = result if callback : return callback ( self ) else : return self return self . _rest . update ( self . data [ 'id' ] , { } , callback = success , errback = errback , ** kwargs )
Update monitor configuration . Pass a list of keywords and their values to update .
22,148
def zones ( self ) : import ns1 . rest . zones return ns1 . rest . zones . Zones ( self . config )
Return a new raw REST interface to zone resources
22,149
def records ( self ) : import ns1 . rest . records return ns1 . rest . records . Records ( self . config )
Return a new raw REST interface to record resources
22,150
def addresses ( self ) : import ns1 . rest . ipam return ns1 . rest . ipam . Addresses ( self . config )
Return a new raw REST interface to address resources
22,151
def networks ( self ) : import ns1 . rest . ipam return ns1 . rest . ipam . Networks ( self . config )
Return a new raw REST interface to network resources
22,152
def scope_groups ( self ) : import ns1 . rest . ipam return ns1 . rest . ipam . Scopegroups ( self . config )
Return a new raw REST interface to scope_group resources
22,153
def stats ( self ) : import ns1 . rest . stats return ns1 . rest . stats . Stats ( self . config )
Return a new raw REST interface to stats resources
22,154
def datasource ( self ) : import ns1 . rest . data return ns1 . rest . data . Source ( self . config )
Return a new raw REST interface to datasource resources
22,155
def datafeed ( self ) : import ns1 . rest . data return ns1 . rest . data . Feed ( self . config )
Return a new raw REST interface to feed resources
22,156
def monitors ( self ) : import ns1 . rest . monitoring return ns1 . rest . monitoring . Monitors ( self . config )
Return a new raw REST interface to monitors resources
22,157
def notifylists ( self ) : import ns1 . rest . monitoring return ns1 . rest . monitoring . NotifyLists ( self . config )
Return a new raw REST interface to notify list resources
22,158
def plan ( self ) : import ns1 . rest . account return ns1 . rest . account . Plan ( self . config )
Return a new raw REST interface to account plan
22,159
def loadZone ( self , zone , callback = None , errback = None ) : import ns1 . zones zone = ns1 . zones . Zone ( self . config , zone ) return zone . load ( callback = callback , errback = errback )
Load an existing zone into a high level Zone object .
22,160
def createZone ( self , zone , zoneFile = None , callback = None , errback = None , ** kwargs ) : import ns1 . zones zone = ns1 . zones . Zone ( self . config , zone ) return zone . create ( zoneFile = zoneFile , callback = callback , errback = errback , ** kwargs )
Create a new zone and return an associated high level Zone object . Several optional keyword arguments are available to configure the SOA record .
22,161
def loadRecord ( self , domain , type , zone = None , callback = None , errback = None , ** kwargs ) : import ns1 . zones if zone is None : parts = domain . split ( '.' ) if len ( parts ) <= 2 : zone = '.' . join ( parts ) else : zone = '.' . join ( parts [ 1 : ] ) z = ns1 . zones . Zone ( self . config , zone ) return z . loadRecord ( domain , type , callback = callback , errback = errback , ** kwargs )
Load an existing record into a high level Record object .
22,162
def loadMonitors ( self , callback = None , errback = None , ** kwargs ) : import ns1 . monitoring monitors_list = self . monitors ( ) . list ( callback , errback ) return [ ns1 . monitoring . Monitor ( self . config , m ) for m in monitors_list ]
Load all monitors
22,163
def createMonitor ( self , callback = None , errback = None , ** kwargs ) : import ns1 . monitoring monitor = ns1 . monitoring . Monitor ( self . config ) return monitor . create ( callback = callback , errback = errback , ** kwargs )
Create a monitor
22,164
def loadNetworkbyID ( self , id , callback = None , errback = None ) : import ns1 . ipam network = ns1 . ipam . Network ( self . config , id = id ) return network . load ( callback = callback , errback = errback )
Load an existing Network by ID into a high level Network object
22,165
def loadNetworkbyName ( self , name , callback = None , errback = None ) : import ns1 . ipam network = ns1 . ipam . Network ( self . config , name = name ) return network . load ( callback = callback , errback = errback )
Load an existing Network by name into a high level Network object
22,166
def loadAddressbyID ( self , id , callback = None , errback = None ) : import ns1 . ipam address = ns1 . ipam . Address ( self . config , id = id ) return address . load ( callback = callback , errback = errback )
Load an existing address by ID into a high level Address object
22,167
def loadAddressbyPrefix ( self , prefix , type , network_id , callback = None , errback = None ) : import ns1 . ipam network = ns1 . ipam . Network ( self . config , id = network_id ) . load ( ) address = ns1 . ipam . Address ( self . config , prefix = prefix , type = type , network = network ) return address . load ( callback = callback , errback = errback )
Load an existing address by prefix type and network into a high level Address object
22,168
def loadScopeGroupbyID ( self , id , callback = None , errback = None ) : import ns1 . ipam scope_group = ns1 . ipam . Scopegroup ( self . config , id = id ) return scope_group . load ( callback = callback , errback = errback )
Load an existing Scope Group by ID into a high level Scope Group object
22,169
def loadScopeGroupbyName ( self , name , service_group_id , callback = None , errback = None ) : import ns1 . ipam scope_group = ns1 . ipam . Scopegroup ( self . config , name = name , service_group_id = service_group_id ) return scope_group . load ( callback = callback , errback = errback )
Load an existing Scope Group by name and service group id into a high level Scope Group object
22,170
def generateDHCPOptionsTemplate ( self , address_family ) : from ns1 . ipam import DHCPOptions options = { } for option in DHCPOptions . OPTIONS [ address_family ] : options [ option ] = "" return options
Generate boilerplate dictionary to hold dhcp options
22,171
def loadDHCPOptions ( self , address_family , options ) : import ns1 . ipam return ns1 . ipam . DHCPOptions ( address_family , options )
Create a high level DHCPOptions object
22,172
def fill_tree ( comments ) : if not comments : return it = iter ( comments ) first = next ( it ) extra_path_items = imap ( _mark_as_root_path , first . root_path ) return chain ( extra_path_items , [ first ] , it )
Insert extra comments in the comments list so that the root path of the first comment is always visible . Use this in comments pagination to fill in the tree information .
22,173
def annotate_tree_properties ( comments ) : if not comments : return it = iter ( comments ) old = next ( it ) old . open = True last = set ( ) for c in it : if old . last_child_id : last . add ( old . last_child_id ) if c . pk in last : c . last = True if c . depth > old . depth : c . open = True else : old . close = list ( range ( old . depth - c . depth ) ) if old . root_id != c . root_id : old . close . append ( len ( old . close ) ) c . open = True last = set ( ) yield old old = c old . close = range ( old . depth ) yield old
iterate through nodes and adds some magic properties to each of them representing opening list of children and closing it
22,174
def run_spades ( self , stop_at_first_success = False ) : n50 = { } kmer_to_dir = { } for k in self . spades_kmers : tmpdir = tempfile . mkdtemp ( prefix = self . outdir + '.tmp.spades.' + str ( k ) + '.' , dir = os . getcwd ( ) ) kmer_to_dir [ k ] = tmpdir ok , errs = self . run_spades_once ( k , tmpdir ) if ok : contigs_fasta = os . path . join ( tmpdir , 'contigs.fasta' ) contigs_fai = contigs_fasta + '.fai' common . syscall ( self . samtools . exe ( ) + ' faidx ' + contigs_fasta , verbose = self . verbose ) stats = pyfastaq . tasks . stats_from_fai ( contigs_fai ) if stats [ 'N50' ] != 0 : n50 [ k ] = stats [ 'N50' ] if stop_at_first_success : break if len ( n50 ) > 0 : if self . verbose : print ( '[assemble]\tkmer\tN50' ) for k in sorted ( n50 ) : print ( '[assemble]' , k , n50 [ k ] , sep = '\t' ) best_k = None for k in sorted ( n50 ) : if best_k is None or n50 [ k ] >= n50 [ best_k ] : best_k = k assert best_k is not None for k , directory in kmer_to_dir . items ( ) : if k == best_k : if self . verbose : print ( '[assemble] using assembly with kmer' , k ) os . rename ( directory , self . outdir ) else : shutil . rmtree ( directory ) else : raise Error ( 'Error running SPAdes. Output directories are:\n ' + '\n ' . join ( kmer_to_dir . values ( ) ) + '\nThe reason why should be in the spades.log file in each directory.' )
Runs spades on all kmers . Each a separate run because SPAdes dies if any kmer does not work . Chooses the best assembly to be the one with the biggest N50
22,175
def run_canu ( self ) : cmd = self . _make_canu_command ( self . outdir , 'canu' ) ok , errs = common . syscall ( cmd , verbose = self . verbose , allow_fail = False ) if not ok : raise Error ( 'Error running Canu.' ) original_contigs = os . path . join ( self . outdir , 'canu.contigs.fasta' ) renamed_contigs = os . path . join ( self . outdir , 'contigs.fasta' ) Assembler . _rename_canu_contigs ( original_contigs , renamed_contigs ) original_gfa = os . path . join ( self . outdir , 'canu.contigs.gfa' ) renamed_gfa = os . path . join ( self . outdir , 'contigs.gfa' ) os . rename ( original_gfa , renamed_gfa )
Runs canu instead of spades
22,176
def aligned_read_to_read ( read , revcomp = True , qual = None , ignore_quality = False ) : if read . qual is None or ignore_quality : if qual is None or ignore_quality : seq = pyfastaq . sequences . Fasta ( read . qname , common . decode ( read . seq ) ) else : seq = pyfastaq . sequences . Fastq ( read . qname , common . decode ( read . seq ) , qual * read . query_length ) else : if qual is None : seq = pyfastaq . sequences . Fastq ( read . qname , common . decode ( read . seq ) , common . decode ( read . qual ) ) else : seq = pyfastaq . sequences . Fastq ( read . qname , common . decode ( read . seq ) , qual * read . query_length ) if read . is_reverse and revcomp : seq . revcomp ( ) return seq
Returns Fasta or Fastq sequence from pysam aligned read
22,177
def _get_ref_lengths ( self ) : sam_reader = pysam . Samfile ( self . bam , "rb" ) return dict ( zip ( sam_reader . references , sam_reader . lengths ) )
Gets the length of each reference sequence from the header of the bam . Returns dict name = > length
22,178
def _get_contigs_to_use ( self , contigs_to_use ) : if type ( contigs_to_use ) == set : return contigs_to_use elif contigs_to_use is None : return set ( ) else : f = pyfastaq . utils . open_file_read ( contigs_to_use ) contigs_to_use = set ( [ line . rstrip ( ) for line in f ] ) pyfastaq . utils . close ( f ) return contigs_to_use
If contigs_to_use is a set returns that set . If it s None returns an empty set . Otherwise assumes it s a file name and gets names from the file
22,179
def _all_reads_from_contig ( self , contig , fout ) : sam_reader = pysam . Samfile ( self . bam , "rb" ) for read in sam_reader . fetch ( contig ) : print ( mapping . aligned_read_to_read ( read , ignore_quality = not self . fastq_out ) , file = fout )
Gets all reads from contig called contig and writes to fout
22,180
def _get_all_unmapped_reads ( self , fout ) : sam_reader = pysam . Samfile ( self . bam , "rb" ) for read in sam_reader . fetch ( until_eof = True ) : if read . is_unmapped : print ( mapping . aligned_read_to_read ( read , ignore_quality = not self . fastq_out ) , file = fout )
Writes all unmapped reads to fout
22,181
def _exclude_region ( self , contig , start , end , fout ) : sam_reader = pysam . Samfile ( self . bam , "rb" ) exclude_interval = pyfastaq . intervals . Interval ( start , end - 1 ) for read in sam_reader . fetch ( contig ) : read_interval = pyfastaq . intervals . Interval ( read . pos , read . reference_end - 1 ) if not read_interval . intersects ( exclude_interval ) : print ( mapping . aligned_read_to_read ( read , ignore_quality = not self . fastq_out ) , file = fout )
Writes reads not mapping to the given region of contig start and end as per python convention
22,182
def _get_region ( self , contig , start , end , fout , min_length = 250 ) : sam_reader = pysam . Samfile ( self . bam , "rb" ) trimming_end = ( start == 0 ) for read in sam_reader . fetch ( contig , start , end ) : read_interval = pyfastaq . intervals . Interval ( read . pos , read . reference_end - 1 ) seq = mapping . aligned_read_to_read ( read , ignore_quality = not self . fastq_out , revcomp = False ) if trimming_end : bases_off_start = 0 bases_off_end = max ( 0 , read . reference_end - 1 - end ) seq = seq . subseq ( 0 , read . query_alignment_end - bases_off_end ) else : bases_off_start = max ( 0 , start - read . pos + 1 ) seq = seq . subseq ( bases_off_start + read . query_alignment_start , len ( seq ) ) if read . is_reverse : seq . revcomp ( ) if len ( seq ) >= min_length : print ( seq , file = fout )
Writes reads mapping to given region of contig trimming part of read not in the region
22,183
def _get_contigs_to_keep ( self , filename ) : if filename is None : return set ( ) with open ( filename ) as f : return { line . rstrip ( ) for line in f }
Returns a set of names from file called filename . If filename is None returns an empty set
22,184
def _remove_small_contigs(self, infile, outfile, keep=None):
    """Copy infile to outfile, dropping contigs shorter than
    self.min_contig_length unless their name is in keep.

    Returns (all_names, removed) -- sets of every contig name seen and of
    the names that were dropped.
    """
    if keep is None:
        keep = set()
    all_names = set()
    removed = set()
    seq_reader = pyfastaq.sequences.file_reader(infile)
    fout = pyfastaq.utils.open_file_write(outfile)
    for seq in seq_reader:
        all_names.add(seq.id)
        # Keep a contig if it is long enough or explicitly whitelisted.
        if len(seq) < self.min_contig_length and seq.id not in keep:
            removed.add(seq.id)
        else:
            print(seq, file=fout)
    pyfastaq.utils.close(fout)
    return all_names, removed
Writes a new file with small contigs removed. Returns two sets: all contig names, and the names of the removed contigs
22,185
def _containing_contigs ( self , hits ) : return { hit . ref_name for hit in hits if self . _contains ( hit ) }
Given a list of hits all with same query returns a set of the contigs containing that query
22,186
def _expand_containing_using_transitivity ( self , containing_contigs ) : for name in containing_contigs : containing_contigs [ name ] = self . _get_all_containing ( containing_contigs , name ) return containing_contigs
This uses "a contained in b" and "b contained in c" to force "a contained in c", just in case "a contained in c" wasn't already found by nucmer
22,187
def _collapse_list_of_sets ( self , sets ) : found = True while found : found = False to_intersect = None for i in range ( len ( sets ) ) : for j in range ( len ( sets ) ) : if i == j : continue elif sets [ i ] . intersection ( sets [ j ] ) : to_intersect = i , j break if to_intersect is not None : break if to_intersect is not None : found = True sets [ i ] . update ( sets [ j ] ) sets . pop ( j ) return sets
Input is a list of sets . Merges any intersecting sets in the list
22,188
def _longest_contig ( self , contig_set , contig_lengths ) : longest_name = None max_length = - 1 for name in contig_set : if contig_lengths [ name ] > max_length : longest_name = name max_length = contig_lengths [ name ] assert max_length != - 1 assert longest_name is not None return longest_name
Returns the name of the longest contig from the set of names contig_set . contig_lengths is expected to be a dictionary of contig name = > length .
22,189
def check_files_exist(filenames):
    """Check that every file in the list `filenames` exists.

    Prints each missing file to stderr and raises Error if any are missing;
    returns None when all files exist.

    Fix: replaced the non-idiomatic `if len(x):` with a plain truthiness test.
    """
    files_not_found = [x for x in filenames if not os.path.exists(x)]
    if files_not_found:
        for filename in files_not_found:
            print('File not found: "', filename, '"', sep='', file=sys.stderr)
        raise Error('File(s) not found. Cannot continue')
Dies (raises Error) if any file in the list of filenames does not exist
22,190
def get_contigs(self):
    """Load self.contigs_fasta and return a dict of
    contig_name -> pyfastaq.sequences.Fasta object.
    """
    contig_dict = {}
    pyfastaq.tasks.file_to_dict(self.contigs_fasta, contig_dict)
    return contig_dict
Returns a dictionary of contig_name - > pyfastaq . Sequences . Fasta object
22,191
def circular_contigs(self):
    """Return the set of contig names judged circular, dispatching on which
    assembler produced the input files. Unknown assemblers yield an empty set.
    """
    if self.assembler == 'spades':
        if self.contigs_fastg is not None:
            # SPAdes before 3.6.1 wrote a contigs fastg file.
            return self._circular_contigs_from_spades_before_3_6_1(self.contigs_fastg)
        if self.contigs_paths is not None and self.assembly_graph_fastg is not None:
            # Later SPAdes versions write a paths file plus an assembly graph.
            return self._circular_contigs_from_spades_after_3_6_1(self.assembly_graph_fastg, self.contigs_paths)
        return set()
    if self.assembler == 'canu':
        return self._circular_contigs_from_canu_gfa(self.contigs_gfa)
    return set()
Returns a set of the contig names that are circular
22,192
def _run_nucmer(self, ref, qry, outfile):
    """Run nucmer (via pymummer) comparing qry against ref, writing the
    coords output to outfile. Thresholds come from the instance's nucmer_*
    attributes.
    """
    runner = pymummer.nucmer.Runner(
        ref,
        qry,
        outfile,
        min_id=self.nucmer_min_id,
        min_length=self.nucmer_min_length,
        diagdiff=self.nucmer_diagdiff,
        maxmatch=True,
        breaklen=self.nucmer_breaklen,
        simplify=True,
        verbose=self.verbose,
    )
    runner.run()
Run nucmer of new assembly vs original assembly
22,193
def _load_nucmer_hits(self, infile):
    """Parse the nucmer coords file infile and return a dict of
    reference name -> list of hits to that reference.
    """
    hits = {}
    for alignment in pymummer.coords_file.reader(infile):
        hits.setdefault(alignment.ref_name, []).append(alignment)
    return hits
Returns dict ref name = > list of nucmer hits from infile
22,194
def _is_at_ref_start ( self , nucmer_hit ) : hit_coords = nucmer_hit . ref_coords ( ) return hit_coords . start < self . ref_end_tolerance
Returns True iff the hit is close enough to the start of the reference sequence
22,195
def _is_at_ref_end ( self , nucmer_hit ) : hit_coords = nucmer_hit . ref_coords ( ) return hit_coords . end >= nucmer_hit . ref_length - self . ref_end_tolerance
Returns True iff the hit is close enough to the end of the reference sequence
22,196
def _is_at_qry_start ( self , nucmer_hit ) : hit_coords = nucmer_hit . qry_coords ( ) return hit_coords . start < self . qry_end_tolerance
Returns True iff the hit is close enough to the start of the query sequence
22,197
def _is_at_qry_end ( self , nucmer_hit ) : hit_coords = nucmer_hit . qry_coords ( ) return hit_coords . end >= nucmer_hit . qry_length - self . qry_end_tolerance
Returns True iff the hit is close enough to the end of the query sequence
22,198
def _get_hit_nearest_ref_start ( self , hits ) : nearest_to_start = hits [ 0 ] for hit in hits [ 1 : ] : if hit . ref_coords ( ) . start < nearest_to_start . ref_coords ( ) . start : nearest_to_start = hit return nearest_to_start
Returns the hit nearest to the start of the ref sequence from the input list of hits
22,199
def _get_hit_nearest_ref_end ( self , hits ) : nearest_to_end = hits [ 0 ] for hit in hits [ 1 : ] : if hit . ref_coords ( ) . end > nearest_to_end . ref_coords ( ) . end : nearest_to_end = hit return nearest_to_end
Returns the hit nearest to the end of the ref sequence from the input list of hits