idx int64 0 252k | question stringlengths 48 5.28k | target stringlengths 5 1.23k |
|---|---|---|
5,600 | def getArgumentParser ( self , configManager , config ) : argParser = argparse . ArgumentParser ( self . description ) for configName , configDict in configManager . configs . items ( ) : cmdName = configName . replace ( "_" , "-" ) argParser . add_argument ( '--%s' % cmdName , default = None , help = configDict [ 'des... | May be overridden to provide custom functionality . Constructs an argparse . ArgumentParser used to parse configuration options from the command line . |
5,601 | def parse ( self , configManager , config ) : argParser = self . getArgumentParser ( configManager , config ) return vars ( argParser . parse_args ( ) ) | Parses commandline arguments given a series of configuration options . |
5,602 | def candidates ( text ) : for Pmatch in finditer ( TARGET , text ) : P = Pmatch . group ( 1 ) B = bool ( match ( NEWLINE , Pmatch . group ( 5 ) ) ) start = Pmatch . start ( ) end = Pmatch . end ( ) Lmatch = search ( LTOKEN , text [ max ( 0 , start - BUFSIZE ) : start ] ) if not Lmatch : continue L = word_tokenize ( " "... | Given a text string get candidates and context for feature extraction and classification |
5,603 | def extract_one ( self , L , P , R ) : yield "*bias*" if match ( QUOTE , L ) : L = QUOTE_TOKEN elif isnumberlike ( L ) : L = NUMBER_TOKEN else : yield "len(L)={}" . format ( min ( len ( L ) , CLIP ) ) if "." in L : yield "L:*period*" if not self . nocase : cf = case_feature ( R ) if cf : yield "L:{}'" . format ( cf ) L... | Given left context L punctuation mark P and right context R extract features . Probability distributions for any quantile - based features will not be modified . |
5,604 | def fit ( self , text , epochs = EPOCHS ) : logger . debug ( "Extracting features and classifications." ) Phi = [ ] Y = [ ] for ( L , P , R , gold , _ ) in Detector . candidates ( text ) : Phi . append ( self . extract_one ( L , P , R ) ) Y . append ( gold ) self . classifier . fit ( Y , Phi , epochs ) logger . debug (... | Given a string text use it to train the segmentation classifier for epochs iterations . |
5,605 | def predict ( self , L , P , R ) : phi = self . extract_one ( L , P , R ) return self . classifier . predict ( phi ) | Given a left context L punctuation mark P and right context R return True iff this observation is hypothesized to be a sentence boundary . |
5,606 | def segments ( self , text ) : start = 0 for ( L , P , R , B , end ) in Detector . candidates ( text ) : if B : continue if self . predict ( L , P , R ) : yield text [ start : end ] . rstrip ( ) start = end yield text [ start : ] . rstrip ( ) | Given a string of text return a generator yielding each hypothesized sentence string |
5,607 | def evaluate ( self , text ) : cx = BinaryConfusion ( ) for ( L , P , R , gold , _ ) in Detector . candidates ( text ) : guess = self . predict ( L , P , R ) cx . update ( gold , guess ) if not gold and guess : logger . debug ( "False pos.: L='{}', R='{}'." . format ( L , R ) ) elif gold and not guess : logger . debug ... | Given a string of text compute confusion matrix for the classification task . |
5,608 | def scale ( mask , mag_scale , outfile = None ) : msg = "'mask.scale': ADW 2018-05-05" DeprecationWarning ( msg ) mask_new = hp . UNSEEN * np . ones ( len ( mask ) ) mask_new [ mask == 0. ] = 0. mask_new [ mask > 0. ] = mask [ mask > 0. ] + mag_scale if outfile is not None : pix = np . nonzero ( mask_new > 0. ) [ 0 ] d... | Scale the completeness depth of a mask such that mag_new = mag + mag_scale . Input is a full HEALPix map . Optionally write out the scaled mask as an sparse HEALPix map . |
5,609 | def mask_roi_unique ( self ) : A = np . vstack ( [ self . mask_1 . mask_roi_sparse , self . mask_2 . mask_roi_sparse ] ) . T B = A [ np . lexsort ( A . T [ : : - 1 ] ) ] return B [ np . concatenate ( ( [ True ] , np . any ( B [ 1 : ] != B [ : - 1 ] , axis = 1 ) ) ) ] | Assemble a set of unique magnitude tuples for the ROI |
5,610 | def mask_roi_digi ( self ) : A = np . vstack ( [ self . mask_1 . mask_roi_sparse , self . mask_2 . mask_roi_sparse ] ) . T B = self . mask_roi_unique AA = np . ascontiguousarray ( A ) BB = np . ascontiguousarray ( B ) dt = np . dtype ( ( np . void , AA . dtype . itemsize * AA . shape [ 1 ] ) ) a = AA . view ( dt ) . ra... | Get the index of the unique magnitude tuple for each pixel in the ROI . |
5,611 | def _fracRoiSparse ( self ) : self . frac_roi_sparse = np . min ( [ self . mask_1 . frac_roi_sparse , self . mask_2 . frac_roi_sparse ] , axis = 0 ) return self . frac_roi_sparse | Calculate an approximate pixel coverage fraction from the two masks . |
5,612 | def _pruneMMD ( self , minimum_solid_angle ) : logger . info ( 'Pruning mask based on minimum solid angle of %.2f deg^2' % ( minimum_solid_angle ) ) solid_angle_mmd = self . solid_angle_mmd * ( self . solid_angle_mmd > minimum_solid_angle ) if solid_angle_mmd . sum ( ) == 0 : msg = "Pruned mask contains no solid angle.... | Remove regions of magnitude - magnitude space where the unmasked solid angle is statistically insufficient to estimate the background . |
5,613 | def _pruneCMD ( self , minimum_solid_angle ) : logger . info ( 'Pruning mask based on minimum solid angle of %.2f deg^2' % ( minimum_solid_angle ) ) self . solid_angle_cmd *= self . solid_angle_cmd > minimum_solid_angle if self . solid_angle_cmd . sum ( ) == 0 : msg = "Pruned mask contains no solid angle." logger . err... | Remove regions of color - magnitude space where the unmasked solid angle is statistically insufficient to estimate the background . |
5,614 | def plot ( self ) : msg = "'%s.plot': ADW 2018-05-05" % self . __class__ . __name__ DeprecationWarning ( msg ) import ugali . utils . plotting mask = hp . UNSEEN * np . ones ( hp . nside2npix ( self . nside ) ) mask [ self . roi . pixels ] = self . mask_roi_sparse mask [ mask == 0. ] = hp . UNSEEN ugali . utils . plott... | Plot the magnitude depth . |
5,615 | def build_expected ( dynamizer , expected ) : ret = { } for k , v in six . iteritems ( expected ) : if is_null ( v ) : ret [ k ] = { 'Exists' : False , } else : ret [ k ] = { 'Exists' : True , 'Value' : dynamizer . encode ( v ) , } return ret | Build the Expected parameters from a dict |
5,616 | def build_expression_values ( dynamizer , expr_values , kwargs ) : if expr_values : values = expr_values return dynamizer . encode_keys ( values ) elif kwargs : values = dict ( ( ( ':' + k , v ) for k , v in six . iteritems ( kwargs ) ) ) return dynamizer . encode_keys ( values ) | Build ExpressionAttributeValues from a value or kwargs |
5,617 | def connect_to_host ( cls , host = 'localhost' , port = 8000 , is_secure = False , session = None , access_key = None , secret_key = None , ** kwargs ) : warnings . warn ( "connect_to_host is deprecated and will be removed. " "Use connect instead." ) if session is None : session = botocore . session . get_session ( ) i... | Connect to a specific host . |
5,618 | def call ( self , command , ** kwargs ) : for hook in self . _hooks [ 'precall' ] : hook ( self , command , kwargs ) op = getattr ( self . client , command ) attempt = 0 while True : try : data = op ( ** kwargs ) break except ClientError as e : exc = translate_exception ( e , kwargs ) attempt += 1 if isinstance ( exc ,... | Make a request to DynamoDB using the raw botocore API |
5,619 | def subscribe ( self , event , hook ) : if hook not in self . _hooks [ event ] : self . _hooks [ event ] . append ( hook ) | Subscribe a callback to an event |
5,620 | def unsubscribe ( self , event , hook ) : if hook in self . _hooks [ event ] : self . _hooks [ event ] . remove ( hook ) | Unsubscribe a hook from an event |
5,621 | def add_rate_limit ( self , limiter ) : if limiter not in self . rate_limiters : self . subscribe ( 'capacity' , limiter . on_capacity ) self . rate_limiters . append ( limiter ) | Add a RateLimit to the connection |
5,622 | def remove_rate_limit ( self , limiter ) : if limiter in self . rate_limiters : self . unsubscribe ( 'capacity' , limiter . on_capacity ) self . rate_limiters . remove ( limiter ) | Remove a RateLimit from the connection |
5,623 | def _default_capacity ( self , value ) : if value is not None : return value if self . default_return_capacity or self . rate_limiters : return INDEXES return NONE | Get the value for ReturnConsumedCapacity from provided value |
5,624 | def _count ( self , method , limit , keywords ) : limit = limit . copy ( ) has_more = True count = None while has_more : limit . set_request_args ( keywords ) response = self . call ( method , ** keywords ) limit . post_fetch ( response ) count += Count . from_response ( response ) last_evaluated_key = response . get (... | Do a scan or query and aggregate the results into a Count |
5,625 | def describe_table ( self , tablename ) : try : response = self . call ( 'describe_table' , TableName = tablename ) [ 'Table' ] return Table . from_response ( response ) except DynamoDBError as e : if e . kwargs [ 'Code' ] == 'ResourceNotFoundException' : return None else : raise | Get the details about a table |
5,626 | def put_item ( self , tablename , item , expected = None , returns = NONE , return_capacity = None , expect_or = False , ** kwargs ) : keywords = { } if kwargs : keywords [ 'Expected' ] = encode_query_kwargs ( self . dynamizer , kwargs ) if len ( keywords [ 'Expected' ] ) > 1 : keywords [ 'ConditionalOperator' ] = 'OR'... | Store an item overwriting existing data |
5,627 | def delete_item2 ( self , tablename , key , expr_values = None , alias = None , condition = None , returns = NONE , return_capacity = None , return_item_collection_metrics = NONE , ** kwargs ) : keywords = { 'TableName' : tablename , 'Key' : self . dynamizer . encode_keys ( key ) , 'ReturnValues' : returns , 'ReturnCon... | Delete an item from a table |
5,628 | def batch_write ( self , tablename , return_capacity = None , return_item_collection_metrics = NONE ) : return_capacity = self . _default_capacity ( return_capacity ) return BatchWriter ( self , tablename , return_capacity = return_capacity , return_item_collection_metrics = return_item_collection_metrics ) | Perform a batch write on a table |
5,629 | def batch_get ( self , tablename , keys , attributes = None , alias = None , consistent = False , return_capacity = None ) : keys = [ self . dynamizer . encode_keys ( k ) for k in keys ] return_capacity = self . _default_capacity ( return_capacity ) ret = GetResultSet ( self , tablename , keys , consistent = consistent... | Perform a batch get of many items in a table |
5,630 | def update_item ( self , tablename , key , updates , returns = NONE , return_capacity = None , expect_or = False , ** kwargs ) : key = self . dynamizer . encode_keys ( key ) attr_updates = { } expected = { } keywords = { 'ReturnConsumedCapacity' : self . _default_capacity ( return_capacity ) , } for update in updates :... | Update a single item in a table |
5,631 | def query ( self , tablename , attributes = None , consistent = False , count = False , index = None , limit = None , desc = False , return_capacity = None , filter = None , filter_or = False , exclusive_start_key = None , ** kwargs ) : keywords = { 'TableName' : tablename , 'ReturnConsumedCapacity' : self . _default_c... | Perform an index query on a table |
5,632 | def read_logodata ( handle ) : seqs = weblogolib . read_seq_data ( handle , alphabet = unambiguous_protein_alphabet ) ldata = weblogolib . LogoData . from_seqs ( seqs ) letters = ldata . alphabet . letters ( ) counts = ldata . counts . array logodata = [ ] for i , coldata , entropy , weight in zip ( range ( len ( count... | Get weblogo data for a sequence alignment . |
5,633 | def aln2logodata ( aln ) : handle = StringIO ( aln . format ( 'fasta' ) ) logodata = read_logodata ( handle ) handle . close ( ) return logodata | Get weblogo data for an alignment object . |
5,634 | def letter_scales ( counts ) : try : scale = 1.0 / sum ( counts . values ( ) ) except ZeroDivisionError : return [ ] freqs = [ ( aa , cnt * scale ) for aa , cnt in counts . iteritems ( ) if cnt ] freqs . sort ( key = lambda pair : pair [ 1 ] ) return freqs | Convert letter counts to frequencies sorted increasing . |
5,635 | def replace ( doc , pointer , value ) : return Target ( doc ) . replace ( pointer , value ) . document | Replace element from sequence member from mapping . |
5,636 | def set_options ( self , ** kw ) : r for k , v in kw . iteritems ( ) : if k in self . __options : self . __options [ k ] = v | r Set Parser options . |
5,637 | def xml2object ( self , content ) : r content = self . xml_filter ( content ) element = ET . fromstring ( content ) tree = self . parse ( element ) if self . __options [ 'strip_attr' ] else self . parse_full ( element ) if not self . __options [ 'strip_root' ] : node = self . get_node ( element ) if not self . __option... | r Convert xml content to python object . |
5,638 | def xml_filter ( self , content ) : r content = utils . strip_whitespace ( content , True ) if self . __options [ 'strip' ] else content . strip ( ) if not self . __options [ 'encoding' ] : encoding = self . guess_xml_encoding ( content ) or self . __encoding self . set_options ( encoding = encoding ) if self . __optio... | r Filter and preprocess xml content |
5,639 | def guess_xml_encoding ( self , content ) : r matchobj = self . __regex [ 'xml_encoding' ] . match ( content ) return matchobj and matchobj . group ( 1 ) . lower ( ) | r Guess encoding from xml header declaration . |
5,640 | def parse ( self , element ) : r values = { } for child in element : node = self . get_node ( child ) subs = self . parse ( child ) value = subs or node [ 'value' ] if node [ 'tag' ] not in values : values [ node [ 'tag' ] ] = value else : if not isinstance ( values [ node [ 'tag' ] ] , list ) : values [ node [ 'tag' ]... | r Parse xml element . |
5,641 | def parse_full ( self , element ) : r values = collections . defaultdict ( dict ) for child in element : node = self . get_node ( child ) subs = self . parse_full ( child ) value = subs or { 'values' : node [ 'value' ] } value [ 'attrs' ] = node [ 'attr' ] if node [ 'tag' ] not in values [ 'values' ] : values [ 'values... | r Parse xml element include the node attributes . |
5,642 | def get_node ( self , element ) : r ns , tag = self . split_namespace ( element . tag ) return { 'tag' : tag , 'value' : ( element . text or '' ) . strip ( ) , 'attr' : element . attrib , 'namespace' : ns } | r Get node info . |
5,643 | def split_namespace ( self , tag ) : r matchobj = self . __regex [ 'xml_ns' ] . search ( tag ) return matchobj . groups ( ) if matchobj else ( '' , tag ) | r Split tag namespace . |
5,644 | def instantiate_probes ( probes , instruments ) : probe_instances = { } for name , sub_dict in probes . items ( ) : assert isinstance ( sub_dict , dict ) assert "probe_name" in sub_dict assert "instrument_name" in sub_dict probe_name = sub_dict [ 'probe_name' ] instrument_name = sub_dict [ 'instrument_name' ] if "probe... | Creates instances of the probes inputed ; |
5,645 | def compressed ( self ) : order = ecdsa . SECP256k1 . generator . order ( ) p = ecdsa . VerifyingKey . from_string ( compat_bytes ( self ) , curve = ecdsa . SECP256k1 ) . pubkey . point x_str = ecdsa . util . number_to_string ( p . x ( ) , order ) compressed = hexlify ( compat_bytes ( chr ( 2 + ( p . y ( ) & 1 ) ) , 'a... | Derive compressed public key |
5,646 | def unCompressed ( self ) : public_key = repr ( self . _pk ) prefix = public_key [ 0 : 2 ] if prefix == "04" : return public_key assert prefix == "02" or prefix == "03" x = int ( public_key [ 2 : ] , 16 ) y = self . _derive_y_from_x ( x , ( prefix == "02" ) ) key = '04' + '%064x' % x + '%064x' % y return key | Derive uncompressed key |
5,647 | def compressedpubkey ( self ) : secret = unhexlify ( repr ( self . _wif ) ) order = ecdsa . SigningKey . from_string ( secret , curve = ecdsa . SECP256k1 ) . curve . generator . order ( ) p = ecdsa . SigningKey . from_string ( secret , curve = ecdsa . SECP256k1 ) . verifying_key . pubkey . point x_str = ecdsa . util . ... | Derive uncompressed public key |
5,648 | def get_private ( self ) : a = compat_bytes ( self . account + self . role + self . password , 'utf8' ) s = hashlib . sha256 ( a ) . digest ( ) return PrivateKey ( hexlify ( s ) . decode ( 'ascii' ) ) | Derive private key from the brain key and the current sequence number |
5,649 | def downstream_index ( dir_value , i , j , alg = 'taudem' ) : assert alg . lower ( ) in FlowModelConst . d8_deltas delta = FlowModelConst . d8_deltas . get ( alg . lower ( ) ) drow , dcol = delta [ int ( dir_value ) ] return i + drow , j + dcol | find downslope coordinate for D8 direction . |
5,650 | def convert_group ( tokens ) : tok = tokens . asList ( ) dic = dict ( tok ) if not ( len ( dic ) == len ( tok ) ) : raise ParseFatalException ( "Names in group must be unique: %s" % tokens ) return ConfGroup ( dic ) | Converts parseResult from to ConfGroup type . |
5,651 | def load_elements ( self , filename ) : input_data = load_b26_file ( filename ) if isinstance ( input_data , dict ) and self . elements_type in input_data : return input_data [ self . elements_type ] else : return { } | loads the elements from file filename |
5,652 | def add_script_sequence ( self ) : def empty_tree ( tree_model ) : def add_children_to_list ( item , somelist ) : if item . hasChildren ( ) : for rownum in range ( 0 , item . rowCount ( ) ) : somelist . append ( str ( item . child ( rownum , 0 ) . text ( ) ) ) output_list = [ ] root = tree_model . invisibleRootItem ( )... | creates a script sequence based on the script iterator type selected and the selected scripts and sends it to the tree self . tree_loaded |
5,653 | def cli ( family_file , family_type , to_json , to_madeline , to_ped , to_dict , outfile , logfile , loglevel ) : from pprint import pprint as pp my_parser = FamilyParser ( family_file , family_type ) if to_json : if outfile : outfile . write ( my_parser . to_json ( ) ) else : print ( my_parser . to_json ( ) ) elif to_... | Cli for testing the ped parser . |
5,654 | def check_line_length ( self , splitted_line , expected_length ) : if len ( splitted_line ) != expected_length : raise WrongLineFormat ( message = 'WRONG FORMATED PED LINE!' , ped_line = '\t' . join ( splitted_line ) ) return | Check if the line is correctly formatted . Throw a SyntaxError if it is not . |
5,655 | def deck_vote_tag ( deck : Deck ) -> str : if deck . id is None : raise Exception ( "deck.id is required" ) deck_vote_tag_privkey = sha256 ( unhexlify ( deck . id ) + b"vote_init" ) . hexdigest ( ) deck_vote_tag_address = Kutil ( network = deck . network , privkey = bytearray . fromhex ( deck_vote_tag_privkey ) ) retur... | deck vote tag address |
5,656 | def parse_vote_info ( protobuf : bytes ) -> dict : vote = pavoteproto . Vote ( ) vote . ParseFromString ( protobuf ) assert vote . version > 0 , { "error" : "Vote info incomplete, version can't be 0." } assert vote . start_block < vote . end_block , { "error" : "vote can't end in the past." } return { "version" : vote ... | decode vote init tx op_return protobuf message and validate it . |
5,657 | def vote_init ( vote : Vote , inputs : dict , change_address : str ) -> bytes : network_params = net_query ( vote . deck . network ) deck_vote_tag_address = deck_vote_tag ( vote . deck ) tx_fee = network_params . min_tx_fee for utxo in inputs [ 'utxos' ] : utxo [ 'txid' ] = unhexlify ( utxo [ 'txid' ] ) utxo [ 'scriptS... | initialize vote transaction must be signed by the deck_issuer privkey |
5,658 | def find_vote_inits ( provider : Provider , deck : Deck ) -> Iterable [ Vote ] : vote_ints = provider . listtransactions ( deck_vote_tag ( deck ) ) for txid in vote_ints : try : raw_vote = provider . getrawtransaction ( txid ) vote = parse_vote_info ( read_tx_opreturn ( raw_vote ) ) vote [ "vote_id" ] = txid vote [ "se... | find vote_inits on this deck |
5,659 | def vote_cast ( vote : Vote , choice_index : int , inputs : dict , change_address : str ) -> bytes : network_params = net_query ( vote . deck . network ) vote_cast_addr = vote . vote_choice_address [ choice_index ] tx_fee = network_params . min_tx_fee for utxo in inputs [ 'utxos' ] : utxo [ 'txid' ] = unhexlify ( utxo ... | vote cast transaction |
5,660 | def find_vote_casts ( provider : Provider , vote : Vote , choice_index : int ) -> Iterable [ VoteCast ] : vote_casts = provider . listtransactions ( vote . vote_choice_address [ choice_index ] ) for tx in vote_casts : raw_tx = provider . getrawtransaction ( tx , 1 ) sender = find_tx_sender ( provider , raw_tx ) confirm... | find and verify vote_casts on this vote_choice_address |
5,661 | def to_protobuf ( self ) -> str : vote = pavoteproto . Vote ( ) vote . version = self . version vote . description = self . description vote . count_mode = vote . MODE . Value ( self . count_mode ) vote . start_block = self . start_block vote . end_block = self . end_block vote . choices . extend ( self . choices ) if ... | encode vote into protobuf |
5,662 | def to_dict ( self ) -> dict : return { "version" : self . version , "description" : self . description , "count_mode" : self . count_mode , "start_block" : self . start_block , "end_block" : self . end_block , "choices" : self . choices , "vote_metainfo" : self . vote_metainfo } | vote info as dict |
5,663 | def vote_choice_address ( self ) -> List [ str ] : if self . vote_id is None : raise Exception ( "vote_id is required" ) addresses = [ ] vote_init_txid = unhexlify ( self . vote_id ) for choice in self . choices : vote_cast_privkey = sha256 ( vote_init_txid + bytes ( list ( self . choices ) . index ( choice ) ) ) . hex... | calculate the addresses on which the vote is casted . |
5,664 | def is_valid ( self ) -> bool : if not ( self . blocknum >= self . vote . start_block and self . blocknum <= self . vote . end_block ) : return False if not self . confirmations >= 6 : return False return True | check if VoteCast is valid |
5,665 | def search_refdata ( self , seq , locus ) : if self . server_avail : hla , loc = locus . split ( '-' ) p1 = "SELECT ent.name " p2 = "FROM bioentry ent,biosequence seq,biodatabase dbb " p3 = "WHERE dbb.biodatabase_id = ent.biodatabase_id AND seq.bioentry_id = ent.bioentry_id " p4 = " AND dbb.name = \"" + self . dbversio... | This checks to see if a sequence already exists in the reference data . If it does then it ll return the known annotation . |
5,666 | def update ( self , * args ) : for d in args : for ( key , value ) in d . items ( ) : self . __setitem__ ( key , value ) | updates the values of the parameter just as a regular dictionary |
5,667 | def error ( msg , log_file = None ) : UtilClass . print_msg ( msg + os . linesep ) if log_file is not None : UtilClass . writelog ( log_file , msg , 'append' ) raise RuntimeError ( msg ) | Print output error message and raise RuntimeError . |
5,668 | def log ( lines , log_file = None ) : err = False for line in lines : print ( line ) if log_file is not None : UtilClass . writelog ( log_file , line , 'append' ) if 'BAD TERMINATION' in line . upper ( ) : err = True break if err : TauDEM . error ( 'Error occurred when calling TauDEM function, please check!' , log_file... | Output log message . |
5,669 | def write_time_log ( logfile , time ) : if os . path . exists ( logfile ) : log_status = open ( logfile , 'a' , encoding = 'utf-8' ) else : log_status = open ( logfile , 'w' , encoding = 'utf-8' ) log_status . write ( 'Function Name\tRead Time\tCompute Time\tWrite Time\tTotal Time\t\n' ) log_status . write ( '%s\t%.5f\... | Write time log . |
5,670 | def check_infile_and_wp ( curinf , curwp ) : if not os . path . exists ( curinf ) : if curwp is None : TauDEM . error ( 'You must specify one of the workspace and the ' 'full path of input file!' ) curinf = curwp + os . sep + curinf curinf = os . path . abspath ( curinf ) if not os . path . exists ( curinf ) : TauDEM .... | Check the existence of the given file and directory path . 1 . Raise Runtime exception if both do not exist . 2 . If the curwp is None then set the base folder of curinf to it . |
5,671 | def pitremove ( np , dem , filleddem , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : fname = TauDEM . func_name ( 'pitremove' ) return TauDEM . run ( FileClass . get_executable_fullpath ( fname , exedir ) , { '-z' : dem } , workingdir , None , { '-fel... | Run pit remove using the flooding approach |
5,672 | def d8flowdir ( np , filleddem , flowdir , slope , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : fname = TauDEM . func_name ( 'd8flowdir' ) return TauDEM . run ( FileClass . get_executable_fullpath ( fname , exedir ) , { '-fel' : filleddem } , working... | Run D8 flow direction |
5,673 | def dinfflowdir ( np , filleddem , flowangle , slope , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : fname = TauDEM . func_name ( 'dinfflowdir' ) return TauDEM . run ( FileClass . get_executable_fullpath ( fname , exedir ) , { '-fel' : filleddem } , w... | Run Dinf flow direction |
5,674 | def aread8 ( np , flowdir , acc , outlet = None , streamskeleton = None , edgecontaimination = False , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : if not edgecontaimination : in_params = { '-nc' : None } else : in_params = None fname = TauDEM . func... | Run Accumulate area according to D8 flow direction |
5,675 | def areadinf ( np , angfile , sca , outlet = None , wg = None , edgecontaimination = False , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : if edgecontaimination : in_params = { '-nc' : None } else : in_params = None fname = TauDEM . func_name ( 'aread... | Run Accumulate area according to Dinf flow direction |
5,676 | def connectdown ( np , p , acc , outlet , wtsd = None , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : if wtsd is None or not os . path . isfile ( wtsd ) : p , workingdir = TauDEM . check_infile_and_wp ( p , workingdir ) wtsd = workingdir + os . sep + ... | Reads an ad8 contributing area file identifies the location of the largest ad8 value as the outlet of the largest watershed |
5,677 | def threshold ( np , acc , stream_raster , threshold = 100. , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : fname = TauDEM . func_name ( 'threshold' ) return TauDEM . run ( FileClass . get_executable_fullpath ( fname , exedir ) , { '-ssa' : acc } , wo... | Run threshold for stream raster |
5,678 | def moveoutletstostrm ( np , flowdir , streamRaster , outlet , modifiedOutlet , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : fname = TauDEM . func_name ( 'moveoutletstostrm' ) return TauDEM . run ( FileClass . get_executable_fullpath ( fname , exedir... | Run move the given outlets to stream |
5,679 | def convertdistmethod ( method_str ) : if StringClass . string_match ( method_str , 'Horizontal' ) : return 'h' elif StringClass . string_match ( method_str , 'Vertical' ) : return 'v' elif StringClass . string_match ( method_str , 'Pythagoras' ) : return 'p' elif StringClass . string_match ( method_str , 'Surface' ) :... | Convert distance method to h v p and s . |
5,680 | def convertstatsmethod ( method_str ) : if StringClass . string_match ( method_str , 'Average' ) : return 'ave' elif StringClass . string_match ( method_str , 'Maximum' ) : return 'max' elif StringClass . string_match ( method_str , 'Minimum' ) : return 'min' elif method_str . lower ( ) in [ 'ave' , 'max' , 'min' ] : r... | Convert statistics method to ave min and max . |
5,681 | def d8hdisttostrm ( np , p , src , dist , thresh , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : fname = TauDEM . func_name ( 'd8hdisttostrm' ) return TauDEM . run ( FileClass . get_executable_fullpath ( fname , exedir ) , { '-p' : p , '-src' : src } ... | Run D8 horizontal distance down to stream . |
5,682 | def d8distdowntostream ( np , p , fel , src , dist , distancemethod , thresh , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : fname = TauDEM . func_name ( 'd8distdowntostream' ) return TauDEM . run ( FileClass . get_executable_fullpath ( fname , exedir... | Run D8 distance down to stream by different method for distance . This function is extended from d8hdisttostrm by Liangjun . |
5,683 | def dinfdistdown ( np , ang , fel , slp , src , statsm , distm , edgecontamination , wg , dist , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : in_params = { '-m' : '%s %s' % ( TauDEM . convertstatsmethod ( statsm ) , TauDEM . convertdistmethod ( distm... | Run D - inf distance down to stream |
5,684 | def peukerdouglas ( np , fel , streamSkeleton , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : fname = TauDEM . func_name ( 'peukerdouglas' ) return TauDEM . run ( FileClass . get_executable_fullpath ( fname , exedir ) , { '-fel' : fel } , workingdir ,... | Run peuker - douglas function |
5,685 | def dropanalysis ( np , fel , p , ad8 , ssa , outlet , minthresh , maxthresh , numthresh , logspace , drp , workingdir = None , mpiexedir = None , exedir = None , log_file = None , runtime_file = None , hostfile = None ) : parstr = '%f %f %f' % ( minthresh , maxthresh , numthresh ) if logspace == 'false' : parstr += ' ... | Drop analysis for optimal threshold for extracting stream . |
5,686 | def resolve ( self , pointer ) : dp = DocumentPointer ( pointer ) obj , fetcher = self . prototype ( dp ) for token in dp . pointer : obj = token . extract ( obj , bypass_ref = True ) reference = ref ( obj ) if reference : obj = fetcher . resolve ( reference ) return obj | Resolve from documents . |
5,687 | def count_diffs ( align , feats , inseq , locus , cutoff , verbose = False , verbosity = 0 ) : nfeats = len ( feats . keys ( ) ) mm = 0 insr = 0 dels = 0 gaps = 0 match = 0 lastb = '' l = len ( align [ 0 ] ) if len ( align [ 0 ] ) > len ( align [ 1 ] ) else len ( align [ 1 ] ) for i in range ( 0 , l ) : if align [ 0 ] ... | count_diffs - Counts the number of mismatches gaps and insertions and then determines if those are within an acceptable range . |
5,688 | def cmd_stop ( self , argv , help ) : parser = argparse . ArgumentParser ( prog = "%s stop" % self . progname , description = help , ) instances = self . get_instances ( command = 'stop' ) parser . add_argument ( "instance" , nargs = 1 , metavar = "instance" , help = "Name of the instance from the config." , choices = ... | Stops the instance |
5,689 | def cmd_terminate ( self , argv , help ) : from ploy . common import yesno parser = argparse . ArgumentParser ( prog = "%s terminate" % self . progname , description = help , ) instances = self . get_instances ( command = 'terminate' ) parser . add_argument ( "instance" , nargs = 1 , metavar = "instance" , help = "Name... | Terminates the instance |
5,690 | def cmd_start ( self , argv , help ) : parser = argparse . ArgumentParser ( prog = "%s start" % self . progname , description = help , ) instances = self . get_instances ( command = 'start' ) parser . add_argument ( "instance" , nargs = 1 , metavar = "instance" , help = "Name of the instance from the config." , choices... | Starts the instance |
5,691 | def cmd_annotate ( self , argv , help ) : parser = argparse . ArgumentParser ( prog = "%s annotate" % self . progname , description = help , ) parser . parse_args ( argv ) list ( self . instances . values ( ) ) for global_section in sorted ( self . config ) : for sectionname in sorted ( self . config [ global_section ]... | Prints annotated config |
5,692 | def cmd_debug ( self , argv , help ) : parser = argparse . ArgumentParser ( prog = "%s debug" % self . progname , description = help , ) instances = self . instances parser . add_argument ( "instance" , nargs = 1 , metavar = "instance" , help = "Name of the instance from the config." , choices = sorted ( instances ) ) ... | Prints some debug info for this script |
5,693 | def cmd_list ( self , argv , help ) : parser = argparse . ArgumentParser ( prog = "%s list" % self . progname , description = help , ) parser . add_argument ( "list" , nargs = 1 , metavar = "listname" , help = "Name of list to show." , choices = sorted ( self . list_cmds ) ) parser . add_argument ( "listopts" , metavar... | Return a list of various things |
5,694 | def cmd_ssh ( self , argv , help ) : parser = argparse . ArgumentParser ( prog = "%s ssh" % self . progname , description = help , ) instances = self . get_instances ( command = 'init_ssh_key' ) parser . add_argument ( "instance" , nargs = 1 , metavar = "instance" , help = "Name of the instance from the config." , choi... | Log into the instance with ssh using the automatically generated known hosts |
5,695 | def initialize ( self , store ) : assert isinstance ( store , stores . BaseStore ) self . messages = Queue ( ) self . store = store self . store . register ( self ) | Common initialization of handlers happens here . If additional initialization is required this method must either be called with super or the child class must assign the store attribute and register itself with the store . |
5,696 | def prepare ( self ) : request_time = 1000.0 * self . request . request_time ( ) access_log . info ( "%d %s %.2fms" , self . get_status ( ) , self . _request_summary ( ) , request_time ) | Log access . |
5,697 | async def publish ( self , message ) : try : self . write ( 'data: {}\n\n' . format ( message ) ) await self . flush ( ) except StreamClosedError : self . finished = True | Pushes data to a listener . |
5,698 | async def open ( self ) : self . store . register ( self ) while not self . finished : message = await self . messages . get ( ) await self . publish ( message ) | Register with the publisher . |
5,699 | async def publish ( self , message ) : try : self . write_message ( dict ( data = message ) ) except WebSocketClosedError : self . _close ( ) | Push a new message to the client . The data will be available as a JSON object with the key data . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.