idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
6,700
def add_clink ( self , my_clink ) : if self . causalRelations_layer is None : self . causalRelations_layer = CcausalRelations ( ) self . root . append ( self . causalRelations_layer . get_node ( ) ) self . causalRelations_layer . add_clink ( my_clink )
Adds a clink to the causalRelations layer
73
9
6,701
def add_factuality ( self , my_fact ) : if self . factuality_layer is None : self . factuality_layer = Cfactualities ( ) self . root . append ( self . factuality_layer . get_node ( ) ) self . factuality_layer . add_factuality ( my_fact )
Adds a factuality to the factuality layer
71
9
6,702
def add_entity ( self , entity ) : if self . entity_layer is None : self . entity_layer = Centities ( type = self . type ) self . root . append ( self . entity_layer . get_node ( ) ) self . entity_layer . add_entity ( entity )
Adds an entity to the entity layer
64
7
6,703
def add_coreference ( self , coreference ) : if self . coreference_layer is None : self . coreference_layer = Ccoreferences ( type = self . type ) self . root . append ( self . coreference_layer . get_node ( ) ) self . coreference_layer . add_coreference ( coreference )
Adds an coreference to the coreference layer
74
9
6,704
def create_coreference ( self , coref_type , term_ids , id = None ) : if id is None : if self . coreference_layer is None : i = 1 else : corefs = ( l for l in self . coreference_layer . get_corefs ( ) if l . get_type == coref_type ) i = len ( list ( corefs ) ) + 1 id = "co{coref_type}{i}" . format ( * * locals ( ) ) new_coref = Ccoreference ( type = self . type ) new_coref . set_id ( id ) new_coref . set_type ( coref_type ) new_coref . add_span ( term_ids ) self . add_coreference ( new_coref ) return new_coref
Create a new coreference object and add it to the coreferences layer
178
15
6,705
def add_constituency_tree ( self , my_tree ) : if self . constituency_layer is None : self . constituency_layer = Cconstituency ( ) self . root . append ( self . constituency_layer . get_node ( ) ) self . constituency_layer . add_tree ( my_tree )
Adds a constituency tree to the constituency layer
69
8
6,706
def add_property ( self , label , term_span , pid = None ) : if self . features_layer is None : self . features_layer = Cfeatures ( type = self . type ) self . root . append ( self . features_layer . get_node ( ) ) self . features_layer . add_property ( pid , label , term_span )
Adds a property to the property layer
78
7
6,707
def get_dict_tokens_for_termid ( self , term_id ) : if self . dict_tokens_for_tid is None : self . dict_tokens_for_tid = { } for term in self . get_terms ( ) : self . dict_tokens_for_tid [ term . get_id ( ) ] = term . get_span ( ) . get_span_ids ( ) return self . dict_tokens_for_tid . get ( term_id , [ ] )
Returns the tokens ids that are the span of the term specified
123
13
6,708
def map_tokens_to_terms ( self , list_tokens ) : if self . terms_for_token is None : self . terms_for_token = { } for term in self . get_terms ( ) : termid = term . get_id ( ) token_ids = term . get_span ( ) . get_span_ids ( ) for tokid in token_ids : if tokid not in self . terms_for_token : self . terms_for_token [ tokid ] = [ termid ] else : self . terms_for_token [ tokid ] . append ( termid ) ret = set ( ) for my_id in list_tokens : term_ids = self . terms_for_token . get ( my_id , [ ] ) ret |= set ( term_ids ) return sorted ( list ( ret ) )
Maps a list of token ids to the corresponding term ids
191
13
6,709
def add_external_reference_to_term ( self , term_id , external_ref ) : if self . term_layer is not None : self . term_layer . add_external_reference ( term_id , external_ref )
Adds an external reference to the given term identifier
52
9
6,710
def add_external_reference_to_role ( self , role_id , external_ref ) : if self . srl_layer is not None : self . srl_layer . add_external_reference_to_role ( role_id , external_ref )
Adds an external reference to the given role identifier in the SRL layer
58
14
6,711
def remove_external_references_from_srl_layer ( self ) : if self . srl_layer is not None : for pred in self . srl_layer . get_predicates ( ) : pred . remove_external_references ( ) pred . remove_external_references_from_roles ( )
Removes all external references present in the term layer
71
10
6,712
def add_external_reference_to_entity ( self , entity_id , external_ref ) : if self . entity_layer is not None : self . entity_layer . add_external_reference_to_entity ( entity_id , external_ref )
Adds an external reference to the given entity identifier in the entity layer
56
13
6,713
def read ( self , size = - 1 ) : chunksize = size < 0 and self . CHUNKSIZE or min ( self . CHUNKSIZE , size ) buf = self . _rbuf buf . seek ( 0 , os . SEEK_END ) collected = buf . tell ( ) while 1 : if size >= 0 and collected >= size : # we have read enough already break output = self . _read_chunk ( chunksize ) if output is None : continue if not output : # nothing more to read break collected += len ( output ) buf . write ( output ) # get rid of the old buffer rc = buf . getvalue ( ) buf . seek ( 0 ) buf . truncate ( ) if size >= 0 : # leave the overflow in the buffer buf . write ( rc [ size : ] ) return rc [ : size ] return rc
read a number of bytes from the file and return it as a string
180
14
6,714
def readline ( self , max_len = - 1 ) : buf = self . _rbuf newline , chunksize = self . NEWLINE , self . CHUNKSIZE buf . seek ( 0 ) text = buf . read ( ) if len ( text ) >= max_len >= 0 : buf . seek ( 0 ) buf . truncate ( ) buf . write ( text [ max_len : ] ) return text [ : max_len ] while text . find ( newline ) < 0 : text = self . _read_chunk ( chunksize ) if text is None : text = '' continue if buf . tell ( ) + len ( text ) >= max_len >= 0 : text = buf . getvalue ( ) + text buf . seek ( 0 ) buf . truncate ( ) buf . write ( text [ max_len : ] ) return text [ : max_len ] if not text : break buf . write ( text ) else : # found a newline rc = buf . getvalue ( ) index = rc . find ( newline ) + len ( newline ) buf . seek ( 0 ) buf . truncate ( ) buf . write ( rc [ index : ] ) return rc [ : index ] # hit the end of the file, no more newlines rc = buf . getvalue ( ) buf . seek ( 0 ) buf . truncate ( ) return rc
read from the file until a newline is encountered
291
10
6,715
def write ( self , data ) : while data : went = self . _write_chunk ( data ) if went is None : continue data = data [ went : ]
write data to the file
36
5
6,716
def _wait_event ( self , reading ) : with self . _registered ( reading , not reading ) : ( self . _readable if reading else self . _writable ) . wait ( ) if scheduler . state . interrupted : raise IOError ( errno . EINTR , "interrupted system call" )
wait on our events
66
4
6,717
def fromfd ( cls , fd , mode = 'rb' , bufsize = - 1 ) : fp = object . __new__ ( cls ) # bypass __init__ fp . _rbuf = StringIO ( ) fp . encoding = None fp . mode = mode fp . _fileno = fd fp . _closed = False cls . _add_flags ( fd , cls . _mode_to_flags ( mode ) ) fp . _set_up_waiting ( ) return fp
create a cooperating greenhouse file from an existing descriptor
118
9
6,718
def isatty ( self ) : try : return os . isatty ( self . _fileno ) except OSError , e : raise IOError ( * e . args )
return whether the file is connected to a tty or not
40
12
6,719
def seek ( self , position , modifier = 0 ) : os . lseek ( self . _fileno , position , modifier ) # clear out the buffer buf = self . _rbuf buf . seek ( 0 ) buf . truncate ( )
move the cursor on the file descriptor to a different location
51
11
6,720
def tell ( self ) : with _fdopen ( os . dup ( self . _fileno ) ) as fp : return fp . tell ( )
get the file descriptor s position relative to the file s beginning
33
12
6,721
def fit ( self , labels , samples , pstates ) : assert len ( labels ) == len ( samples ) == len ( pstates ) for label in set ( labels ) : label_samples = [ s for l , s in zip ( labels , samples ) if l == label ] label_pstates = [ p for l , p in zip ( labels , pstates ) if l == label ] pohmm = self . pohmm_factory ( ) pohmm . fit ( label_samples , label_pstates ) self . pohmms [ label ] = pohmm return self
Fit the classifier with labels y and observations X
128
10
6,722
def fit_df ( self , labels , dfs , pstate_col = PSTATE_COL ) : assert len ( labels ) == len ( dfs ) for label in set ( labels ) : label_dfs = [ s for l , s in zip ( labels , dfs ) if l == label ] pohmm = self . pohmm_factory ( ) pohmm . fit_df ( label_dfs , pstate_col = pstate_col ) self . pohmms [ label ] = pohmm return self
Fit the classifier with labels y and DataFrames dfs
117
12
6,723
def predict ( self , sample , pstates ) : scores = { } for label , pohmm in self . pohmms . items ( ) : scores [ label ] = pohmm . score ( sample , pstates ) max_score_label = max ( scores . items ( ) , key = itemgetter ( 1 ) ) [ 0 ] return max_score_label , scores
Predict the class label of X
82
7
6,724
def predict_df ( self , df , pstate_col = PSTATE_COL ) : scores = { } for label , pohmm in self . pohmms . items ( ) : scores [ label ] = pohmm . score_df ( df , pstate_col = pstate_col ) max_score_label = max ( scores . items ( ) , key = itemgetter ( 1 ) ) [ 0 ] return max_score_label , scores
Predict the class label of DataFrame df
100
9
6,725
def load_secrets ( self , secret_path ) : self . _config = p_config . render_secrets ( self . config_path , secret_path )
render secrets into config object
37
5
6,726
def logger ( self ) : if self . _logger : return self . _logger else : log_builder = p_logging . ProsperLogger ( self . PROGNAME , self . config . get_option ( 'LOGGING' , 'log_path' ) , config_obj = self . config ) if self . verbose : log_builder . configure_debug_logger ( ) else : id_string = '({platform}--{version})' . format ( platform = platform . node ( ) , version = self . VERSION ) if self . config . get_option ( 'LOGGING' , 'discord_webhook' ) : log_builder . configure_discord_logger ( custom_args = id_string ) if self . config . get_option ( 'LOGGING' , 'slack_webhook' ) : log_builder . configure_slack_logger ( custom_args = id_string ) if self . config . get_option ( 'LOGGING' , 'hipchat_webhook' ) : log_builder . configure_hipchat_logger ( custom_args = id_string ) self . _logger = log_builder . get_logger ( ) return self . _logger
uses global logger for logging
273
5
6,727
def config ( self ) : if self . _config : return self . _config else : self . _config = p_config . ProsperConfig ( self . config_path ) return self . _config
uses global config for cfg
42
6
6,728
def notify_launch ( self , log_level = 'ERROR' ) : if not self . debug : self . logger . log ( logging . getLevelName ( log_level ) , 'LAUNCHING %s -- %s' , self . PROGNAME , platform . node ( ) ) flask_options = { key : getattr ( self , key ) for key in OPTION_ARGS } flask_options [ 'host' ] = self . get_host ( ) self . logger . info ( 'OPTIONS: %s' , flask_options )
logs launcher message before startup
119
6
6,729
def landsat_c1_toa_cloud_mask ( input_img , snow_flag = False , cirrus_flag = False , cloud_confidence = 2 , shadow_confidence = 3 , snow_confidence = 3 , cirrus_confidence = 3 ) : qa_img = input_img . select ( [ 'BQA' ] ) cloud_mask = qa_img . rightShift ( 4 ) . bitwiseAnd ( 1 ) . neq ( 0 ) . And ( qa_img . rightShift ( 5 ) . bitwiseAnd ( 3 ) . gte ( cloud_confidence ) ) . Or ( qa_img . rightShift ( 7 ) . bitwiseAnd ( 3 ) . gte ( shadow_confidence ) ) if snow_flag : cloud_mask = cloud_mask . Or ( qa_img . rightShift ( 9 ) . bitwiseAnd ( 3 ) . gte ( snow_confidence ) ) if cirrus_flag : cloud_mask = cloud_mask . Or ( qa_img . rightShift ( 11 ) . bitwiseAnd ( 3 ) . gte ( cirrus_confidence ) ) # Set cloudy pixels to 0 and clear to 1 return cloud_mask . Not ( )
Extract cloud mask from the Landsat Collection 1 TOA BQA band
263
16
6,730
def landsat_c1_sr_cloud_mask ( input_img , cloud_confidence = 3 , snow_flag = False ) : qa_img = input_img . select ( [ 'pixel_qa' ] ) cloud_mask = qa_img . rightShift ( 5 ) . bitwiseAnd ( 1 ) . neq ( 0 ) . And ( qa_img . rightShift ( 6 ) . bitwiseAnd ( 3 ) . gte ( cloud_confidence ) ) . Or ( qa_img . rightShift ( 3 ) . bitwiseAnd ( 1 ) . neq ( 0 ) ) if snow_flag : cloud_mask = cloud_mask . Or ( qa_img . rightShift ( 4 ) . bitwiseAnd ( 1 ) . neq ( 0 ) ) # Set cloudy pixels to 0 and clear to 1 return cloud_mask . Not ( )
Extract cloud mask from the Landsat Collection 1 SR pixel_qa band
189
15
6,731
def sentinel2_toa_cloud_mask ( input_img ) : qa_img = input_img . select ( [ 'QA60' ] ) cloud_mask = qa_img . rightShift ( 10 ) . bitwiseAnd ( 1 ) . neq ( 0 ) . Or ( qa_img . rightShift ( 11 ) . bitwiseAnd ( 1 ) . neq ( 0 ) ) # Set cloudy pixels to 0 and clear to 1 return cloud_mask . Not ( )
Extract cloud mask from the Sentinel 2 TOA QA60 band
108
14
6,732
def where ( cmd , path = None ) : raw_result = shutil . which ( cmd , os . X_OK , path ) if raw_result : return os . path . abspath ( raw_result ) else : raise ValueError ( "Could not find '{}' in the path" . format ( cmd ) )
A function to wrap shutil . which for universal usage
70
11
6,733
def search_file ( pattern , file_path ) : try : with open ( file_path ) as file : string = file . read ( ) except PermissionError : return [ ] matches = re . findall ( pattern , string ) return matches
Search a given file s contents for the regex pattern given as pattern
52
13
6,734
def call ( commands , * , print_result = False , raise_exception = False , print_commands = False ) : if isinstance ( commands , str ) : commands = commands . split ( ) if not ( isinstance ( commands , tuple ) or isinstance ( commands , list ) ) : raise ValueError ( "Function 'call' does not accept a 'commands'" "argument of type '{}'" . format ( type ( commands ) ) ) if raise_exception : print_result = False try : process = subprocess . Popen ( commands , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) if print_commands : _print_commands ( commands ) except : # TODO Why couldn't we just do 'except Exception as output' output = traceback . format_exc ( ) result = Result ( 1 , stderr = output ) if print_result and not raise_exception : print ( output , file = sys . stderr ) else : result = _extract_output ( process , print_result , raise_exception ) if raise_exception and ( result . returncode == 1 ) : message = "An error occurred in an external process:\n\n{}" raise Exception ( message . format ( result . getStderr ( ) ) ) return result
Will call a set of commands and wrangle the output how you choose
289
14
6,735
def multiCall ( * commands , dependent = True , bundle = False , print_result = False , print_commands = False ) : results = [ ] dependent_failed = False for command in commands : if not dependent_failed : response = call ( command , print_result = print_result , print_commands = print_commands ) # TODO Will an error ever return a code other than '1'? if ( response . returncode == 1 ) and dependent : dependent_failed = True else : response = None results . append ( response ) if bundle : result = Result ( ) for response in results : if not response : continue elif response . returncode == 1 : result . returncode = 1 result . extendInformation ( response ) processed_response = result else : processed_response = results return processed_response
Calls the function call multiple times given sets of commands
172
11
6,736
def distribute ( function , iterable , * , workers = 4 ) : with multiprocessing . Pool ( workers ) as pool : processes = [ ] for item in iterable : pickled = dill . dumps ( ( function , item ) ) process = pool . apply_async ( _run_pickled , ( pickled , ) ) processes . append ( process ) results = [ process . get ( ) for process in processes ] return results
A version of multiprocessing . Pool . map that works using dill to pickle the function and iterable
94
24
6,737
def getOutput ( self ) : output = self . stdout if self . stdout : output += '\r\n' output += self . stderr return output
Returns the combined output of stdout and stderr
36
11
6,738
def extendInformation ( self , response ) : if response . stdout : self . stdout += '\r\n' + response . stdout if response . stderr : self . stderr += '\r\n' + response . stderr
This extends the objects stdout and stderr by response s stdout and stderr
56
19
6,739
def wait_socks ( sock_events , inmask = 1 , outmask = 2 , timeout = None ) : results = [ ] for sock , mask in sock_events : if isinstance ( sock , zmq . backend . Socket ) : mask = _check_events ( sock , mask , inmask , outmask ) if mask : results . append ( ( sock , mask ) ) if results : return results fd_map = { } fd_events = [ ] for sock , mask in sock_events : if isinstance ( sock , zmq . backend . Socket ) : fd = sock . getsockopt ( zmq . FD ) elif isinstance ( sock , int ) : fd = sock else : fd = sock . fileno ( ) fd_map [ fd ] = sock fd_events . append ( ( fd , mask ) ) while 1 : started = time . time ( ) active = descriptor . wait_fds ( fd_events , inmask , outmask , timeout ) if not active : # timed out return [ ] results = [ ] for fd , mask in active : sock = fd_map [ fd ] if isinstance ( sock , zmq . backend . Socket ) : mask = _check_events ( sock , mask , inmask , outmask ) if not mask : continue results . append ( ( sock , mask ) ) if results : return results timeout -= time . time ( ) - started
wait on a combination of zeromq sockets normal sockets and fds
315
15
6,740
def remove_this_factuality ( self , factuality_id ) : for fact in self . get_factualities ( ) : if fact . get_id ( ) == factuality_id : self . node . remove ( fact . get_node ( ) ) break
Removes the factuality for the given factuality identifier
58
11
6,741
def remove_factuality ( self , fid ) : for node_pre in self . node . findall ( 'factuality' ) : if node_pre . get ( 'id' ) == fid : self . node . remove ( node_pre )
Removes a factuality element with a specific id from the layer
53
13
6,742
def remove_this_factvalue ( self , factvalue_id ) : for fact in self . get_factvalues ( ) : if fact . get_id ( ) == factvalue_id : self . node . remove ( fact . get_node ( ) ) break
Removes the factvalue for the given factvalue identifier
57
11
6,743
def _add_version_to_request ( self , url , headers , version ) : if self . _has_capability ( SERVER_REQUIRES_VERSION_HEADER ) : new_headers = headers . copy ( ) new_headers [ 'Last-Modified' ] = email . utils . formatdate ( version ) return url , new_headers else : url_params = { 'last_modified' : email . utils . formatdate ( version ) } new_url = url + "?" + urlencode ( url_params ) return new_url , headers
Adds version to either url or headers depending on protocol .
126
11
6,744
def _protocol_version ( self ) : if hasattr ( self , '_protocol_ver' ) : return self . _protocol_ver response = requests . get ( self . base_url + '/version/' ) if response . status_code == 404 : server_versions = { 1 } elif response . status_code == 200 : server_versions = set ( response . json ( ) [ 'protocol_versions' ] ) if not server_versions : raise FiletrackerError ( 'Server hasn\'t reported any supported protocols' ) else : response . raise_for_status ( ) common_versions = _SUPPORTED_VERSIONS . intersection ( server_versions ) if not common_versions : raise FiletrackerError ( 'Couldn\'t agree on protocol version: client supports ' '{}, server supports {}.' . format ( _PROTOCOL_CAPABILITIES , server_versions ) ) self . _protocol_ver = max ( common_versions ) print ( 'Settled for protocol version {}' . format ( self . _protocol_ver ) ) return self . _protocol_ver
Returns the protocol version that should be used .
243
9
6,745
def run ( self ) : logger . info ( "Starting daemon." ) while True : try : self . _scan_disk ( ) do_cleaning , delete_from_index = self . _analyze_file_index ( ) if do_cleaning : self . _clean_cache ( delete_from_index ) except Exception : logger . exception ( "Following exception occurred:" ) sleeping_until_time = datetime . datetime . now ( ) + self . scan_interval logger . info ( "Sleeping until %s." , sleeping_until_time ) time . sleep ( self . scan_interval . total_seconds ( ) )
Starts cleaning cache in infinite loop .
140
8
6,746
def normalize ( path_name , override = None ) : identity = identify ( path_name , override = override ) new_path_name = os . path . normpath ( os . path . expanduser ( path_name ) ) return new_path_name , identity
Prepares a path name to be worked with . Path name must not be empty . This function will return the normpath ed path and the identity of the path . This function takes an optional overriding argument for the identity .
58
44
6,747
def join_ext ( name , extension ) : if extension [ 0 ] == EXT : ret = name + extension else : ret = name + EXT + extension return ret
Joins a given name with an extension . If the extension doesn t have a . it will add it for you
34
23
6,748
def has_ext ( path_name , * , multiple = None , if_all_ext = False ) : base = os . path . basename ( path_name ) count = base . count ( EXT ) if not if_all_ext and base [ 0 ] == EXT and count != 0 : count -= 1 if multiple is None : return count >= 1 elif multiple : return count > 1 else : return count == 1
Determine if the given path name has an extension
90
11
6,749
def get_ext ( path_name , * , if_all_ext = False ) : if has_ext ( path_name ) : return os . path . splitext ( path_name ) [ EXTENSION ] elif if_all_ext and has_ext ( path_name , if_all_ext = True ) : return os . path . splitext ( path_name ) [ NAME ] else : return ''
Get an extension from the given path name . If an extension cannot be found it will return an empty string
93
21
6,750
def get_dir ( path_name , * , greedy = False , override = None , identity = None ) : if identity is None : identity = identify ( path_name , override = override ) path_name = os . path . normpath ( path_name ) if greedy and identity == ISDIR : return path_name else : return os . path . dirname ( path_name )
Gets the directory path of the given path name . If the argument greedy is specified as True then if the path name represents a directory itself the function will return the whole path
82
35
6,751
def get_system_drives ( ) : drives = [ ] if os . name == 'nt' : import ctypes bitmask = ctypes . windll . kernel32 . GetLogicalDrives ( ) letter = ord ( 'A' ) while bitmask > 0 : if bitmask & 1 : name = chr ( letter ) + ':' + os . sep if os . path . isdir ( name ) : drives . append ( name ) bitmask >>= 1 letter += 1 else : current_drive = get_drive ( os . getcwd ( ) ) if current_drive : drive = current_drive else : drive = os . sep drives . append ( drive ) return drives
Get the available drive names on the system . Always returns a list .
147
14
6,752
def has_suffix ( path_name , suffix ) : if isinstance ( suffix , str ) : suffix = disintegrate ( suffix ) components = disintegrate ( path_name ) for i in range ( - 1 , - ( len ( suffix ) + 1 ) , - 1 ) : if components [ i ] != suffix [ i ] : break else : return True return False
Determines if path_name has a suffix of at least suffix
78
14
6,753
def path ( path_name = None , override = None , * , root = None , name = None , ext = None , inject = None , relpath = None , reduce = False ) : path_name , identity , root = _initialize ( path_name , override , root , inject ) new_name = _process_name ( path_name , identity , name , ext ) new_directory = _process_directory ( path_name , identity , root , inject ) full_path = os . path . normpath ( os . path . join ( new_directory , new_name ) ) if APPEND_SEP_TO_DIRS and not new_name and full_path [ - 1 ] != os . sep : full_path += os . sep final_path = _format_path ( full_path , root , relpath , reduce ) return final_path
Path manipulation black magic
187
4
6,754
def path ( self , * * kwargs ) : new_path = path ( self . getPath ( ) , * * kwargs ) return File ( new_path )
Returns a different object with the specified changes applied to it . This object is not changed in the process .
38
21
6,755
def isOutDated ( self , output_file ) : if output_file . exists ( ) : source_time = self . getmtime ( ) output_time = output_file . getmtime ( ) return source_time > output_time else : return True
Figures out if Cyther should compile the given FileInfo object by checking the both of the modified times
58
21
6,756
def isUpdated ( self ) : modified_time = self . getmtime ( ) valid = modified_time > self . __stamp return valid
Figures out if the file had previously errored and hasn t been fixed since given a numerical time
31
20
6,757
def get_translations ( self , status = None ) : if status is not None : result = self . api_call ( 'translation/?status=%s' % status ) else : result = self . api_call ( 'translation/' ) if result . status_code == 200 : translations_json = json . loads ( result . content ) [ "objects" ] translations = [ Translation ( * * tj ) for tj in translations_json ] else : log . critical ( 'Error status when fetching translation from server: {' '}!' . format ( result . status_code ) ) translations = [ ] return translations
Returns the translations requested by the user
134
7
6,758
def get_translation ( self , uid ) : result = self . api_call ( 'translation/{}/' . format ( uid ) ) if result . status_code == 200 : translation = Translation ( * * json . loads ( result . content ) ) else : log . critical ( 'Error status when fetching translation from server: {' '}!' . format ( result . status_code ) ) raise ValueError ( result . content ) return translation
Returns a translation with the given id
98
7
6,759
def get_language_pairs ( self , train_langs = None ) : if train_langs is None : result = self . api_call ( 'language_pair/' ) else : result = self . api_call ( 'language_pair/?train_langs={}' . format ( train_langs ) ) try : langs_json = json . loads ( result . content ) if 'error' in langs_json : return [ ] languages = [ LangPair ( Language ( shortname = lang_json [ "lang_pair" ] [ "source_language" ] [ "shortname" ] , name = lang_json [ "lang_pair" ] [ "source_language" ] [ "name" ] ) , Language ( shortname = lang_json [ "lang_pair" ] [ "target_language" ] [ "shortname" ] , name = lang_json [ "lang_pair" ] [ "target_language" ] [ "name" ] ) ) for lang_json in langs_json [ "objects" ] ] except Exception , e : log . exception ( "Error decoding get language pairs" ) raise e return languages
Returns the language pairs available on unbabel
253
8
6,760
def get_tones ( self ) : result = self . api_call ( 'tone/' ) tones_json = json . loads ( result . content ) tones = [ Tone ( name = tone_json [ "tone" ] [ "name" ] , description = tone_json [ "tone" ] [ "description" ] ) for tone_json in tones_json [ "objects" ] ] return tones
Returns the tones available on unbabel
86
7
6,761
def get_topics ( self ) : result = self . api_call ( 'topic/' ) topics_json = json . loads ( result . content ) topics = [ Topic ( name = topic_json [ "topic" ] [ "name" ] ) for topic_json in topics_json [ "objects" ] ] return topics
Returns the topics available on unbabel
71
7
6,762
def rand ( self , unique_pstates , random_state = None ) : self . _init_pstates ( unique_pstates ) self . _init_random ( random_state = random_state ) self . _compute_marginals ( ) return self
Randomize the POHMM parameters
57
7
6,763
def score_events ( self , obs , pstates ) : pstates_idx = np . array ( [ self . e [ p ] for p in pstates ] ) framelogprob = self . _compute_log_likelihood ( obs , pstates_idx ) _ , fwdlattice = self . _do_forward_pass ( framelogprob , pstates_idx ) L = logsumexp ( fwdlattice , axis = 1 ) return np . concatenate ( [ L [ [ 0 ] ] , np . diff ( L ) ] )
Compute the log probability of each event under the model .
130
12
6,764
def predict ( self , obs , pstates , next_pstate = None ) : assert len ( obs ) == len ( pstates ) pstates_idx = np . array ( [ self . e [ ei ] for ei in pstates ] ) next_pstate_idx = self . e [ next_pstate ] if len ( obs ) == 0 : # No history, use the starting probas next_hstate_prob = self . startprob [ next_pstate_idx ] else : # With a history, determine the hidden state posteriors using # the last posteriors and transition matrix framelogprob = self . _compute_log_likelihood ( obs , pstates_idx ) _ , fwdlattice = self . _do_forward_pass ( framelogprob , pstates_idx ) next_hstate_prob = np . zeros ( self . n_hidden_states ) alpha_n = fwdlattice [ - 1 ] vmax = alpha_n . max ( axis = 0 ) alpha_n = np . exp ( alpha_n - vmax ) alpha_n = alpha_n / alpha_n . sum ( ) trans = self . transmat [ pstates_idx [ - 1 ] , next_pstate_idx ] for i in range ( self . n_hidden_states ) : next_hstate_prob [ i ] = np . sum ( [ alpha_n [ j ] * trans [ j , i ] for j in range ( self . n_hidden_states ) ] ) assert next_hstate_prob . sum ( ) - 1 < TOLERANCE # Make the prediction prediction = np . array ( [ self . expected_value ( feature , pstate = next_pstate , hstate_prob = next_hstate_prob ) for feature in self . emission_name ] ) # next_hstate = np.argmax(next_hstate_prob) # prediction = np.array( # [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in # self.emission_name]) return prediction
Predict the next observation
482
5
6,765
def fit_df ( self , dfs , pstate_col = PSTATE_COL ) : obs_cols = list ( self . emission_name ) obs = [ df [ df . columns . difference ( [ pstate_col ] ) ] [ obs_cols ] . values for df in dfs ] pstates = [ df [ pstate_col ] . values for df in dfs ] return self . fit ( obs , pstates )
Convenience function to fit a model from a list of dataframes
96
14
6,766
def sample_df ( self , pstates = None , n_obs = None , random_state = None , pstate_col = PSTATE_COL , hstate_col = HSTATE_COL ) : try : import pandas as pd except Exception as e : raise e obs , pstates , hstates = self . sample ( pstates , n_obs , random_state ) items = [ ] if pstate_col is not None : items . append ( ( pstate_col , pstates ) ) if hstate_col is not None : items . append ( ( hstate_col , hstates ) ) items = items + [ ( self . emission_name [ i ] , obs [ : , i ] ) for i in range ( self . n_features ) ] df = pd . DataFrame . from_items ( items ) return df
Convenience function to generate samples a model and create a dataframe
183
14
6,767
def main ( args = None ) : if args is None : args = sys . argv [ 1 : ] if not args : args = [ '-h' ] namespace = parser . parse_args ( args ) entry_function = namespace . func del namespace . func kwargs = namespace . __dict__ return entry_function ( * * kwargs )
Entry point for cyther - script generated by setup . py on installation
76
14
6,768
def get_ldap_users ( self ) : if ( not self . conf_LDAP_SYNC_USER ) : return ( None , None ) user_keys = set ( self . conf_LDAP_SYNC_USER_ATTRIBUTES . keys ( ) ) user_keys . update ( self . conf_LDAP_SYNC_USER_EXTRA_ATTRIBUTES ) uri_users_server , users = self . ldap_search ( self . conf_LDAP_SYNC_USER_FILTER , user_keys , self . conf_LDAP_SYNC_USER_INCREMENTAL , self . conf_LDAP_SYNC_USER_FILTER_INCREMENTAL ) logger . debug ( "Retrieved %d users from %s LDAP server" % ( len ( users ) , uri_users_server ) ) return ( uri_users_server , users )
Retrieve user data from LDAP server .
204
9
6,769
def get_ldap_groups ( self ) : if ( not self . conf_LDAP_SYNC_GROUP ) : return ( None , None ) uri_groups_server , groups = self . ldap_search ( self . conf_LDAP_SYNC_GROUP_FILTER , self . conf_LDAP_SYNC_GROUP_ATTRIBUTES . keys ( ) , self . conf_LDAP_SYNC_GROUP_INCREMENTAL , self . conf_LDAP_SYNC_GROUP_FILTER_INCREMENTAL ) logger . debug ( "Retrieved %d groups from %s LDAP server" % ( len ( groups ) , uri_groups_server ) ) return ( uri_groups_server , groups )
Retrieve groups from LDAP server .
167
8
6,770
def get_ldap_user_membership ( self , user_dn ) : #Escape parenthesis in DN membership_filter = self . conf_LDAP_SYNC_GROUP_MEMBERSHIP_FILTER . replace ( '{distinguishedName}' , user_dn . replace ( '(' , "\(" ) . replace ( ')' , "\)" ) ) try : uri , groups = self . ldap_search ( membership_filter , self . conf_LDAP_SYNC_GROUP_ATTRIBUTES . keys ( ) , False , membership_filter ) except Exception as e : logger . error ( "Error reading membership: Filter %s, Keys %s" % ( membership_filter , str ( self . conf_LDAP_SYNC_GROUP_ATTRIBUTES . keys ( ) ) ) ) return None #logger.debug("AD Membership: Retrieved %d groups for user '%s'" % (len(groups), user_dn)) return ( uri , groups )
Retrieve user membership from LDAP server .
222
9
6,771
def sync_ldap_user_membership ( self , user , ldap_groups ) : groupname_field = 'name' actualGroups = user . groups . values_list ( 'name' , flat = True ) user_Membership_total = len ( ldap_groups ) user_Membership_added = 0 user_Membership_deleted = 0 user_Membership_errors = 0 ldap_groups += self . conf_LDAP_SYNC_GROUP_MEMBERSHIP_ADD_DEFAULT ldap_name_groups = [ ] for cname , ldap_attributes in ldap_groups : defaults = { } try : for name , attribute in ldap_attributes . items ( ) : defaults [ self . conf_LDAP_SYNC_GROUP_ATTRIBUTES [ name ] ] = attribute [ 0 ] . decode ( 'utf-8' ) except AttributeError : # In some cases attrs is a list instead of a dict; skip these invalid groups continue try : groupname = defaults [ groupname_field ] ldap_name_groups . append ( groupname ) except KeyError : logger . warning ( "Group is missing a required attribute '%s'" % groupname_field ) user_Membership_errors += 1 continue if ( groupname not in actualGroups ) : kwargs = { groupname_field + '__iexact' : groupname , 'defaults' : defaults , } #Adding Group Membership try : if ( self . conf_LDAP_SYNC_GROUP_MEMBERSHIP_CREATE_IF_NOT_EXISTS ) : group , created = Group . objects . get_or_create ( * * kwargs ) else : group = Group . objects . get ( name = groupname ) created = False except ( ObjectDoesNotExist ) : #Doesn't exist and not autocreate groups, we pass the error continue except ( IntegrityError , DataError ) as e : logger . error ( "Error creating group %s: %s" % ( groupname , e ) ) user_Membership_errors += 1 else : if created : logger . debug ( "Created group %s" % groupname ) #Now well assign the user group . user_set . add ( user ) user_Membership_added += 1 #Default Primary Group: Temporary is fixed #removing group membership for check_group in actualGroups : if ( check_group not in ldap_name_groups ) : group = Group . objects . get ( name = check_group ) group . user_set . remove ( user ) user_Membership_deleted += 1 if ( ( user_Membership_deleted > 0 ) or ( user_Membership_added > 0 ) ) : group . save ( ) logger . info ( "Group membership for user %s synchronized: %d Added, %d Removed" % ( user . username , user_Membership_added , user_Membership_deleted ) ) #Return statistics self . stats_membership_total += user_Membership_total self . stats_membership_added += user_Membership_added self . stats_membership_deleted += user_Membership_deleted self . stats_membership_errors += user_Membership_errors
Synchronize LDAP membership to Django membership
724
9
6,772
def ldap_search ( self , filter , attributes , incremental , incremental_filter ) : for uri in self . conf_LDAP_SYNC_BIND_URI : #Read record of this uri if ( self . working_uri == uri ) : adldap_sync = self . working_adldap_sync created = False else : adldap_sync , created = ADldap_Sync . objects . get_or_create ( ldap_sync_uri = uri ) if ( ( adldap_sync . syncs_to_full > 0 ) and incremental ) : filter_to_use = incremental_filter . replace ( '?' , self . whenchanged . strftime ( self . conf_LDAP_SYNC_INCREMENTAL_TIMESTAMPFORMAT ) ) logger . debug ( "Using an incremental search. Filter is:'%s'" % filter_to_use ) else : filter_to_use = filter ldap . set_option ( ldap . OPT_REFERRALS , 0 ) #ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, 10) l = PagedLDAPObject ( uri ) l . protocol_version = 3 if ( uri . startswith ( 'ldaps:' ) ) : l . set_option ( ldap . OPT_X_TLS , ldap . OPT_X_TLS_DEMAND ) l . set_option ( ldap . OPT_X_TLS_REQUIRE_CERT , ldap . OPT_X_TLS_DEMAND ) l . set_option ( ldap . OPT_X_TLS_DEMAND , True ) else : l . set_option ( ldap . OPT_X_TLS , ldap . OPT_X_TLS_NEVER ) l . set_option ( ldap . OPT_X_TLS_REQUIRE_CERT , ldap . OPT_X_TLS_NEVER ) l . set_option ( ldap . OPT_X_TLS_DEMAND , False ) try : l . simple_bind_s ( self . conf_LDAP_SYNC_BIND_DN , self . conf_LDAP_SYNC_BIND_PASS ) except ldap . LDAPError as e : logger . error ( "Error connecting to LDAP server %s : %s" % ( uri , e ) ) continue results = l . paged_search_ext_s ( self . conf_LDAP_SYNC_BIND_SEARCH , ldap . SCOPE_SUBTREE , filter_to_use , attrlist = attributes , serverctrls = None ) l . unbind_s ( ) if ( self . working_uri is None ) : self . working_uri = uri self . conf_LDAP_SYNC_BIND_URI . insert ( 0 , uri ) self . working_adldap_sync = adldap_sync return ( uri , results ) # Return both the LDAP server URI used and the request. This is for incremental sync purposes #if not connected correctly, raise error raise
Query the configured LDAP server with the provided search filter and attribute list .
705
15
6,773
def sliver_reader ( filename_end_mask = "*[0-9].mhd" , sliver_reference_dir = "~/data/medical/orig/sliver07/training/" , read_orig = True , read_seg = False ) : sliver_reference_dir = op . expanduser ( sliver_reference_dir ) orig_fnames = glob . glob ( sliver_reference_dir + "*orig" + filename_end_mask ) ref_fnames = glob . glob ( sliver_reference_dir + "*seg" + filename_end_mask ) orig_fnames . sort ( ) ref_fnames . sort ( ) output = [ ] for i in range ( 0 , len ( orig_fnames ) ) : oname = orig_fnames [ i ] rname = ref_fnames [ i ] vs_mm = None ref_data = None orig_data = None if read_orig : orig_data , metadata = io3d . datareader . read ( oname ) vs_mm = metadata [ 'voxelsize_mm' ] if read_seg : ref_data , metadata = io3d . datareader . read ( rname ) vs_mm = metadata [ 'voxelsize_mm' ] import re numeric_label = re . search ( ".*g(\d+)" , oname ) . group ( 1 ) out = ( numeric_label , vs_mm , oname , orig_data , rname , ref_data ) yield out
Generator for reading sliver data from directory structure .
337
11
6,774
def make_gunicorn_config ( _gunicorn_config_path = '' , ) : gunicorn_py = '''"""AUTOGENERATED BY: prosper.common.flask_utils:gunicorn_config Based off: https://sebest.github.io/post/protips-using-gunicorn-inside-a-docker-image/ """ from os import environ for key, value in environ.items(): if key.startswith('GUNICORN_'): gunicorn_key = key.split('_', 1)[1].lower() locals()[gunicorn_key] = value ''' gunicorn_file = 'gunicorn.conf' if _gunicorn_config_path : gunicorn_file = _gunicorn_config_path with open ( gunicorn_file , 'w' ) as gunicorn_cfg : gunicorn_cfg . write ( gunicorn_py )
makes gunicorn . conf file for launching in docker
215
11
6,775
def _print_details ( extra = None ) : def print_node_handler ( name , node , depth ) : """Standard printer for a node.""" line = "{0}{1} {2} ({3}:{4})" . format ( depth , ( " " * depth ) , name , node . line , node . col ) if extra is not None : line += " [{0}]" . format ( extra ( node ) ) sys . stdout . write ( line + "\n" ) return print_node_handler
Return a function that prints node details .
113
8
6,776
def do_print ( filename ) : with open ( filename ) as cmake_file : body = ast . parse ( cmake_file . read ( ) ) word_print = _print_details ( lambda n : "{0} {1}" . format ( n . type , n . contents ) ) ast_visitor . recurse ( body , while_stmnt = _print_details ( ) , foreach = _print_details ( ) , function_def = _print_details ( ) , macro_def = _print_details ( ) , if_block = _print_details ( ) , if_stmnt = _print_details ( ) , elseif_stmnt = _print_details ( ) , else_stmnt = _print_details ( ) , function_call = _print_details ( lambda n : n . name ) , word = word_print )
Print the AST of filename .
193
6
6,777
def set ( self ) : self . _is_set = True scheduler . state . awoken_from_events . update ( self . _waiters ) del self . _waiters [ : ]
set the event to triggered
42
5
6,778
def wait ( self , timeout = None ) : if self . _is_set : return False current = compat . getcurrent ( ) # the waiting greenlet waketime = None if timeout is None else time . time ( ) + timeout if timeout is not None : scheduler . schedule_at ( waketime , current ) self . _waiters . append ( current ) scheduler . state . mainloop . switch ( ) if timeout is not None : if not scheduler . _remove_timer ( waketime , current ) : scheduler . state . awoken_from_events . discard ( current ) if current in self . _waiters : self . _waiters . remove ( current ) return True return False
pause the current coroutine until this event is set
151
10
6,779
def acquire ( self , blocking = True ) : current = compat . getcurrent ( ) if self . _owner is current : self . _count += 1 return True if self . _locked and not blocking : return False if self . _locked : self . _waiters . append ( compat . getcurrent ( ) ) scheduler . state . mainloop . switch ( ) else : self . _locked = True self . _owner = current self . _count = 1 return True
acquire ownership of the lock
99
6
6,780
def release ( self ) : if not self . _locked or self . _owner is not compat . getcurrent ( ) : raise RuntimeError ( "cannot release un-acquired lock" ) self . _count -= 1 if self . _count == 0 : self . _owner = None if self . _waiters : waiter = self . _waiters . popleft ( ) self . _locked = True self . _owner = waiter scheduler . state . awoken_from_events . add ( waiter ) else : self . _locked = False self . _owner = None
release one ownership of the lock
122
6
6,781
def wait ( self , timeout = None ) : if not self . _is_owned ( ) : raise RuntimeError ( "cannot wait on un-acquired lock" ) current = compat . getcurrent ( ) waketime = None if timeout is None else time . time ( ) + timeout if timeout is not None : scheduler . schedule_at ( waketime , current ) self . _waiters . append ( ( current , waketime ) ) self . _lock . release ( ) scheduler . state . mainloop . switch ( ) self . _lock . acquire ( ) if timeout is not None : timedout = not scheduler . _remove_timer ( waketime , current ) if timedout : self . _waiters . remove ( ( current , waketime ) ) return timedout return False
wait to be woken up by the condition
173
9
6,782
def notify ( self , num = 1 ) : if not self . _is_owned ( ) : raise RuntimeError ( "cannot wait on un-acquired lock" ) for i in xrange ( min ( num , len ( self . _waiters ) ) ) : scheduler . state . awoken_from_events . add ( self . _waiters . popleft ( ) [ 0 ] )
wake one or more waiting greenlets
86
7
6,783
def notify_all ( self ) : if not self . _is_owned ( ) : raise RuntimeError ( "cannot wait on un-acquired lock" ) scheduler . state . awoken_from_events . update ( x [ 0 ] for x in self . _waiters ) self . _waiters . clear ( )
wake all waiting greenlets
70
5
6,784
def acquire ( self , blocking = True ) : if self . _value : self . _value -= 1 return True if not blocking : return False self . _waiters . append ( compat . getcurrent ( ) ) scheduler . state . mainloop . switch ( ) return True
decrement the counter waiting if it is already at 0
58
12
6,785
def release ( self ) : if self . _waiters : scheduler . state . awoken_from_events . add ( self . _waiters . popleft ( ) ) else : self . _value += 1
increment the counter waking up a waiter if there was any
46
12
6,786
def start ( self ) : if self . _started : raise RuntimeError ( "thread already started" ) def run ( ) : try : self . run ( * self . _args , * * self . _kwargs ) except SystemExit : # only shut down the thread, not the whole process pass finally : self . _deactivate ( ) self . _glet = scheduler . greenlet ( run ) self . _ident = id ( self . _glet ) scheduler . schedule ( self . _glet ) self . _activate ( )
schedule to start the greenlet that runs this thread s function
116
13
6,787
def join ( self , timeout = None ) : if not self . _started : raise RuntimeError ( "cannot join thread before it is started" ) if compat . getcurrent ( ) is self . _glet : raise RuntimeError ( "cannot join current thread" ) self . _finished . wait ( timeout )
block until this thread terminates
67
6
6,788
def cancel ( self ) : done = self . finished . is_set ( ) self . finished . set ( ) return not done
attempt to prevent the timer from ever running its function
27
11
6,789
def wrap ( cls , secs , args = ( ) , kwargs = None ) : def decorator ( func ) : return cls ( secs , func , args , kwargs ) return decorator
a classmethod decorator to immediately turn a function into a timer
46
13
6,790
def get ( self , block = True , timeout = None ) : if not self . _data : if not block : raise Empty ( ) current = compat . getcurrent ( ) waketime = None if timeout is None else time . time ( ) + timeout if timeout is not None : scheduler . schedule_at ( waketime , current ) self . _waiters . append ( ( current , waketime ) ) scheduler . state . mainloop . switch ( ) if timeout is not None : if not scheduler . _remove_timer ( waketime , current ) : self . _waiters . remove ( ( current , waketime ) ) raise Empty ( ) if self . full ( ) and self . _waiters : scheduler . schedule ( self . _waiters . popleft ( ) [ 0 ] ) return self . _get ( )
get an item out of the queue
183
7
6,791
def put ( self , item , block = True , timeout = None ) : if self . full ( ) : if not block : raise Full ( ) current = compat . getcurrent ( ) waketime = None if timeout is None else time . time ( ) + timeout if timeout is not None : scheduler . schedule_at ( waketime , current ) self . _waiters . append ( ( current , waketime ) ) scheduler . state . mainloop . switch ( ) if timeout is not None : if not scheduler . _remove_timer ( waketime , current ) : self . _waiters . remove ( ( current , waketime ) ) raise Full ( ) if self . _waiters and not self . full ( ) : scheduler . schedule ( self . _waiters . popleft ( ) [ 0 ] ) if not self . _open_tasks : self . _jobs_done . clear ( ) self . _open_tasks += 1 self . _put ( item )
put an item into the queue
215
6
6,792
def increment ( self ) : self . _count += 1 waiters = self . _waiters . pop ( self . _count , [ ] ) if waiters : scheduler . state . awoken_from_events . update ( waiters )
increment the counter and wake anyone waiting for the new value
51
12
6,793
def wait ( self , until = 0 ) : if self . _count != until : self . _waiters . setdefault ( until , [ ] ) . append ( compat . getcurrent ( ) ) scheduler . state . mainloop . switch ( )
wait until the count has reached a particular number
53
9
6,794
def wait_callback ( connection ) : while 1 : state = connection . poll ( ) if state == extensions . POLL_OK : break elif state == extensions . POLL_READ : descriptor . wait_fds ( [ ( connection . fileno ( ) , 1 ) ] ) elif state == extensions . POLL_WRITE : descriptor . wait_fds ( [ ( connection . fileno ( ) , 2 ) ] ) else : raise psycopg2 . OperationalError ( "Bad poll result: %r" % state )
callback function suitable for psycopg2 . set_wait_callback
115
14
6,795
def get_path_to_root ( self , termid ) : # Get the sentence for the term root = None sentence = self . sentence_for_termid . get ( termid ) if sentence is None : #try with the top node top_node = self . top_relation_for_term . get ( termid ) if top_node is not None : root = top_node [ 1 ] else : return None else : if sentence in self . root_for_sentence : root = self . root_for_sentence [ sentence ] else : ##There is no root for this sentence return None # In this point top_node should be properly set path = self . get_shortest_path ( termid , root ) return path
Returns the dependency path from the term to the root
159
10
6,796
def get_full_dependents ( self , term_id , relations , counter = 0 ) : counter += 1 deps = self . relations_for_term if term_id in deps and len ( deps . get ( term_id ) ) > 0 : for dep in deps . get ( term_id ) : if not dep [ 1 ] in relations : relations . append ( dep [ 1 ] ) if dep [ 1 ] in deps : deprelations = self . get_full_dependents ( dep [ 1 ] , relations , counter ) for deprel in deprelations : if not deprel in relations : relations . append ( deprel ) return relations
Returns the complete list of dependents and embedded dependents of a certain term .
141
16
6,797
def getDirsToInclude ( string ) : dirs = [ ] a = string . strip ( ) obj = a . split ( '-' ) if len ( obj ) == 1 and obj [ 0 ] : for module in obj : try : exec ( 'import {}' . format ( module ) ) except ImportError : raise FileNotFoundError ( "The module '{}' does not" "exist" . format ( module ) ) try : dirs . append ( '-I{}' . format ( eval ( module ) . get_include ( ) ) ) except AttributeError : print ( NOT_NEEDED_MESSAGE . format ( module ) ) return dirs
Given a string of module names it will return the include directories essential to their compilation as long as the module has the conventional get_include function .
147
29
6,798
def purge_configs ( ) : user_config = path ( CONFIG_FILE_NAME , root = USER ) inplace_config = path ( CONFIG_FILE_NAME ) if os . path . isfile ( user_config ) : os . remove ( user_config ) if os . path . isfile ( inplace_config ) : os . remove ( inplace_config )
These will delete any configs found in either the current directory or the user s home directory
82
18
6,799
def find_config_file ( ) : local_config_name = path ( CONFIG_FILE_NAME ) if os . path . isfile ( local_config_name ) : return local_config_name else : user_config_name = path ( CONFIG_FILE_NAME , root = USER ) if os . path . isfile ( user_config_name ) : return user_config_name else : return None
Returns the path to the config file if found in either the current working directory or the user s home directory . If a config file is not found the function will return None .
90
35