Dataset columns:
idx: int64 (0 to 251k)
question: string (lengths 53 to 3.53k)
target: string (lengths 5 to 1.23k)
len_question: int64 (20 to 893)
len_target: int64 (3 to 238)
248,900
def dav_index ( context , data ) : # This is made to work with ownCloud/nextCloud, but some rumor has # it they are "standards compliant" and it should thus work for # other DAV servers. url = data . get ( 'url' ) result = context . http . request ( 'PROPFIND' , url ) for resp in result . xml . findall ( './{DAV:}response' ) : href = resp . findtext ( './{DAV:}href' ) if href is None : continue rurl = urljoin ( url , href ) rdata = data . copy ( ) rdata [ 'url' ] = rurl rdata [ 'foreign_id' ] = rurl if rdata [ 'url' ] == url : continue if resp . find ( './/{DAV:}collection' ) is not None : rdata [ 'parent_foreign_id' ] = rurl context . log . info ( "Fetching contents of folder: %s" % rurl ) context . recurse ( data = rdata ) else : rdata [ 'parent_foreign_id' ] = url # Do GET requests on the urls fetch ( context , rdata )
List files in a WebDAV directory .
265
9
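A minimal sketch of the same PROPFIND-based listing, using only requests and xml.etree.ElementTree rather than the framework's http context; the Depth header and the URL handling are illustrative assumptions.

import requests
from urllib.parse import urljoin
from xml.etree import ElementTree

def list_dav_collection(url, auth=None):
    # Depth: 1 asks the DAV server for the immediate children only.
    resp = requests.request('PROPFIND', url, headers={'Depth': '1'}, auth=auth)
    resp.raise_for_status()
    tree = ElementTree.fromstring(resp.content)
    entries = []
    for node in tree.findall('./{DAV:}response'):
        href = node.findtext('./{DAV:}href')
        if href is None:
            continue
        child_url = urljoin(url, href)
        if child_url == url:
            continue  # skip the collection itself
        is_collection = node.find('.//{DAV:}collection') is not None
        entries.append((child_url, is_collection))
    return entries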
248,901
def session ( context , data ) : context . http . reset ( ) user = context . get ( 'user' ) password = context . get ( 'password' ) if user is not None and password is not None : context . http . session . auth = ( user , password ) user_agent = context . get ( 'user_agent' ) if user_agent is not None : context . http . session . headers [ 'User-Agent' ] = user_agent referer = context . get ( 'url' ) if referer is not None : context . http . session . headers [ 'Referer' ] = referer proxy = context . get ( 'proxy' ) if proxy is not None : proxies = { 'http' : proxy , 'https' : proxy } context . http . session . proxies = proxies # Explictly save the session because no actual HTTP requests were made. context . http . save ( ) context . emit ( data = data )
Set some HTTP parameters for all subsequent requests .
203
9
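The session stage above just primes a shared HTTP session. A rough stand-alone equivalent with a plain requests.Session; the credentials, user agent, and proxy address are placeholders.

import requests

session = requests.Session()
session.auth = ('user', 'secret')                    # HTTP basic auth
session.headers['User-Agent'] = 'my-crawler/1.0'     # custom user agent
session.headers['Referer'] = 'https://example.org/'  # referer sent with every request
session.proxies = {'http': 'http://proxy:3128',
                   'https': 'http://proxy:3128'}     # same proxy for both schemes

# every request made through this session now carries the settings above
response = session.get('https://example.org/data.json')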
248,902
def save ( cls , crawler , stage , level , run_id , error = None , message = None ) : event = { 'stage' : stage . name , 'level' : level , 'timestamp' : pack_now ( ) , 'error' : error , 'message' : message } data = dump_json ( event ) conn . lpush ( make_key ( crawler , "events" ) , data ) conn . lpush ( make_key ( crawler , "events" , level ) , data ) conn . lpush ( make_key ( crawler , "events" , stage ) , data ) conn . lpush ( make_key ( crawler , "events" , stage , level ) , data ) conn . lpush ( make_key ( crawler , "events" , run_id ) , data ) conn . lpush ( make_key ( crawler , "events" , run_id , level ) , data ) return event
Create an event, possibly based on an exception.
208
9
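save() fans one serialized event out to several Redis lists so it can later be read per crawler, per level, per stage, or per run. A stripped-down sketch of that pattern with redis-py; the key layout here is illustrative, not the project's exact scheme.

import json
import redis

conn = redis.Redis()

def log_event(crawler, stage, run_id, level, message):
    payload = json.dumps({'stage': stage, 'level': level, 'message': message})
    # push the same payload onto every index list we may want to query later
    for key in ('%s:events' % crawler,
                '%s:events:%s' % (crawler, level),
                '%s:events:%s' % (crawler, stage),
                '%s:events:%s' % (crawler, run_id)):
        conn.lpush(key, payload)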
248,903
def get_stage_events ( cls , crawler , stage_name , start , end , level = None ) : key = make_key ( crawler , "events" , stage_name , level ) return cls . event_list ( key , start , end )
Events from a particular stage.
59
5
248,904
def get_run_events ( cls , crawler , run_id , start , end , level = None ) : key = make_key ( crawler , "events" , run_id , level ) return cls . event_list ( key , start , end )
Events from a particular run.
59
5
248,905
def soviet_checksum ( code ) : def sum_digits ( code , offset = 1 ) : total = 0 for digit , index in zip ( code [ : 7 ] , count ( offset ) ) : total += int ( digit ) * index summed = ( total // 11 * 11 ) return total - summed check = sum_digits ( code , 1 ) if check == 10 : check = sum_digits ( code , 3 ) if check == 10 : return code + '0' return code + str ( check )
Courtesy of Sir Vlad Lavrov .
112
6
248,906
def search_results_total ( html , xpath , check , delimiter ) : for container in html . findall ( xpath ) : if check in container . findtext ( '.' ) : text = container . findtext ( '.' ) . split ( delimiter ) total = int ( text [ - 1 ] . strip ( ) ) return total
Get the total number of results from the DOM of a search index .
74
14
248,907
def search_results_last_url ( html , xpath , label ) : for container in html . findall ( xpath ) : if container . text_content ( ) . strip ( ) == label : return container . find ( './/a' ) . get ( 'href' )
Get the URL of the last button in a search results listing .
61
13
248,908
def op_count ( cls , crawler , stage = None ) : if stage : total_ops = conn . get ( make_key ( crawler , stage ) ) else : total_ops = conn . get ( make_key ( crawler , "total_ops" ) ) return unpack_int ( total_ops )
Total operations performed for this crawler
71
7
248,909
def index ( ) : crawlers = [ ] for crawler in manager : data = Event . get_counts ( crawler ) data [ 'last_active' ] = crawler . last_run data [ 'total_ops' ] = crawler . op_count data [ 'running' ] = crawler . is_running data [ 'crawler' ] = crawler crawlers . append ( data ) return render_template ( 'index.html' , crawlers = crawlers )
Generate a list of all crawlers alphabetically with op counts .
105
14
248,910
def clean_html ( context , data ) : doc = _get_html_document ( context , data ) if doc is None : context . emit ( data = data ) return remove_paths = context . params . get ( 'remove_paths' ) for path in ensure_list ( remove_paths ) : for el in doc . findall ( path ) : el . drop_tree ( ) html_text = html . tostring ( doc , pretty_print = True ) content_hash = context . store_data ( html_text ) data [ 'content_hash' ] = content_hash context . emit ( data = data )
Clean an HTML DOM and store the changed version .
137
10
248,911
def execute ( cls , stage , state , data , next_allowed_exec_time = None ) : try : context = Context . from_state ( state , stage ) now = datetime . utcnow ( ) if next_allowed_exec_time and now < next_allowed_exec_time : # task not allowed to run yet; put it back in the queue Queue . queue ( stage , state , data , delay = next_allowed_exec_time ) elif context . crawler . disabled : pass elif context . stage . rate_limit : try : with rate_limiter ( context ) : context . execute ( data ) except RateLimitException : delay = max ( 1 , 1.0 / context . stage . rate_limit ) delay = random . randint ( 1 , int ( delay ) ) context . log . info ( "Rate limit exceeded, delaying %d sec." , delay ) Queue . queue ( stage , state , data , delay = delay ) else : context . execute ( data ) except Exception : log . exception ( "Task failed to execute:" ) finally : # Decrease the pending task count after excuting a task. Queue . decr_pending ( context . crawler ) # If we don't have anymore tasks to execute, time to clean up. if not context . crawler . is_running : context . crawler . aggregate ( context )
Execute the operation, allowing for rate limiting.
296
8
248,912
def _recursive_upsert ( context , params , data ) : children = params . get ( "children" , { } ) nested_calls = [ ] for child_params in children : key = child_params . get ( "key" ) child_data_list = ensure_list ( data . pop ( key ) ) if isinstance ( child_data_list , dict ) : child_data_list = [ child_data_list ] if not ( isinstance ( child_data_list , list ) and all ( isinstance ( i , dict ) for i in child_data_list ) ) : context . log . warn ( "Expecting a dict or a lost of dicts as children for key" , key ) continue if child_data_list : table_suffix = child_params . get ( "table_suffix" , key ) child_params [ "table" ] = params . get ( "table" ) + "_" + table_suffix # copy some properties over from parent to child inherit = child_params . get ( "inherit" , { } ) for child_data in child_data_list : for dest , src in inherit . items ( ) : child_data [ dest ] = data . get ( src ) nested_calls . append ( ( child_params , child_data ) ) # Insert or update data _upsert ( context , params , data ) for child_params , child_data in nested_calls : _recursive_upsert ( context , child_params , child_data )
Insert or update nested dicts recursively into db tables
334
12
248,913
def db ( context , data ) : table = context . params . get ( "table" , context . crawler . name ) params = context . params params [ "table" ] = table _recursive_upsert ( context , params , data )
Insert or update data as a row in a specified db table.
53
11
248,914
def cli ( debug , cache , incremental ) : settings . HTTP_CACHE = cache settings . INCREMENTAL = incremental settings . DEBUG = debug if settings . DEBUG : logging . basicConfig ( level = logging . DEBUG ) else : logging . basicConfig ( level = logging . INFO ) init_memorious ( )
Crawler framework for documents and structured scrapers .
68
10
248,915
def run ( crawler ) : crawler = get_crawler ( crawler ) crawler . run ( ) if is_sync_mode ( ) : TaskRunner . run_sync ( )
Run a specified crawler .
41
6
248,916
def index ( ) : crawler_list = [ ] for crawler in manager : is_due = 'yes' if crawler . check_due ( ) else 'no' if crawler . disabled : is_due = 'off' crawler_list . append ( [ crawler . name , crawler . description , crawler . schedule , is_due , Queue . size ( crawler ) ] ) headers = [ 'Name' , 'Description' , 'Schedule' , 'Due' , 'Pending' ] print ( tabulate ( crawler_list , headers = headers ) )
List the available crawlers .
128
6
248,917
def scheduled ( wait = False ) : manager . run_scheduled ( ) while wait : # Loop and try to run scheduled crawlers at short intervals manager . run_scheduled ( ) time . sleep ( settings . SCHEDULER_INTERVAL )
Run crawlers that are due .
55
7
248,918
def _get_directory_path ( context ) : path = os . path . join ( settings . BASE_PATH , 'store' ) path = context . params . get ( 'path' , path ) path = os . path . join ( path , context . crawler . name ) path = os . path . abspath ( os . path . expandvars ( path ) ) try : os . makedirs ( path ) except Exception : pass return path
Get the storage path for the output.
96
8
248,919
def directory ( context , data ) : with context . http . rehash ( data ) as result : if not result . ok : return content_hash = data . get ( 'content_hash' ) if content_hash is None : context . emit_warning ( "No content hash in data." ) return path = _get_directory_path ( context ) file_name = data . get ( 'file_name' , result . file_name ) file_name = safe_filename ( file_name , default = 'raw' ) file_name = '%s.%s' % ( content_hash , file_name ) data [ '_file_name' ] = file_name file_path = os . path . join ( path , file_name ) if not os . path . exists ( file_path ) : shutil . copyfile ( result . file_path , file_path ) context . log . info ( "Store [directory]: %s" , file_name ) meta_path = os . path . join ( path , '%s.json' % content_hash ) with open ( meta_path , 'w' ) as fh : json . dump ( data , fh )
Store the collected files to a given directory .
258
9
248,920
def seed ( context , data ) : for key in ( 'url' , 'urls' ) : for url in ensure_list ( context . params . get ( key ) ) : url = url % data context . emit ( data = { 'url' : url } )
Initialize a crawler with a set of seed URLs .
58
12
248,921
def enumerate ( context , data ) : items = ensure_list ( context . params . get ( 'items' ) ) for item in items : data [ 'item' ] = item context . emit ( data = data )
Iterate through a set of items and emit each one of them .
47
14
248,922
def sequence ( context , data ) : number = data . get ( 'number' , context . params . get ( 'start' , 1 ) ) stop = context . params . get ( 'stop' ) step = context . params . get ( 'step' , 1 ) delay = context . params . get ( 'delay' ) prefix = context . params . get ( 'tag' ) while True : tag = None if prefix is None else '%s:%s' % ( prefix , number ) if tag is None or not context . check_tag ( tag ) : context . emit ( data = { 'number' : number } ) if tag is not None : context . set_tag ( tag , True ) number = number + step if step > 0 and number >= stop : break if step < 0 and number <= stop : break if delay is not None : context . recurse ( data = { 'number' : number } , delay = delay ) break
Generate a sequence of numbers .
201
7
248,923
def fetch ( self ) : if self . _file_path is not None : return self . _file_path temp_path = self . context . work_path if self . _content_hash is not None : self . _file_path = storage . load_file ( self . _content_hash , temp_path = temp_path ) return self . _file_path if self . response is not None : self . _file_path = random_filename ( temp_path ) content_hash = sha1 ( ) with open ( self . _file_path , 'wb' ) as fh : for chunk in self . response . iter_content ( chunk_size = 8192 ) : content_hash . update ( chunk ) fh . write ( chunk ) self . _remove_file = True chash = content_hash . hexdigest ( ) self . _content_hash = storage . archive_file ( self . _file_path , content_hash = chash ) if self . http . cache and self . ok : self . context . set_tag ( self . request_id , self . serialize ( ) ) self . retrieved_at = datetime . utcnow ( ) . isoformat ( ) return self . _file_path
Lazily trigger download of the data when requested .
270
11
248,924
def make_key ( * criteria ) : criteria = [ stringify ( c ) for c in criteria ] criteria = [ c for c in criteria if c is not None ] if len ( criteria ) : return ':' . join ( criteria )
Make a string key out of many criteria .
50
9
248,925
def random_filename ( path = None ) : filename = uuid4 ( ) . hex if path is not None : filename = os . path . join ( path , filename ) return filename
Make a UUID - based file name which is extremely unlikely to exist already .
39
16
248,926
def sample_vMF ( mu , kappa , num_samples ) : dim = len ( mu ) result = np . zeros ( ( num_samples , dim ) ) for nn in range ( num_samples ) : # sample offset from center (on sphere) with spread kappa w = _sample_weight ( kappa , dim ) # sample a point v on the unit sphere that's orthogonal to mu v = _sample_orthonormal_to ( mu ) # compute new point result [ nn , : ] = v * np . sqrt ( 1. - w ** 2 ) + w * mu return result
Generate num_samples N-dimensional samples from the von Mises-Fisher distribution around center mu in R^N with concentration kappa.
137
29
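A small usage sketch for sample_vMF, assuming the helpers above are importable (the spherecluster package exposes a sample_vMF function; adjust the import if it lives elsewhere): draw samples around a unit-normalized mean direction and check they stay on the unit sphere.

import numpy as np
from spherecluster import sample_vMF  # assumed import path

mu = np.random.randn(10)
mu /= np.linalg.norm(mu)            # the center must be a unit vector

samples = sample_vMF(mu, kappa=50, num_samples=500)
print(samples.shape)                                       # (500, 10)
print(np.allclose(np.linalg.norm(samples, axis=1), 1.0))   # all samples lie on the sphere
print(samples.dot(mu).mean())                              # mean cosine similarity to mu; near 1 for large kappa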
248,927
def _sample_weight ( kappa , dim ) : dim = dim - 1 # since S^{n-1} b = dim / ( np . sqrt ( 4. * kappa ** 2 + dim ** 2 ) + 2 * kappa ) x = ( 1. - b ) / ( 1. + b ) c = kappa * x + dim * np . log ( 1 - x ** 2 ) while True : z = np . random . beta ( dim / 2. , dim / 2. ) w = ( 1. - ( 1. + b ) * z ) / ( 1. - ( 1. - b ) * z ) u = np . random . uniform ( low = 0 , high = 1 ) if kappa * w + dim * np . log ( 1. - x * w ) - c >= np . log ( u ) : return w
Rejection sampling scheme for sampling distance from center on surface of the sphere .
184
15
248,928
def _sample_orthonormal_to ( mu ) : v = np . random . randn ( mu . shape [ 0 ] ) proj_mu_v = mu * np . dot ( mu , v ) / np . linalg . norm ( mu ) orthto = v - proj_mu_v return orthto / np . linalg . norm ( orthto )
Sample point on sphere orthogonal to mu .
83
10
248,929
def _spherical_kmeans_single_lloyd ( X , n_clusters , sample_weight = None , max_iter = 300 , init = "k-means++" , verbose = False , x_squared_norms = None , random_state = None , tol = 1e-4 , precompute_distances = True , ) : random_state = check_random_state ( random_state ) sample_weight = _check_sample_weight ( X , sample_weight ) best_labels , best_inertia , best_centers = None , None , None # init centers = _init_centroids ( X , n_clusters , init , random_state = random_state , x_squared_norms = x_squared_norms ) if verbose : print ( "Initialization complete" ) # Allocate memory to store the distances for each sample to its # closer center for reallocation in case of ties distances = np . zeros ( shape = ( X . shape [ 0 ] , ) , dtype = X . dtype ) # iterations for i in range ( max_iter ) : centers_old = centers . copy ( ) # labels assignment # TODO: _labels_inertia should be done with cosine distance # since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized # this doesn't really matter. labels , inertia = _labels_inertia ( X , sample_weight , x_squared_norms , centers , precompute_distances = precompute_distances , distances = distances , ) # computation of the means if sp . issparse ( X ) : centers = _k_means . _centers_sparse ( X , sample_weight , labels , n_clusters , distances ) else : centers = _k_means . _centers_dense ( X , sample_weight , labels , n_clusters , distances ) # l2-normalize centers (this is the main contibution here) centers = normalize ( centers ) if verbose : print ( "Iteration %2d, inertia %.3f" % ( i , inertia ) ) if best_inertia is None or inertia < best_inertia : best_labels = labels . copy ( ) best_centers = centers . copy ( ) best_inertia = inertia center_shift_total = squared_norm ( centers_old - centers ) if center_shift_total <= tol : if verbose : print ( "Converged at iteration %d: " "center shift %e within tolerance %e" % ( i , center_shift_total , tol ) ) break if center_shift_total > 0 : # rerun E-step in case of non-convergence so that predicted labels # match cluster centers best_labels , best_inertia = _labels_inertia ( X , sample_weight , x_squared_norms , best_centers , precompute_distances = precompute_distances , distances = distances , ) return best_labels , best_inertia , best_centers , i + 1
Modified from sklearn.cluster.k_means_.k_means_single_lloyd.
708
24
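The difference from standard Lloyd iterations is that the centroids are l2-normalized after every mean update, so assignments can be made by cosine similarity. A self-contained toy sketch of one such iteration in plain numpy (illustrative, not the library code above):

import numpy as np

def spherical_kmeans_step(X, centers):
    # X and centers are assumed to be l2-normalized row-wise
    similarities = X @ centers.T               # cosine similarity for unit-norm rows
    labels = similarities.argmax(axis=1)       # assign each sample to its most similar center
    new_centers = np.zeros_like(centers)
    for k in range(centers.shape[0]):
        members = X[labels == k]
        if len(members):
            mean = members.sum(axis=0)
            new_centers[k] = mean / np.linalg.norm(mean)   # project the mean back onto the sphere
        else:
            new_centers[k] = centers[k]                    # keep empty clusters unchanged
    return labels, new_centers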
248,930
def fit ( self , X , y = None , sample_weight = None ) : if self . normalize : X = normalize ( X ) random_state = check_random_state ( self . random_state ) # TODO: add check that all data is unit-normalized self . cluster_centers_ , self . labels_ , self . inertia_ , self . n_iter_ = spherical_k_means ( X , n_clusters = self . n_clusters , sample_weight = sample_weight , init = self . init , n_init = self . n_init , max_iter = self . max_iter , verbose = self . verbose , tol = self . tol , random_state = random_state , copy_x = self . copy_x , n_jobs = self . n_jobs , return_n_iter = True , ) return self
Compute k - means clustering .
196
8
248,931
def _inertia_from_labels ( X , centers , labels ) : n_examples , n_features = X . shape inertia = np . zeros ( ( n_examples , ) ) for ee in range ( n_examples ) : inertia [ ee ] = 1 - X [ ee , : ] . dot ( centers [ int ( labels [ ee ] ) , : ] . T ) return np . sum ( inertia )
Compute inertia with cosine distance using known labels .
98
11
248,932
def _labels_inertia ( X , centers ) : n_examples , n_features = X . shape n_clusters , n_features = centers . shape labels = np . zeros ( ( n_examples , ) ) inertia = np . zeros ( ( n_examples , ) ) for ee in range ( n_examples ) : dists = np . zeros ( ( n_clusters , ) ) for cc in range ( n_clusters ) : dists [ cc ] = 1 - X [ ee , : ] . dot ( centers [ cc , : ] . T ) labels [ ee ] = np . argmin ( dists ) inertia [ ee ] = dists [ int ( labels [ ee ] ) ] return labels , np . sum ( inertia )
Compute labels and inertia with cosine distance .
175
10
248,933
def _S ( kappa , alpha , beta ) : kappa = 1. * np . abs ( kappa ) alpha = 1. * alpha beta = 1. * np . abs ( beta ) a_plus_b = alpha + beta u = np . sqrt ( kappa ** 2 + beta ** 2 ) if alpha == 0 : alpha_scale = 0 else : alpha_scale = alpha * np . log ( ( alpha + u ) / a_plus_b ) return u - beta - alpha_scale
Compute the antiderivative of the Amos - type bound G on the modified Bessel function ratio .
109
22
248,934
def _init_unit_centers ( X , n_clusters , random_state , init ) : n_examples , n_features = np . shape ( X ) if isinstance ( init , np . ndarray ) : n_init_clusters , n_init_features = init . shape assert n_init_clusters == n_clusters assert n_init_features == n_features # ensure unit normed centers centers = init for cc in range ( n_clusters ) : centers [ cc , : ] = centers [ cc , : ] / np . linalg . norm ( centers [ cc , : ] ) return centers elif init == "spherical-k-means" : labels , inertia , centers , iters = spherical_kmeans . _spherical_kmeans_single_lloyd ( X , n_clusters , x_squared_norms = np . ones ( ( n_examples , ) ) , init = "k-means++" ) return centers elif init == "random" : centers = np . random . randn ( n_clusters , n_features ) for cc in range ( n_clusters ) : centers [ cc , : ] = centers [ cc , : ] / np . linalg . norm ( centers [ cc , : ] ) return centers elif init == "k-means++" : centers = _init_centroids ( X , n_clusters , "k-means++" , random_state = random_state , x_squared_norms = np . ones ( ( n_examples , ) ) , ) for cc in range ( n_clusters ) : centers [ cc , : ] = centers [ cc , : ] / np . linalg . norm ( centers [ cc , : ] ) return centers elif init == "random-orthonormal" : centers = np . random . randn ( n_clusters , n_features ) q , r = np . linalg . qr ( centers . T , mode = "reduced" ) return q . T elif init == "random-class" : centers = np . zeros ( ( n_clusters , n_features ) ) for cc in range ( n_clusters ) : while np . linalg . norm ( centers [ cc , : ] ) == 0 : labels = np . random . randint ( 0 , n_clusters , n_examples ) centers [ cc , : ] = X [ labels == cc , : ] . sum ( axis = 0 ) for cc in range ( n_clusters ) : centers [ cc , : ] = centers [ cc , : ] / np . linalg . norm ( centers [ cc , : ] ) return centers
Initializes unit norm centers .
599
6
248,935
def _expectation ( X , centers , weights , concentrations , posterior_type = "soft" ) : n_examples , n_features = np . shape ( X ) n_clusters , _ = centers . shape if n_features <= 50 : # works up to about 50 before numrically unstable vmf_f = _vmf_log else : vmf_f = _vmf_log_asymptotic f_log = np . zeros ( ( n_clusters , n_examples ) ) for cc in range ( n_clusters ) : f_log [ cc , : ] = vmf_f ( X , concentrations [ cc ] , centers [ cc , : ] ) posterior = np . zeros ( ( n_clusters , n_examples ) ) if posterior_type == "soft" : weights_log = np . log ( weights ) posterior = np . tile ( weights_log . T , ( n_examples , 1 ) ) . T + f_log for ee in range ( n_examples ) : posterior [ : , ee ] = np . exp ( posterior [ : , ee ] - logsumexp ( posterior [ : , ee ] ) ) elif posterior_type == "hard" : weights_log = np . log ( weights ) weighted_f_log = np . tile ( weights_log . T , ( n_examples , 1 ) ) . T + f_log for ee in range ( n_examples ) : posterior [ np . argmax ( weighted_f_log [ : , ee ] ) , ee ] = 1.0 return posterior
Compute the log - likelihood of each datapoint being in each cluster .
355
16
248,936
def _maximization ( X , posterior , force_weights = None ) : n_examples , n_features = X . shape n_clusters , n_examples = posterior . shape concentrations = np . zeros ( ( n_clusters , ) ) centers = np . zeros ( ( n_clusters , n_features ) ) if force_weights is None : weights = np . zeros ( ( n_clusters , ) ) for cc in range ( n_clusters ) : # update weights (alpha) if force_weights is None : weights [ cc ] = np . mean ( posterior [ cc , : ] ) else : weights = force_weights # update centers (mu) X_scaled = X . copy ( ) if sp . issparse ( X ) : X_scaled . data *= posterior [ cc , : ] . repeat ( np . diff ( X_scaled . indptr ) ) else : for ee in range ( n_examples ) : X_scaled [ ee , : ] *= posterior [ cc , ee ] centers [ cc , : ] = X_scaled . sum ( axis = 0 ) # normalize centers center_norm = np . linalg . norm ( centers [ cc , : ] ) if center_norm > 1e-8 : centers [ cc , : ] = centers [ cc , : ] / center_norm # update concentration (kappa) [TODO: add other kappa approximations] rbar = center_norm / ( n_examples * weights [ cc ] ) concentrations [ cc ] = rbar * n_features - np . power ( rbar , 3. ) if np . abs ( rbar - 1.0 ) < 1e-10 : concentrations [ cc ] = MAX_CONTENTRATION else : concentrations [ cc ] /= 1. - np . power ( rbar , 2. ) # let python know we can free this (good for large dense X) del X_scaled return centers , weights , concentrations
Estimate new centers, weights, and concentrations from the posterior.
436
8
248,937
def _movMF ( X , n_clusters , posterior_type = "soft" , force_weights = None , max_iter = 300 , verbose = False , init = "random-class" , random_state = None , tol = 1e-6 , ) : random_state = check_random_state ( random_state ) n_examples , n_features = np . shape ( X ) # init centers (mus) centers = _init_unit_centers ( X , n_clusters , random_state , init ) # init weights (alphas) if force_weights is None : weights = np . ones ( ( n_clusters , ) ) weights = weights / np . sum ( weights ) else : weights = force_weights # init concentrations (kappas) concentrations = np . ones ( ( n_clusters , ) ) if verbose : print ( "Initialization complete" ) for iter in range ( max_iter ) : centers_prev = centers . copy ( ) # expectation step posterior = _expectation ( X , centers , weights , concentrations , posterior_type = posterior_type ) # maximization step centers , weights , concentrations = _maximization ( X , posterior , force_weights = force_weights ) # check convergence tolcheck = squared_norm ( centers_prev - centers ) if tolcheck <= tol : if verbose : print ( "Converged at iteration %d: " "center shift %e within tolerance %e" % ( iter , tolcheck , tol ) ) break # labels come for free via posterior labels = np . zeros ( ( n_examples , ) ) for ee in range ( n_examples ) : labels [ ee ] = np . argmax ( posterior [ : , ee ] ) inertia = _inertia_from_labels ( X , centers , labels ) return centers , weights , concentrations , posterior , labels , inertia
Mixture of von Mises Fisher clustering .
418
10
248,938
def movMF ( X , n_clusters , posterior_type = "soft" , force_weights = None , n_init = 10 , n_jobs = 1 , max_iter = 300 , verbose = False , init = "random-class" , random_state = None , tol = 1e-6 , copy_x = True , ) : if n_init <= 0 : raise ValueError ( "Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init ) random_state = check_random_state ( random_state ) if max_iter <= 0 : raise ValueError ( "Number of iterations should be a positive number," " got %d instead" % max_iter ) best_inertia = np . infty X = as_float_array ( X , copy = copy_x ) tol = _tolerance ( X , tol ) if hasattr ( init , "__array__" ) : init = check_array ( init , dtype = X . dtype . type , copy = True ) _validate_center_shape ( X , n_clusters , init ) if n_init != 1 : warnings . warn ( "Explicit initial center position passed: " "performing only one init in k-means instead of n_init=%d" % n_init , RuntimeWarning , stacklevel = 2 , ) n_init = 1 # defaults best_centers = None best_labels = None best_weights = None best_concentrations = None best_posterior = None best_inertia = None if n_jobs == 1 : # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). for it in range ( n_init ) : # cluster on the sphere ( centers , weights , concentrations , posterior , labels , inertia ) = _movMF ( X , n_clusters , posterior_type = posterior_type , force_weights = force_weights , max_iter = max_iter , verbose = verbose , init = init , random_state = random_state , tol = tol , ) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia : best_centers = centers . copy ( ) best_labels = labels . copy ( ) best_weights = weights . copy ( ) best_concentrations = concentrations . copy ( ) best_posterior = posterior . copy ( ) best_inertia = inertia else : # parallelisation of movMF runs seeds = random_state . randint ( np . iinfo ( np . int32 ) . max , size = n_init ) results = Parallel ( n_jobs = n_jobs , verbose = 0 ) ( delayed ( _movMF ) ( X , n_clusters , posterior_type = posterior_type , force_weights = force_weights , max_iter = max_iter , verbose = verbose , init = init , random_state = random_state , tol = tol , ) for seed in seeds ) # Get results with the lowest inertia centers , weights , concentrations , posteriors , labels , inertia = zip ( * results ) best = np . argmin ( inertia ) best_labels = labels [ best ] best_inertia = inertia [ best ] best_centers = centers [ best ] best_concentrations = concentrations [ best ] best_posterior = posteriors [ best ] best_weights = weights [ best ] return ( best_centers , best_labels , best_inertia , best_weights , best_concentrations , best_posterior , )
Wrapper for parallelization of _movMF and running n_init times .
812
17
248,939
def _check_fit_data ( self , X ) : X = check_array ( X , accept_sparse = "csr" , dtype = [ np . float64 , np . float32 ] ) n_samples , n_features = X . shape if X . shape [ 0 ] < self . n_clusters : raise ValueError ( "n_samples=%d should be >= n_clusters=%d" % ( X . shape [ 0 ] , self . n_clusters ) ) for ee in range ( n_samples ) : if sp . issparse ( X ) : n = sp . linalg . norm ( X [ ee , : ] ) else : n = np . linalg . norm ( X [ ee , : ] ) if np . abs ( n - 1. ) > 1e-4 : raise ValueError ( "Data l2-norm must be 1, found {}" . format ( n ) ) return X
Verify that the number of samples given is larger than k
213
12
248,940
def fit ( self , X , y = None ) : if self . normalize : X = normalize ( X ) self . _check_force_weights ( ) random_state = check_random_state ( self . random_state ) X = self . _check_fit_data ( X ) ( self . cluster_centers_ , self . labels_ , self . inertia_ , self . weights_ , self . concentrations_ , self . posterior_ , ) = movMF ( X , self . n_clusters , posterior_type = self . posterior_type , force_weights = self . force_weights , n_init = self . n_init , n_jobs = self . n_jobs , max_iter = self . max_iter , verbose = self . verbose , init = self . init , random_state = random_state , tol = self . tol , copy_x = self . copy_x , ) return self
Compute mixture of von Mises Fisher clustering .
205
11
248,941
def transform ( self , X , y = None ) : if self . normalize : X = normalize ( X ) check_is_fitted ( self , "cluster_centers_" ) X = self . _check_test_data ( X ) return self . _transform ( X )
Transform X to a cluster-distance space. In the new space, each dimension is the cosine distance to the cluster centers. Note that even if X is sparse, the array returned by transform will typically be dense.
63
42
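For unit-normalized rows, the cosine distance to each center is one minus the dot product, so the cluster-distance space described above can be sketched directly in numpy (illustrative only, not the estimator's code path):

import numpy as np

def cosine_cluster_distances(X, centers):
    # X: (n_samples, n_features), centers: (n_clusters, n_features), both l2-normalized row-wise
    return 1.0 - X @ centers.T   # shape (n_samples, n_clusters)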
248,942
def log_likelihood ( covariance , precision ) : assert covariance . shape == precision . shape dim , _ = precision . shape log_likelihood_ = ( - np . sum ( covariance * precision ) + fast_logdet ( precision ) - dim * np . log ( 2 * np . pi ) ) log_likelihood_ /= 2. return log_likelihood_
Computes the log - likelihood between the covariance and precision estimate .
82
14
248,943
def kl_loss ( covariance , precision ) : assert covariance . shape == precision . shape dim , _ = precision . shape logdet_p_dot_c = fast_logdet ( np . dot ( precision , covariance ) ) return 0.5 * ( np . sum ( precision * covariance ) - logdet_p_dot_c - dim )
Computes the KL divergence between precision estimate and reference covariance .
79
13
248,944
def ebic ( covariance , precision , n_samples , n_features , gamma = 0 ) : l_theta = - np . sum ( covariance * precision ) + fast_logdet ( precision ) l_theta *= n_features / 2. # if something goes wrong with fast_logdet, return large value if np . isinf ( l_theta ) or np . isnan ( l_theta ) : return 1e10 mask = np . abs ( precision . flat ) > np . finfo ( precision . dtype ) . eps precision_nnz = ( np . sum ( mask ) - n_features ) / 2.0 # lower off diagonal tri return ( - 2.0 * l_theta + precision_nnz * np . log ( n_samples ) + 4.0 * precision_nnz * np . log ( n_features ) * gamma )
Extended Bayesian Information Criteria for model selection .
196
11
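A toy numpy sketch of the same EBIC computation, using np.linalg.slogdet in place of fast_logdet; variable names mirror the function above and the scaling of the likelihood term follows it as well.

import numpy as np

def ebic_score(S, Theta, n_samples, n_features, gamma=0.0):
    # Gaussian log-likelihood term: logdet(Theta) - tr(S @ Theta), scaled as in the function above
    _, logdet = np.linalg.slogdet(Theta)
    l_theta = (-np.sum(S * Theta) + logdet) * n_features / 2.0
    # number of nonzero off-diagonal entries in one triangle
    nnz = (np.count_nonzero(np.abs(Theta) > np.finfo(Theta.dtype).eps) - n_features) / 2.0
    return -2.0 * l_theta + nnz * np.log(n_samples) + 4.0 * nnz * np.log(n_features) * gamma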
248,945
def lattice ( prng , n_features , alpha , random_sign = False , low = 0.3 , high = 0.7 ) : degree = int ( 1 + np . round ( alpha * n_features / 2. ) ) if random_sign : sign_row = - 1.0 * np . ones ( degree ) + 2 * ( prng . uniform ( low = 0 , high = 1 , size = degree ) > .5 ) else : sign_row = - 1.0 * np . ones ( degree ) # in the *very unlikely* event that we draw a bad row that sums to zero # (which is only possible when random_sign=True), we try again up to # MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of # values something is probably wrong and we raise. MAX_ATTEMPTS = 5 attempt = 0 row = np . zeros ( ( n_features , ) ) while np . sum ( row ) == 0 and attempt < MAX_ATTEMPTS : row = np . zeros ( ( n_features , ) ) row [ 1 : 1 + degree ] = sign_row * prng . uniform ( low = low , high = high , size = degree ) attempt += 1 if np . sum ( row ) == 0 : raise Exception ( "InvalidLattice" , "Rows sum to 0." ) return # sum-normalize and keep signs row /= np . abs ( np . sum ( row ) ) return sp . linalg . toeplitz ( c = row , r = row )
Returns the adjacency matrix for a lattice network .
340
12
248,946
def _to_diagonally_dominant ( mat ) : mat += np . diag ( np . sum ( mat != 0 , axis = 1 ) + 0.01 ) return mat
Make matrix unweighted diagonally dominant using the Laplacian .
40
16
248,947
def _to_diagonally_dominant_weighted ( mat ) : mat += np . diag ( np . sum ( np . abs ( mat ) , axis = 1 ) + 0.01 ) return mat
Make matrix weighted diagonally dominant using the Laplacian .
46
14
248,948
def _rescale_to_unit_diagonals ( mat ) : d = np . sqrt ( np . diag ( mat ) ) mat /= d mat /= d [ : , np . newaxis ] return mat
Rescale matrix to have unit diagonals .
49
11
248,949
def create ( self , n_features , alpha ) : n_block_features = int ( np . floor ( 1. * n_features / self . n_blocks ) ) if n_block_features * self . n_blocks != n_features : raise ValueError ( ( "Error: n_features {} not divisible by n_blocks {}." "Use n_features = n_blocks * int" ) . format ( n_features , self . n_blocks ) ) return block_adj = self . prototype_adjacency ( n_block_features , alpha ) adjacency = blocks ( self . prng , block_adj , n_blocks = self . n_blocks , chain_blocks = self . chain_blocks ) precision = self . to_precision ( adjacency ) covariance = self . to_covariance ( precision ) return covariance , precision , adjacency
Build a new graph with block structure .
195
8
248,950
def _sample_mvn ( n_samples , cov , prng ) : n_features , _ = cov . shape return prng . multivariate_normal ( np . zeros ( n_features ) , cov , size = n_samples )
Draw a multivariate normal sample from the graph defined by cov .
56
13
248,951
def _fully_random_weights ( n_features , lam_scale , prng ) : weights = np . zeros ( ( n_features , n_features ) ) n_off_diag = int ( ( n_features ** 2 - n_features ) / 2 ) weights [ np . triu_indices ( n_features , k = 1 ) ] = 0.1 * lam_scale * prng . randn ( n_off_diag ) + ( 0.25 * lam_scale ) weights [ weights < 0 ] = 0 weights = weights + weights . T return weights
Generate a symmetric random matrix with zeros along the diagonal .
128
14
248,952
def _fix_weights ( weight_fun , * args ) : weights = weight_fun ( * args ) # TODO: fix this # disable checks for now return weights # if positive semidefinite, then we're good as is if _check_psd ( weights ) : return weights # make diagonally dominant off_diag_sums = np . sum ( weights , axis = 1 ) # NOTE: assumes diag is zero mod_mat = np . linalg . inv ( np . sqrt ( np . diag ( off_diag_sums ) ) ) return np . dot ( mod_mat , weights , mod_mat )
Ensure random weight matrix is valid .
141
8
248,953
def _fit ( indexed_params , penalization , lam , lam_perturb , lam_scale_ , estimator , penalty_name , subsample , bootstrap , prng , X = None , ) : index = indexed_params if isinstance ( X , np . ndarray ) : local_X = X else : local_X = X . value n_samples , n_features = local_X . shape prec_is_real = False while not prec_is_real : boot_lam = None if penalization == "subsampling" : pass elif penalization == "random" : boot_lam = _fix_weights ( _random_weights , n_features , lam , lam_perturb , prng ) elif penalization == "fully-random" : boot_lam = _fix_weights ( _fully_random_weights , n_features , lam_scale_ , prng ) else : raise NotImplementedError ( ( "Only penalization = 'subsampling', " "'random', and 'fully-random' have " "been implemented. Found {}." . format ( penalization ) ) ) # new instance of estimator new_estimator = clone ( estimator ) if boot_lam is not None : new_estimator . set_params ( * * { penalty_name : boot_lam } ) # fit estimator num_subsamples = int ( subsample * n_samples ) rp = bootstrap ( n_samples , num_subsamples , prng ) new_estimator . fit ( local_X [ rp , : ] ) # check that new_estimator.precision_ is real # if not, skip this boot_lam and try again if isinstance ( new_estimator . precision_ , list ) : prec_real_bools = [ ] for prec in new_estimator . precision_ : prec_real_bools . append ( np . all ( np . isreal ( prec ) ) ) prec_is_real = np . all ( np . array ( prec_real_bools ) is True ) elif isinstance ( new_estimator . precision_ , np . ndarray ) : prec_is_real = np . all ( np . isreal ( new_estimator . precision_ ) ) else : raise ValueError ( "Estimator returned invalid precision_." ) return index , ( boot_lam , rp , new_estimator )
Wrapper function outside of instance for fitting a single model average trial .
538
14
248,954
def _spark_map ( fun , indexed_param_grid , sc , seed , X_bc ) : def _wrap_random_state ( split_index , partition ) : prng = np . random . RandomState ( seed + split_index ) yield map ( partial ( fun , prng = prng , X = X_bc ) , partition ) par_param_grid = sc . parallelize ( indexed_param_grid ) indexed_results = par_param_grid . mapPartitionsWithIndex ( _wrap_random_state ) . collect ( ) return [ item for sublist in indexed_results for item in sublist ]
We cannot pass a RandomState instance to each spark worker, since it will behave identically across partitions. Instead, we explicitly handle the partitions with a newly seeded instance.
137
34
248,955
def quic_graph_lasso_ebic_manual ( X , gamma = 0 ) : print ( "QuicGraphicalLasso (manual EBIC) with:" ) print ( " mode: path" ) print ( " gamma: {}" . format ( gamma ) ) model = QuicGraphicalLasso ( lam = 1.0 , mode = "path" , init_method = "cov" , path = np . logspace ( np . log10 ( 0.01 ) , np . log10 ( 1.0 ) , num = 100 , endpoint = True ) , ) model . fit ( X ) ebic_index = model . ebic_select ( gamma = gamma ) covariance_ = model . covariance_ [ ebic_index ] precision_ = model . precision_ [ ebic_index ] lam_ = model . lam_at_index ( ebic_index ) print ( " len(path lams): {}" . format ( len ( model . path_ ) ) ) print ( " lam_scale_: {}" . format ( model . lam_scale_ ) ) print ( " lam_: {}" . format ( lam_ ) ) print ( " ebic_index: {}" . format ( ebic_index ) ) return covariance_ , precision_ , lam_
Run QuicGraphicalLasso with mode = path and gamma ; use EBIC criteria for model selection .
282
22
248,956
def quic_graph_lasso_ebic ( X , gamma = 0 ) : print ( "QuicGraphicalLassoEBIC with:" ) print ( " mode: path" ) print ( " gamma: {}" . format ( gamma ) ) model = QuicGraphicalLassoEBIC ( lam = 1.0 , init_method = "cov" , gamma = gamma ) model . fit ( X ) print ( " len(path lams): {}" . format ( len ( model . path_ ) ) ) print ( " lam_scale_: {}" . format ( model . lam_scale_ ) ) print ( " lam_: {}" . format ( model . lam_ ) ) return model . covariance_ , model . precision_ , model . lam_
Run QuicGraphicalLassoEBIC with gamma .
167
12
248,957
def empirical ( X ) : print ( "Empirical" ) cov = np . dot ( X . T , X ) / n_samples return cov , np . linalg . inv ( cov )
Compute empirical covariance as baseline estimator .
45
10
248,958
def sk_ledoit_wolf ( X ) : print ( "Ledoit-Wolf (sklearn)" ) lw_cov_ , _ = ledoit_wolf ( X ) lw_prec_ = np . linalg . inv ( lw_cov_ ) return lw_cov_ , lw_prec_
Estimate inverse covariance via scikit - learn ledoit_wolf function .
76
17
248,959
def _nonzero_intersection ( m , m_hat ) : n_features , _ = m . shape m_no_diag = m . copy ( ) m_no_diag [ np . diag_indices ( n_features ) ] = 0 m_hat_no_diag = m_hat . copy ( ) m_hat_no_diag [ np . diag_indices ( n_features ) ] = 0 m_hat_nnz = len ( np . nonzero ( m_hat_no_diag . flat ) [ 0 ] ) m_nnz = len ( np . nonzero ( m_no_diag . flat ) [ 0 ] ) intersection_nnz = len ( np . intersect1d ( np . nonzero ( m_no_diag . flat ) [ 0 ] , np . nonzero ( m_hat_no_diag . flat ) [ 0 ] ) ) return m_nnz , m_hat_nnz , intersection_nnz
Count the number of nonzeros in and between m and m_hat .
221
16
248,960
def support_false_positive_count ( m , m_hat ) : m_nnz , m_hat_nnz , intersection_nnz = _nonzero_intersection ( m , m_hat ) return int ( ( m_hat_nnz - intersection_nnz ) / 2.0 )
Count the number of false positive support elements in m_hat in one triangle not including the diagonal .
67
20
248,961
def support_false_negative_count ( m , m_hat ) : m_nnz , m_hat_nnz , intersection_nnz = _nonzero_intersection ( m , m_hat ) return int ( ( m_nnz - intersection_nnz ) / 2.0 )
Count the number of false negative support elements in m_hat in one triangle not including the diagonal .
65
20
248,962
def support_difference_count ( m , m_hat ) : m_nnz , m_hat_nnz , intersection_nnz = _nonzero_intersection ( m , m_hat ) return int ( ( m_nnz + m_hat_nnz - ( 2 * intersection_nnz ) ) / 2.0 )
Count the number of different elements in the support in one triangle not including the diagonal .
75
17
248,963
def has_exact_support ( m , m_hat ) : m_nnz , m_hat_nnz , intersection_nnz = _nonzero_intersection ( m , m_hat ) return int ( ( m_nnz + m_hat_nnz - ( 2 * intersection_nnz ) ) == 0 )
Returns 1 if support_difference_count is zero, 0 otherwise.
73
14
248,964
def has_approx_support ( m , m_hat , prob = 0.01 ) : m_nz = np . flatnonzero ( np . triu ( m , 1 ) ) m_hat_nz = np . flatnonzero ( np . triu ( m_hat , 1 ) ) upper_diagonal_mask = np . flatnonzero ( np . triu ( np . ones ( m . shape ) , 1 ) ) not_m_nz = np . setdiff1d ( upper_diagonal_mask , m_nz ) intersection = np . in1d ( m_hat_nz , m_nz ) # true positives not_intersection = np . in1d ( m_hat_nz , not_m_nz ) # false positives true_positive_rate = 0.0 if len ( m_nz ) : true_positive_rate = 1. * np . sum ( intersection ) / len ( m_nz ) true_negative_rate = 1. - true_positive_rate false_positive_rate = 0.0 if len ( not_m_nz ) : false_positive_rate = 1. * np . sum ( not_intersection ) / len ( not_m_nz ) return int ( np . less_equal ( true_negative_rate + false_positive_rate , prob ) )
Returns 1 if the model selection error is less than or equal to the prob rate, 0 otherwise.
289
17
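The support-recovery counters above all reduce to bookkeeping on off-diagonal supports. A self-contained toy check of that bookkeeping in plain numpy (not the library functions themselves): the estimate below adds one spurious edge.

import numpy as np

m     = np.array([[1.0, 0.5, 0.0],
                  [0.5, 1.0, 0.0],
                  [0.0, 0.0, 1.0]])   # true support: one edge (0, 1)
m_hat = np.array([[1.0, 0.5, 0.0],
                  [0.5, 1.0, 0.3],
                  [0.0, 0.3, 1.0]])   # estimated support adds the edge (1, 2)

def upper_support(a):
    return set(zip(*np.nonzero(np.triu(a, k=1))))

true_edges, est_edges = upper_support(m), upper_support(m_hat)
print(len(est_edges - true_edges))   # false positives: 1
print(len(true_edges - est_edges))   # false negatives: 0
print(len(true_edges ^ est_edges))   # support difference: 1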
248,965
def _validate_path ( path ) : if path is None : return None new_path = np . array ( sorted ( set ( path ) , reverse = True ) ) if new_path [ 0 ] != path [ 0 ] : print ( "Warning: Path must be sorted largest to smallest." ) return new_path
Sorts path values from largest to smallest .
68
9
248,966
def ebic ( self , gamma = 0 ) : if not self . is_fitted_ : return if not isinstance ( self . precision_ , list ) : return metrics . ebic ( self . sample_covariance_ , self . precision_ , self . n_samples_ , self . n_features_ , gamma = gamma , ) ebic_scores = [ ] for lidx , lam in enumerate ( self . path_ ) : ebic_scores . append ( metrics . ebic ( self . sample_covariance_ , self . precision_ [ lidx ] , self . n_samples_ , self . n_features_ , gamma = gamma , ) ) return np . array ( ebic_scores )
Compute EBIC scores for each model. If the model is not a path, returns a scalar score value.
162
22
248,967
def ebic_select ( self , gamma = 0 ) : if not isinstance ( self . precision_ , list ) : raise ValueError ( "EBIC requires multiple models to select from." ) return if not self . is_fitted_ : return ebic_scores = self . ebic ( gamma = gamma ) min_indices = np . where ( np . abs ( ebic_scores - ebic_scores . min ( ) ) < 1e-10 ) return np . max ( min_indices )
Uses Extended Bayesian Information Criteria for model selection .
112
12
248,968
def quic_graph_lasso ( X , num_folds , metric ) : print ( "QuicGraphicalLasso + GridSearchCV with:" ) print ( " metric: {}" . format ( metric ) ) search_grid = { "lam" : np . logspace ( np . log10 ( 0.01 ) , np . log10 ( 1.0 ) , num = 100 , endpoint = True ) , "init_method" : [ "cov" ] , "score_metric" : [ metric ] , } model = GridSearchCV ( QuicGraphicalLasso ( ) , search_grid , cv = num_folds , refit = True ) model . fit ( X ) bmodel = model . best_estimator_ print ( " len(cv_lams): {}" . format ( len ( search_grid [ "lam" ] ) ) ) print ( " cv-lam: {}" . format ( model . best_params_ [ "lam" ] ) ) print ( " lam_scale_: {}" . format ( bmodel . lam_scale_ ) ) print ( " lam_: {}" . format ( bmodel . lam_ ) ) return bmodel . covariance_ , bmodel . precision_ , bmodel . lam_
Run QuicGraphicalLasso with mode = default and use standard scikit GridSearchCV to find the best lambda .
278
26
248,969
def quic_graph_lasso_cv ( X , metric ) : print ( "QuicGraphicalLassoCV with:" ) print ( " metric: {}" . format ( metric ) ) model = QuicGraphicalLassoCV ( cv = 2 , # cant deal w more folds at small size n_refinements = 6 , n_jobs = 1 , init_method = "cov" , score_metric = metric , ) model . fit ( X ) print ( " len(cv_lams): {}" . format ( len ( model . cv_lams_ ) ) ) print ( " lam_scale_: {}" . format ( model . lam_scale_ ) ) print ( " lam_: {}" . format ( model . lam_ ) ) return model . covariance_ , model . precision_ , model . lam_
Run QuicGraphicalLassoCV on data with metric of choice .
185
15
248,970
def graph_lasso ( X , num_folds ) : print ( "GraphLasso (sklearn)" ) model = GraphLassoCV ( cv = num_folds ) model . fit ( X ) print ( " lam_: {}" . format ( model . alpha_ ) ) return model . covariance_ , model . precision_ , model . alpha_
Estimate inverse covariance via scikit - learn GraphLassoCV class .
79
17
248,971
def _quic_path ( X , path , X_test = None , lam = 0.5 , tol = 1e-6 , max_iter = 1000 , Theta0 = None , Sigma0 = None , method = "quic" , verbose = 0 , score_metric = "log_likelihood" , init_method = "corrcoef" , ) : S , lam_scale_ = _init_coefs ( X , method = init_method ) path = path . copy ( order = "C" ) if method == "quic" : ( precisions_ , covariances_ , opt_ , cputime_ , iters_ , duality_gap_ ) = quic ( S , lam , mode = "path" , tol = tol , max_iter = max_iter , Theta0 = Theta0 , Sigma0 = Sigma0 , path = path , msg = verbose , ) else : raise NotImplementedError ( "Only method='quic' has been implemented." ) if X_test is not None : S_test , lam_scale_test = _init_coefs ( X_test , method = init_method ) path_errors = [ ] for lidx , lam in enumerate ( path ) : path_errors . append ( _compute_error ( S_test , covariances_ [ lidx ] , precisions_ [ lidx ] , score_metric = score_metric , ) ) scores_ = [ - e for e in path_errors ] return covariances_ , precisions_ , scores_ return covariances_ , precisions_
Wrapper to compute path for example X .
360
9
248,972
def lam_at_index ( self , lidx ) : if self . path_ is None : return self . lam * self . lam_scale_ return self . lam * self . lam_scale_ * self . path_ [ lidx ]
Compute the scaled lambda used at index lidx .
52
11
248,973
def _compute_ranks ( X , winsorize = False , truncation = None , verbose = True ) : n_samples , n_features = X . shape Xrank = np . zeros ( shape = X . shape ) if winsorize : if truncation is None : truncation = 1 / ( 4 * np . power ( n_samples , 0.25 ) * np . sqrt ( np . pi * np . log ( n_samples ) ) ) elif truncation > 1 : truncation = np . min ( 1.0 , truncation ) for col in np . arange ( n_features ) : Xrank [ : , col ] = rankdata ( X [ : , col ] , method = "average" ) Xrank [ : , col ] /= n_samples if winsorize : if n_samples > 100 * n_features : Xrank [ : , col ] = n_samples * Xrank [ : , col ] / ( n_samples + 1 ) else : lower_truncate = Xrank [ : , col ] <= truncation upper_truncate = Xrank [ : , col ] > 1 - truncation Xrank [ lower_truncate , col ] = truncation Xrank [ upper_truncate , col ] = 1 - truncation return Xrank
Transform each column into ranked data. Tied ranks are averaged. Ranks can optionally be winsorized as described in Liu 2009; otherwise this returns Tsukahara's scaled rank-based Z-estimator.
292
41
248,974
def spearman_correlation ( X , rowvar = False ) : Xrank = _compute_ranks ( X ) rank_correlation = np . corrcoef ( Xrank , rowvar = rowvar ) return 2 * np . sin ( rank_correlation * np . pi / 6 )
Computes the Spearman correlation estimate. This is effectively a bias-corrected Pearson correlation between rank-transformed columns of X.
66
24
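The 2 * sin(pi * rho / 6) factor is the standard correction that maps a Spearman rank correlation back toward the underlying Pearson correlation for Gaussian data. A small scipy/numpy sketch of the same idea, independent of the helper above:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
X = rng.multivariate_normal([0.0, 0.0], [[1.0, 0.6], [0.6, 1.0]], size=5000)

rho_s, _ = stats.spearmanr(X[:, 0], X[:, 1])   # rank correlation
corrected = 2 * np.sin(np.pi * rho_s / 6)      # bias-corrected estimate of the Pearson correlation
print(rho_s, corrected, np.corrcoef(X.T)[0, 1])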
248,975
def kendalltau_correlation ( X , rowvar = False , weighted = False ) : if rowvar : X = X . T _ , n_features = X . shape rank_correlation = np . eye ( n_features ) for row in np . arange ( n_features ) : for col in np . arange ( 1 + row , n_features ) : if weighted : rank_correlation [ row , col ] , _ = weightedtau ( X [ : , row ] , X [ : , col ] , rank = False ) else : rank_correlation [ row , col ] , _ = kendalltau ( X [ : , row ] , X [ : , col ] ) rank_correlation = np . triu ( rank_correlation , 1 ) + rank_correlation . T return np . sin ( rank_correlation * np . pi / 2 )
Computes Kendall's tau correlation estimate. The option to use scipy.stats.weightedtau is not recommended, as the implementation does not appear to handle ties correctly.
194
38
248,976
def version ( self ) : request_params = dict ( self . request_params ) request_url = str ( self . request_url ) result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) . json ( ) return result [ 'message-version' ]
This attribute retrieves the API version.
78
7
248,977
def count ( self ) : request_params = dict ( self . request_params ) request_url = str ( self . request_url ) request_params [ 'rows' ] = 0 result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) . json ( ) return int ( result [ 'message' ] [ 'total-results' ] )
This method retrieves the total number of records resulting from a given query.
96
13
248,978
def url ( self ) : request_params = self . _escaped_pagging ( ) sorted_request_params = sorted ( [ ( k , v ) for k , v in request_params . items ( ) ] ) req = requests . Request ( 'get' , self . request_url , params = sorted_request_params ) . prepare ( ) return req . url
This attribute retrieves the URL that will be used as an HTTP request to the Crossref API.
80
18
248,979
def doi ( self , doi , only_message = True ) : request_url = build_url_endpoint ( '/' . join ( [ self . ENDPOINT , doi ] ) ) request_params = { } result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) if result . status_code == 404 : return result = result . json ( ) return result [ 'message' ] if only_message is True else result
This method retrieves the DOI metadata related to a given DOI number.
115
13
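This looks like the Works class from the crossrefapi client; if so, a typical lookup might read as follows (the DOI string is only an example).

# assuming the crossrefapi package, where this method lives on the Works class
from crossref.restful import Works

works = Works()
metadata = works.doi('10.1590/0102-311x00133115')   # example DOI
if metadata is None:
    print('DOI not found (404)')
else:
    print(metadata.get('title'))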
248,980
def doi_exists ( self , doi ) : request_url = build_url_endpoint ( '/' . join ( [ self . ENDPOINT , doi ] ) ) request_params = { } result = self . do_http_request ( 'get' , request_url , data = request_params , only_headers = True , custom_header = str ( self . etiquette ) ) if result . status_code == 404 : return False return True
This method retrieves a boolean according to the existence of a Crossref DOI number. It returns False if the API returns a 404 status code.
99
28
248,981
def works ( self , funder_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( funder_id ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given funder.
44
14
248,982
def works ( self , member_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( member_id ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given member.
42
13
248,983
def all ( self ) : request_url = build_url_endpoint ( self . ENDPOINT , self . context ) request_params = dict ( self . request_params ) result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) if result . status_code == 404 : raise StopIteration ( ) result = result . json ( ) for item in result [ 'message' ] [ 'items' ] : yield item
This method retrieves an iterator with all the available types.
114
11
248,984
def works ( self , type_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( type_id ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given type.
42
13
248,985
def works ( self , prefix_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( prefix_id ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given prefix.
42
13
248,986
def works ( self , issn ) : context = '%s/%s' % ( self . ENDPOINT , str ( issn ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given journal.
40
13
248,987
def register_doi ( self , submission_id , request_xml ) : endpoint = self . get_endpoint ( 'deposit' ) files = { 'mdFile' : ( '%s.xml' % submission_id , request_xml ) } params = { 'operation' : 'doMDUpload' , 'login_id' : self . api_user , 'login_passwd' : self . api_key } result = self . do_http_request ( 'post' , endpoint , data = params , files = files , timeout = 10 , custom_header = str ( self . etiquette ) ) return result
This method registers a new DOI number in Crossref or updates some DOI metadata.
134
15
248,988
def _find_plugin_dir ( module_type ) : for install_dir in _get_plugin_install_dirs ( ) : candidate = os . path . join ( install_dir , module_type ) if os . path . isdir ( candidate ) : return candidate else : raise PluginCandidateError ( 'No plugin found for `{}` module in paths:\n{}' . format ( module_type , '\n' . join ( _get_plugin_install_dirs ( ) ) ) )
Find the directory containing the plugin definition for the given type . Do this by searching all the paths where plugins can live for a dir that matches the type name .
112
32
248,989
def merged_args_dicts ( global_args , subcommand_args ) : merged = global_args . copy ( ) for key , val in subcommand_args . items ( ) : if key not in merged : merged [ key ] = val elif type ( merged [ key ] ) is type ( val ) is bool : merged [ key ] = merged [ key ] or val else : raise RuntimeError ( "Unmergable args." ) return merged
We deal with docopt args from the top-level peru parse and the subcommand parse. We don't want False values for a flag in the subcommand to override True values if that flag was given at the top level. This function specifically handles that case.
97
54
248,990
def force_utf8_in_ascii_mode_hack ( ) : if sys . stdout . encoding == 'ANSI_X3.4-1968' : sys . stdout = open ( sys . stdout . fileno ( ) , mode = 'w' , encoding = 'utf8' , buffering = 1 ) sys . stderr = open ( sys . stderr . fileno ( ) , mode = 'w' , encoding = 'utf8' , buffering = 1 )
In systems without a UTF8 locale configured, Python will default to ASCII mode for stdout and stderr. This causes our fancy display to fail with encoding errors. In particular, you run into this if you try to run peru inside of Docker. This is a hack to force emitting UTF8 in that case. Hopefully it doesn't break anything important.
110
71
248,991
async def parse_target ( self , runtime , target_str ) : pipeline_parts = target_str . split ( RULE_SEPARATOR ) module = await self . resolve_module ( runtime , pipeline_parts [ 0 ] , target_str ) rules = [ ] for part in pipeline_parts [ 1 : ] : rule = await self . resolve_rule ( runtime , part ) rules . append ( rule ) return module , tuple ( rules )
A target is a pipeline of a module into zero or more rules and each module and rule can itself be scoped with zero or more module names .
96
30
248,992
def _maybe_quote ( val ) : assert isinstance ( val , str ) , 'We should never set non-string values.' needs_quoting = False try : int ( val ) needs_quoting = True except Exception : pass try : float ( val ) needs_quoting = True except Exception : pass if needs_quoting : return '"{}"' . format ( val ) else : return val
All of our values should be strings. Usually those can be passed in as bare words, but if they're parseable as an int or float we need to quote them.
87
34
248,993
async def gather_coalescing_exceptions ( coros , display , * , verbose ) : exceptions = [ ] reprs = [ ] async def catching_wrapper ( coro ) : try : return ( await coro ) except Exception as e : exceptions . append ( e ) if isinstance ( e , PrintableError ) and not verbose : reprs . append ( e . message ) else : reprs . append ( traceback . format_exc ( ) ) return None # Suppress a deprecation warning in Python 3.5, while continuing to support # 3.3 and early 3.4 releases. if hasattr ( asyncio , 'ensure_future' ) : schedule = getattr ( asyncio , 'ensure_future' ) else : schedule = getattr ( asyncio , 'async' ) futures = [ schedule ( catching_wrapper ( coro ) ) for coro in coros ] results = await asyncio . gather ( * futures ) if exceptions : raise GatheredExceptions ( exceptions , reprs ) else : return results
The tricky thing about running multiple coroutines in parallel is what we're supposed to do when one of them raises an exception. The approach we're using here is to catch exceptions and keep waiting for other tasks to finish. At the end, we re-raise a GatheredExceptions error if any exceptions were caught.
226
63
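The same catch-and-keep-waiting pattern in a self-contained asyncio sketch; it targets modern Python, so it schedules coroutines directly with asyncio.gather instead of the legacy ensure_future/async dance, and it collapses the gathered failures into a single RuntimeError rather than a custom exception type.

import asyncio

async def gather_collecting_errors(coros):
    errors = []

    async def wrap(coro):
        try:
            return await coro
        except Exception as e:
            errors.append(e)   # remember the failure but let the other tasks finish
            return None

    results = await asyncio.gather(*(wrap(c) for c in coros))
    if errors:
        raise RuntimeError('%d of %d tasks failed: %r' % (len(errors), len(results), errors))
    return results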
248,994
async def create_subprocess_with_handle ( command , display_handle , * , shell = False , cwd , * * kwargs ) : # We're going to get chunks of bytes from the subprocess, and it's possible # that one of those chunks ends in the middle of a unicode character. An # incremental decoder keeps those dangling bytes around until the next # chunk arrives, so that split characters get decoded properly. Use # stdout's encoding, but provide a default for the case where stdout has # been redirected to a StringIO. (This happens in tests.) encoding = sys . stdout . encoding or 'utf8' decoder_factory = codecs . getincrementaldecoder ( encoding ) decoder = decoder_factory ( errors = 'replace' ) output_copy = io . StringIO ( ) # Display handles are context managers. Entering and exiting the display # handle lets the display know when the job starts and stops. with display_handle : stdin = asyncio . subprocess . DEVNULL stdout = asyncio . subprocess . PIPE stderr = asyncio . subprocess . STDOUT if shell : proc = await asyncio . create_subprocess_shell ( command , stdin = stdin , stdout = stdout , stderr = stderr , cwd = cwd , * * kwargs ) else : proc = await asyncio . create_subprocess_exec ( * command , stdin = stdin , stdout = stdout , stderr = stderr , cwd = cwd , * * kwargs ) # Read all the output from the subprocess as its comes in. while True : outputbytes = await proc . stdout . read ( 4096 ) if not outputbytes : break outputstr = decoder . decode ( outputbytes ) outputstr_unified = _unify_newlines ( outputstr ) display_handle . write ( outputstr_unified ) output_copy . write ( outputstr_unified ) returncode = await proc . wait ( ) if returncode != 0 : raise subprocess . CalledProcessError ( returncode , command , output_copy . getvalue ( ) ) if hasattr ( decoder , 'buffer' ) : # The utf8 decoder has this attribute, but some others don't. assert not decoder . buffer , 'decoder nonempty: ' + repr ( decoder . buffer ) return output_copy . getvalue ( )
Writes subprocess output to a display handle as it comes in and also returns a copy of it as a string. Throws if the subprocess returns an error. Note that cwd is a required keyword-only argument, on the theory that peru should never start child processes wherever I happen to be running right now.
533
64
248,995
def raises_gathered ( error_type ) : container = RaisesGatheredContainer ( ) try : yield container except GatheredExceptions as e : # Make sure there is exactly one exception. if len ( e . exceptions ) != 1 : raise inner = e . exceptions [ 0 ] # Make sure the exception is the right type. if not isinstance ( inner , error_type ) : raise # Success. container . exception = inner
For use in tests . Many tests expect a single error to be thrown and want it to be of a specific type . This is a helper method for when that type is inside a gathered exception .
91
39
248,996
def get_request_filename ( request ) : # Check to see if a filename is specified in the HTTP headers. if 'Content-Disposition' in request . info ( ) : disposition = request . info ( ) [ 'Content-Disposition' ] pieces = re . split ( r'\s*;\s*' , disposition ) for piece in pieces : if piece . startswith ( 'filename=' ) : filename = piece [ len ( 'filename=' ) : ] # Strip exactly one " from each end. if filename . startswith ( '"' ) : filename = filename [ 1 : ] if filename . endswith ( '"' ) : filename = filename [ : - 1 ] # Interpret backslashed quotes. filename = filename . replace ( '\\"' , '"' ) return filename # If no filename was specified, pick a reasonable default. return os . path . basename ( urlsplit ( request . url ) . path ) or 'index.html'
Figure out the filename for an HTTP download .
207
9
248,997
def _extract_optional_list_field ( blob , name ) : value = _optional_list ( typesafe_pop ( blob , name , [ ] ) ) if value is None : raise ParserError ( '"{}" field must be a string or a list.' . format ( name ) ) return value
Handle optional fields that can be either a string or a list of strings .
67
15
248,998
def pop_all ( self ) : new_stack = type ( self ) ( ) new_stack . _exit_callbacks = self . _exit_callbacks self . _exit_callbacks = deque ( ) return new_stack
Preserve the context stack by transferring it to a new instance .
51
13
248,999
def callback ( self , callback , * args , * * kwds ) : _exit_wrapper = self . _create_cb_wrapper ( callback , * args , * * kwds ) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper . __wrapped__ = callback self . _push_exit_callback ( _exit_wrapper ) return callback
Registers an arbitrary callback and arguments . Cannot suppress exceptions .
98
12