idx int64 0 252k | question stringlengths 48 5.28k | target stringlengths 5 1.23k |
|---|---|---|
249,900 | def session ( context , data ) : context . http . reset ( ) user = context . get ( 'user' ) password = context . get ( 'password' ) if user is not None and password is not None : context . http . session . auth = ( user , password ) user_agent = context . get ( 'user_agent' ) if user_agent is not None : context . http ... | Set some HTTP parameters for all subsequent requests . |
249,901 | def save ( cls , crawler , stage , level , run_id , error = None , message = None ) : event = { 'stage' : stage . name , 'level' : level , 'timestamp' : pack_now ( ) , 'error' : error , 'message' : message } data = dump_json ( event ) conn . lpush ( make_key ( crawler , "events" ) , data ) conn . lpush ( make_key ( cra... | Create an event possibly based on an exception . |
249,902 | def get_stage_events ( cls , crawler , stage_name , start , end , level = None ) : key = make_key ( crawler , "events" , stage_name , level ) return cls . event_list ( key , start , end ) | events from a particular stage |
249,903 | def get_run_events ( cls , crawler , run_id , start , end , level = None ) : key = make_key ( crawler , "events" , run_id , level ) return cls . event_list ( key , start , end ) | Events from a particular run |
249,904 | def soviet_checksum ( code ) : def sum_digits ( code , offset = 1 ) : total = 0 for digit , index in zip ( code [ : 7 ] , count ( offset ) ) : total += int ( digit ) * index summed = ( total / 11 * 11 ) return total - summed check = sum_digits ( code , 1 ) if check == 10 : check = sum_digits ( code , 3 ) if check == 10... | Courtesy of Sir Vlad Lavrov . |
249,905 | def search_results_total ( html , xpath , check , delimiter ) : for container in html . findall ( xpath ) : if check in container . findtext ( '.' ) : text = container . findtext ( '.' ) . split ( delimiter ) total = int ( text [ - 1 ] . strip ( ) ) return total | Get the total number of results from the DOM of a search index . |
249,906 | def search_results_last_url ( html , xpath , label ) : for container in html . findall ( xpath ) : if container . text_content ( ) . strip ( ) == label : return container . find ( './/a' ) . get ( 'href' ) | Get the URL of the last button in a search results listing . |
249,907 | def op_count ( cls , crawler , stage = None ) : if stage : total_ops = conn . get ( make_key ( crawler , stage ) ) else : total_ops = conn . get ( make_key ( crawler , "total_ops" ) ) return unpack_int ( total_ops ) | Total operations performed for this crawler |
249,908 | def index ( ) : crawlers = [ ] for crawler in manager : data = Event . get_counts ( crawler ) data [ 'last_active' ] = crawler . last_run data [ 'total_ops' ] = crawler . op_count data [ 'running' ] = crawler . is_running data [ 'crawler' ] = crawler crawlers . append ( data ) return render_template ( 'index.html' , cr... | Generate a list of all crawlers alphabetically with op counts . |
249,909 | def clean_html ( context , data ) : doc = _get_html_document ( context , data ) if doc is None : context . emit ( data = data ) return remove_paths = context . params . get ( 'remove_paths' ) for path in ensure_list ( remove_paths ) : for el in doc . findall ( path ) : el . drop_tree ( ) html_text = html . tostring ( d... | Clean an HTML DOM and store the changed version . |
249,910 | def execute ( cls , stage , state , data , next_allowed_exec_time = None ) : try : context = Context . from_state ( state , stage ) now = datetime . utcnow ( ) if next_allowed_exec_time and now < next_allowed_exec_time : Queue . queue ( stage , state , data , delay = next_allowed_exec_time ) elif context . crawler . di... | Execute the operation rate limiting allowing . |
249,911 | def _recursive_upsert ( context , params , data ) : children = params . get ( "children" , { } ) nested_calls = [ ] for child_params in children : key = child_params . get ( "key" ) child_data_list = ensure_list ( data . pop ( key ) ) if isinstance ( child_data_list , dict ) : child_data_list = [ child_data_list ] if n... | Insert or update nested dicts recursively into db tables |
249,912 | def db ( context , data ) : table = context . params . get ( "table" , context . crawler . name ) params = context . params params [ "table" ] = table _recursive_upsert ( context , params , data ) | Insert or update data as a row into specified db table |
249,913 | def cli ( debug , cache , incremental ) : settings . HTTP_CACHE = cache settings . INCREMENTAL = incremental settings . DEBUG = debug if settings . DEBUG : logging . basicConfig ( level = logging . DEBUG ) else : logging . basicConfig ( level = logging . INFO ) init_memorious ( ) | Crawler framework for documents and structured scrapers . |
249,914 | def run ( crawler ) : crawler = get_crawler ( crawler ) crawler . run ( ) if is_sync_mode ( ) : TaskRunner . run_sync ( ) | Run a specified crawler . |
249,915 | def index ( ) : crawler_list = [ ] for crawler in manager : is_due = 'yes' if crawler . check_due ( ) else 'no' if crawler . disabled : is_due = 'off' crawler_list . append ( [ crawler . name , crawler . description , crawler . schedule , is_due , Queue . size ( crawler ) ] ) headers = [ 'Name' , 'Description' , 'Sched... | List the available crawlers . |
249,916 | def scheduled ( wait = False ) : manager . run_scheduled ( ) while wait : manager . run_scheduled ( ) time . sleep ( settings . SCHEDULER_INTERVAL ) | Run crawlers that are due . |
249,917 | def _get_directory_path ( context ) : path = os . path . join ( settings . BASE_PATH , 'store' ) path = context . params . get ( 'path' , path ) path = os . path . join ( path , context . crawler . name ) path = os . path . abspath ( os . path . expandvars ( path ) ) try : os . makedirs ( path ) except Exception : pass... | Get the storage path fro the output . |
249,918 | def directory ( context , data ) : with context . http . rehash ( data ) as result : if not result . ok : return content_hash = data . get ( 'content_hash' ) if content_hash is None : context . emit_warning ( "No content hash in data." ) return path = _get_directory_path ( context ) file_name = data . get ( 'file_name'... | Store the collected files to a given directory . |
249,919 | def seed ( context , data ) : for key in ( 'url' , 'urls' ) : for url in ensure_list ( context . params . get ( key ) ) : url = url % data context . emit ( data = { 'url' : url } ) | Initialize a crawler with a set of seed URLs . |
249,920 | def enumerate ( context , data ) : items = ensure_list ( context . params . get ( 'items' ) ) for item in items : data [ 'item' ] = item context . emit ( data = data ) | Iterate through a set of items and emit each one of them . |
249,921 | def sequence ( context , data ) : number = data . get ( 'number' , context . params . get ( 'start' , 1 ) ) stop = context . params . get ( 'stop' ) step = context . params . get ( 'step' , 1 ) delay = context . params . get ( 'delay' ) prefix = context . params . get ( 'tag' ) while True : tag = None if prefix is None... | Generate a sequence of numbers . |
249,922 | def fetch ( self ) : if self . _file_path is not None : return self . _file_path temp_path = self . context . work_path if self . _content_hash is not None : self . _file_path = storage . load_file ( self . _content_hash , temp_path = temp_path ) return self . _file_path if self . response is not None : self . _file_pa... | Lazily trigger download of the data when requested . |
249,923 | def make_key ( * criteria ) : criteria = [ stringify ( c ) for c in criteria ] criteria = [ c for c in criteria if c is not None ] if len ( criteria ) : return ':' . join ( criteria ) | Make a string key out of many criteria . |
249,924 | def random_filename ( path = None ) : filename = uuid4 ( ) . hex if path is not None : filename = os . path . join ( path , filename ) return filename | Make a UUID - based file name which is extremely unlikely to exist already . |
249,925 | def sample_vMF ( mu , kappa , num_samples ) : dim = len ( mu ) result = np . zeros ( ( num_samples , dim ) ) for nn in range ( num_samples ) : w = _sample_weight ( kappa , dim ) v = _sample_orthonormal_to ( mu ) result [ nn , : ] = v * np . sqrt ( 1. - w ** 2 ) + w * mu return result | Generate num_samples N - dimensional samples from von Mises Fisher distribution around center mu \ in R^N with concentration kappa . |
249,926 | def _sample_weight ( kappa , dim ) : dim = dim - 1 b = dim / ( np . sqrt ( 4. * kappa ** 2 + dim ** 2 ) + 2 * kappa ) x = ( 1. - b ) / ( 1. + b ) c = kappa * x + dim * np . log ( 1 - x ** 2 ) while True : z = np . random . beta ( dim / 2. , dim / 2. ) w = ( 1. - ( 1. + b ) * z ) / ( 1. - ( 1. - b ) * z ) u = np . rando... | Rejection sampling scheme for sampling distance from center on surface of the sphere . |
249,927 | def _sample_orthonormal_to ( mu ) : v = np . random . randn ( mu . shape [ 0 ] ) proj_mu_v = mu * np . dot ( mu , v ) / np . linalg . norm ( mu ) orthto = v - proj_mu_v return orthto / np . linalg . norm ( orthto ) | Sample point on sphere orthogonal to mu . |
249,928 | def _spherical_kmeans_single_lloyd ( X , n_clusters , sample_weight = None , max_iter = 300 , init = "k-means++" , verbose = False , x_squared_norms = None , random_state = None , tol = 1e-4 , precompute_distances = True , ) : random_state = check_random_state ( random_state ) sample_weight = _check_sample_weight ( X ,... | Modified from sklearn . cluster . k_means_ . k_means_single_lloyd . |
249,929 | def spherical_k_means ( X , n_clusters , sample_weight = None , init = "k-means++" , n_init = 10 , max_iter = 300 , verbose = False , tol = 1e-4 , random_state = None , copy_x = True , n_jobs = 1 , algorithm = "auto" , return_n_iter = False , ) : if n_init <= 0 : raise ValueError ( "Invalid number of initializations." ... | Modified from sklearn . cluster . k_means_ . k_means . |
249,930 | def fit ( self , X , y = None , sample_weight = None ) : if self . normalize : X = normalize ( X ) random_state = check_random_state ( self . random_state ) self . cluster_centers_ , self . labels_ , self . inertia_ , self . n_iter_ = spherical_k_means ( X , n_clusters = self . n_clusters , sample_weight = sample_weigh... | Compute k - means clustering . |
249,931 | def _inertia_from_labels ( X , centers , labels ) : n_examples , n_features = X . shape inertia = np . zeros ( ( n_examples , ) ) for ee in range ( n_examples ) : inertia [ ee ] = 1 - X [ ee , : ] . dot ( centers [ int ( labels [ ee ] ) , : ] . T ) return np . sum ( inertia ) | Compute inertia with cosine distance using known labels . |
249,932 | def _labels_inertia ( X , centers ) : n_examples , n_features = X . shape n_clusters , n_features = centers . shape labels = np . zeros ( ( n_examples , ) ) inertia = np . zeros ( ( n_examples , ) ) for ee in range ( n_examples ) : dists = np . zeros ( ( n_clusters , ) ) for cc in range ( n_clusters ) : dists [ cc ] = ... | Compute labels and inertia with cosine distance . |
249,933 | def _S ( kappa , alpha , beta ) : kappa = 1. * np . abs ( kappa ) alpha = 1. * alpha beta = 1. * np . abs ( beta ) a_plus_b = alpha + beta u = np . sqrt ( kappa ** 2 + beta ** 2 ) if alpha == 0 : alpha_scale = 0 else : alpha_scale = alpha * np . log ( ( alpha + u ) / a_plus_b ) return u - beta - alpha_scale | Compute the antiderivative of the Amos - type bound G on the modified Bessel function ratio . |
249,934 | def _init_unit_centers ( X , n_clusters , random_state , init ) : n_examples , n_features = np . shape ( X ) if isinstance ( init , np . ndarray ) : n_init_clusters , n_init_features = init . shape assert n_init_clusters == n_clusters assert n_init_features == n_features centers = init for cc in range ( n_clusters ) : ... | Initializes unit norm centers . |
249,935 | def _expectation ( X , centers , weights , concentrations , posterior_type = "soft" ) : n_examples , n_features = np . shape ( X ) n_clusters , _ = centers . shape if n_features <= 50 : vmf_f = _vmf_log else : vmf_f = _vmf_log_asymptotic f_log = np . zeros ( ( n_clusters , n_examples ) ) for cc in range ( n_clusters ) ... | Compute the log - likelihood of each datapoint being in each cluster . |
249,936 | def _maximization ( X , posterior , force_weights = None ) : n_examples , n_features = X . shape n_clusters , n_examples = posterior . shape concentrations = np . zeros ( ( n_clusters , ) ) centers = np . zeros ( ( n_clusters , n_features ) ) if force_weights is None : weights = np . zeros ( ( n_clusters , ) ) for cc i... | Estimate new centers weights and concentrations from |
249,937 | def _movMF ( X , n_clusters , posterior_type = "soft" , force_weights = None , max_iter = 300 , verbose = False , init = "random-class" , random_state = None , tol = 1e-6 , ) : random_state = check_random_state ( random_state ) n_examples , n_features = np . shape ( X ) centers = _init_unit_centers ( X , n_clusters , r... | Mixture of von Mises Fisher clustering . |
249,938 | def movMF ( X , n_clusters , posterior_type = "soft" , force_weights = None , n_init = 10 , n_jobs = 1 , max_iter = 300 , verbose = False , init = "random-class" , random_state = None , tol = 1e-6 , copy_x = True , ) : if n_init <= 0 : raise ValueError ( "Invalid number of initializations." " n_init=%d must be bigger t... | Wrapper for parallelization of _movMF and running n_init times . |
249,939 | def _check_fit_data ( self , X ) : X = check_array ( X , accept_sparse = "csr" , dtype = [ np . float64 , np . float32 ] ) n_samples , n_features = X . shape if X . shape [ 0 ] < self . n_clusters : raise ValueError ( "n_samples=%d should be >= n_clusters=%d" % ( X . shape [ 0 ] , self . n_clusters ) ) for ee in range ... | Verify that the number of samples given is larger than k |
249,940 | def fit ( self , X , y = None ) : if self . normalize : X = normalize ( X ) self . _check_force_weights ( ) random_state = check_random_state ( self . random_state ) X = self . _check_fit_data ( X ) ( self . cluster_centers_ , self . labels_ , self . inertia_ , self . weights_ , self . concentrations_ , self . posterio... | Compute mixture of von Mises Fisher clustering . |
249,941 | def transform ( self , X , y = None ) : if self . normalize : X = normalize ( X ) check_is_fitted ( self , "cluster_centers_" ) X = self . _check_test_data ( X ) return self . _transform ( X ) | Transform X to a cluster - distance space . In the new space each dimension is the cosine distance to the cluster centers . Note that even if X is sparse the array returned by transform will typically be dense . |
249,942 | def log_likelihood ( covariance , precision ) : assert covariance . shape == precision . shape dim , _ = precision . shape log_likelihood_ = ( - np . sum ( covariance * precision ) + fast_logdet ( precision ) - dim * np . log ( 2 * np . pi ) ) log_likelihood_ /= 2. return log_likelihood_ | Computes the log - likelihood between the covariance and precision estimate . |
249,943 | def kl_loss ( covariance , precision ) : assert covariance . shape == precision . shape dim , _ = precision . shape logdet_p_dot_c = fast_logdet ( np . dot ( precision , covariance ) ) return 0.5 * ( np . sum ( precision * covariance ) - logdet_p_dot_c - dim ) | Computes the KL divergence between precision estimate and reference covariance . |
249,944 | def ebic ( covariance , precision , n_samples , n_features , gamma = 0 ) : l_theta = - np . sum ( covariance * precision ) + fast_logdet ( precision ) l_theta *= n_features / 2. if np . isinf ( l_theta ) or np . isnan ( l_theta ) : return 1e10 mask = np . abs ( precision . flat ) > np . finfo ( precision . dtype ) . ep... | Extended Bayesian Information Criteria for model selection . |
249,945 | def lattice ( prng , n_features , alpha , random_sign = False , low = 0.3 , high = 0.7 ) : degree = int ( 1 + np . round ( alpha * n_features / 2. ) ) if random_sign : sign_row = - 1.0 * np . ones ( degree ) + 2 * ( prng . uniform ( low = 0 , high = 1 , size = degree ) > .5 ) else : sign_row = - 1.0 * np . ones ( degre... | Returns the adjacency matrix for a lattice network . |
249,946 | def _to_diagonally_dominant ( mat ) : mat += np . diag ( np . sum ( mat != 0 , axis = 1 ) + 0.01 ) return mat | Make matrix unweighted diagonally dominant using the Laplacian . |
249,947 | def _to_diagonally_dominant_weighted ( mat ) : mat += np . diag ( np . sum ( np . abs ( mat ) , axis = 1 ) + 0.01 ) return mat | Make matrix weighted diagonally dominant using the Laplacian . |
249,948 | def _rescale_to_unit_diagonals ( mat ) : d = np . sqrt ( np . diag ( mat ) ) mat /= d mat /= d [ : , np . newaxis ] return mat | Rescale matrix to have unit diagonals . |
249,949 | def create ( self , n_features , alpha ) : n_block_features = int ( np . floor ( 1. * n_features / self . n_blocks ) ) if n_block_features * self . n_blocks != n_features : raise ValueError ( ( "Error: n_features {} not divisible by n_blocks {}." "Use n_features = n_blocks * int" ) . format ( n_features , self . n_bloc... | Build a new graph with block structure . |
249,950 | def _sample_mvn ( n_samples , cov , prng ) : n_features , _ = cov . shape return prng . multivariate_normal ( np . zeros ( n_features ) , cov , size = n_samples ) | Draw a multivariate normal sample from the graph defined by cov . |
249,951 | def _fully_random_weights ( n_features , lam_scale , prng ) : weights = np . zeros ( ( n_features , n_features ) ) n_off_diag = int ( ( n_features ** 2 - n_features ) / 2 ) weights [ np . triu_indices ( n_features , k = 1 ) ] = 0.1 * lam_scale * prng . randn ( n_off_diag ) + ( 0.25 * lam_scale ) weights [ weights < 0 ]... | Generate a symmetric random matrix with zeros along the diagonal . |
249,952 | def _fix_weights ( weight_fun , * args ) : weights = weight_fun ( * args ) return weights if _check_psd ( weights ) : return weights off_diag_sums = np . sum ( weights , axis = 1 ) mod_mat = np . linalg . inv ( np . sqrt ( np . diag ( off_diag_sums ) ) ) return np . dot ( mod_mat , weights , mod_mat ) | Ensure random weight matrix is valid . |
249,953 | def _fit ( indexed_params , penalization , lam , lam_perturb , lam_scale_ , estimator , penalty_name , subsample , bootstrap , prng , X = None , ) : index = indexed_params if isinstance ( X , np . ndarray ) : local_X = X else : local_X = X . value n_samples , n_features = local_X . shape prec_is_real = False while not ... | Wrapper function outside of instance for fitting a single model average trial . |
249,954 | def _spark_map ( fun , indexed_param_grid , sc , seed , X_bc ) : def _wrap_random_state ( split_index , partition ) : prng = np . random . RandomState ( seed + split_index ) yield map ( partial ( fun , prng = prng , X = X_bc ) , partition ) par_param_grid = sc . parallelize ( indexed_param_grid ) indexed_results = par_... | We cannot pass a RandomState instance to each spark worker since it will behave identically across partitions . Instead we explictly handle the partitions with a newly seeded instance . |
249,955 | def quic_graph_lasso_ebic_manual ( X , gamma = 0 ) : print ( "QuicGraphicalLasso (manual EBIC) with:" ) print ( " mode: path" ) print ( " gamma: {}" . format ( gamma ) ) model = QuicGraphicalLasso ( lam = 1.0 , mode = "path" , init_method = "cov" , path = np . logspace ( np . log10 ( 0.01 ) , np . log10 ( 1.0 ) , n... | Run QuicGraphicalLasso with mode = path and gamma ; use EBIC criteria for model selection . |
249,956 | def quic_graph_lasso_ebic ( X , gamma = 0 ) : print ( "QuicGraphicalLassoEBIC with:" ) print ( " mode: path" ) print ( " gamma: {}" . format ( gamma ) ) model = QuicGraphicalLassoEBIC ( lam = 1.0 , init_method = "cov" , gamma = gamma ) model . fit ( X ) print ( " len(path lams): {}" . format ( len ( model . path_... | Run QuicGraphicalLassoEBIC with gamma . |
249,957 | def empirical ( X ) : print ( "Empirical" ) cov = np . dot ( X . T , X ) / n_samples return cov , np . linalg . inv ( cov ) | Compute empirical covariance as baseline estimator . |
249,958 | def sk_ledoit_wolf ( X ) : print ( "Ledoit-Wolf (sklearn)" ) lw_cov_ , _ = ledoit_wolf ( X ) lw_prec_ = np . linalg . inv ( lw_cov_ ) return lw_cov_ , lw_prec_ | Estimate inverse covariance via scikit - learn ledoit_wolf function . |
249,959 | def _nonzero_intersection ( m , m_hat ) : n_features , _ = m . shape m_no_diag = m . copy ( ) m_no_diag [ np . diag_indices ( n_features ) ] = 0 m_hat_no_diag = m_hat . copy ( ) m_hat_no_diag [ np . diag_indices ( n_features ) ] = 0 m_hat_nnz = len ( np . nonzero ( m_hat_no_diag . flat ) [ 0 ] ) m_nnz = len ( np . nonz... | Count the number of nonzeros in and between m and m_hat . |
249,960 | def support_false_positive_count ( m , m_hat ) : m_nnz , m_hat_nnz , intersection_nnz = _nonzero_intersection ( m , m_hat ) return int ( ( m_hat_nnz - intersection_nnz ) / 2.0 ) | Count the number of false positive support elements in m_hat in one triangle not including the diagonal . |
249,961 | def support_false_negative_count ( m , m_hat ) : m_nnz , m_hat_nnz , intersection_nnz = _nonzero_intersection ( m , m_hat ) return int ( ( m_nnz - intersection_nnz ) / 2.0 ) | Count the number of false negative support elements in m_hat in one triangle not including the diagonal . |
249,962 | def support_difference_count ( m , m_hat ) : m_nnz , m_hat_nnz , intersection_nnz = _nonzero_intersection ( m , m_hat ) return int ( ( m_nnz + m_hat_nnz - ( 2 * intersection_nnz ) ) / 2.0 ) | Count the number of different elements in the support in one triangle not including the diagonal . |
249,963 | def has_exact_support ( m , m_hat ) : m_nnz , m_hat_nnz , intersection_nnz = _nonzero_intersection ( m , m_hat ) return int ( ( m_nnz + m_hat_nnz - ( 2 * intersection_nnz ) ) == 0 ) | Returns 1 if support_difference_count is zero 0 else . |
249,964 | def has_approx_support ( m , m_hat , prob = 0.01 ) : m_nz = np . flatnonzero ( np . triu ( m , 1 ) ) m_hat_nz = np . flatnonzero ( np . triu ( m_hat , 1 ) ) upper_diagonal_mask = np . flatnonzero ( np . triu ( np . ones ( m . shape ) , 1 ) ) not_m_nz = np . setdiff1d ( upper_diagonal_mask , m_nz ) intersection = np . i... | Returns 1 if model selection error is less than or equal to prob rate 0 else . |
249,965 | def _validate_path ( path ) : if path is None : return None new_path = np . array ( sorted ( set ( path ) , reverse = True ) ) if new_path [ 0 ] != path [ 0 ] : print ( "Warning: Path must be sorted largest to smallest." ) return new_path | Sorts path values from largest to smallest . |
249,966 | def ebic ( self , gamma = 0 ) : if not self . is_fitted_ : return if not isinstance ( self . precision_ , list ) : return metrics . ebic ( self . sample_covariance_ , self . precision_ , self . n_samples_ , self . n_features_ , gamma = gamma , ) ebic_scores = [ ] for lidx , lam in enumerate ( self . path_ ) : ebic_scor... | Compute EBIC scores for each model . If model is not path then returns a scalar score value . |
249,967 | def ebic_select ( self , gamma = 0 ) : if not isinstance ( self . precision_ , list ) : raise ValueError ( "EBIC requires multiple models to select from." ) return if not self . is_fitted_ : return ebic_scores = self . ebic ( gamma = gamma ) min_indices = np . where ( np . abs ( ebic_scores - ebic_scores . min ( ) ) < ... | Uses Extended Bayesian Information Criteria for model selection . |
249,968 | def quic_graph_lasso ( X , num_folds , metric ) : print ( "QuicGraphicalLasso + GridSearchCV with:" ) print ( " metric: {}" . format ( metric ) ) search_grid = { "lam" : np . logspace ( np . log10 ( 0.01 ) , np . log10 ( 1.0 ) , num = 100 , endpoint = True ) , "init_method" : [ "cov" ] , "score_metric" : [ metric ] ,... | Run QuicGraphicalLasso with mode = default and use standard scikit GridSearchCV to find the best lambda . |
249,969 | def quic_graph_lasso_cv ( X , metric ) : print ( "QuicGraphicalLassoCV with:" ) print ( " metric: {}" . format ( metric ) ) model = QuicGraphicalLassoCV ( cv = 2 , n_refinements = 6 , n_jobs = 1 , init_method = "cov" , score_metric = metric , ) model . fit ( X ) print ( " len(cv_lams): {}" . format ( len ( model . ... | Run QuicGraphicalLassoCV on data with metric of choice . |
249,970 | def graph_lasso ( X , num_folds ) : print ( "GraphLasso (sklearn)" ) model = GraphLassoCV ( cv = num_folds ) model . fit ( X ) print ( " lam_: {}" . format ( model . alpha_ ) ) return model . covariance_ , model . precision_ , model . alpha_ | Estimate inverse covariance via scikit - learn GraphLassoCV class . |
249,971 | def _quic_path ( X , path , X_test = None , lam = 0.5 , tol = 1e-6 , max_iter = 1000 , Theta0 = None , Sigma0 = None , method = "quic" , verbose = 0 , score_metric = "log_likelihood" , init_method = "corrcoef" , ) : S , lam_scale_ = _init_coefs ( X , method = init_method ) path = path . copy ( order = "C" ) if method =... | Wrapper to compute path for example X . |
249,972 | def lam_at_index ( self , lidx ) : if self . path_ is None : return self . lam * self . lam_scale_ return self . lam * self . lam_scale_ * self . path_ [ lidx ] | Compute the scaled lambda used at index lidx . |
249,973 | def _compute_ranks ( X , winsorize = False , truncation = None , verbose = True ) : n_samples , n_features = X . shape Xrank = np . zeros ( shape = X . shape ) if winsorize : if truncation is None : truncation = 1 / ( 4 * np . power ( n_samples , 0.25 ) * np . sqrt ( np . pi * np . log ( n_samples ) ) ) elif truncation... | Transform each column into ranked data . Tied ranks are averaged . Ranks can optionally be winsorized as described in Liu 2009 otherwise this returns Tsukahara s scaled rank based Z - estimator . |
249,974 | def spearman_correlation ( X , rowvar = False ) : Xrank = _compute_ranks ( X ) rank_correlation = np . corrcoef ( Xrank , rowvar = rowvar ) return 2 * np . sin ( rank_correlation * np . pi / 6 ) | Computes the spearman correlation estimate . This is effectively a bias corrected pearson correlation between rank transformed columns of X . |
249,975 | def kendalltau_correlation ( X , rowvar = False , weighted = False ) : if rowvar : X = X . T _ , n_features = X . shape rank_correlation = np . eye ( n_features ) for row in np . arange ( n_features ) : for col in np . arange ( 1 + row , n_features ) : if weighted : rank_correlation [ row , col ] , _ = weightedtau ( X ... | Computes kendall s tau correlation estimate . The option to use scipy . stats . weightedtau is not recommended as the implementation does not appear to handle ties correctly . |
249,976 | def version ( self ) : request_params = dict ( self . request_params ) request_url = str ( self . request_url ) result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) . json ( ) return result [ 'message-version' ] | This attribute retrieve the API version . |
249,977 | def count ( self ) : request_params = dict ( self . request_params ) request_url = str ( self . request_url ) request_params [ 'rows' ] = 0 result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) . json ( ) return int ( result [ 'message' ] [ 'total-res... | This method retrieve the total of records resulting from a given query . |
249,978 | def url ( self ) : request_params = self . _escaped_pagging ( ) sorted_request_params = sorted ( [ ( k , v ) for k , v in request_params . items ( ) ] ) req = requests . Request ( 'get' , self . request_url , params = sorted_request_params ) . prepare ( ) return req . url | This attribute retrieve the url that will be used as a HTTP request to the Crossref API . |
249,979 | def doi ( self , doi , only_message = True ) : request_url = build_url_endpoint ( '/' . join ( [ self . ENDPOINT , doi ] ) ) request_params = { } result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) if result . status_code == 404 : return result = re... | This method retrieve the DOI metadata related to a given DOI number . |
249,980 | def doi_exists ( self , doi ) : request_url = build_url_endpoint ( '/' . join ( [ self . ENDPOINT , doi ] ) ) request_params = { } result = self . do_http_request ( 'get' , request_url , data = request_params , only_headers = True , custom_header = str ( self . etiquette ) ) if result . status_code == 404 : return Fals... | This method retrieve a boolean according to the existence of a crossref DOI number . It returns False if the API results a 404 status code . |
249,981 | def works ( self , funder_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( funder_id ) ) return Works ( context = context ) | This method retrieve a iterable of Works of the given funder . |
249,982 | def works ( self , member_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( member_id ) ) return Works ( context = context ) | This method retrieve a iterable of Works of the given member . |
249,983 | def all ( self ) : request_url = build_url_endpoint ( self . ENDPOINT , self . context ) request_params = dict ( self . request_params ) result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) if result . status_code == 404 : raise StopIteration ( ) res... | This method retrieve an iterator with all the available types . |
249,984 | def works ( self , type_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( type_id ) ) return Works ( context = context ) | This method retrieve a iterable of Works of the given type . |
249,985 | def works ( self , prefix_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( prefix_id ) ) return Works ( context = context ) | This method retrieve a iterable of Works of the given prefix . |
249,986 | def works ( self , issn ) : context = '%s/%s' % ( self . ENDPOINT , str ( issn ) ) return Works ( context = context ) | This method retrieve a iterable of Works of the given journal . |
249,987 | def register_doi ( self , submission_id , request_xml ) : endpoint = self . get_endpoint ( 'deposit' ) files = { 'mdFile' : ( '%s.xml' % submission_id , request_xml ) } params = { 'operation' : 'doMDUpload' , 'login_id' : self . api_user , 'login_passwd' : self . api_key } result = self . do_http_request ( 'post' , end... | This method registry a new DOI number in Crossref or update some DOI metadata . |
249,988 | def _find_plugin_dir ( module_type ) : for install_dir in _get_plugin_install_dirs ( ) : candidate = os . path . join ( install_dir , module_type ) if os . path . isdir ( candidate ) : return candidate else : raise PluginCandidateError ( 'No plugin found for `{}` module in paths:\n{}' . format ( module_type , '\n' . jo... | Find the directory containing the plugin definition for the given type . Do this by searching all the paths where plugins can live for a dir that matches the type name . |
249,989 | def merged_args_dicts ( global_args , subcommand_args ) : merged = global_args . copy ( ) for key , val in subcommand_args . items ( ) : if key not in merged : merged [ key ] = val elif type ( merged [ key ] ) is type ( val ) is bool : merged [ key ] = merged [ key ] or val else : raise RuntimeError ( "Unmergable args.... | We deal with docopt args from the toplevel peru parse and the subcommand parse . We don t want False values for a flag in the subcommand to override True values if that flag was given at the top level . This function specifically handles that case . |
249,990 | def force_utf8_in_ascii_mode_hack ( ) : if sys . stdout . encoding == 'ANSI_X3.4-1968' : sys . stdout = open ( sys . stdout . fileno ( ) , mode = 'w' , encoding = 'utf8' , buffering = 1 ) sys . stderr = open ( sys . stderr . fileno ( ) , mode = 'w' , encoding = 'utf8' , buffering = 1 ) | In systems without a UTF8 locale configured Python will default to ASCII mode for stdout and stderr . This causes our fancy display to fail with encoding errors . In particular you run into this if you try to run peru inside of Docker . This is a hack to force emitting UTF8 in that case . Hopefully it doesn t break any... |
249,991 | async def parse_target ( self , runtime , target_str ) : pipeline_parts = target_str . split ( RULE_SEPARATOR ) module = await self . resolve_module ( runtime , pipeline_parts [ 0 ] , target_str ) rules = [ ] for part in pipeline_parts [ 1 : ] : rule = await self . resolve_rule ( runtime , part ) rules . append ( rule ... | A target is a pipeline of a module into zero or more rules and each module and rule can itself be scoped with zero or more module names . |
249,992 | def _maybe_quote ( val ) : assert isinstance ( val , str ) , 'We should never set non-string values.' needs_quoting = False try : int ( val ) needs_quoting = True except Exception : pass try : float ( val ) needs_quoting = True except Exception : pass if needs_quoting : return '"{}"' . format ( val ) else : return val | All of our values should be strings . Usually those can be passed in as bare words but if they re parseable as an int or float we need to quote them . |
249,993 | async def gather_coalescing_exceptions ( coros , display , * , verbose ) : exceptions = [ ] reprs = [ ] async def catching_wrapper ( coro ) : try : return ( await coro ) except Exception as e : exceptions . append ( e ) if isinstance ( e , PrintableError ) and not verbose : reprs . append ( e . message ) else : reprs .... | The tricky thing about running multiple coroutines in parallel is what we're supposed to do when one of them raises an exception . The approach we're using here is to catch exceptions and keep waiting for other tasks to finish . At the end we re-raise a GatheredExceptions error if any exceptions were caught .
249,994 | async def create_subprocess_with_handle ( command , display_handle , * , shell = False , cwd , ** kwargs ) : encoding = sys . stdout . encoding or 'utf8' decoder_factory = codecs . getincrementaldecoder ( encoding ) decoder = decoder_factory ( errors = 'replace' ) output_copy = io . StringIO ( ) with display_handle : s... | Writes subprocess output to a display handle as it comes in and also returns a copy of it as a string . Throws if the subprocess returns an error . Note that cwd is a required keyword-only argument on the theory that peru should never start child processes wherever I happen to be running right now .
249,995 | def raises_gathered ( error_type ) : container = RaisesGatheredContainer ( ) try : yield container except GatheredExceptions as e : if len ( e . exceptions ) != 1 : raise inner = e . exceptions [ 0 ] if not isinstance ( inner , error_type ) : raise container . exception = inner | For use in tests . Many tests expect a single error to be thrown and want it to be of a specific type . This is a helper method for when that type is inside a gathered exception . |
249,996 | def get_request_filename ( request ) : if 'Content-Disposition' in request . info ( ) : disposition = request . info ( ) [ 'Content-Disposition' ] pieces = re . split ( r'\s*;\s*' , disposition ) for piece in pieces : if piece . startswith ( 'filename=' ) : filename = piece [ len ( 'filename=' ) : ] if filename . start... | Figure out the filename for an HTTP download . |
249,997 | def _extract_optional_list_field ( blob , name ) : value = _optional_list ( typesafe_pop ( blob , name , [ ] ) ) if value is None : raise ParserError ( '"{}" field must be a string or a list.' . format ( name ) ) return value | Handle optional fields that can be either a string or a list of strings . |
249,998 | def pop_all ( self ) : new_stack = type ( self ) ( ) new_stack . _exit_callbacks = self . _exit_callbacks self . _exit_callbacks = deque ( ) return new_stack | Preserve the context stack by transferring it to a new instance . |
249,999 | def callback ( self , callback , * args , ** kwds ) : _exit_wrapper = self . _create_cb_wrapper ( callback , * args , ** kwds ) _exit_wrapper . __wrapped__ = callback self . _push_exit_callback ( _exit_wrapper ) return callback | Registers an arbitrary callback and arguments . Cannot suppress exceptions . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.