idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
17,600 | def devserver_cmd ( argv = sys . argv [ 1 : ] ) : arguments = docopt ( devserver_cmd . __doc__ , argv = argv ) initialize_config ( ) app . run ( host = arguments [ '--host' ] , port = int ( arguments [ '--port' ] ) , debug = int ( arguments [ '--debug' ] ) , ) | \ Serve the web API for development . |
17,601 | def stream_cmd ( argv = sys . argv [ 1 : ] ) : docopt ( stream_cmd . __doc__ , argv = argv ) initialize_config ( ) stream = PredictStream ( ) stream . listen ( sys . stdin , sys . stdout , sys . stderr ) | \ Start the streaming server which listens to stdin processes line by line and returns predictions . |
17,602 | def listen ( self , io_in , io_out , io_err ) : for line in io_in : if line . strip ( ) . lower ( ) == 'exit' : break try : y_pred = self . process_line ( line ) except Exception as e : io_out . write ( '[]\n' ) io_err . write ( "Error while processing input row: {}" "{}: {}\n" . format ( line , type ( e ) , e ) ) io_err . flush ( ) else : io_out . write ( ujson . dumps ( y_pred . tolist ( ) ) ) io_out . write ( '\n' ) io_out . flush ( ) | Listens to provided io stream and writes predictions to output . In case of errors the error stream will be used . |
17,603 | def list_cmd ( argv = sys . argv [ 1 : ] ) : docopt ( list_cmd . __doc__ , argv = argv ) initialize_config ( __mode__ = 'fit' ) list ( ) | \ List information about available models . |
17,604 | def fit_cmd ( argv = sys . argv [ 1 : ] ) : arguments = docopt ( fit_cmd . __doc__ , argv = argv ) no_save = arguments [ '--no-save' ] no_activate = arguments [ '--no-activate' ] save_if_better_than = arguments [ '--save-if-better-than' ] evaluate = arguments [ '--evaluate' ] or bool ( save_if_better_than ) if save_if_better_than is not None : save_if_better_than = float ( save_if_better_than ) initialize_config ( __mode__ = 'fit' ) fit ( persist = not no_save , activate = not no_activate , evaluate = evaluate , persist_if_better_than = save_if_better_than , ) | \ Fit a model and save to database . |
17,605 | def admin_cmd ( argv = sys . argv [ 1 : ] ) : arguments = docopt ( admin_cmd . __doc__ , argv = argv ) initialize_config ( __mode__ = 'fit' ) if arguments [ 'activate' ] : activate ( model_version = int ( arguments [ '<version>' ] ) ) elif arguments [ 'delete' ] : delete ( model_version = int ( arguments [ '<version>' ] ) ) | \ Activate or delete models . |
17,606 | def grid_search_cmd ( argv = sys . argv [ 1 : ] ) : arguments = docopt ( grid_search_cmd . __doc__ , argv = argv ) initialize_config ( __mode__ = 'fit' ) grid_search ( save_results = arguments [ '--save-results' ] , persist_best = arguments [ '--persist-best' ] , ) | \ Grid search parameters for the model . |
17,607 | def switch_fingerprint_method ( self , old = False ) : if old : self . has_fingerprint = self . has_fingerprint_moduli else : self . has_fingerprint = self . has_fingerprint_dlog | Switches main fingerprinting method . |
17,608 | def _map_tril_1d_on_2d ( indices , dims ) : N = ( dims * dims - dims ) / 2 m = np . ceil ( np . sqrt ( 2 * N ) ) c = m - np . round ( np . sqrt ( 2 * ( N - indices ) ) ) - 1 r = np . mod ( indices + ( c + 1 ) * ( c + 2 ) / 2 - 1 , m ) + 1 return np . array ( [ r , c ] , dtype = np . int64 ) | Map 1d indices on lower triangular matrix in 2d . |
17,609 | def _unique_rows_numpy ( a ) : a = np . ascontiguousarray ( a ) unique_a = np . unique ( a . view ( [ ( '' , a . dtype ) ] * a . shape [ 1 ] ) ) return unique_a . view ( a . dtype ) . reshape ( ( unique_a . shape [ 0 ] , a . shape [ 1 ] ) ) | return unique rows |
17,610 | def random_pairs_with_replacement ( n , shape , random_state = None ) : if not isinstance ( random_state , np . random . RandomState ) : random_state = np . random . RandomState ( random_state ) n_max = max_pairs ( shape ) if n_max <= 0 : raise ValueError ( 'n_max must be larger than 0' ) indices = random_state . randint ( 0 , n_max , n ) if len ( shape ) == 1 : return _map_tril_1d_on_2d ( indices , shape [ 0 ] ) else : return np . unravel_index ( indices , shape ) | make random record pairs |
17,611 | def random_pairs_without_replacement_large_frames ( n , shape , random_state = None ) : n_max = max_pairs ( shape ) sample = np . array ( [ ] ) while len ( sample ) < n : n_sample_size = ( n - len ( sample ) ) * 2 sample = random_state . randint ( n_max , size = n_sample_size ) pairs_non_unique = np . append ( sample , sample ) sample = _unique_rows_numpy ( pairs_non_unique ) if len ( shape ) == 1 : return _map_tril_1d_on_2d ( sample [ 0 : n ] , shape [ 0 ] ) else : return np . unravel_index ( sample [ 0 : n ] , shape ) | Make a sample of random pairs with replacement |
17,612 | def clean ( s , lowercase = True , replace_by_none = r'[^ \-\_A-Za-z0-9]+' , replace_by_whitespace = r'[\-\_]' , strip_accents = None , remove_brackets = True , encoding = 'utf-8' , decode_error = 'strict' ) : if s . shape [ 0 ] == 0 : return s if lowercase is True : s = s . str . lower ( ) if not strip_accents : pass elif callable ( strip_accents ) : strip_accents_fn = strip_accents elif strip_accents == 'ascii' : strip_accents_fn = strip_accents_ascii elif strip_accents == 'unicode' : strip_accents_fn = strip_accents_unicode else : raise ValueError ( "Invalid value for 'strip_accents': {}" . format ( strip_accents ) ) if strip_accents : def strip_accents_fn_wrapper ( x ) : if sys . version_info [ 0 ] >= 3 : if isinstance ( x , str ) : return strip_accents_fn ( x ) else : return x else : if isinstance ( x , unicode ) : return strip_accents_fn ( x ) else : return x s = s . apply ( lambda x : x . decode ( encoding , decode_error ) if type ( x ) == bytes else x ) s = s . map ( lambda x : strip_accents_fn_wrapper ( x ) ) if remove_brackets is True : s = s . str . replace ( r'(\[.*?\]|\(.*?\)|\{.*?\})' , '' ) if replace_by_none : s = s . str . replace ( replace_by_none , '' ) if replace_by_whitespace : s = s . str . replace ( replace_by_whitespace , ' ' ) s = s . str . replace ( r'\s\s+' , ' ' ) s = s . str . lstrip ( ) . str . rstrip ( ) return s | Clean string variables . |
17,613 | def value_occurence ( s ) : value_count = s . fillna ( 'NAN' ) return value_count . groupby ( by = value_count ) . transform ( 'count' ) | Count the number of times each value occurs . |
17,614 | def safe_sparse_dot ( a , b , dense_output = False ) : if issparse ( a ) or issparse ( b ) : ret = a * b if dense_output and hasattr ( ret , "toarray" ) : ret = ret . toarray ( ) return ret else : return np . dot ( a , b ) | Dot product that handle the sparse matrix case correctly Uses BLAS GEMM as replacement for numpy . dot where possible to avoid unnecessary copies . |
17,615 | def _joint_log_likelihood ( self , X ) : check_is_fitted ( self , "classes_" ) X = check_array ( X , accept_sparse = 'csr' ) X_bin = self . _transform_data ( X ) n_classes , n_features = self . feature_log_prob_ . shape n_samples , n_features_X = X_bin . shape if n_features_X != n_features : raise ValueError ( "Expected input with %d features, got %d instead" % ( n_features , n_features_X ) ) jll = safe_sparse_dot ( X_bin , self . feature_log_prob_ . T ) jll += self . class_log_prior_ return jll | Calculate the posterior log probability of the samples X |
17,616 | def predict ( self , X ) : jll = self . _joint_log_likelihood ( X ) return self . classes_ [ np . argmax ( jll , axis = 1 ) ] | Perform classification on an array of test vectors X . |
17,617 | def predict_log_proba ( self , X ) : jll = self . _joint_log_likelihood ( X ) log_prob_x = logsumexp ( jll , axis = 1 ) return jll - np . atleast_2d ( log_prob_x ) . T | Return log - probability estimates for the test vector X . |
17,618 | def _count ( self , X , Y ) : self . feature_count_ += safe_sparse_dot ( Y . T , X ) self . class_count_ += Y . sum ( axis = 0 ) | Count and smooth feature occurrences . |
17,619 | def _update_feature_log_prob ( self , alpha ) : smoothed_fc = self . feature_count_ + alpha smoothed_cc = self . class_count_ + alpha * 2 self . feature_log_prob_ = ( np . log ( smoothed_fc ) - np . log ( smoothed_cc . reshape ( - 1 , 1 ) ) ) | Apply smoothing to raw counts and recompute log probabilities |
17,620 | def fit ( self , X , y , sample_weight = None ) : X , y = check_X_y ( X , y , 'csr' ) X_bin = self . _fit_data ( X ) _ , n_features = X_bin . shape labelbin = LabelBinarizer ( ) Y = labelbin . fit_transform ( y ) self . classes_ = labelbin . classes_ if Y . shape [ 1 ] == 1 : Y = np . concatenate ( ( 1 - Y , Y ) , axis = 1 ) Y = Y . astype ( np . float64 ) if sample_weight is not None : sample_weight = np . atleast_2d ( sample_weight ) Y *= check_array ( sample_weight ) . T class_prior = self . class_prior n_effective_classes = Y . shape [ 1 ] self . class_count_ = np . zeros ( n_effective_classes , dtype = np . float64 ) self . feature_count_ = np . zeros ( ( n_effective_classes , n_features ) , dtype = np . float64 ) self . _count ( X_bin , Y ) alpha = self . _check_alpha ( ) self . _update_feature_log_prob ( alpha ) self . _update_class_log_prior ( class_prior = class_prior ) return self | Fit Naive Bayes classifier according to X y |
17,621 | def fit ( self , X ) : X = check_array ( X , accept_sparse = 'csr' ) X_unique , X_freq = np . unique ( X , axis = 0 , return_counts = True ) X_freq = np . atleast_2d ( X_freq ) X_unique_bin = self . _fit_data ( X_unique ) _ , n_features = X_unique_bin . shape self . classes_ = np . array ( [ 0 , 1 ] ) if is_string_like ( self . init ) and self . init == 'random' : self . class_log_prior_ , self . feature_log_prob_ = self . _init_parameters_random ( X_unique_bin ) elif is_string_like ( self . init ) and self . init == 'jaro' : self . class_log_prior_ , self . feature_log_prob_ = self . _init_parameters_jaro ( X_unique_bin ) else : raise ValueError ( "'{}' is not a valid value for " "argument 'init'" . format ( self . init ) ) iteration = 0 stop_iteration = False self . _log_class_log_prior = np . atleast_2d ( self . class_log_prior_ ) self . _log_feature_log_prob = np . atleast_3d ( self . feature_log_prob_ ) while iteration < self . max_iter and not stop_iteration : g = self . predict_proba ( X_unique ) g_freq = g * X_freq . T g_freq_sum = g_freq . sum ( axis = 0 ) class_log_prior_ = np . log ( g_freq_sum ) - np . log ( X . shape [ 0 ] ) feature_log_prob_ = np . log ( safe_sparse_dot ( g_freq . T , X_unique_bin ) ) feature_log_prob_ -= np . log ( np . atleast_2d ( g_freq_sum ) . T ) class_log_prior_close = np . allclose ( class_log_prior_ , self . class_log_prior_ , atol = self . atol ) feature_log_prob_close = np . allclose ( feature_log_prob_ , self . feature_log_prob_ , atol = self . atol ) if ( class_log_prior_close and feature_log_prob_close ) : stop_iteration = True if np . all ( np . isnan ( feature_log_prob_ ) ) : stop_iteration = True self . class_log_prior_ = class_log_prior_ self . feature_log_prob_ = feature_log_prob_ self . _log_class_log_prior = np . concatenate ( [ self . _log_class_log_prior , np . atleast_2d ( self . class_log_prior_ ) ] ) self . _log_feature_log_prob = np . concatenate ( [ self . _log_feature_log_prob , np . atleast_3d ( self . feature_log_prob_ ) ] , axis = 2 ) iteration += 1 return self | Fit ECM classifier according to X |
17,622 | def _get_sorting_key_values ( self , array1 , array2 ) : concat_arrays = numpy . concatenate ( [ array1 , array2 ] ) unique_values = numpy . unique ( concat_arrays ) return numpy . sort ( unique_values ) | return the sorting key values as a series |
17,623 | def compute ( self , links ) : try : import networkx as nx except ImportError ( ) : raise Exception ( "'networkx' module is needed for this operation" ) G = nx . Graph ( ) G . add_edges_from ( links . values ) connected_components = nx . connected_component_subgraphs ( G ) links_result = [ pd . MultiIndex . from_tuples ( subgraph . edges ( ) ) for subgraph in connected_components ] return links_result | Return the connected components . |
17,624 | def _prob_match ( self , features ) : probs = self . kernel . predict_proba ( features ) classes = list ( self . kernel . classes_ ) match_class_position = classes . index ( 1 ) return probs [ : , match_class_position ] | Compute match probabilities . |
17,625 | def _predict ( self , features ) : from sklearn . exceptions import NotFittedError try : prediction = self . kernel . predict_classes ( features ) [ : , 0 ] except NotFittedError : raise NotFittedError ( "{} is not fitted yet. Call 'fit' with appropriate " "arguments before using this method." . format ( type ( self ) . __name__ ) ) return prediction | Predict matches and non - matches . |
17,626 | def _febrl_links ( df ) : index = df . index . to_series ( ) keys = index . str . extract ( r'rec-(\d+)' , expand = True ) [ 0 ] index_int = numpy . arange ( len ( df ) ) df_helper = pandas . DataFrame ( { 'key' : keys , 'index' : index_int } ) pairs_df = df_helper . merge ( df_helper , on = 'key' ) [ [ 'index_x' , 'index_y' ] ] pairs_df = pairs_df [ pairs_df [ 'index_x' ] > pairs_df [ 'index_y' ] ] return pandas . MultiIndex ( levels = [ df . index . values , df . index . values ] , labels = [ pairs_df [ 'index_x' ] . values , pairs_df [ 'index_y' ] . values ] , names = [ None , None ] , verify_integrity = False ) | Get the links of a FEBRL dataset . |
17,627 | def load_febrl1 ( return_links = False ) : df = _febrl_load_data ( 'dataset1.csv' ) if return_links : links = _febrl_links ( df ) return df , links else : return df | Load the FEBRL 1 dataset . |
17,628 | def load_febrl2 ( return_links = False ) : df = _febrl_load_data ( 'dataset2.csv' ) if return_links : links = _febrl_links ( df ) return df , links else : return df | Load the FEBRL 2 dataset . |
17,629 | def load_febrl3 ( return_links = False ) : df = _febrl_load_data ( 'dataset3.csv' ) if return_links : links = _febrl_links ( df ) return df , links else : return df | Load the FEBRL 3 dataset . |
17,630 | def load_febrl4 ( return_links = False ) : df_a = _febrl_load_data ( 'dataset4a.csv' ) df_b = _febrl_load_data ( 'dataset4b.csv' ) if return_links : links = pandas . MultiIndex . from_arrays ( [ [ "rec-{}-org" . format ( i ) for i in range ( 0 , 5000 ) ] , [ "rec-{}-dup-0" . format ( i ) for i in range ( 0 , 5000 ) ] ] ) return df_a , df_b , links else : return df_a , df_b | Load the FEBRL 4 datasets . |
17,631 | def load_krebsregister ( block = [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ] , missing_values = None , shuffle = True ) : for i in range ( 1 , 11 ) : filepath = os . path . join ( os . path . dirname ( __file__ ) , 'krebsregister' , 'block_{}.zip' . format ( i ) ) if not os . path . exists ( filepath ) : _download_krebsregister ( ) break if isinstance ( block , ( list , tuple ) ) : data = pandas . concat ( [ _krebsregister_block ( bl ) for bl in block ] ) else : data = _krebsregister_block ( block ) if shuffle : data = data . sample ( frac = 1 , random_state = 535 ) match_index = data . index [ data [ 'is_match' ] ] del data [ 'is_match' ] if pandas . notnull ( missing_values ) : data . fillna ( missing_values , inplace = True ) return data , match_index | Load the Krebsregister dataset . |
17,632 | def phonetic ( s , method , concat = True , encoding = 'utf-8' , decode_error = 'strict' ) : if sys . version_info [ 0 ] == 2 : s = s . apply ( lambda x : x . decode ( encoding , decode_error ) if type ( x ) == bytes else x ) if concat : s = s . str . replace ( r"[\-\_\s]" , "" ) for alg in _phonetic_algorithms : if method in alg [ 'argument_names' ] : phonetic_callback = alg [ 'callback' ] break else : raise ValueError ( "The algorithm '{}' is not known." . format ( method ) ) return s . str . upper ( ) . apply ( lambda x : phonetic_callback ( x ) if pandas . notnull ( x ) else np . nan ) | Convert names or strings into phonetic codes . |
17,633 | def block ( self , * args , ** kwargs ) : indexer = Block ( * args , ** kwargs ) self . add ( indexer ) return self | Add a block index . |
17,634 | def sortedneighbourhood ( self , * args , ** kwargs ) : indexer = SortedNeighbourhood ( * args , ** kwargs ) self . add ( indexer ) return self | Add a Sorted Neighbourhood Index . |
17,635 | def random ( self , * args , ** kwargs ) : indexer = Random ( ) self . add ( indexer ) return self | Add a random index . |
17,636 | def exact ( self , * args , ** kwargs ) : compare = Exact ( * args , ** kwargs ) self . add ( compare ) return self | Compare attributes of pairs exactly . |
17,637 | def string ( self , * args , ** kwargs ) : compare = String ( * args , ** kwargs ) self . add ( compare ) return self | Compare attributes of pairs with string algorithm . |
17,638 | def numeric ( self , * args , ** kwargs ) : compare = Numeric ( * args , ** kwargs ) self . add ( compare ) return self | Compare attributes of pairs with numeric algorithm . |
17,639 | def geo ( self , * args , ** kwargs ) : compare = Geographic ( * args , ** kwargs ) self . add ( compare ) return self | Compare attributes of pairs with geo algorithm . |
17,640 | def date ( self , * args , ** kwargs ) : compare = Date ( * args , ** kwargs ) self . add ( compare ) return self | Compare attributes of pairs with date algorithm . |
17,641 | def reduction_ratio ( links_pred , * total ) : n_max = full_index_size ( * total ) if isinstance ( links_pred , pandas . MultiIndex ) : links_pred = len ( links_pred ) if links_pred > n_max : raise ValueError ( "n has to be smaller of equal n_max" ) return 1 - links_pred / n_max | Compute the reduction ratio . |
17,642 | def full_index_size ( * args ) : if len ( args ) == 1 and isinstance ( args [ 0 ] , ( list , tuple ) ) : args = tuple ( args [ 0 ] ) if len ( args ) == 1 : n = get_length ( args [ 0 ] ) size = int ( n * ( n - 1 ) / 2 ) else : size = numpy . prod ( [ get_length ( arg ) for arg in args ] ) return size | Compute the number of records in a full index . |
17,643 | def true_positives ( links_true , links_pred ) : links_true = _get_multiindex ( links_true ) links_pred = _get_multiindex ( links_pred ) return len ( links_true & links_pred ) | Count the number of True Positives . |
17,644 | def true_negatives ( links_true , links_pred , total ) : links_true = _get_multiindex ( links_true ) links_pred = _get_multiindex ( links_pred ) if isinstance ( total , pandas . MultiIndex ) : total = len ( total ) return int ( total ) - len ( links_true | links_pred ) | Count the number of True Negatives . |
17,645 | def false_positives ( links_true , links_pred ) : links_true = _get_multiindex ( links_true ) links_pred = _get_multiindex ( links_pred ) return len ( links_pred . difference ( links_true ) ) | Count the number of False Positives . |
17,646 | def false_negatives ( links_true , links_pred ) : links_true = _get_multiindex ( links_true ) links_pred = _get_multiindex ( links_pred ) return len ( links_true . difference ( links_pred ) ) | Count the number of False Negatives . |
17,647 | def confusion_matrix ( links_true , links_pred , total = None ) : links_true = _get_multiindex ( links_true ) links_pred = _get_multiindex ( links_pred ) tp = true_positives ( links_true , links_pred ) fp = false_positives ( links_true , links_pred ) fn = false_negatives ( links_true , links_pred ) if total is None : tn = numpy . nan else : tn = true_negatives ( links_true , links_pred , total ) return numpy . array ( [ [ tp , fn ] , [ fp , tn ] ] ) | Compute the confusion matrix . |
17,648 | def compute ( self , pairs , x = None , x_link = None ) : df_empty = pd . DataFrame ( index = pairs ) return self . _compute ( tuple ( [ df_empty ] ) , tuple ( [ df_empty ] ) ) | Return continuous random values for each record pair . |
17,649 | def _parallel_compare_helper ( class_obj , pairs , x , x_link = None ) : return class_obj . _compute ( pairs , x , x_link ) | Internal function to overcome pickling problem in python2 . |
17,650 | def chunk_pandas ( frame_or_series , chunksize = None ) : if not isinstance ( chunksize , int ) : raise ValueError ( 'argument chunksize needs to be integer type' ) bins = np . arange ( 0 , len ( frame_or_series ) , step = chunksize ) for b in bins : yield frame_or_series [ b : b + chunksize ] | Chunk a frame into smaller equal parts . |
17,651 | def add ( self , model ) : if isinstance ( model , list ) : self . algorithms = self . algorithms + model else : self . algorithms . append ( model ) | Add a index method . |
17,652 | def _dedup_index ( self , df_a ) : pairs = self . _link_index ( df_a , df_a ) pairs = pairs [ pairs . labels [ 0 ] > pairs . labels [ 1 ] ] return pairs | Build an index for deduplicating a dataset . |
17,653 | def _compute ( self , left_on , right_on ) : result = self . _compute_vectorized ( * tuple ( left_on + right_on ) ) return result | Compare the data on the left and right . |
17,654 | def compare_vectorized ( self , comp_func , labels_left , labels_right , * args , ** kwargs ) : label = kwargs . pop ( 'label' , None ) if isinstance ( labels_left , tuple ) : labels_left = list ( labels_left ) if isinstance ( labels_right , tuple ) : labels_right = list ( labels_right ) feature = BaseCompareFeature ( labels_left , labels_right , args , kwargs , label = label ) feature . _f_compare_vectorized = comp_func self . add ( feature ) | Compute the similarity between values with a callable . |
17,655 | def _get_labels_left ( self , validate = None ) : labels = [ ] for compare_func in self . features : labels = labels + listify ( compare_func . labels_left ) if not is_label_dataframe ( labels , validate ) : error_msg = "label is not found in the dataframe" raise KeyError ( error_msg ) return unique ( labels ) | Get all labels of the left dataframe . |
17,656 | def _get_labels_right ( self , validate = None ) : labels = [ ] for compare_func in self . features : labels = labels + listify ( compare_func . labels_right ) if not is_label_dataframe ( labels , validate ) : error_msg = "label is not found in the dataframe" raise KeyError ( error_msg ) return unique ( labels ) | Get all labels of the right dataframe . |
17,657 | def _union ( self , objs , index = None , column_i = 0 ) : feat_conc = [ ] for feat , label in objs : if isinstance ( feat , tuple ) : if label is None : label = [ None ] * len ( feat ) partial_result = self . _union ( zip ( feat , label ) , column_i = column_i ) feat_conc . append ( partial_result ) column_i = column_i + partial_result . shape [ 1 ] elif isinstance ( feat , pandas . Series ) : feat . reset_index ( drop = True , inplace = True ) if label is None : label = column_i feat . rename ( label , inplace = True ) feat_conc . append ( feat ) column_i = column_i + 1 elif isinstance ( feat , pandas . DataFrame ) : feat . reset_index ( drop = True , inplace = True ) if label is None : label = np . arange ( column_i , column_i + feat . shape [ 1 ] ) feat . columns = label feat_conc . append ( feat ) column_i = column_i + feat . shape [ 1 ] elif is_numpy_like ( feat ) and len ( feat . shape ) == 1 : if label is None : label = column_i f = pandas . Series ( feat , name = label , copy = False ) feat_conc . append ( f ) column_i = column_i + 1 elif is_numpy_like ( feat ) and len ( feat . shape ) == 2 : if label is None : label = np . arange ( column_i , column_i + feat . shape [ 1 ] ) feat_df = pandas . DataFrame ( feat , columns = label , copy = False ) if label is None : feat_df . columns = [ None for _ in range ( feat_df . shape [ 1 ] ) ] feat_conc . append ( feat_df ) column_i = column_i + feat . shape [ 1 ] else : raise ValueError ( "expected numpy.ndarray or " "pandas object to be returned, " "got '{}'" . format ( feat . __class__ . __name__ ) ) result = pandas . concat ( feat_conc , axis = 1 , copy = False ) if index is not None : result . set_index ( index , inplace = True ) return result | Make a union of the features . |
17,658 | def predict ( self , comparison_vectors ) : logging . info ( "Classification - predict matches and non-matches" ) prediction = self . _predict ( comparison_vectors . values ) self . _post_predict ( prediction ) return self . _return_result ( prediction , comparison_vectors ) | Predict the class of the record pairs . |
17,659 | def prob ( self , comparison_vectors , return_type = None ) : if return_type is not None : warnings . warn ( "The argument 'return_type' is removed. " "Default value is now 'series'." , VisibleDeprecationWarning , stacklevel = 2 ) logging . info ( "Classification - compute probabilities" ) prob_match = self . _prob_match ( comparison_vectors . values ) return pandas . Series ( prob_match , index = comparison_vectors . index ) | Compute the probabilities for each record pair . |
17,660 | def _return_result ( self , result , comparison_vectors = None ) : return_type = cf . get_option ( 'classification.return_type' ) if type ( result ) != np . ndarray : raise ValueError ( "numpy.ndarray expected." ) if return_type == 'index' : return comparison_vectors . index [ result . astype ( bool ) ] elif return_type == 'series' : return pandas . Series ( result , index = comparison_vectors . index , name = 'classification' ) elif return_type == 'array' : return result else : raise ValueError ( "return_type {} unknown. Choose 'index', 'series' or " "'array'" . format ( return_type ) ) | Return different formatted classification results . |
17,661 | def binary_vectors ( n , n_match , m = [ 0.9 ] * 8 , u = [ 0.1 ] * 8 , random_state = None , return_links = False , dtype = np . int8 ) : if len ( m ) != len ( u ) : raise ValueError ( "the length of 'm' is not equal the length of 'u'" ) if n_match >= n or n_match < 0 : raise ValueError ( "the number of matches is bounded by [0, n]" ) np . random . seed ( random_state ) matches = [ ] nonmatches = [ ] sample_set = np . array ( [ 0 , 1 ] , dtype = dtype ) for i , _ in enumerate ( m ) : p_mi = [ 1 - m [ i ] , m [ i ] ] p_ui = [ 1 - u [ i ] , u [ i ] ] comp_mi = np . random . choice ( sample_set , ( n_match , 1 ) , p = p_mi ) comp_ui = np . random . choice ( sample_set , ( n - n_match , 1 ) , p = p_ui ) nonmatches . append ( comp_ui ) matches . append ( comp_mi ) match_block = np . concatenate ( matches , axis = 1 ) nonmatch_block = np . concatenate ( nonmatches , axis = 1 ) data_np = np . concatenate ( ( match_block , nonmatch_block ) , axis = 0 ) index_np = np . random . randint ( 1001 , 1001 + n * 2 , ( n , 2 ) ) data_col_names = [ 'c_%s' % ( i + 1 ) for i in range ( len ( m ) ) ] data_mi = pd . MultiIndex . from_arrays ( [ index_np [ : , 0 ] , index_np [ : , 1 ] ] ) data_df = pd . DataFrame ( data_np , index = data_mi , columns = data_col_names ) features = data_df . sample ( frac = 1 , random_state = random_state ) if return_links : links = data_mi [ : n_match ] return features , links else : return features | Generate random binary comparison vectors . |
17,662 | def _match_class_pos ( self ) : if self . kernel . classes_ . shape [ 0 ] != 2 : raise ValueError ( "Number of classes is {}, expected 2." . format ( self . kernel . classes_ . shape [ 0 ] ) ) return 1 | Return the position of the match class . |
17,663 | def _nonmatch_class_pos ( self ) : if self . kernel . classes_ . shape [ 0 ] != 2 : raise ValueError ( "Number of classes is {}, expected 2." . format ( self . kernel . classes_ . shape [ 0 ] ) ) return 0 | Return the position of the non - match class . |
17,664 | def log_weights ( self ) : m = self . kernel . feature_log_prob_ [ self . _match_class_pos ( ) ] u = self . kernel . feature_log_prob_ [ self . _nonmatch_class_pos ( ) ] return self . _prob_inverse_transform ( m - u ) | Log weights as described in the FS framework . |
17,665 | def weights ( self ) : m = self . kernel . feature_log_prob_ [ self . _match_class_pos ( ) ] u = self . kernel . feature_log_prob_ [ self . _nonmatch_class_pos ( ) ] return self . _prob_inverse_transform ( numpy . exp ( m - u ) ) | Weights as described in the FS framework . |
17,666 | def _initialise_classifier ( self , comparison_vectors ) : self . kernel . init = numpy . array ( [ [ 0.05 ] * len ( list ( comparison_vectors ) ) , [ 0.95 ] * len ( list ( comparison_vectors ) ) ] ) | Set the centers of the clusters . |
17,667 | def is_label_dataframe ( label , df ) : setdiff = set ( label ) - set ( df . columns . tolist ( ) ) if len ( setdiff ) == 0 : return True else : return False | check column label existance |
17,668 | def listify ( x , none_value = [ ] ) : if isinstance ( x , list ) : return x elif isinstance ( x , tuple ) : return list ( x ) elif x is None : return none_value else : return [ x ] | Make a list of the argument if it is not a list . |
17,669 | def multi_index_to_frame ( index ) : return pandas . DataFrame ( index . tolist ( ) , index = index , columns = index . names ) | Replicates MultiIndex . to_frame which was introduced in pandas 0 . 21 for the sake of backwards compatibility . |
17,670 | def index_split ( index , chunks ) : Ntotal = index . shape [ 0 ] Nsections = int ( chunks ) if Nsections <= 0 : raise ValueError ( 'number sections must be larger than 0.' ) Neach_section , extras = divmod ( Ntotal , Nsections ) section_sizes = ( [ 0 ] + extras * [ Neach_section + 1 ] + ( Nsections - extras ) * [ Neach_section ] ) div_points = numpy . array ( section_sizes ) . cumsum ( ) sub_ind = [ ] for i in range ( Nsections ) : st = div_points [ i ] end = div_points [ i + 1 ] sub_ind . append ( index [ st : end ] ) return sub_ind | Function to split pandas . Index and pandas . MultiIndex objects . |
17,671 | def frame_indexing ( frame , multi_index , level_i , indexing_type = 'label' ) : if indexing_type == "label" : data = frame . loc [ multi_index . get_level_values ( level_i ) ] data . index = multi_index elif indexing_type == "position" : data = frame . iloc [ multi_index . get_level_values ( level_i ) ] data . index = multi_index else : raise ValueError ( "indexing_type needs to be 'label' or 'position'" ) return data | Index dataframe based on one level of MultiIndex . |
17,672 | def fillna ( series_or_arr , missing_value = 0.0 ) : if pandas . notnull ( missing_value ) : if isinstance ( series_or_arr , ( numpy . ndarray ) ) : series_or_arr [ numpy . isnan ( series_or_arr ) ] = missing_value else : series_or_arr . fillna ( missing_value , inplace = True ) return series_or_arr | Fill missing values in pandas objects and numpy arrays . |
17,673 | def get_related_model ( field ) : model = None if hasattr ( field , 'related_model' ) and field . related_model : model = field . related_model elif hasattr ( field , 'rel' ) and field . rel : model = field . rel . to return model | Gets the related model from a related field |
17,674 | def to_timeseries ( self , fieldnames = ( ) , verbose = True , index = None , storage = 'wide' , values = None , pivot_columns = None , freq = None , coerce_float = True , rs_kwargs = None ) : assert index is not None , 'You must supply an index field' assert storage in ( 'wide' , 'long' ) , 'storage must be wide or long' if rs_kwargs is None : rs_kwargs = { } if storage == 'wide' : df = self . to_dataframe ( fieldnames , verbose = verbose , index = index , coerce_float = coerce_float , datetime_index = True ) else : df = self . to_dataframe ( fieldnames , verbose = verbose , coerce_float = coerce_float , datetime_index = True ) assert values is not None , 'You must specify a values field' assert pivot_columns is not None , 'You must specify pivot_columns' if isinstance ( pivot_columns , ( tuple , list ) ) : df [ 'combined_keys' ] = '' for c in pivot_columns : df [ 'combined_keys' ] += df [ c ] . str . upper ( ) + '.' df [ 'combined_keys' ] += values . lower ( ) df = df . pivot ( index = index , columns = 'combined_keys' , values = values ) else : df = df . pivot ( index = index , columns = pivot_columns , values = values ) if freq is not None : df = df . resample ( freq , ** rs_kwargs ) return df | A convenience method for creating a time series DataFrame i . e the DataFrame index will be an instance of DateTime or PeriodIndex |
17,675 | def to_dataframe ( self , fieldnames = ( ) , verbose = True , index = None , coerce_float = False , datetime_index = False ) : return read_frame ( self , fieldnames = fieldnames , verbose = verbose , index_col = index , coerce_float = coerce_float , datetime_index = datetime_index ) | Returns a DataFrame from the queryset |
def read_frame(qs, fieldnames=(), index_col=None, coerce_float=False,
               verbose=True, datetime_index=False):
    """Returns a dataframe from a QuerySet.

    :param qs: the Django QuerySet (plain or ``.values()``-based).
    :param fieldnames: explicit field names to extract; empty means all.
    :param index_col: field name to promote to the DataFrame index.
    :param coerce_float: attempt to convert values to floats.
    :param verbose: rename columns using verbose/related names.
    :param datetime_index: convert the index to datetimes where possible.
    """
    if fieldnames:
        # Explicit field list: de-duplicate it and make sure the index
        # column is fetched as well.
        fieldnames = pd.unique(fieldnames)
        if index_col is not None and index_col not in fieldnames:
            fieldnames = tuple(fieldnames) + (index_col,)
        fields = to_fields(qs, fieldnames)
    elif is_values_queryset(qs):
        # .values() queryset: recover the selected, annotated and extra
        # field names from the query internals (the API differs across
        # Django versions).
        if django.VERSION < (1, 9):
            annotation_field_names = list(qs.query.annotation_select)
            if annotation_field_names is None:
                annotation_field_names = []
            extra_field_names = qs.extra_names
            if extra_field_names is None:
                extra_field_names = []
            select_field_names = qs.field_names
        else:
            annotation_field_names = list(qs.query.annotation_select)
            extra_field_names = list(qs.query.extra_select)
            select_field_names = list(qs.query.values_select)
        fieldnames = select_field_names + annotation_field_names + \
            extra_field_names
        # Lookups spanning relations ('__') have no single model field.
        fields = [None if '__' in f else qs.model._meta.get_field(f)
                  for f in select_field_names] + \
            [None] * (len(annotation_field_names) + len(extra_field_names))
        # Drop duplicate names while preserving order (set.add returns
        # None, so 'not uniq_fields.add(...)' is always True).
        uniq_fields = set()
        fieldnames, fields = zip(
            *(f for f in zip(fieldnames, fields)
              if f[0] not in uniq_fields and not uniq_fields.add(f[0])))
    else:
        # Plain queryset: every concrete model field plus annotations.
        fields = qs.model._meta.fields
        fieldnames = [f.name for f in fields]
        fieldnames += list(qs.query.annotation_select.keys())
    if is_values_queryset(qs):
        recs = list(qs)
    else:
        recs = list(qs.values_list(*fieldnames))
    df = pd.DataFrame.from_records(recs, columns=fieldnames,
                                   coerce_float=coerce_float)
    if verbose:
        update_with_verbose(df, fieldnames, fields)
    if index_col is not None:
        df.set_index(index_col, inplace=True)
    if datetime_index:
        # Best-effort conversion; non-datetime indexes are left untouched.
        df.index = pd.to_datetime(df.index, errors="ignore")
    return df
17,677 | def _is_balanced ( root ) : if root is None : return 0 left = _is_balanced ( root . left ) if left < 0 : return - 1 right = _is_balanced ( root . right ) if right < 0 : return - 1 return - 1 if abs ( left - right ) > 1 else max ( left , right ) + 1 | Return the height if the binary tree is balanced - 1 otherwise . |
17,678 | def _build_bst_from_sorted_values ( sorted_values ) : if len ( sorted_values ) == 0 : return None mid_index = len ( sorted_values ) // 2 root = Node ( sorted_values [ mid_index ] ) root . left = _build_bst_from_sorted_values ( sorted_values [ : mid_index ] ) root . right = _build_bst_from_sorted_values ( sorted_values [ mid_index + 1 : ] ) return root | Recursively build a perfect BST from odd number of sorted values . |
17,679 | def _generate_random_leaf_count ( height ) : max_leaf_count = 2 ** height half_leaf_count = max_leaf_count // 2 roll_1 = random . randint ( 0 , half_leaf_count ) roll_2 = random . randint ( 0 , max_leaf_count - half_leaf_count ) return roll_1 + roll_2 or half_leaf_count | Return a random leaf count for building binary trees . |
17,680 | def _generate_random_node_values ( height ) : max_node_count = 2 ** ( height + 1 ) - 1 node_values = list ( range ( max_node_count ) ) random . shuffle ( node_values ) return node_values | Return random node values for building binary trees . |
def _build_tree_string(root, curr_index, index=False, delimiter='-'):
    """Recursively walk down the binary tree and build a pretty-print string.

    Returns a 4-tuple ``(box, width, root_start, root_end)`` where ``box``
    is a list of display lines and ``root_start``/``root_end`` mark the
    horizontal span of the root representation within the first line.

    :param root: subtree root (None yields an empty box).
    :param curr_index: level-order index of ``root`` (tree root is 0).
    :param index: if True, prefix each value with its level-order index.
    :param delimiter: separator between index and value when ``index``.
    """
    if root is None:
        return [], 0, 0, 0
    line1 = []
    line2 = []
    if index:
        node_repr = '{}{}{}'.format(curr_index, delimiter, root.value)
    else:
        node_repr = str(root.value)
    new_root_width = gap_size = len(node_repr)
    # Build the left and right sub-boxes first, then stitch them together.
    l_box, l_box_width, l_root_start, l_root_end = _build_tree_string(
        root.left, 2 * curr_index + 1, index, delimiter)
    r_box, r_box_width, r_root_start, r_root_end = _build_tree_string(
        root.right, 2 * curr_index + 2, index, delimiter)
    if l_box_width > 0:
        # Draw the '/' branch and the underscores leading to the left child.
        l_root = (l_root_start + l_root_end) // 2 + 1
        line1.append(' ' * (l_root + 1))
        line1.append('_' * (l_box_width - l_root))
        line2.append(' ' * l_root + '/')
        line2.append(' ' * (l_box_width - l_root))
        new_root_start = l_box_width + 1
        gap_size += 1
    else:
        new_root_start = 0
    # The root representation itself.
    line1.append(node_repr)
    line2.append(' ' * new_root_width)
    if r_box_width > 0:
        # Draw the '\' branch and the underscores leading to the right child.
        r_root = (r_root_start + r_root_end) // 2
        line1.append('_' * r_root)
        line1.append(' ' * (r_box_width - r_root + 1))
        line2.append(' ' * r_root + '\\')
        line2.append(' ' * (r_box_width - r_root))
        gap_size += 1
    new_root_end = new_root_start + new_root_width - 1
    # Merge the two sub-boxes line by line, padding the shorter one.
    gap = ' ' * gap_size
    new_box = [''.join(line1), ''.join(line2)]
    for i in range(max(len(l_box), len(r_box))):
        l_line = l_box[i] if i < len(l_box) else ' ' * l_box_width
        r_line = r_box[i] if i < len(r_box) else ' ' * r_box_width
        new_box.append(l_line + gap + r_line)
    return new_box, len(new_box[0]), new_root_start, new_root_end
def build(values):
    """Build a tree from a level-order list representation and return its
    root node.

    ``None`` entries mark missing nodes.  Raises ``NodeNotFoundError``
    when a non-null entry has a null parent.
    """
    nodes = [None if value is None else Node(value) for value in values]
    for idx in range(1, len(nodes)):
        child = nodes[idx]
        if child is None:
            continue
        parent_idx = (idx - 1) // 2
        parent = nodes[parent_idx]
        if parent is None:
            raise NodeNotFoundError(
                'parent node missing at index {}'.format(parent_idx))
        # Odd indexes are left children, even indexes right children.
        if idx % 2:
            parent.left = child
        else:
            parent.right = child
    return nodes[0] if nodes else None
def tree(height=3, is_perfect=False):
    """Generate a random binary tree and return its root node.

    :param height: maximum height of the generated tree.
    :param is_perfect: if True, return a perfect tree containing all
        2**(height + 1) - 1 values.
    """
    _validate_tree_height(height)
    values = _generate_random_node_values(height)
    if is_perfect:
        return build(values)
    leaf_count = _generate_random_leaf_count(height)
    root = Node(values.pop(0))
    leaves = set()
    for value in values:
        node = root
        depth = 0
        inserted = False
        # Random-walk down until a free slot is found or max depth is hit.
        while depth < height and not inserted:
            attr = random.choice(('left', 'right'))
            if getattr(node, attr) is None:
                setattr(node, attr, Node(value))
                inserted = True
            node = getattr(node, attr)
            depth += 1
        # Track nodes inserted at maximum depth and stop inserting once
        # the target leaf count is reached.
        if inserted and depth == height:
            leaves.add(node)
        if len(leaves) == leaf_count:
            break
    return root
def heap(height=3, is_max=True, is_perfect=False):
    """Generate a random heap and return its root node.

    :param height: maximum height of the heap.
    :param is_max: build a max-heap when True, a min-heap otherwise.
    :param is_perfect: keep all 2**(height + 1) - 1 values when True.
    """
    _validate_tree_height(height)
    values = _generate_random_node_values(height)
    if not is_perfect:
        # Trim to a random size that still guarantees the requested height.
        cut = random.randint(2 ** height, len(values))
        values = values[:cut]
    if is_max:
        # heapq only builds min-heaps: negate, heapify, and negate back.
        inverted = [-value for value in values]
        heapq.heapify(inverted)
        return build([-value for value in inverted])
    heapq.heapify(values)
    return build(values)
def pprint(self, index=False, delimiter='-'):
    """Pretty-print the binary tree.

    :param index: if True, show each node's level-order index.
    :param delimiter: separator between index and value.
    """
    lines = _build_tree_string(self, 0, index, delimiter)[0]
    rendered = '\n'.join(line.rstrip() for line in lines)
    print('\n' + rendered)
def validate(self):
    """Check if the binary tree is malformed.

    Walks the tree level by level and raises:

    * ``NodeReferenceError`` on a cyclic node reference,
    * ``NodeTypeError`` when a child is not a ``Node`` instance,
    * ``NodeValueError`` when a node value is not a number.
    """
    has_more_nodes = True
    visited = set()
    to_visit = [self]
    index = 0  # level-order index of the node being inspected
    while has_more_nodes:
        has_more_nodes = False
        next_nodes = []
        for node in to_visit:
            if node is None:
                # Keep placeholders so level-order indexes stay aligned.
                next_nodes.extend((None, None))
            else:
                if node in visited:
                    raise NodeReferenceError(
                        'cyclic node reference at index {}'.format(index))
                if not isinstance(node, Node):
                    raise NodeTypeError(
                        'invalid node instance at index {}'.format(index))
                if not isinstance(node.value, numbers.Number):
                    raise NodeValueError(
                        'invalid node value at index {}'.format(index))
                if node.left is not None or node.right is not None:
                    has_more_nodes = True
                visited.add(node)
                next_nodes.extend((node.left, node.right))
            index += 1
        to_visit = next_nodes
def values(self):
    """Return the list representation of the binary tree.

    Produces a level-order list with ``None`` for missing nodes;
    trailing ``None`` entries are stripped.
    """
    level = [self]
    result = []
    pending = True
    while pending:
        pending = False
        next_level = []
        for node in level:
            if node is None:
                result.append(None)
                next_level.extend((None, None))
                continue
            if node.left is not None or node.right is not None:
                pending = True
            result.append(node.value)
            next_level.extend((node.left, node.right))
        level = next_level
    while result and result[-1] is None:
        result.pop()
    return result
def leaves(self):
    """Return the leaf nodes of the binary tree, in level order."""
    frontier = [self]
    found = []
    while frontier:
        next_frontier = []
        for node in frontier:
            children = [c for c in (node.left, node.right) if c is not None]
            if children:
                next_frontier.extend(children)
            else:
                found.append(node)
        frontier = next_frontier
    return found
def properties(self):
    """Return various properties of the binary tree as a dict,
    including BST and balance checks.
    """
    props = _get_tree_properties(self)
    props['is_bst'] = _is_bst(self)
    # _is_balanced returns the height when balanced, -1 otherwise.
    props['is_balanced'] = _is_balanced(self) >= 0
    return props
def inorder(self):
    """Return the nodes of the binary tree using in-order traversal
    (left subtree, node, right subtree), iteratively.
    """
    stack = []
    nodes = []
    current = self
    while current is not None or stack:
        # Descend as far left as possible, then visit and turn right.
        while current is not None:
            stack.append(current)
            current = current.left
        current = stack.pop()
        nodes.append(current)
        current = current.right
    return nodes
def preorder(self):
    """Return the nodes of the binary tree using pre-order traversal
    (node, left subtree, right subtree), iteratively.
    """
    stack = [self]
    nodes = []
    while stack:
        node = stack.pop()
        nodes.append(node)
        # Push right before left so the left child is popped first.
        for child in (node.right, node.left):
            if child is not None:
                stack.append(child)
    return nodes
def postorder(self):
    """Return the nodes of the binary tree using post-order traversal
    (left subtree, right subtree, node), iteratively.
    """
    node_stack = []
    result = []
    node = self
    while True:
        # Push each right child just below its parent so the parent is
        # revisited after the left subtree but before the right one.
        while node is not None:
            if node.right is not None:
                node_stack.append(node.right)
            node_stack.append(node)
            node = node.left
        node = node_stack.pop()
        if (node.right is not None and len(node_stack) > 0 and
                node_stack[-1] is node.right):
            # Right subtree not yet visited: defer this node and
            # descend into the right child first.
            node_stack.pop()
            node_stack.append(node)
            node = node.right
        else:
            result.append(node)
            node = None
        if len(node_stack) == 0:
            break
    return result
def levelorder(self):
    """Return the nodes of the binary tree using level-order (BFS)
    traversal, left to right within each level.
    """
    nodes = []
    frontier = [self]
    while frontier:
        next_frontier = []
        for node in frontier:
            nodes.append(node)
            next_frontier.extend(
                child for child in (node.left, node.right)
                if child is not None)
        frontier = next_frontier
    return nodes
def invitation_backend(backend=None, namespace=None):
    """Return an instance of the specified invitation backend.

    :param backend: dotted path to the backend class; defaults to the
        ``ORGS_INVITATION_BACKEND`` setting.
    :param namespace: URL namespace passed to the backend constructor.
    """
    path = backend or ORGS_INVITATION_BACKEND
    module_path, class_name = path.rsplit(".", 1)
    backend_class = getattr(import_module(module_path), class_name)
    return backend_class(namespace=namespace)
def registration_backend(backend=None, namespace=None):
    """Return an instance of the specified registration backend.

    :param backend: dotted path to the backend class; defaults to the
        ``ORGS_REGISTRATION_BACKEND`` setting.
    :param namespace: URL namespace passed to the backend constructor.
    """
    path = backend or ORGS_REGISTRATION_BACKEND
    module_path, class_name = path.rsplit(".", 1)
    backend_class = getattr(import_module(module_path), class_name)
    return backend_class(namespace=namespace)
def org_registration_form(org_model):
    """Generates a registration ModelForm for the given organization model.

    :param org_model: the organization model class.
    :returns: a ``ModelForm`` subclass with an extra ``email`` field whose
        saved organization starts out inactive, pending registration.
    """
    class OrganizationRegistrationForm(forms.ModelForm):
        """Form for creating a new, not-yet-activated organization."""
        email = forms.EmailField()

        class Meta:
            model = org_model
            exclude = ("is_active", "users")

        def save(self, *args, **kwargs):
            # New organizations start inactive until registration completes.
            self.instance.is_active = False
            # Bug fix: return the saved instance to the caller —
            # ModelForm.save returns the model instance, and the original
            # override silently discarded it (returned None).
            return super(OrganizationRegistrationForm, self).save(
                *args, **kwargs)

    return OrganizationRegistrationForm
def save(self, *args, **kwargs):
    """Create a new OrganizationUser linking the User matching the
    provided email address to the organization.

    If no matching User is found, kick off the registration process by
    inviting the email address — a User must exist in order to link it
    to the Organization.  A notification is sent in either case.

    :raises forms.ValidationError: if more than one existing User
        matches the email address.
    """
    try:
        user = get_user_model().objects.get(
            email__iexact=self.cleaned_data["email"])
    except get_user_model().MultipleObjectsReturned:
        raise forms.ValidationError(
            _("This email address has been used multiple times."))
    except get_user_model().DoesNotExist:
        # No account yet: inviting the address creates the User.
        user = invitation_backend().invite_by_email(
            self.cleaned_data["email"],
            **{
                "domain": get_current_site(self.request),
                "organization": self.organization,
                "sender": self.request.user,
            }
        )
    # Notify the (possibly pre-existing) user about the new membership.
    invitation_backend().send_notification(
        user,
        **{
            "domain": get_current_site(self.request),
            "organization": self.organization,
            "sender": self.request.user,
        }
    )
    return OrganizationUser.objects.create(
        user=user,
        organization=self.organization,
        is_admin=self.cleaned_data["is_admin"],
    )
def save(self, **kwargs):
    """Create the organization, then get the user, then make the owner.

    If no User with the given email exists yet, one is created through
    the invitation backend and the organization starts out inactive
    until the invitee completes registration.
    """
    is_active = True
    try:
        user = get_user_model().objects.get(email=self.cleaned_data["email"])
    except get_user_model().DoesNotExist:
        user = invitation_backend().invite_by_email(
            self.cleaned_data["email"],
            **{
                "domain": get_current_site(self.request),
                "organization": self.cleaned_data["name"],
                "sender": self.request.user,
                "created": True,
            }
        )
        # Organization stays inactive until the invited user registers.
        is_active = False
    return create_organization(
        user,
        self.cleaned_data["name"],
        self.cleaned_data["slug"],
        is_active=is_active,
    )
def invite_by_email(self, email, user, organization, **kwargs):
    """Primary interface method by which one user invites another to join.

    :param email: address of the invitee (matched case-insensitively).
    :param user: the inviting user.
    :param organization: the organization being joined.
    :returns: the created invitation instance.
    """
    try:
        invitee = self.user_model.objects.get(email__iexact=email)
    except self.user_model.DoesNotExist:
        # No existing account; the invitation is keyed by email alone.
        invitee = None
    invitation = self.invitation_model.objects.create(
        invitee=invitee,
        invitee_identifier=email.lower(),
        invited_by=user,
        organization=organization,
    )
    self.send_invitation(invitation)
    return invitation
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.