idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
28,000
def rank(words: List[str], exclude_stopwords: bool = False) -> Counter:
    """Count word frequencies.

    Returns None for an empty input list; otherwise a Counter mapping each
    word to its occurrence count. When exclude_stopwords is True, words in
    _STOPWORDS are dropped before counting.
    """
    if not words:
        return None
    candidates = words
    if exclude_stopwords:
        candidates = [w for w in words if w not in _STOPWORDS]
    return Counter(candidates)
Sort words by frequency
28,001
def replace_rep_after(text: str) -> str:
    "Replace repetitions at the character level in `text` after the repetition"
    pattern = re.compile(r"(\S)(\1{2,})")  # a non-space char repeated 3+ times

    def _collapse(match):
        char, extra = match.groups()
        # keep one copy, then the TK_REP token and the total repeat count
        return f"{char}{TK_REP}{len(extra) + 1}"

    return pattern.sub(_collapse, text)
Replace repetitions at the character level in text after the repetition
28,002
def rm_brackets(text: str) -> str:
    """Remove all empty bracket pairs -- "()", "{}", "[]" -- from `text`.

    The passes run in sequence ((), then {}, then []), so a pair that becomes
    empty only after a later pass removes its contents (e.g. the braces in
    "{[]}") is NOT removed -- this ordering is preserved deliberately.
    """
    new_line = re.sub(r"\(\)", "", text)
    new_line = re.sub(r"\{\}", "", new_line)
    new_line = re.sub(r"\[\]", "", new_line)
    return new_line
Remove all empty brackets from text .
28,003
def _calc_validation_statistics ( validation_results ) : successful_expectations = sum ( exp [ "success" ] for exp in validation_results ) evaluated_expectations = len ( validation_results ) unsuccessful_expectations = evaluated_expectations - successful_expectations success = successful_expectations == evaluated_expec...
Calculate summary statistics for the validation results and return ExpectationStatistics .
28,004
def expectation ( cls , method_arg_names ) : def outer_wrapper ( func ) : @ wraps ( func ) def wrapper ( self , * args , ** kwargs ) : method_name = func . __name__ all_args = dict ( zip ( method_arg_names , args ) ) all_args . update ( kwargs ) if "include_config" in kwargs : include_config = kwargs [ "include_config"...
Manages configuration and running of expectation objects .
28,005
def _append_expectation ( self , expectation_config ) : expectation_type = expectation_config [ 'expectation_type' ] json . dumps ( expectation_config ) if 'column' in expectation_config [ 'kwargs' ] : column = expectation_config [ 'kwargs' ] [ 'column' ] self . _expectations_config . expectations = [ f for f in filter...
Appends an expectation to DataAsset . _expectations_config and drops existing expectations of the same type .
28,006
def _copy_and_clean_up_expectation ( self , expectation , discard_result_format_kwargs = True , discard_include_configs_kwargs = True , discard_catch_exceptions_kwargs = True , ) : new_expectation = copy . deepcopy ( expectation ) if "success_on_last_run" in new_expectation : del new_expectation [ "success_on_last_run"...
Returns copy of expectation without success_on_last_run and other specified key - value pairs removed
28,007
def _copy_and_clean_up_expectations_from_indexes ( self , match_indexes , discard_result_format_kwargs = True , discard_include_configs_kwargs = True , discard_catch_exceptions_kwargs = True , ) : rval = [ ] for i in match_indexes : rval . append ( self . _copy_and_clean_up_expectation ( self . _expectations_config . e...
Copies and cleans all expectations provided by their index in DataAsset . _expectations_config . expectations .
28,008
def get_expectations_config ( self , discard_failed_expectations = True , discard_result_format_kwargs = True , discard_include_configs_kwargs = True , discard_catch_exceptions_kwargs = True , suppress_warnings = False ) : config = dict ( self . _expectations_config ) config = copy . deepcopy ( config ) expectations = ...
Returns _expectation_config as a JSON object and perform some cleaning along the way .
28,009
def save_expectations_config ( self , filepath = None , discard_failed_expectations = True , discard_result_format_kwargs = True , discard_include_configs_kwargs = True , discard_catch_exceptions_kwargs = True , suppress_warnings = False ) : if filepath == None : pass expectations_config = self . get_expectations_confi...
Writes _expectation_config to a JSON file .
28,010
def validate ( self , expectations_config = None , evaluation_parameters = None , catch_exceptions = True , result_format = None , only_return_failures = False ) : results = [ ] if expectations_config is None : expectations_config = self . get_expectations_config ( discard_failed_expectations = False , discard_result_f...
Generates a JSON - formatted report describing the outcome of all expectations .
28,011
def get_evaluation_parameter ( self , parameter_name , default_value = None ) : if "evaluation_parameters" in self . _expectations_config and parameter_name in self . _expectations_config [ 'evaluation_parameters' ] : return self . _expectations_config [ 'evaluation_parameters' ] [ parameter_name ] else : return defaul...
Get an evaluation parameter value that has been stored in meta .
28,012
def set_evaluation_parameter(self, parameter_name, parameter_value):
    """Store a value in this data_asset's evaluation_parameters so it can be
    used later to evaluate parameterized expectations.

    Args:
        parameter_name: key under which the value is stored.
        parameter_value: value to store.
    """
    config = self._expectations_config
    if 'evaluation_parameters' not in config:
        config['evaluation_parameters'] = {}
    config['evaluation_parameters'][parameter_name] = parameter_value
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations .
28,013
def _build_evaluation_parameters ( self , expectation_args , evaluation_parameters ) : evaluation_args = copy . deepcopy ( expectation_args ) for key , value in evaluation_args . items ( ) : if isinstance ( value , dict ) and '$PARAMETER' in value : if "$PARAMETER." + value [ "$PARAMETER" ] in value : evaluation_args [...
Build a dictionary of parameters to evaluate, using the provided evaluation_parameters, AND mutate expectation_args by removing any parameter values passed in as temporary values during exploratory work .
28,014
def _calc_map_expectation_success ( self , success_count , nonnull_count , mostly ) : if nonnull_count > 0 : percent_success = success_count / nonnull_count if mostly != None : success = bool ( percent_success >= mostly ) else : success = bool ( nonnull_count - success_count == 0 ) else : success = True percent_success...
Calculate success and percent_success for column_map_expectations
28,015
def validate ( parsed_args ) : parsed_args = vars ( parsed_args ) data_set = parsed_args [ 'dataset' ] expectations_config_file = parsed_args [ 'expectations_config_file' ] expectations_config = json . load ( open ( expectations_config_file ) ) if parsed_args [ "evaluation_parameters" ] is not None : evaluation_paramet...
Read a dataset file and validate it using a config saved in another file . Uses parameters defined in the dispatch method .
28,016
def categorical_partition_data ( data ) : series = pd . Series ( data ) value_counts = series . value_counts ( dropna = True ) null_indexes = series . isnull ( ) nonnull_count = ( null_indexes == False ) . sum ( ) weights = value_counts . values / nonnull_count return { "values" : value_counts . index . tolist ( ) , "w...
Convenience method for creating weights from categorical data .
28,017
def kde_partition_data ( data , estimate_tails = True ) : kde = stats . kde . gaussian_kde ( data ) evaluation_bins = np . linspace ( start = np . min ( data ) - ( kde . covariance_factor ( ) / 2 ) , stop = np . max ( data ) + ( kde . covariance_factor ( ) / 2 ) , num = np . floor ( ( ( np . max ( data ) - np . min ( d...
Convenience method for building a partition and weights using a gaussian Kernel Density Estimate and default bandwidth .
28,018
def continuous_partition_data ( data , bins = 'auto' , n_bins = 10 ) : if bins == 'uniform' : bins = np . linspace ( start = np . min ( data ) , stop = np . max ( data ) , num = n_bins + 1 ) elif bins == 'ntile' : bins = np . percentile ( data , np . linspace ( start = 0 , stop = 100 , num = n_bins + 1 ) ) elif bins !=...
Convenience method for building a partition object on continuous data
28,019
def infer_distribution_parameters ( data , distribution , params = None ) : if params is None : params = dict ( ) elif not isinstance ( params , dict ) : raise TypeError ( "params must be a dictionary object, see great_expectations documentation" ) if 'mean' not in params . keys ( ) : params [ 'mean' ] = data . mean ( ...
Convenience method for determining the shape parameters of a given distribution
28,020
def _scipy_distribution_positional_args_from_dict ( distribution , params ) : params [ 'loc' ] = params . get ( 'loc' , 0 ) if 'scale' not in params : params [ 'scale' ] = 1 if distribution == 'norm' : return params [ 'mean' ] , params [ 'std_dev' ] elif distribution == 'beta' : return params [ 'alpha' ] , params [ 'be...
Helper function that returns positional arguments for a scipy distribution using a dict of parameters .
28,021
def create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):
    """Apply one identical expectation to each of the given columns.

    Args:
        df: data asset exposing the expectation as a method.
        columns: iterable of column names.
        expectation_type: name of the expectation method to call on df.
        *args/**kwargs: forwarded to every expectation call.

    Returns:
        list of the individual expectation results, in column order.
    """
    method = getattr(df, expectation_type)
    return [method(column, *args, **kwargs) for column in columns]
Creates an identical expectation for each of the given columns with the specified arguments if any .
28,022
def columns_exist ( inspect_dataset ) : if not hasattr ( inspect_dataset , "columns" ) : warnings . warn ( "No columns list found in dataset; no autoinspection performed." ) return elif isinstance ( inspect_dataset . columns [ 0 ] , string_types ) : columns = inspect_dataset . columns elif isinstance ( inspect_dataset ...
This function will take a dataset and add expectations that each column present exists .
28,023
def column_reflection_fallback(self):
    """If we can't reflect the table, run a SELECT * to at least get column names.

    Returns:
        list of {'name': <column name>} dicts, one per result column.
    """
    query = sa.select([sa.text("*")]).select_from(self._table)
    keys = self.engine.execute(query).keys()
    return [{'name': key} for key in keys]
If we can't reflect the table, use a query to at least get column names .
28,024
def parse_result_format(result_format):
    """Normalize a result_format into the dict form used internally.

    A bare string becomes {'result_format': <string>,
    'partial_unexpected_count': 20}; a dict gets a default
    partial_unexpected_count of 20 filled in (mutating the caller's dict,
    as the original did).
    """
    if isinstance(result_format, string_types):
        return {
            'result_format': result_format,
            'partial_unexpected_count': 20,
        }
    result_format.setdefault('partial_unexpected_count', 20)
    return result_format
This is a simple helper utility that can be used to parse a string result_format into the dict format used internally by great_expectations . It is not necessary but allows shorthand for result_format in cases where there is no need to specify a custom partial_unexpected_count .
28,025
def recursively_convert_to_json_serializable ( test_obj ) : try : if not isinstance ( test_obj , list ) and np . isnan ( test_obj ) : return None except TypeError : pass except ValueError : pass if isinstance ( test_obj , ( string_types , integer_types , float , bool ) ) : return test_obj elif isinstance ( test_obj , d...
Helper function to convert a dict object to one that is serializable
28,026
def read_excel ( filename , dataset_class = dataset . pandas_dataset . PandasDataset , expectations_config = None , autoinspect_func = None , * args , ** kwargs ) : df = pd . read_excel ( filename , * args , ** kwargs ) if isinstance ( df , dict ) : for key in df : df [ key ] = _convert_to_dataset_class ( df [ key ] , ...
Read a file using Pandas read_excel and return a great_expectations dataset .
28,027
def from_pandas(pandas_df, dataset_class=dataset.pandas_dataset.PandasDataset,
                expectations_config=None, autoinspect_func=None):
    """Wrap an existing Pandas DataFrame as a great_expectations dataset.

    Simply forwards to _convert_to_dataset_class with the chosen dataset
    class, optional expectations config, and optional autoinspection hook.
    """
    return _convert_to_dataset_class(
        pandas_df, dataset_class, expectations_config, autoinspect_func)
Read a Pandas data frame and return a great_expectations dataset .
28,028
def file_lines_map_expectation ( cls , func ) : if PY3 : argspec = inspect . getfullargspec ( func ) [ 0 ] [ 1 : ] else : argspec = inspect . getargspec ( func ) [ 0 ] [ 1 : ] @ cls . expectation ( argspec ) @ wraps ( func ) def inner_wrapper ( self , skip = None , mostly = None , null_lines_regex = r"^\s*$" , result_f...
Constructs an expectation using file lines map semantics . The file_lines_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating the truthiness of some condition on a line - by - line basis in a file .
28,029
def expect_file_hash_to_equal ( self , value , hash_alg = 'md5' , result_format = None , include_config = False , catch_exceptions = None , meta = None ) : success = False try : hash = hashlib . new ( hash_alg ) BLOCKSIZE = 65536 try : with open ( self . _path , 'rb' ) as file : file_buffer = file . read ( BLOCKSIZE ) ...
Expect computed file hash to equal some given value .
28,030
def expect_file_size_to_be_between ( self , minsize = 0 , maxsize = None , result_format = None , include_config = False , catch_exceptions = None , meta = None ) : try : size = os . path . getsize ( self . _path ) except OSError : raise try : if not float ( minsize ) . is_integer ( ) : raise ValueError ( 'minsize must...
Expect file size to be between a user specified maxsize and minsize .
28,031
def expect_file_to_exist ( self , filepath = None , result_format = None , include_config = False , catch_exceptions = None , meta = None ) : if filepath is not None and os . path . isfile ( filepath ) : success = True elif self . _path is not None and os . path . isfile ( self . _path ) : success = True else : success...
Checks to see if a file specified by the user actually exists
28,032
def expect_file_to_have_valid_table_header ( self , regex , skip = None , result_format = None , include_config = False , catch_exceptions = None , meta = None ) : try : comp_regex = re . compile ( regex ) except : raise ValueError ( "Must enter valid regular expression for regex" ) success = False try : with open ( se...
Checks to see if a file has a line with unique delimited values such a line may be used as a table header .
28,033
def get_data_context(context_type, options, *args, **kwargs):
    """Return a data_context object exposing dataset listing/lookup.

    New API in Great Expectations 0.4; subject to rapid change.

    Raises:
        ValueError: for an unrecognized context_type.
    """
    if context_type == "SqlAlchemy":
        return SqlAlchemyDataContext(options, *args, **kwargs)
    if context_type == "PandasCSV":
        return PandasCSVDataContext(options, *args, **kwargs)
    raise ValueError("Unknown data context.")
Return a data_context object which exposes options to list datasets and get a dataset from that context . This is a new API in Great Expectations 0 . 4 and is subject to rapid change .
28,034
def _initialize_expectations(self, config=None, data_asset_name=None):
    """Delegate expectation initialization to the parent class, then
    override the recorded data_asset_type with "Dataset"."""
    super(Dataset, self)._initialize_expectations(config=config, data_asset_name=data_asset_name)
    self._expectations_config["data_asset_type"] = "Dataset"
Override data_asset_type with Dataset
28,035
def expect_column_to_exist(self, column, column_index=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect the specified column to exist (optionally at column_index).

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect the specified column to exist .
28,036
def expect_table_row_count_to_be_between(self, min_value=0, max_value=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect the number of rows to be between min_value and max_value.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect the number of rows to be between two values .
28,037
def expect_column_values_to_be_of_type(self, column, type_, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect each column entry to be of the specified data type.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect each column entry to be a specified data type .
28,038
def expect_column_values_to_be_in_type_list(self, column, type_list, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect each column entry's type to be one of those in type_list.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect each column entry to match a list of specified data types .
28,039
def expect_column_values_to_be_in_set(self, column, value_set, mostly=None, parse_strings_as_datetimes=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect each column value to be in value_set.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect each column value to be in a given set .
28,040
def expect_column_values_to_be_decreasing(self, column, strictly=None, parse_strings_as_datetimes=None, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect column values to be decreasing (strictly so when `strictly` is set).

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect column values to be decreasing .
28,041
def expect_column_values_to_match_regex_list(self, column, regex_list, match_on="any", mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect column entries to be strings matching any/all (per match_on) of
    the regexes in regex_list; matches may be anywhere in the string.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect the column entries to be strings that can be matched to either any of or all of a list of regular expressions . Matches can be anywhere in the string .
28,042
def expect_column_values_to_not_match_regex_list(self, column, regex_list, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect column entries to be strings matching none of the regexes in
    regex_list; matches may be anywhere in the string.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect the column entries to be strings that do not match any of a list of regular expressions . Matches can be anywhere in the string .
28,043
def expect_column_values_to_match_strftime_format(self, column, strftime_format, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect column entries to be strings representing a date or time with
    the given strftime format.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect column entries to be strings representing a date or time with a given format .
28,044
def expect_column_values_to_be_dateutil_parseable(self, column, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect column entries to be parseable using dateutil.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect column entries to be parseable using dateutil .
28,045
def expect_column_values_to_match_json_schema(self, column, json_schema, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect column entries to be JSON objects matching json_schema.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect column entries to be JSON objects matching a given JSON schema .
28,046
def expect_column_most_common_value_to_be_in_set(self, column, value_set, ties_okay=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect the most common column value to be within value_set
    (ties tolerated when ties_okay is set).

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect the most common value to be within the designated value set
28,047
def expect_column_min_to_be_between(self, column, min_value=None, max_value=None, parse_strings_as_datetimes=None, output_strftime_format=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect the column minimum to be between min_value and max_value.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect the column minimum to be between a min and max value
28,048
def expect_column_pair_values_to_be_equal(self, column_A, column_B, ignore_row_if="both_values_are_missing", result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect the values in column_A to equal those in column_B, row by row.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect the values in column A to be the same as column B .
28,049
def expect_column_pair_values_A_to_be_greater_than_B ( self , column_A , column_B , or_equal = None , parse_strings_as_datetimes = None , allow_cross_type_comparisons = None , ignore_row_if = "both_values_are_missing" , result_format = None , include_config = False , catch_exceptions = None , meta = None ) : raise NotI...
Expect values in column A to be greater than column B .
28,050
def expect_column_pair_values_to_be_in_set(self, column_A, column_B, value_pairs_set, ignore_row_if="both_values_are_missing", result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect paired values from columns A and B to belong to value_pairs_set.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect paired values from columns A and B to belong to a set of valid pairs .
28,051
def expect_multicolumn_values_to_be_unique(self, column_list, ignore_row_if="all_values_are_missing", result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect each row's values to be unique across the columns in column_list.

    Abstract here; concrete dataset backends must override.
    """
    raise NotImplementedError
Expect the values for each row to be unique across the columns listed .
28,052
def column_map_expectation ( cls , func ) : if PY3 : argspec = inspect . getfullargspec ( func ) [ 0 ] [ 1 : ] else : argspec = inspect . getargspec ( func ) [ 0 ] [ 1 : ] @ cls . expectation ( argspec ) @ wraps ( func ) def inner_wrapper ( self , column , mostly = None , result_format = None , * args , ** kwargs ) : i...
Constructs an expectation using column - map semantics .
28,053
def column_pair_map_expectation ( cls , func ) : if PY3 : argspec = inspect . getfullargspec ( func ) [ 0 ] [ 1 : ] else : argspec = inspect . getargspec ( func ) [ 0 ] [ 1 : ] @ cls . expectation ( argspec ) @ wraps ( func ) def inner_wrapper ( self , column_A , column_B , mostly = None , ignore_row_if = "both_values_...
The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating truthiness of some condition on a per row basis across a pair of columns .
28,054
def multicolumn_map_expectation ( cls , func ) : if PY3 : argspec = inspect . getfullargspec ( func ) [ 0 ] [ 1 : ] else : argspec = inspect . getargspec ( func ) [ 0 ] [ 1 : ] @ cls . expectation ( argspec ) @ wraps ( func ) def inner_wrapper ( self , column_list , mostly = None , ignore_row_if = "all_values_are_missi...
The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating truthiness of some condition on a per row basis across a set of columns .
28,055
def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
    """Instantiate and execute a search command class.

    The command runs only when invoked as a script (module_name is None or
    '__main__'), so importing the defining module has no side effects.
    NOTE(review): the OS-level defaults (sys.argv/stdin/stdout) are bound at
    import time, which is the existing contract -- callers may pass their own.
    """
    assert issubclass(command_class, SearchCommand)
    if module_name is None or module_name == '__main__':
        command_class().process(argv, input_file, output_file)
Instantiates and executes a search command class
28,056
def search_results_info ( self ) : if self . _search_results_info is not None : return self . _search_results_info if self . _protocol_version == 1 : try : path = self . _input_header [ 'infoPath' ] except KeyError : return None else : assert self . _protocol_version == 2 try : dispatch_dir = self . _metadata . searchi...
Returns the search results info for this command invocation .
28,057
def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
    """Process data, selecting the wire protocol from the argument count:
    more than one argument means protocol v1, otherwise protocol v2."""
    use_v1 = len(argv) > 1
    if use_v1:
        self._process_protocol_v1(argv, ifile, ofile)
    else:
        self._process_protocol_v2(argv, ifile, ofile)
Process data .
28,058
def _execute ( self , ifile , process ) : self . _record_writer . write_records ( process ( self . _records ( ifile ) ) ) self . finish ( )
Default processing loop
28,059
def parse(argv, rules=None, config=None, **kwargs):
    """Parse argv with the default Splunk command rules, optionally merged
    with extra rules and an rc config file. Returns the parse result."""
    splunk_parser = parser(rules, **kwargs)
    if config is not None:
        splunk_parser.loadrc(config)
    return splunk_parser.parse(argv).result
Parse the given arg vector with the default Splunk command rules .
28,060
def parser(rules=None, **kwargs):
    """Instantiate a Parser seeded with the default Splunk command rules,
    merged with any caller-supplied rules (caller's entries win)."""
    if rules is None:
        merged = RULES_SPLUNK
    else:
        merged = dict(RULES_SPLUNK, **rules)
    return Parser(merged, **kwargs)
Instantiate a parser with the default Splunk command rules .
28,061
def cmdline(argv, rules=None, config=None, **kwargs):
    """Simplified cmdopts interface: build a fresh Parser with no default
    rules, optionally load an rc config, and parse argv exactly once."""
    plain_parser = Parser(rules, **kwargs)
    if config is not None:
        plain_parser.loadrc(config)
    return plain_parser.parse(argv).result
Simplified cmdopts interface that does not default any parsing rules and that does not allow compounding calls to the parser .
28,062
def init ( self , rules ) : for dest in rules . keys ( ) : rule = rules [ dest ] if 'default' in rule : self . result [ 'kwargs' ] [ dest ] = rule [ 'default' ] flags = rule [ 'flags' ] kwargs = { 'action' : rule . get ( 'action' , "store" ) } for key in [ 'callback' , 'help' , 'metavar' , 'type' ] : if key in rule : k...
Initialize the parser with the given command rules .
28,063
def loadif(self, filepath):
    """Load filepath if it exists; otherwise silently ignore it.

    Returns self so calls can be chained.
    """
    exists = path.isfile(filepath)
    if exists:
        self.load(filepath)
    return self
Load the given filepath if it exists otherwise ignore .
28,064
def parse(self, argv):
    """Parse the given argument vector.

    Positional args accumulate into result['args']; every option the user
    actually supplied (value is not None) lands in result['kwargs'].
    Returns self for chaining.
    """
    kwargs, positionals = self.parse_args(argv)
    self.result['args'] += positionals
    for dest in self.dests:
        value = getattr(kwargs, dest)
        if value is None:
            continue
        self.result['kwargs'][dest] = value
    return self
Parse the given argument vector .
28,065
def feed_index ( service , opts ) : indexname = opts . args [ 0 ] itype = opts . kwargs [ 'ingest' ] try : index = service . indexes [ indexname ] except KeyError : print ( "Index %s not found" % indexname ) return if itype in [ "stream" , "submit" ] : stream = index . attach ( ) else : input_host = opts . kwargs . get...
Feed the named index in a specific manner .
28,066
def xml_compare ( expected , found ) : if expected == found : return True if set ( expected . items ( ) ) != set ( found . items ( ) ) : return False expected_children = list ( expected ) found_children = list ( found ) if len ( expected_children ) != len ( found_children ) : return False if not all ( [ xml_compare ( a...
Checks equality of two ElementTree objects .
28,067
def cmdline(argv, flags):
    """Build cmdopts rules matching the given long flags ("--<flag>") and
    parse argv against them."""
    rules = {flag: {'flags': ["--%s" % flag]} for flag in flags}
    return parse(argv, rules)
A cmdopts wrapper that takes a list of flags and builds the corresponding cmdopts rules to match those flags .
28,068
def output(stream):
    """Write the contents of the given stream to stdout, 1 KiB at a time."""
    while True:
        chunk = stream.read(1024)
        if not chunk:
            break
        sys.stdout.write(chunk)
Write the contents of the given stream to stdout .
28,069
def create(self, argv):
    """Create a search job.

    Expects exactly one positional argument (the search expression); any
    remaining options are forwarded to jobs.create. Prints the new job's sid.
    """
    opts = cmdline(argv, FLAGS_CREATE)
    if len(opts.args) != 1:
        error("Command requires a search expression", 2)
    query = opts.args[0]
    # fix: the original assigned `query` but then re-read opts.args[0],
    # leaving the local unused — use it.
    job = self.service.jobs.create(query, **opts.kwargs)
    print(job.sid)
Create a search job .
28,070
def events(self, argv):
    """Retrieve and print the events for each of the specified search jobs."""
    opts = cmdline(argv, FLAGS_EVENTS)

    def show(job):
        output(job.events(**opts.kwargs))

    self.foreach(opts.args, show)
Retrieve events for the specified search jobs .
28,071
def foreach(self, argv, func):
    """Apply func to the job named by each item in the argument vector.

    Errors out (exit code 2) when argv is empty or a named job is missing.
    """
    if not argv:
        error("Command requires a search specifier.", 2)
    for item in argv:
        job = self.lookup(item)
        if job is None:
            error("Search job '%s' does not exist" % item, 2)
        func(job)
Apply the function to each job specified in the argument vector .
28,072
def list ( self , argv ) : def read ( job ) : for key in sorted ( job . content . keys ( ) ) : if key in [ "performance" ] : continue print ( "%s: %s" % ( key , job . content [ key ] ) ) if len ( argv ) == 0 : index = 0 for job in self . service . jobs : print ( "@%d : %s" % ( index , job . sid ) ) index += 1 return se...
List all current search jobs if no jobs specified otherwise list the properties of the specified jobs .
28,073
def preview(self, argv):
    """Retrieve and print the preview for each of the specified search jobs."""
    opts = cmdline(argv, FLAGS_RESULTS)

    def show(job):
        output(job.preview(**opts.kwargs))

    self.foreach(opts.args, show)
Retrieve the preview for the specified search jobs .
28,074
def run ( self , argv ) : command = argv [ 0 ] handlers = { 'cancel' : self . cancel , 'create' : self . create , 'events' : self . events , 'finalize' : self . finalize , 'list' : self . list , 'pause' : self . pause , 'preview' : self . preview , 'results' : self . results , 'searchlog' : self . searchlog , 'summary'...
Dispatch the given command .
28,075
def searchlog(self, argv):
    """Retrieve and print the searchlog for each of the specified search jobs."""
    opts = cmdline(argv, FLAGS_SEARCHLOG)

    def show(job):
        output(job.searchlog(**opts.kwargs))

    self.foreach(opts.args, show)
Retrieve the searchlog for the specified search jobs .
28,076
def handler ( ca_file = None ) : def request ( url , message , ** kwargs ) : scheme , host , port , path = spliturl ( url ) if scheme != "https" : ValueError ( "unsupported scheme: %s" % scheme ) connection = HTTPSConnection ( host , port , ca_file ) try : body = message . get ( 'body' , "" ) headers = dict ( message ....
Returns an HTTP request handler configured with the given ca_file .
28,077
def list ( self , argv ) : def read ( index ) : print ( index . name ) for key in sorted ( index . content . keys ( ) ) : value = index . content [ key ] print ( " %s: %s" % ( key , value ) ) if len ( argv ) == 0 : for index in self . service . indexes : count = index [ 'totalEventCount' ] print ( "%s (%s)" % ( inde...
List available indexes if no names provided otherwise list the properties of the named indexes .
28,078
def foreach(self, argv, func):
    """Apply func to each index named in the argument vector.

    Errors out (exit code 2) when no names are given or a name is unknown.
    """
    opts = cmdline(argv)
    names = opts.args
    if not names:
        error("Command requires an index name", 2)
    for name in names:
        if name not in self.service.indexes:
            error("Index '%s' does not exist" % name, 2)
        func(self.service.indexes[name])
Apply the function to each index named in the argument vector .
28,079
def update ( self , argv ) : if len ( argv ) == 0 : error ( "Command requires an index name" , 2 ) name = argv [ 0 ] if name not in self . service . indexes : error ( "Index '%s' does not exist" % name , 2 ) index = self . service . indexes [ name ] fields = self . service . indexes . itemmeta ( ) . fields . optional r...
Update an index according to the given argument vector .
28,080
def add_to_document ( self , parent ) : arg = ET . SubElement ( parent , "arg" ) arg . set ( "name" , self . name ) if self . title is not None : ET . SubElement ( arg , "title" ) . text = self . title if self . description is not None : ET . SubElement ( arg , "description" ) . text = self . description if self . vali...
Adds an Argument object to this ElementTree document .
28,081
def get_event_start(event_buffer, event_format):
    """Dispatch the event-start finder based on the event format:
    "csv" and "xml" have dedicated finders; anything else is treated as json."""
    if event_format == "csv":
        finder = get_csv_event_start
    elif event_format == "xml":
        finder = get_xml_event_start
    else:
        finder = get_json_event_start
    return finder(event_buffer)
dispatch event start method based on event format type
28,082
def recover ( options ) : event_format = options . kwargs [ 'omode' ] buffer_size = 64 * 1024 fpd = open ( options . kwargs [ 'output' ] , "r+" ) fpd . seek ( 0 , 2 ) fptr = max ( fpd . tell ( ) - buffer_size , 0 ) fptr_eof = 0 while ( fptr > 0 ) : fpd . seek ( fptr ) event_buffer = fpd . read ( buffer_size ) ( event_s...
Recover from an existing export run . We do this by finding the last time change between events , truncating the file there , and restarting from that point .
28,083
def cleanup_tail(options):
    """Write the closing bytes for a recovery run, depending on output mode:
    csv gets a newline, xml a closing </results> tag, anything else (json)
    a closing bracket."""
    omode = options.kwargs['omode']
    if omode == "csv":
        tail = "\n"
    elif omode == "xml":
        tail = "\n</results>\n"
    else:
        tail = "\n]\n"
    options.kwargs['fd'].write(tail)
cleanup the tail of a recovery
28,084
def to_xml ( self ) : root = ET . Element ( "scheme" ) ET . SubElement ( root , "title" ) . text = self . title if self . description is not None : ET . SubElement ( root , "description" ) . text = self . description subelements = [ ( "use_external_validation" , self . use_external_validation ) , ( "use_single_instance...
Creates an ET . Element representing self then returns it .
28,085
def write_xml_document(self, document):
    """Serialize an ElementTree element to the output stream and flush."""
    serialized = ET.tostring(document)
    self._out.write(serialized)
    self._out.flush()
Writes a string representation of an ElementTree object to the output stream .
28,086
def create ( self , opts ) : argv = opts . args count = len ( argv ) cpres = True if count > 0 else False spres = True if count > 1 else False kpres = True if count > 2 else False if kpres : kvpair = argv [ 2 ] . split ( "=" ) if len ( kvpair ) != 2 : error ( "Creating a k/v pair requires key and value" , 2 ) else : ke...
Create a conf stanza .
28,087
def delete ( self , opts ) : argv = opts . args count = len ( argv ) cpres = True if count > 0 else False spres = True if count > 1 else False kpres = True if count > 2 else False if not cpres : error ( "Conf name is required for delete" , 2 ) if not cpres and not spres : error ( "Conf name and stanza name is required ...
Delete a conf stanza .
28,088
def list ( self , opts ) : argv = opts . args count = len ( argv ) cpres = True if count > 0 else False spres = True if count > 1 else False kpres = True if count > 2 else False if not cpres : for conf in self . service . confs : print ( conf . name ) else : name = argv [ 0 ] conf = self . service . confs [ name ] for ...
List all confs or if a conf is given all the stanzas in it .
28,089
def tob(data, enc='utf8'):
    """Coerce `data` to bytes: text is encoded with `enc`, everything else is
    passed through bytes()."""
    if isinstance(data, six.text_type):
        return data.encode(enc)
    return bytes(data)
Convert anything to bytes
28,090
def make_default_app_wrapper(name):
    """Return a callable that relays calls of Bottle method `name` to the
    current default app, preserving the original method's metadata."""
    method = getattr(Bottle, name)

    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        return getattr(app(), name)(*args, **kwargs)

    return wrapper
Return a callable that relays calls to the current default app .
28,091
def load_app(target):
    """Load a bottle application from a target string and return it.

    A fresh default app is pushed so routes registered during import are
    captured; if _load returns a Bottle instance that wins, otherwise the
    pushed app (with any collected routes) is returned.
    """
    placeholder = app.push()
    loaded = _load(target)
    app.remove(placeholder)
    if isinstance(loaded, Bottle):
        return loaded
    return placeholder
Load a bottle application based on a target string and return the application object .
28,092
def run ( app = None , server = 'wsgiref' , host = '127.0.0.1' , port = 8080 , interval = 1 , reloader = False , quiet = False , ** kargs ) : app = app or default_app ( ) if isinstance ( app , six . string_types ) : app = load_app ( app ) if isinstance ( server , six . string_types ) : server = server_names . get ( ser...
Start a server instance . This method blocks until the server terminates .
28,093
def build ( self , _name , * anon , ** args ) : if _name not in self . named : raise RouteBuildError ( "No route with that name." , _name ) rule , pairs = self . named [ _name ] if not pairs : token = self . syntax . split ( rule ) parts = [ p . replace ( '\\:' , ':' ) for p in token [ : : 3 ] ] names = token [ 1 : : 3...
Return a string that matches a named route . Use keyword arguments to fill out named wildcards . Remaining arguments are appended as a query string . Raises RouteBuildError or KeyError .
28,094
def _match_path ( self , environ ) : path = environ [ 'PATH_INFO' ] or '/' match = self . static . get ( path ) if match : return match , { } for combined , rules in self . dynamic : match = combined . match ( path ) if not match : continue gpat , match = rules [ match . lastindex - 1 ] return match , gpat . match ( pa...
Optimized PATH_INFO matcher .
28,095
def _compile ( self ) : self . static = { } self . dynamic = [ ] def fpat_sub ( m ) : return m . group ( 0 ) if len ( m . group ( 1 ) ) % 2 else m . group ( 1 ) + '(?:' for rule in self . rules : target = self . routes [ rule ] if not self . syntax . search ( rule ) : self . static [ rule . replace ( '\\:' , ':' ) ] = ...
Prepare static and dynamic search structures .
28,096
def _compile_pattern ( self , rule ) : out = '' for i , part in enumerate ( self . syntax . split ( rule ) ) : if i % 3 == 0 : out += re . escape ( part . replace ( '\\:' , ':' ) ) elif i % 3 == 1 : out += '(?P<%s>' % part if part else '(?:' else : out += '%s)' % ( part or '[^/]+' ) return re . compile ( '^%s$' % out )
Return a regular expression with named groups for each wildcard .
28,097
def mount ( self , app , prefix , ** options ) : if not isinstance ( app , Bottle ) : raise TypeError ( 'Only Bottle instances are supported for now.' ) prefix = '/' . join ( [ _f for _f in prefix . split ( '/' ) if _f ] ) if not prefix : raise TypeError ( 'Empty prefix. Perhaps you want a merge()?' ) for other in self...
Mount an application to a specific URL prefix . The prefix is added to SCRIPT_PATH and removed from PATH_INFO before the sub - application is called .
28,098
def uninstall ( self , plugin ) : removed , remove = [ ] , plugin for i , plugin in list ( enumerate ( self . plugins ) ) [ : : - 1 ] : if remove is True or remove is plugin or remove is type ( plugin ) or getattr ( plugin , 'name' , True ) == remove : removed . append ( plugin ) del self . plugins [ i ] if hasattr ( p...
Uninstall plugins . Pass an instance to remove a specific plugin . Pass a type object to remove all plugins that match that type . Subclasses are not removed . Pass a string to remove all plugins with a matching name attribute . Pass True to remove all plugins . The list of affected plugins is returned .
28,099
def close(self):
    """Close the application: call close() on every installed plugin that
    supports it, then mark the app as stopped."""
    closers = (p for p in self.plugins if hasattr(p, 'close'))
    for plugin in closers:
        plugin.close()
    self.stopped = True
Close the application and all installed plugins .