idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
237,000
def _get_table(name):
  """Resolve a notebook variable or BigQuery table name to a Table.

  Args:
    name: a notebook variable holding a Table, or a BQ table name string.
  Returns:
    The Table if it exists, else None.
  """
  # A notebook variable that already holds a Table object wins.
  item = google.datalab.utils.commands.get_notebook_item(name)
  if isinstance(item, bigquery.Table):
    return item
  # Otherwise treat this as a BQ table name, consulting the existence cache.
  cached = _existing_table_cache.get(name)
  if cached is not None:
    return cached
  candidate = bigquery.Table(name)
  if candidate.exists():
    _existing_table_cache[name] = candidate
    return candidate
  return None
Given a variable or table name get a Table if it exists .
127
13
237,001
def _render_list(data):
  """Helper to render a list of objects as an HTML list object."""
  rendered = google.datalab.utils.commands.HtmlBuilder.render_list(data)
  return IPython.core.display.HTML(rendered)
Helper to render a list of objects as an HTML list object .
41
13
237,002
def _dataset_line(args):
  """Implements the BigQuery dataset magic subcommand used to operate on datasets.

  Args:
    args: the parsed arguments from the magic line.
  Returns:
    The rendered dataset list for 'list'; None for 'create'/'delete'.
  """
  command = args['command']
  if command == 'list':
    pattern = args['filter'] if args['filter'] else '*'
    context = google.datalab.Context.default()
    if args['project']:
      # Scope the listing to the requested project, reusing default credentials.
      context = google.datalab.Context(args['project'], context.credentials)
    matches = [str(dataset) for dataset in bigquery.Datasets(context)
               if fnmatch.fnmatch(str(dataset), pattern)]
    return _render_list(matches)
  if command == 'create':
    try:
      bigquery.Dataset(args['name']).create(friendly_name=args['friendly'])
    except Exception as e:
      print('Failed to create dataset %s: %s' % (args['name'], e))
  elif command == 'delete':
    try:
      bigquery.Dataset(args['name']).delete()
    except Exception as e:
      print('Failed to delete dataset %s: %s' % (args['name'], e))
Implements the BigQuery dataset magic subcommand used to operate on datasets
263
15
237,003
def _table_cell(args, cell_body):
  """Implements the BigQuery table magic subcommand used to operate on tables.

  Supported subcommands: 'list', 'create', 'describe', 'delete', 'view'.

  Args:
    args: the parsed arguments from the magic line.
    cell_body: the cell contents (a YAML/JSON schema for 'create'; unused
        by the other subcommands).
  Returns:
    An HTML rendering for 'list'/'describe', the Table for 'view', or None.
  Raises:
    Exception: for 'describe'/'view' when the named table cannot be found.
  """
  if args['command'] == 'list':
    filter_ = args['filter'] if args['filter'] else '*'
    if args['dataset']:
      # List tables in a single dataset, optionally in another project.
      if args['project'] is None:
        datasets = [bigquery.Dataset(args['dataset'])]
      else:
        context = google.datalab.Context(
            args['project'], google.datalab.Context.default().credentials)
        datasets = [bigquery.Dataset(args['dataset'], context)]
    else:
      # No dataset given: enumerate every dataset in the (possibly overridden)
      # project. A fresh Context is built so set_project_id does not mutate
      # the shared default context.
      default_context = google.datalab.Context.default()
      context = google.datalab.Context(default_context.project_id,
                                       default_context.credentials)
      if args['project']:
        context.set_project_id(args['project'])
      datasets = bigquery.Datasets(context)
    tables = []
    for dataset in datasets:
      tables.extend([table.full_name
                     for table in dataset
                     if fnmatch.fnmatch(table.full_name, filter_)])
    return _render_list(tables)
  elif args['command'] == 'create':
    if cell_body is None:
      print('Failed to create %s: no schema specified' % args['name'])
    else:
      try:
        # Parse the cell as a schema document and validate it before creating.
        record = google.datalab.utils.commands.parse_config(
            cell_body, google.datalab.utils.commands.notebook_environment(),
            as_dict=False)
        jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
        schema = bigquery.Schema(record['schema'])
        bigquery.Table(args['name']).create(schema=schema,
                                            overwrite=args['overwrite'])
      except Exception as e:
        print('Failed to create table %s: %s' % (args['name'], e))
  elif args['command'] == 'describe':
    name = args['name']
    table = _get_table(name)
    if not table:
      raise Exception('Could not find table %s' % name)
    html = _repr_html_table_schema(table.schema)
    return IPython.core.display.HTML(html)
  elif args['command'] == 'delete':
    try:
      bigquery.Table(args['name']).delete()
    except Exception as e:
      print('Failed to delete table %s: %s' % (args['name'], e))
  elif args['command'] == 'view':
    name = args['name']
    table = _get_table(name)
    if not table:
      raise Exception('Could not find table %s' % name)
    return table
Implements the BigQuery table magic subcommand used to operate on tables
644
15
237,004
def _extract_cell(args, cell_body):
  """Implements the BigQuery extract magic used to extract query or table data to GCS.

  Args:
    args: the parsed arguments from the magic line.
    cell_body: optional cell contents; may contain a 'parameters' config used
        to resolve parameterized table/path names.
  Returns:
    The result of the completed extract job.
  Raises:
    Exception: if no source is given, the source cannot be found, or the
        extract job fails or completes with errors.
  """
  env = google.datalab.utils.commands.notebook_environment()
  config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
  parameters = config.get('parameters')
  if args['table']:
    # Table source: resolve parameters in both table name and output path.
    table = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
    source = _get_table(table)
    if not source:
      raise Exception('Could not find table %s' % table)
    # The delimiter only applies to csv output.
    csv_delimiter = args['delimiter'] if args['format'] == 'csv' else None
    path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters)
    job = source.extract(path, format=args['format'], csv_delimiter=csv_delimiter,
                         csv_header=args['header'], compress=args['compress'])
  elif args['query'] or args['view']:
    # Query/view source: execute the query with file output options.
    source_name = args['view'] or args['query']
    source = google.datalab.utils.commands.get_notebook_item(source_name)
    if not source:
      raise Exception('Could not find ' +
                      ('view ' + args['view'] if args['view']
                       else 'query ' + args['query']))
    query = source if args['query'] else bigquery.Query.from_view(source)
    # Query parameters only apply to queries, not views.
    query_params = get_query_parameters(args, cell_body) if args['query'] else None
    output_options = QueryOutput.file(path=args['path'], format=args['format'],
                                      csv_delimiter=args['delimiter'],
                                      csv_header=args['header'],
                                      compress=args['compress'],
                                      use_cache=not args['nocache'])
    context = google.datalab.utils._utils._construct_context_for_args(args)
    job = query.execute(output_options, context=context, query_params=query_params)
  else:
    raise Exception('A query, table, or view is needed to extract')
  if job.failed:
    raise Exception('Extract failed: %s' % str(job.fatal_error))
  elif job.errors:
    raise Exception('Extract completed with errors: %s' % str(job.errors))
  return job.result()
Implements the BigQuery extract magic used to extract query or table data to GCS .
594
19
237,005
def bq(line, cell=None):
  """Implements the bq cell magic for ipython notebooks.

  Args:
    line: the magic line contents.
    cell: the body of the cell, if any.
  Returns:
    The result of dispatching the magic to the bigquery parser.
  """
  parser = _bigquery_parser
  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the bq cell magic for ipython notebooks .
39
13
237,006
def _build_js ( inputs , outputs , name , implementation , support_code ) : # Construct a comma-separated list of input field names # For example, field1,field2,... input_fields = json . dumps ( [ f [ 0 ] for f in inputs ] ) # Construct a json representation of the output schema # For example, [{'name':'field1','type':'string'},...] output_fields = [ { 'name' : f [ 0 ] , 'type' : f [ 1 ] } for f in outputs ] output_fields = json . dumps ( output_fields , sort_keys = True ) # Build the JS from the individual bits with proper escaping of the implementation if support_code is None : support_code = '' return ( '{code}\n{name}={implementation};\nbigquery.defineFunction(\'{name}\', {inputs}, ' '{outputs}, {name});' ) . format ( code = support_code , name = name , implementation = implementation , inputs = str ( input_fields ) , outputs = str ( output_fields ) )
Creates a BigQuery SQL UDF javascript object .
239
11
237,007
def sampling_query(sql, fields=None, count=5, sampling=None):
  """Returns a sampling query for the SQL object.

  Args:
    sql: the SQL statement or object to sample.
    fields: optional list of field names to retrieve.
    count: optional number of rows to retrieve (default 5).
    sampling: optional sampling strategy; defaults to Sampling.default.
  Returns:
    The sampling SQL produced by applying the strategy.
  """
  strategy = sampling if sampling is not None else Sampling.default(count=count, fields=fields)
  return strategy(sql)
Returns a sampling query for the SQL object .
45
9
237,008
def _remove_nonascii(self, df):
  """Make a deep copy of df with non-ascii characters stripped from string cells.

  Args:
    df: a pandas DataFrame.
  Returns:
    A deep copy where, in object-dtype columns, every string value has had
    characters outside the ASCII range removed; other values pass through.
  """
  cleaned = df.copy(deep=True)
  object_dtype = np.dtype('O')

  def _strip(value):
    # Only string values are filtered; non-string objects are left untouched.
    if isinstance(value, six.string_types):
      return re.sub(r'[^\x00-\x7f]', r'', value)
    return value

  for column in cleaned.columns:
    if cleaned[column].dtype == object_dtype:
      cleaned[column] = df[column].apply(_strip)
  return cleaned
Make copy and remove non - ascii characters from it .
117
13
237,009
def plot(self, data):
  """Plots a facets overview for a dict of named dataframes.

  Args:
    data: a dict mapping display name -> pandas DataFrame.
  Returns:
    An IPython HTML object hosting the facets-overview visualization.
  Raises:
    ValueError: if data is not a dict of DataFrames.
  """
  import IPython
  if not isinstance(data, dict) or not all(isinstance(v, pd.DataFrame) for v in data.values()):
    raise ValueError('Expect a dictionary where the values are all dataframes.')
  gfsg = GenericFeatureStatisticsGenerator()
  # Strip non-ascii characters first; the proto/base64 pipeline below expects
  # ascii-safe content.
  data = [{'name': k, 'table': self._remove_nonascii(v)} for k, v in six.iteritems(data)]
  data_proto = gfsg.ProtoFromDataFrames(data)
  # The stats proto is embedded into the HTML as a base64 string.
  protostr = base64.b64encode(data_proto.SerializeToString()).decode("utf-8")
  html_id = 'f' + datalab.utils.commands.Html.next_id()
  HTML_TEMPLATE = """<link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html" > <facets-overview id="{html_id}"></facets-overview> <script> document.querySelector("#{html_id}").protoInput = "{protostr}"; </script>"""
  html = HTML_TEMPLATE.format(html_id=html_id, protostr=protostr)
  return IPython.core.display.HTML(html)
Plots an overview in a list of dataframes
318
10
237,010
def plot(self, data, height=1000, render_large_data=False):
  """Plots a detail (facets-dive) view of data.

  Args:
    data: a pandas DataFrame to visualize.
    height: pixel height of the rendered facets-dive element.
    render_large_data: set True to allow rendering more than 10000 rows.
  Returns:
    An IPython HTML object hosting the facets-dive visualization.
  Raises:
    ValueError: if data is not a DataFrame, or is too large without opt-in.
  """
  import IPython
  if not isinstance(data, pd.DataFrame):
    raise ValueError('Expect a DataFrame.')
  too_large = len(data) > 10000 and not render_large_data
  if too_large:
    raise ValueError('Facets dive may not work well with more than 10000 rows. ' +
                     'Reduce data or set "render_large_data" to True.')
  jsonstr = data.to_json(orient='records')
  html_id = 'f' + datalab.utils.commands.Html.next_id()
  HTML_TEMPLATE = """ <link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html"> <facets-dive id="{html_id}" height="{height}"></facets-dive> <script> var data = {jsonstr}; document.querySelector("#{html_id}").data = data; </script>"""
  html = HTML_TEMPLATE.format(html_id=html_id, jsonstr=jsonstr, height=height)
  return IPython.core.display.HTML(html)
Plots a detail view of data .
281
8
237,011
def DtypeToType(self, dtype):
  """Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum.

  Args:
    dtype: a numpy dtype.
  Returns:
    self.fs_proto.FLOAT for floating types; self.fs_proto.INT for integer,
    boolean, datetime and timedelta types; self.fs_proto.STRING otherwise.
  """
  if dtype.char in np.typecodes['AllFloat']:
    return self.fs_proto.FLOAT
  # np.bool was a deprecated alias for the builtin bool and was removed in
  # NumPy 1.24; np.bool_ yields the same comparison result on all versions.
  elif (dtype.char in np.typecodes['AllInteger'] or dtype == np.bool_ or
        np.issubdtype(dtype, np.datetime64) or
        np.issubdtype(dtype, np.timedelta64)):
    return self.fs_proto.INT
  else:
    return self.fs_proto.STRING
Converts a Numpy dtype to the FeatureNameStatistics . Type proto enum .
122
17
237,012
def NdarrayToEntry(self, x):
  """Converts an ndarray to the Entry format.

  Args:
    x: a numpy ndarray of column values (rows may themselves be arrays).
  Returns:
    A dict with 'vals' (cleaned values), 'counts' (per-row non-nan counts),
    'missing' (number of removed None/nan values) and 'type' (proto enum).
  """
  row_counts = []
  for row in x:
    try:
      # Count the non-nan values in this row (works for numeric rows).
      rc = np.count_nonzero(~np.isnan(row))
      if rc != 0:
        row_counts.append(rc)
    except TypeError:
      # Non-numeric rows: fall back to the row size, or 1 for scalars.
      try:
        row_counts.append(row.size)
      except AttributeError:
        row_counts.append(1)
  data_type = self.DtypeToType(x.dtype)
  converter = self.DtypeToNumberConverter(x.dtype)
  flattened = x.ravel()
  orig_size = len(flattened)
  # Remove all None and nan values and count how many were removed.
  flattened = flattened[flattened != np.array(None)]
  if converter:
    flattened = converter(flattened)
  if data_type == self.fs_proto.STRING:
    flattened_temp = []
    # NOTE(review): this loop reuses the name 'x', shadowing the parameter;
    # harmless here since the array is not used afterwards, but fragile.
    for x in flattened:
      try:
        if str(x) != 'nan':
          flattened_temp.append(x)
      except UnicodeEncodeError:
        if x.encode('utf-8') != 'nan':
          flattened_temp.append(x)
    flattened = flattened_temp
  else:
    flattened = flattened[~np.isnan(flattened)].tolist()
  missing = orig_size - len(flattened)
  return {'vals': flattened, 'counts': row_counts, 'missing': missing,
          'type': data_type}
Converts an ndarray to the Entry format .
317
11
237,013
def serving_from_csv_input(train_config, args, keep_target):
  """Read the input features from a placeholder csv string tensor.

  Args:
    train_config: the training config dict (provides 'target_column').
    args: parsed command-line args; provides preprocess_output_dir and
        model_type to the preprocessing step.
    keep_target: if True the target column is parsed and returned separately.
  Returns:
    An InputFnOps of (features, target, {'csv_line': examples}).
  """
  examples = tf.placeholder(dtype=tf.string, shape=(None,),
                            name='csv_input_string')
  features = parse_example_tensor(examples=examples, train_config=train_config,
                                  keep_target=keep_target)
  if keep_target:
    # Pull the target out of the feature dict so it is not preprocessed as
    # an ordinary feature.
    target = features.pop(train_config['target_column'])
  else:
    target = None
  features, target = preprocess_input(features=features, target=target,
                                      train_config=train_config,
                                      preprocess_output_dir=args.preprocess_output_dir,
                                      model_type=args.model_type)
  return input_fn_utils.InputFnOps(features, target, {'csv_line': examples})
Read the input features from a placeholder csv string tensor .
183
13
237,014
def parse_example_tensor(examples, train_config, keep_target):
  """Parse csv example strings into a dict of column-name -> tensor.

  Args:
    examples: a string tensor of csv lines.
    train_config: the training config dict ('csv_header', 'csv_defaults',
        'target_column').
    keep_target: if False the target column is excluded from the result.
  Returns:
    A dict mapping csv column names to their parsed tensors.
  """
  csv_header = []
  if keep_target:
    csv_header = train_config['csv_header']
  else:
    csv_header = [name for name in train_config['csv_header']
                  if name != train_config['target_column']]
  # record_defaults are used by tf.decode_csv to insert defaults, and to infer
  # the datatype.
  record_defaults = [[train_config['csv_defaults'][name]]
                     for name in csv_header]
  tensors = tf.decode_csv(examples, record_defaults, name='csv_to_tensors')
  # I'm not really sure why expand_dims needs to be called. If using regression
  # models, it errors without it.
  tensors = [tf.expand_dims(x, axis=1) for x in tensors]
  tensor_dict = dict(zip(csv_header, tensors))
  return tensor_dict
Read the csv files .
240
6
237,015
def get_estimator(output_dir, train_config, args):
  """Returns a tf.learn estimator matching the requested model type.

  Args:
    output_dir: root output directory; checkpoints go under 'train'.
    train_config: the training config dict from preprocessing.
    args: parsed command-line args (model_type, layer_sizes, learning_rate,
        epsilon, save_checkpoints_secs).
  Returns:
    A configured tf.contrib.learn estimator.
  Raises:
    ValueError: if the model type is inconsistent with the target column type,
        if layer sizes are missing/extraneous, or the model type is unknown.
  """
  # Check the requested mode fits the preprocessed data.
  target_name = train_config['target_column']
  if (is_classification_model(args.model_type) and
      target_name not in train_config['categorical_columns']):
    raise ValueError('When using a classification model, the target must be a '
                     'categorical variable.')
  if (is_regression_model(args.model_type) and
      target_name not in train_config['numerical_columns']):
    raise ValueError('When using a regression model, the target must be a '
                     'numerical variable.')
  # Check layers used for dnn models.
  if is_dnn_model(args.model_type) and not args.layer_sizes:
    raise ValueError('--layer-size* must be used with DNN models')
  if is_linear_model(args.model_type) and args.layer_sizes:
    raise ValueError('--layer-size* cannot be used with linear models')

  # Build tf.learn features
  feature_columns = _tflearn_features(train_config, args)

  # Set how often to run checkpointing in terms of time.
  config = tf.contrib.learn.RunConfig(
      save_checkpoints_secs=args.save_checkpoints_secs)

  train_dir = os.path.join(output_dir, 'train')
  if args.model_type == 'dnn_regression':
    estimator = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_columns,
        hidden_units=args.layer_sizes,
        config=config,
        model_dir=train_dir,
        optimizer=tf.train.AdamOptimizer(args.learning_rate,
                                         epsilon=args.epsilon))
  elif args.model_type == 'linear_regression':
    estimator = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns,
        config=config,
        model_dir=train_dir,
        optimizer=tf.train.AdamOptimizer(args.learning_rate,
                                         epsilon=args.epsilon))
  elif args.model_type == 'dnn_classification':
    estimator = tf.contrib.learn.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=args.layer_sizes,
        n_classes=train_config['vocab_stats'][target_name]['n_classes'],
        config=config,
        model_dir=train_dir,
        optimizer=tf.train.AdamOptimizer(args.learning_rate,
                                         epsilon=args.epsilon))
  elif args.model_type == 'linear_classification':
    estimator = tf.contrib.learn.LinearClassifier(
        feature_columns=feature_columns,
        n_classes=train_config['vocab_stats'][target_name]['n_classes'],
        config=config,
        model_dir=train_dir,
        optimizer=tf.train.AdamOptimizer(args.learning_rate,
                                         epsilon=args.epsilon))
  else:
    raise ValueError('bad --model-type value')
  return estimator
Returns a tf learn estimator .
753
7
237,016
def _scale_tensor(tensor, range_min, range_max, scale_min, scale_max):
  """Linearly scale a tensor from [range_min, range_max] to [scale_min, scale_max].

  Args:
    tensor: the input tensor.
    range_min, range_max: bounds of the tensor's current value range.
    scale_min, scale_max: bounds of the desired output range.
  Returns:
    The rescaled float tensor, or the original tensor unchanged when
    range_min == range_max (avoids a divide by zero).
  """
  if range_min == range_max:
    return tensor
  as_float = tf.to_float(tensor)
  out_span = tf.constant(float(scale_max - scale_min))
  in_span = tf.constant(float(range_max - range_min))
  normalized = tf.divide(tf.subtract(as_float, range_min) * out_span, in_span)
  return normalized + tf.constant(float(scale_min))
Scale a tensor to scale_min to scale_max .
136
13
237,017
def _tflearn_features(train_config, args):
  """Builds the tf.learn feature column list from the training config.

  Args:
    train_config: the training config dict (numerical/categorical columns,
        target/key columns, transforms, vocab_stats).
    args: parsed command-line args; model_type selects the transform family.
  Returns:
    A list of tf.contrib.layers feature columns.
  Raises:
    ValueError: for an unknown per-column transform name.
  """
  feature_columns = []
  target_name = train_config['target_column']
  key_name = train_config['key_column']
  # Numerical columns become plain real-valued columns; target and key are
  # never features.
  for name in train_config['numerical_columns']:
    if name != target_name and name != key_name:
      feature_columns.append(
          tf.contrib.layers.real_valued_column(name, dimension=1))

  # Supported transforms:
  # for DNN
  # 1) string -> make int -> embedding (embedding)
  # 2) string -> make int -> one_hot (one_hot, default)
  # for linear
  # 1) string -> sparse_column_with_hash_bucket (embedding)
  # 2) string -> make int -> sparse_column_with_integerized_feature (one_hot, default)
  # It is unfortunate that tf.layers has different feature transforms if the
  # model is linear or DNN. This package should not expose to the user that
  # we are using tf.layers. It is crazy that DNN models support more feature
  # types (like string -> hash sparse column -> embedding)
  for name in train_config['categorical_columns']:
    if name != target_name and name != key_name:
      transform_config = train_config['transforms'].get(name, {})
      transform_name = transform_config.get('transform', None)
      if is_dnn_model(args.model_type):
        if transform_name == 'embedding':
          sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
              name,
              bucket_size=train_config['vocab_stats'][name]['n_classes'])
          learn_feature = tf.contrib.layers.embedding_column(
              sparse, dimension=transform_config['embedding_dim'])
        elif transform_name == 'one_hot' or transform_name is None:
          sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
              name,
              bucket_size=train_config['vocab_stats'][name]['n_classes'])
          learn_feature = tf.contrib.layers.one_hot_column(sparse)
        else:
          raise ValueError(('Unknown transform name. Only \'embedding\' '
                            'and \'one_hot\' transforms are supported. Got %s')
                           % transform_name)
      elif is_linear_model(args.model_type):
        if transform_name == 'one_hot' or transform_name is None:
          learn_feature = tf.contrib.layers.sparse_column_with_integerized_feature(
              name,
              bucket_size=train_config['vocab_stats'][name]['n_classes'])
        elif transform_name == 'embedding':
          learn_feature = tf.contrib.layers.sparse_column_with_hash_bucket(
              name, hash_bucket_size=transform_config['embedding_dim'])
        else:
          raise ValueError(('Unknown transform name. Only \'embedding\' '
                            'and \'one_hot\' transforms are supported. Got %s')
                           % transform_name)
      # Save the feature
      feature_columns.append(learn_feature)
  return feature_columns
Builds the tf . learn feature list .
750
9
237,018
def get_vocabulary(preprocess_output_dir, name):
  """Loads the vocabulary file as a list of strings.

  Args:
    preprocess_output_dir: directory containing the analysis output files.
    name: name of the categorical column whose vocabulary to load.
  Returns:
    The vocabulary as a list of non-empty strings, one per file line.
  Raises:
    ValueError: if the vocabulary file does not exist.
  """
  vocab_file = os.path.join(preprocess_output_dir, CATEGORICAL_ANALYSIS % name)
  if not file_io.file_exists(vocab_file):
    raise ValueError('File %s not found in %s' %
                     (CATEGORICAL_ANALYSIS % name, preprocess_output_dir))
  contents = python_portable_string(file_io.read_file_to_string(vocab_file))
  # Drop empty lines (including any trailing-newline artifact).
  return [line for line in contents.split('\n') if line]
Loads the vocabulary file as a list of strings .
157
11
237,019
def validate_metadata(train_config):
  """Perform some checks that the training config is correct.

  Args:
    train_config: the training config dict produced by preprocessing.
  Raises:
    ValueError: if header/defaults lengths disagree, or if any csv column is
        not accounted for as numerical, categorical, key, or target.
  """
  # Make sure we have a default for every column
  if len(train_config['csv_header']) != len(train_config['csv_defaults']):
    raise ValueError('Unequal number of columns in input features file and '
                     'schema file.')
  # Check there are no missing columns. sorted_colums has two copies of the
  # target column because the target column is also listed in
  # categorical_columns or numerical_columns.
  expected = sorted(train_config['csv_header'] +
                    [train_config['target_column']])
  actual = sorted(train_config['categorical_columns'] +
                  train_config['numerical_columns'] +
                  [train_config['key_column']] +
                  [train_config['target_column']])
  if actual != expected:
    raise ValueError('Each csv header must be a numerical/categorical type, a '
                     ' key, or a target.')
Perform some checks that the training config is correct .
247
12
237,020
def get_default_id(credentials=None):
  """Get default project id.

  Args:
    credentials: optional credentials used to list accessible projects when
        no default project id is configured.
  Returns:
    The configured default project id; otherwise, if exactly one project is
    accessible, that project's id; otherwise None.
  """
  project_id = _utils.get_project_id()
  if project_id is not None:
    return project_id
  # No configured default: adopt a project only when exactly one is visible.
  projects, _ = Projects(credentials)._retrieve_projects(None, 2)
  if len(projects) == 1:
    return projects[0].id
  return None
Get default project id .
74
5
237,021
def init_app(state):
    """Prepare the Flask application for Flask-Split.

    Args:
        state: the blueprint setup state carrying the Flask application.
    """
    app = state.app
    # Default configuration values; the host application may override any.
    defaults = {
        'SPLIT_ALLOW_MULTIPLE_EXPERIMENTS': False,
        'SPLIT_DB_FAILOVER': False,
        'SPLIT_IGNORE_IP_ADDRESSES': [],
        'SPLIT_ROBOT_REGEX': r"""
            (?i)\b(
                Baidu|
                Gigabot|
                Googlebot|
                libwww-perl|
                lwp-trivial|
                msnbot|
                SiteUptime|
                Slurp|
                WordPress|
                ZIBB|
                ZyBorg
            )\b
        """,
    }
    for key, value in defaults.items():
        app.config.setdefault(key, value)
    app.jinja_env.globals.update({'ab_test': ab_test, 'finished': finished})

    @app.template_filter()
    def percentage(number):
        # Render small percentages with one decimal, larger ones as integers.
        number *= 100
        if abs(number) < 10:
            return "%.1f%%" % round(number, 1)
        else:
            return "%d%%" % round(number)
Prepare the Flask application for Flask - Split .
250
10
237,022
def finished(experiment_name, reset=True):
    """Track a conversion for the visitor's alternative in an experiment.

    Args:
        experiment_name: name of the experiment being converted.
        reset: if True (default) the visitor's assignment is cleared so they
            can convert again; if False the conversion is remembered so it is
            only counted once.
    Raises:
        ConnectionError: re-raised when redis is unreachable and
            SPLIT_DB_FAILOVER is disabled.
    """
    # Robots and ignored IPs never count.
    if _exclude_visitor():
        return
    redis = _get_redis_connection()
    try:
        experiment = Experiment.find(redis, experiment_name)
        if not experiment:
            return
        alternative_name = _get_session().get(experiment.key)
        if alternative_name:
            split_finished = set(session.get('split_finished', []))
            # Only increment the completion counter the first time.
            if experiment.key not in split_finished:
                alternative = Alternative(
                    redis, alternative_name, experiment_name)
                alternative.increment_completion()
            if reset:
                # Forget the assignment so the visitor can re-enter.
                _get_session().pop(experiment.key, None)
                try:
                    split_finished.remove(experiment.key)
                except KeyError:
                    pass
            else:
                split_finished.add(experiment.key)
            session['split_finished'] = list(split_finished)
    except ConnectionError:
        if not current_app.config['SPLIT_DB_FAILOVER']:
            raise
Track a conversion .
218
4
237,023
def _is_robot():
    """Return a truthy match if the current visitor is a robot or spider.

    The User-Agent header is tested against the configured (verbose-mode)
    robot regex; returns the re.Match on a hit, None otherwise.
    """
    pattern = current_app.config['SPLIT_ROBOT_REGEX']
    agent = request.headers.get('User-Agent', '')
    return re.search(pattern, agent, flags=re.VERBOSE)
Return True if the current visitor is a robot or spider or False otherwise .
73
15
237,024
def start_time(self):
    """The start time of this experiment, or None if never recorded."""
    raw = self.redis.hget('experiment_start_times', self.name)
    if not raw:
        return None
    return datetime.strptime(raw, '%Y-%m-%dT%H:%M:%S')
The start time of this experiment .
64
7
237,025
def reset(self):
    """Delete all collected data for this experiment.

    Clears each alternative's counters, then the recorded winner, and bumps
    the experiment version.
    """
    for alt in self.alternatives:
        alt.reset()
    self.reset_winner()
    self.increment_version()
Delete all data for this experiment .
32
7
237,026
def delete(self):
    """Delete this experiment and all its data.

    Removes every alternative, the winner record, the experiment's entry in
    the 'experiments' set and its own key, then bumps the version.
    """
    for alt in self.alternatives:
        alt.delete()
    self.reset_winner()
    self.redis.srem('experiments', self.name)
    self.redis.delete(self.name)
    self.increment_version()
Delete this experiment and all its data .
60
8
237,027
def _get_redis_connection():
    """Return a Redis connection based on the Flask application's configuration."""
    redis_url = current_app.config.get('REDIS_URL', 'redis://localhost:6379')
    return redis.from_url(redis_url, decode_responses=True)
Return a Redis connection based on the Flask application's configuration .
55
13
237,028
def set_experiment_winner(experiment):
    """Mark an alternative as the winner of the experiment."""
    redis = _get_redis_connection()
    experiment = Experiment.find(redis, experiment)
    if experiment:
        chosen = request.form.get('alternative')
        alternative = Alternative(redis, chosen, experiment.name)
        # Only accept alternatives actually registered on the experiment.
        if alternative.name in experiment.alternative_names:
            experiment.winner = alternative.name
    return redirect(url_for('.index'))
Mark an alternative as the winner of the experiment .
97
10
237,029
def reset_experiment(experiment):
    """Delete all data for an experiment, then redirect to the index."""
    redis = _get_redis_connection()
    found = Experiment.find(redis, experiment)
    if found:
        found.reset()
    return redirect(url_for('.index'))
Delete all data for an experiment .
52
7
237,030
def delete_experiment(experiment):
    """Delete an experiment and all its data, then redirect to the index."""
    redis = _get_redis_connection()
    found = Experiment.find(redis, experiment)
    if found:
        found.delete()
    return redirect(url_for('.index'))
Delete an experiment and all its data .
52
8
237,031
def _get_ipaddress(node):
    """Add the 'ipaddress' attribute to node (via ohai) if not present.

    Args:
        node: the node dict, possibly already containing 'ipaddress'.
    Returns:
        True if the attribute was added, False otherwise.
    """
    if "ipaddress" not in node:
        with settings(hide('stdout'), warn_only=True):
            output = sudo('ohai -l warn ipaddress')
        if output.succeeded:
            try:
                # ohai prints a JSON array whose first element is the address.
                node['ipaddress'] = json.loads(output)[0]
            except ValueError:
                abort("Could not parse ohai's output for ipaddress"
                      ":\n {0}".format(output))
            return True
    return False
Adds the ipaddress attribute to the given node object if not already present and it is correctly given by ohai Returns True if ipaddress is added False otherwise
109
31
237,032
def sync_node(node):
    """Builds, synchronizes and configures a node.

    Also injects the ipaddress into the node's config file if not already
    present. Returns False for dummy nodes, True once configuration has run.
    """
    # Dummy nodes are placeholders and never configured.
    if node.get('dummy') or 'dummy' in node.get('tags', []):
        lib.print_header("Skipping dummy: {0}".format(env.host))
        return False
    current_node = lib.get_node(node['name'])
    # Always configure Chef Solo
    solo.configure(current_node)
    ipaddress = _get_ipaddress(node)
    # Everything was configured alright, so save the node configuration
    # This is done without credentials, so that we keep the node name used
    # by the user and not the hostname or IP translated by .ssh/config
    filepath = save_config(node, ipaddress)
    try:
        # Synchronize the kitchen directory
        _synchronize_node(filepath, node)
        # Execute Chef Solo
        _configure_node()
    finally:
        # Always clean up remote artifacts, even when configuration fails.
        _node_cleanup()
    return True
Builds synchronizes and configures a node . It also injects the ipaddress to the node s config file if not already existent .
201
29
237,033
def build_dct(dic, keys, value):
    """Builds a dictionary with arbitrary depth out of a key list.

    Args:
        dic: the dictionary to populate (modified in place).
        keys: the path of keys; consumed (popped) as recursion descends.
        value: the leaf value; the strings "true"/"false" are normalized to
            proper booleans, and the stored value is a deep copy.
    """
    key = keys.pop(0)
    if not keys:
        # Transform cookbook default attribute strings into proper booleans
        if value == "false":
            value = False
        elif value == "true":
            value = True
        # It's a leaf, assign value
        dic[key] = deepcopy(value)
    else:
        dic.setdefault(key, {})
        build_dct(dic[key], keys, value)
Builds a dictionary with arbitrary depth out of a key list
108
12
237,034
def update_dct(dic1, dic2):
    """Merges two dictionaries recursively; dic2 takes preference over dic1."""
    for key, override in dic2.items():
        if isinstance(override, dict):
            # Descend into nested dicts, creating the branch when absent.
            branch = dic1.setdefault(key, {})
            update_dct(branch, override)
        else:
            dic1[key] = override
Merges two dictionaries recursively dic2 will have preference over dic1
73
18
237,035
def _add_merged_attributes(node, all_recipes, all_roles):
    """Merges attributes from cookbooks, node and roles into the node dict.

    Precedence (later wins): recipe defaults < role default_attributes <
    environment default_attributes < plain node attributes < role
    override_attributes < environment override_attributes.

    Args:
        node: the node dict; updated in place with the merged attributes.
        all_recipes: list of recipe metadata dicts ('name', 'attributes').
        all_roles: list of role dicts ('name', default/override attributes).
    """
    # Get cookbooks from extended recipes
    attributes = {}
    for recipe in node['recipes']:
        # Find this recipe
        found = False
        for r in all_recipes:
            if recipe == r['name']:
                found = True
                for attr in r['attributes']:
                    if r['attributes'][attr].get('type') == "hash":
                        value = {}
                    else:
                        value = r['attributes'][attr].get('default')
                    # Attribute dictionaries are defined as a single
                    # compound key. Split and build proper dict
                    build_dct(attributes, attr.split("/"), value)
        if not found:
            error = "Could not find recipe '{0}' while ".format(recipe)
            error += "building node data bag for '{0}'".format(node['name'])
            abort(error)

    # Get default role attributes
    for role in node['roles']:
        for r in all_roles:
            if role == r['name']:
                update_dct(attributes, r.get('default_attributes', {}))

    # Get default environment attributes
    environment = lib.get_environment(node['chef_environment'])
    update_dct(attributes, environment.get('default_attributes', {}))

    # Get normal node attributes
    non_attribute_fields = [
        'id', 'name', 'role', 'roles', 'recipes', 'run_list', 'ipaddress']
    node_attributes = {}
    for key in node:
        if key in non_attribute_fields:
            continue
        node_attributes[key] = node[key]
    update_dct(attributes, node_attributes)

    # Get override role attributes
    for role in node['roles']:
        for r in all_roles:
            if role == r['name']:
                update_dct(attributes, r.get('override_attributes', {}))

    # Get override environment attributes
    update_dct(attributes, environment.get('override_attributes', {}))

    # Merge back to the original node object
    node.update(attributes)
Merges attributes from cookbooks node and roles
513
9
237,036
def build_node_data_bag():
    """Builds one node data bag item per file found in the 'nodes' directory.

    Each node gets an 'id', extended role and recipe lists (recursively
    expanded through roles), merged attributes and automatic attributes, and
    is written as data_bags/node/<id>.json.
    """
    nodes = lib.get_nodes()
    node_data_bag_path = os.path.join('data_bags', 'node')
    # In case there are leftovers
    remove_local_node_data_bag()
    os.makedirs(node_data_bag_path)
    all_recipes = lib.get_recipes()
    all_roles = lib.get_roles()
    for node in nodes:
        # Dots are not allowed (only alphanumeric), substitute by underscores
        node['id'] = node['name'].replace('.', '_')
        # Build extended role list
        node['role'] = lib.get_roles_in_node(node)
        node['roles'] = node['role'][:]
        for role in node['role']:
            node['roles'].extend(lib.get_roles_in_role(role))
        node['roles'] = list(set(node['roles']))
        # Build extended recipe list
        node['recipes'] = lib.get_recipes_in_node(node)
        # Add recipes found inside each roles in the extended role list
        for role in node['roles']:
            node['recipes'].extend(lib.get_recipes_in_role(role))
        node['recipes'] = list(set(node['recipes']))
        # Add node attributes
        _add_merged_attributes(node, all_recipes, all_roles)
        _add_automatic_attributes(node)
        # Save node data bag item
        with open(os.path.join(
                'data_bags', 'node', node['id'] + '.json'), 'w') as f:
            f.write(json.dumps(node))
Builds one node data bag item per file found in the nodes directory
427
14
237,037
def remove_local_node_data_bag():
    """Removes the locally generated 'node' data bag directory, if present."""
    path = os.path.join('data_bags', 'node')
    if os.path.exists(path):
        shutil.rmtree(path)
Removes generated node data_bag locally
68
8
237,038
def ensure_berksfile_cookbooks_are_installed():
    """Run 'berks vendor' into the berksfile cookbooks directory when stale.

    Vendoring is skipped when the cookbooks directory and its Berksfile.lock
    both exist and the lock is newer than the Berksfile.
    """
    msg = "Vendoring cookbooks from Berksfile {0} to directory {1}..."
    print(msg.format(env.berksfile, env.berksfile_cookbooks_directory))
    run_vendor = True
    cookbooks_dir = env.berksfile_cookbooks_directory
    berksfile_lock_path = cookbooks_dir + '/Berksfile.lock'
    berksfile_lock_exists = os.path.isfile(berksfile_lock_path)
    cookbooks_dir_exists = os.path.isdir(cookbooks_dir)
    if cookbooks_dir_exists and berksfile_lock_exists:
        # Re-vendor only when the Berksfile changed after the last vendoring.
        berksfile_mtime = os.stat('Berksfile').st_mtime
        cookbooks_mtime = os.stat(berksfile_lock_path).st_mtime
        run_vendor = berksfile_mtime > cookbooks_mtime
    if run_vendor:
        if cookbooks_dir_exists:
            shutil.rmtree(env.berksfile_cookbooks_directory)
        p = subprocess.Popen(
            ['berks', 'vendor', env.berksfile_cookbooks_directory],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if env.verbose or p.returncode:
            # NOTE(review): Python 2 print statement below — this function is
            # not Python 3 compatible as written; confirm target interpreter.
            print stdout, stderr
Run berks vendor to berksfile cookbooks directory
340
11
237,039
def _remove_remote_node_data_bag():
    """Removes the generated 'node' data bag from the remote node."""
    remote_path = os.path.join(env.node_work_path, 'data_bags', 'node')
    if exists(remote_path):
        sudo("rm -rf {0}".format(remote_path))
Removes generated node data_bag from the remote node
79
11
237,040
def _remove_remote_data_bags():
    """Remove remote data bags so they can't leak any sensitive information."""
    remote_path = os.path.join(env.node_work_path, 'data_bags')
    if exists(remote_path):
        sudo("rm -rf {0}".format(remote_path))
Remove remote data bags so it won't leak any sensitive information
67
12
237,041
def _configure_node ( ) : print ( "" ) msg = "Cooking..." if env . parallel : msg = "[{0}]: {1}" . format ( env . host_string , msg ) print ( msg ) # Backup last report with settings ( hide ( 'stdout' , 'warnings' , 'running' ) , warn_only = True ) : sudo ( "mv {0} {0}.1" . format ( LOGFILE ) ) # Build chef-solo command cmd = "RUBYOPT=-Ku chef-solo" if whyrun : cmd += " --why-run" cmd += ' -l {0} -j /etc/chef/node.json' . format ( env . loglevel ) if ENABLE_LOGS : cmd += ' | tee {0}' . format ( LOGFILE ) if env . loglevel == "debug" : print ( "Executing Chef Solo with the following command:\n" "{0}" . format ( cmd ) ) with settings ( hide ( 'warnings' , 'running' ) , warn_only = True ) : output = sudo ( cmd ) if ( output . failed or "FATAL: Stacktrace dumped" in output or ( "Chef Run complete" not in output and "Report handlers complete" not in output ) ) : if 'chef-solo: command not found' in output : print ( colors . red ( "\nFAILED: Chef Solo is not installed on this node" ) ) print ( "Type 'fix node:{0} deploy_chef' to install it" . format ( env . host ) ) abort ( "" ) else : print ( colors . red ( "\nFAILED: chef-solo could not finish configuring the node\n" ) ) import sys sys . exit ( 1 ) else : msg = "\n" if env . parallel : msg += "[{0}]: " . format ( env . host_string ) msg += "SUCCESS: Node correctly configured" print ( colors . green ( msg ) )
Exectutes chef - solo to apply roles and recipes to a node
444
14
237,042
def _resolve_hostname ( name ) : if env . ssh_config is None : return name elif not os . path . exists ( os . path . join ( "nodes" , name + ".json" ) ) : resolved_name = env . ssh_config . lookup ( name ) [ 'hostname' ] if os . path . exists ( os . path . join ( "nodes" , resolved_name + ".json" ) ) : name = resolved_name return name
Returns resolved hostname using the ssh config
105
8
237,043
def get_environment ( name ) : if name == "_default" : return env_from_template ( name ) filename = os . path . join ( "environments" , name + ".json" ) try : with open ( filename ) as f : try : return json . loads ( f . read ( ) ) except ValueError as e : msg = 'LittleChef found the following error in' msg += ' "{0}":\n {1}' . format ( filename , str ( e ) ) abort ( msg ) except IOError : raise FileNotFoundError ( 'File {0} not found' . format ( filename ) )
Returns a JSON environment file as a dictionary
135
8
237,044
def get_environments ( ) : envs = [ ] for root , subfolders , files in os . walk ( 'environments' ) : for filename in files : if filename . endswith ( ".json" ) : path = os . path . join ( root [ len ( 'environments' ) : ] , filename [ : - len ( '.json' ) ] ) envs . append ( get_environment ( path ) ) return sorted ( envs , key = lambda x : x [ 'name' ] )
Gets all environments found in the environments directory
111
9
237,045
def get_node ( name , merged = False ) : if merged : node_path = os . path . join ( "data_bags" , "node" , name . replace ( '.' , '_' ) + ".json" ) else : node_path = os . path . join ( "nodes" , name + ".json" ) if os . path . exists ( node_path ) : # Read node.json with open ( node_path , 'r' ) as f : try : node = json . loads ( f . read ( ) ) except ValueError as e : msg = 'LittleChef found the following error in' msg += ' "{0}":\n {1}' . format ( node_path , str ( e ) ) abort ( msg ) else : print "Creating new node file '{0}.json'" . format ( name ) node = { 'run_list' : [ ] } # Add node name so that we can tell to which node it is node [ 'name' ] = name if not node . get ( 'chef_environment' ) : node [ 'chef_environment' ] = '_default' return node
Returns a JSON node file as a dictionary
249
8
237,046
def get_nodes_with_role ( role_name , environment = None ) : prefix_search = role_name . endswith ( "*" ) if prefix_search : role_name = role_name . rstrip ( "*" ) for n in get_nodes ( environment ) : roles = get_roles_in_node ( n , recursive = True ) if prefix_search : if any ( role . startswith ( role_name ) for role in roles ) : yield n else : if role_name in roles : yield n
Get all nodes which include a given role prefix - searches are also supported
120
14
237,047
def get_nodes_with_tag ( tag , environment = None , include_guests = False ) : nodes = get_nodes ( environment ) nodes_mapping = dict ( ( n [ 'name' ] , n ) for n in nodes ) for n in nodes : if tag in n . get ( 'tags' , [ ] ) : # Remove from node mapping so it doesn't get added twice by # guest walking below try : del nodes_mapping [ n [ 'fqdn' ] ] except KeyError : pass yield n # Walk guest if it is a host if include_guests and n . get ( 'virtualization' , { } ) . get ( 'role' ) == 'host' : for guest in n [ 'virtualization' ] . get ( 'guests' , [ ] ) : try : yield nodes_mapping [ guest [ 'fqdn' ] ] except KeyError : # we ignore guests which are not in the same # chef environments than their hosts for now pass
Get all nodes which include a given tag
216
8
237,048
def get_nodes_with_recipe ( recipe_name , environment = None ) : prefix_search = recipe_name . endswith ( "*" ) if prefix_search : recipe_name = recipe_name . rstrip ( "*" ) for n in get_nodes ( environment ) : recipes = get_recipes_in_node ( n ) for role in get_roles_in_node ( n , recursive = True ) : recipes . extend ( get_recipes_in_role ( role ) ) if prefix_search : if any ( recipe . startswith ( recipe_name ) for recipe in recipes ) : yield n else : if recipe_name in recipes : yield n
Get all nodes which include a given recipe prefix - searches are also supported
152
14
237,049
def print_node ( node , detailed = False ) : nodename = node [ 'name' ] print ( colors . yellow ( "\n" + nodename ) ) # Roles if detailed : for role in get_roles_in_node ( node ) : print_role ( _get_role ( role ) , detailed = False ) else : print ( ' Roles: {0}' . format ( ", " . join ( get_roles_in_node ( node ) ) ) ) # Recipes if detailed : for recipe in get_recipes_in_node ( node ) : print " Recipe:" , recipe print " attributes: {0}" . format ( node . get ( recipe , "" ) ) else : print ( ' Recipes: {0}' . format ( ", " . join ( get_recipes_in_node ( node ) ) ) ) # Node attributes print " Node attributes:" for attribute in node . keys ( ) : if attribute == "run_list" or attribute == "name" : continue print " {0}: {1}" . format ( attribute , node [ attribute ] )
Pretty prints the given node
235
5
237,050
def print_nodes ( nodes , detailed = False ) : found = 0 for node in nodes : found += 1 print_node ( node , detailed = detailed ) print ( "\nFound {0} node{1}" . format ( found , "s" if found != 1 else "" ) )
Prints all the given nodes
62
6
237,051
def _generate_metadata ( path , cookbook_path , name ) : global knife_installed if not knife_installed : return metadata_path_rb = os . path . join ( path , 'metadata.rb' ) metadata_path_json = os . path . join ( path , 'metadata.json' ) if ( os . path . exists ( metadata_path_rb ) and ( not os . path . exists ( metadata_path_json ) or os . stat ( metadata_path_rb ) . st_mtime > os . stat ( metadata_path_json ) . st_mtime ) ) : error_msg = "Warning: metadata.json for {0}" . format ( name ) error_msg += " in {0} is older that metadata.rb" . format ( cookbook_path ) error_msg += ", cookbook attributes could be out of date\n\n" try : proc = subprocess . Popen ( [ 'knife' , 'cookbook' , 'metadata' , '-o' , cookbook_path , name ] , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) resp , error = proc . communicate ( ) if ( 'ERROR:' in resp or 'FATAL:' in resp or 'Generating metadata for' not in resp ) : if ( "No user specified, pass via -u or specifiy 'node_name'" in error ) : error_msg += "You need to have an up-to-date (>=0.10.x)" error_msg += " version of knife installed locally in order" error_msg += " to generate metadata.json.\nError " else : error_msg += "Unkown error " error_msg += "while executing knife to generate " error_msg += "metadata.json for {0}" . format ( path ) print ( error_msg ) print resp if env . loglevel == 'debug' : print "\n" . join ( resp . split ( "\n" ) [ : 2 ] ) except OSError : knife_installed = False error_msg += "If you locally install Chef's knife tool, LittleChef" error_msg += " will regenerate metadata.json files automatically\n" print ( error_msg ) else : print ( "Generated metadata.json for {0}\n" . format ( path ) )
Checks whether metadata . rb has changed and regenerate metadata . json
513
14
237,052
def get_recipes_in_cookbook ( name ) : recipes = { } path = None cookbook_exists = False metadata_exists = False for cookbook_path in cookbook_paths : path = os . path . join ( cookbook_path , name ) path_exists = os . path . exists ( path ) # cookbook exists if present in any of the cookbook paths cookbook_exists = cookbook_exists or path_exists if not path_exists : continue _generate_metadata ( path , cookbook_path , name ) # Now try to open metadata.json try : with open ( os . path . join ( path , 'metadata.json' ) , 'r' ) as f : try : cookbook = json . loads ( f . read ( ) ) except ValueError as e : msg = "Little Chef found the following error in your" msg += " {0} file:\n {1}" . format ( os . path . join ( path , 'metadata.json' ) , e ) abort ( msg ) # Add each recipe defined in the cookbook metadata_exists = True recipe_defaults = { 'description' : '' , 'version' : cookbook . get ( 'version' ) , 'dependencies' : cookbook . get ( 'dependencies' , { } ) . keys ( ) , 'attributes' : cookbook . get ( 'attributes' , { } ) } for recipe in cookbook . get ( 'recipes' , [ ] ) : recipes [ recipe ] = dict ( recipe_defaults , name = recipe , description = cookbook [ 'recipes' ] [ recipe ] ) # Cookbook metadata.json was found, don't try next cookbook path # because metadata.json in site-cookbooks has preference break except IOError : # metadata.json was not found, try next cookbook_path pass if not cookbook_exists : abort ( 'Unable to find cookbook "{0}"' . format ( name ) ) elif not metadata_exists : abort ( 'Cookbook "{0}" has no metadata.json' . format ( name ) ) # Add recipes found in the 'recipes' directory but not listed # in the metadata for cookbook_path in cookbook_paths : recipes_dir = os . path . join ( cookbook_path , name , 'recipes' ) if not os . path . isdir ( recipes_dir ) : continue for basename in os . listdir ( recipes_dir ) : fname , ext = os . path . 
splitext ( basename ) if ext != '.rb' : continue if fname != 'default' : recipe = '%s::%s' % ( name , fname ) else : recipe = name if recipe not in recipes : recipes [ recipe ] = dict ( recipe_defaults , name = recipe ) # When a recipe has no default recipe (libraries?), # add one so that it is listed if not recipes : recipes [ name ] = dict ( recipe_defaults , name = name , description = 'This cookbook has no default recipe' ) return recipes . values ( )
Gets the name of all recipes present in a cookbook Returns a list of dictionaries
677
18
237,053
def get_recipes_in_node ( node ) : recipes = [ ] for elem in node . get ( 'run_list' , [ ] ) : if elem . startswith ( "recipe" ) : recipe = elem . split ( '[' ) [ 1 ] . split ( ']' ) [ 0 ] recipes . append ( recipe ) return recipes
Gets the name of all recipes present in the run_list of a node
79
16
237,054
def get_recipes ( ) : dirnames = set ( ) for path in cookbook_paths : dirnames . update ( [ d for d in os . listdir ( path ) if os . path . isdir ( os . path . join ( path , d ) ) and not d . startswith ( '.' ) ] ) recipes = [ ] for dirname in dirnames : recipes . extend ( get_recipes_in_cookbook ( dirname ) ) return sorted ( recipes , key = lambda x : x [ 'name' ] )
Gets all recipes found in the cookbook directories
118
10
237,055
def print_recipe ( recipe ) : print ( colors . yellow ( "\n{0}" . format ( recipe [ 'name' ] ) ) ) print " description: {0}" . format ( recipe [ 'description' ] ) print " version: {0}" . format ( recipe [ 'version' ] ) print " dependencies: {0}" . format ( ", " . join ( recipe [ 'dependencies' ] ) ) print " attributes: {0}" . format ( ", " . join ( recipe [ 'attributes' ] ) )
Pretty prints the given recipe
114
5
237,056
def _get_role ( rolename ) : path = os . path . join ( 'roles' , rolename + '.json' ) if not os . path . exists ( path ) : abort ( "Couldn't read role file {0}" . format ( path ) ) with open ( path , 'r' ) as f : try : role = json . loads ( f . read ( ) ) except ValueError as e : msg = "Little Chef found the following error in your" msg += " {0}.json file:\n {1}" . format ( rolename , str ( e ) ) abort ( msg ) role [ 'fullname' ] = rolename return role
Reads and parses a file containing a role
147
10
237,057
def get_roles ( ) : roles = [ ] for root , subfolders , files in os . walk ( 'roles' ) : for filename in files : if filename . endswith ( ".json" ) : path = os . path . join ( root [ len ( 'roles' ) : ] , filename [ : - len ( '.json' ) ] ) roles . append ( _get_role ( path ) ) return sorted ( roles , key = lambda x : x [ 'fullname' ] )
Gets all roles found in the roles directory
110
9
237,058
def print_role ( role , detailed = True ) : if detailed : print ( colors . yellow ( role . get ( 'fullname' ) ) ) else : print ( " Role: {0}" . format ( role . get ( 'fullname' ) ) ) if detailed : print ( " description: {0}" . format ( role . get ( 'description' ) ) ) if 'default_attributes' in role : print ( " default_attributes:" ) _pprint ( role [ 'default_attributes' ] ) if 'override_attributes' in role : print ( " override_attributes:" ) _pprint ( role [ 'override_attributes' ] ) if detailed : print ( " run_list: {0}" . format ( role . get ( 'run_list' ) ) ) print ( "" )
Pretty prints the given role
180
5
237,059
def import_plugin ( name ) : path = os . path . join ( "plugins" , name + ".py" ) try : with open ( path , 'rb' ) as f : try : plugin = imp . load_module ( "p_" + name , f , name + '.py' , ( '.py' , 'rb' , imp . PY_SOURCE ) ) except SyntaxError as e : error = "Found plugin '{0}', but it seems" . format ( name ) error += " to have a syntax error: {0}" . format ( str ( e ) ) abort ( error ) except IOError : abort ( "Sorry, could not find '{0}.py' in the plugin directory" . format ( name ) ) return plugin
Imports plugin python module
163
5
237,060
def get_cookbook_path ( cookbook_name ) : for cookbook_path in cookbook_paths : path = os . path . join ( cookbook_path , cookbook_name ) if os . path . exists ( path ) : return path raise IOError ( 'Can\'t find cookbook with name "{0}"' . format ( cookbook_name ) )
Returns path to the cookbook for the given cookbook name
82
12
237,061
def global_confirm ( question , default = True ) : if env . abort_on_prompts : return True original_parallel = env . parallel env . parallel = False result = confirm ( question , default ) env . parallel = original_parallel return result
Shows a confirmation that applies to all hosts by temporarily disabling parallel execution in Fabric
57
16
237,062
def _pprint ( dic ) : for key , value in dic . items ( ) : print ( " {0}: {1}" . format ( key , value ) )
Prints a dictionary with one indentation level
38
9
237,063
def get_margin ( length ) : if length > 23 : margin_left = "\t" chars = 1 elif length > 15 : margin_left = "\t\t" chars = 2 elif length > 7 : margin_left = "\t\t\t" chars = 3 else : margin_left = "\t\t\t\t" chars = 4 return margin_left
Add enough tabs to align in two columns
83
8
237,064
def configure ( current_node = None ) : current_node = current_node or { } # Ensure that the /tmp/chef-solo/cache directory exist cache_dir = "{0}/cache" . format ( env . node_work_path ) # First remote call, could go wrong try : cache_exists = exists ( cache_dir ) except EOFError as e : abort ( "Could not login to node, got: {0}" . format ( e ) ) if not cache_exists : with settings ( hide ( 'running' , 'stdout' ) , warn_only = True ) : output = sudo ( 'mkdir -p {0}' . format ( cache_dir ) ) if output . failed : error = "Could not create {0} dir. " . format ( env . node_work_path ) error += "Do you have sudo rights?" abort ( error ) # Change ownership of /tmp/chef-solo/ so that we can rsync with hide ( 'running' , 'stdout' ) : with settings ( warn_only = True ) : output = sudo ( 'chown -R {0} {1}' . format ( env . user , env . node_work_path ) ) if output . failed : error = "Could not modify {0} dir. " . format ( env . node_work_path ) error += "Do you have sudo rights?" abort ( error ) # Set up chef solo configuration logging_path = os . path . dirname ( LOGFILE ) if not exists ( logging_path ) : sudo ( 'mkdir -p {0}' . format ( logging_path ) ) if not exists ( '/etc/chef' ) : sudo ( 'mkdir -p /etc/chef' ) # Set parameters and upload solo.rb template reversed_cookbook_paths = cookbook_paths [ : ] reversed_cookbook_paths . reverse ( ) cookbook_paths_list = '[{0}]' . format ( ', ' . join ( [ '"{0}/{1}"' . format ( env . node_work_path , x ) for x in reversed_cookbook_paths ] ) ) data = { 'node_work_path' : env . node_work_path , 'cookbook_paths_list' : cookbook_paths_list , 'environment' : current_node . get ( 'chef_environment' , '_default' ) , 'verbose' : "true" if env . verbose else "false" , 'http_proxy' : env . http_proxy , 'https_proxy' : env . 
https_proxy } with settings ( hide ( 'everything' ) ) : try : upload_template ( 'solo.rb.j2' , '/etc/chef/solo.rb' , context = data , use_sudo = True , backup = False , template_dir = BASEDIR , use_jinja = True , mode = 0400 ) except SystemExit : error = ( "Failed to upload '/etc/chef/solo.rb'\nThis " "can happen when the deployment user does not have a " "home directory, which is needed as a temporary location" ) abort ( error ) with hide ( 'stdout' ) : sudo ( 'chown root:$(id -g -n root) {0}' . format ( '/etc/chef/solo.rb' ) )
Deploy chef - solo specific files
755
6
237,065
def execute ( node ) : with hide ( 'everything' ) : virt = json . loads ( sudo ( 'ohai virtualization' ) ) if not len ( virt ) or virt [ 0 ] [ 1 ] != "host" : # It may work for virtualization solutions other than Xen print ( "This node is not a Xen host, doing nothing" ) return node [ 'virtualization' ] = { 'role' : 'host' , 'system' : 'xen' , 'vms' : [ ] , } # VMs with hide ( 'everything' ) : vm_list = sudo ( "xm list" ) for vm in vm_list . split ( "\n" ) [ 2 : ] : data = vm . split ( ) if len ( data ) != 6 : break node [ 'virtualization' ] [ 'vms' ] . append ( { 'fqdn' : data [ 0 ] , 'RAM' : data [ 2 ] , 'cpus' : data [ 3 ] } ) print ( "Found {0} VMs for this Xen host" . format ( len ( node [ 'virtualization' ] [ 'vms' ] ) ) ) # Save node file and remove the returned temp file del node [ 'name' ] os . remove ( chef . save_config ( node , True ) )
Uses ohai to get virtualization information which is then saved to then node file
283
17
237,066
def nodes_with_role ( rolename ) : nodes = [ n [ 'name' ] for n in lib . get_nodes_with_role ( rolename , env . chef_environment ) ] if not len ( nodes ) : print ( "No nodes found with role '{0}'" . format ( rolename ) ) sys . exit ( 0 ) return node ( * nodes )
Configures a list of nodes that have the given role in their run list
86
15
237,067
def nodes_with_recipe ( recipename ) : nodes = [ n [ 'name' ] for n in lib . get_nodes_with_recipe ( recipename , env . chef_environment ) ] if not len ( nodes ) : print ( "No nodes found with recipe '{0}'" . format ( recipename ) ) sys . exit ( 0 ) return node ( * nodes )
Configures a list of nodes that have the given recipe in their run list
85
15
237,068
def node ( * nodes ) : chef . build_node_data_bag ( ) if not len ( nodes ) or nodes [ 0 ] == '' : abort ( 'No node was given' ) elif nodes [ 0 ] == 'all' : # Fetch all nodes and add them to env.hosts for node in lib . get_nodes ( env . chef_environment ) : env . hosts . append ( node [ 'name' ] ) if not len ( env . hosts ) : abort ( 'No nodes found in /nodes/' ) message = "Are you sure you want to configure all nodes ({0})" . format ( len ( env . hosts ) ) if env . chef_environment : message += " in the {0} environment" . format ( env . chef_environment ) message += "?" if not __testing__ : if not lib . global_confirm ( message ) : abort ( 'Aborted by user' ) else : # A list of nodes was given env . hosts = list ( nodes ) env . all_hosts = list ( env . hosts ) # Shouldn't be needed # Check whether another command was given in addition to "node:" if not ( littlechef . __cooking__ and 'node:' not in sys . argv [ - 1 ] and 'nodes_with_role:' not in sys . argv [ - 1 ] and 'nodes_with_recipe:' not in sys . argv [ - 1 ] and 'nodes_with_tag:' not in sys . argv [ - 1 ] ) : # If user didn't type recipe:X, role:Y or deploy_chef, # configure the nodes with settings ( ) : execute ( _node_runner ) chef . remove_local_node_data_bag ( )
Selects and configures a list of nodes . all configures all nodes
382
15
237,069
def _node_runner ( ) : env . host_string = lib . get_env_host_string ( ) node = lib . get_node ( env . host_string ) _configure_fabric_for_platform ( node . get ( "platform" ) ) if __testing__ : print "TEST: would now configure {0}" . format ( env . host_string ) else : lib . print_header ( "Configuring {0}" . format ( env . host_string ) ) if env . autodeploy_chef and not chef . chef_test ( ) : deploy_chef ( ask = "no" ) chef . sync_node ( node )
This is only used by node so that we can execute in parallel
146
13
237,070
def deploy_chef ( ask = "yes" , version = "11" ) : env . host_string = lib . get_env_host_string ( ) if ask == "no" or littlechef . noninteractive : print ( "Deploying Chef using omnibus installer version: ..." . format ( version ) ) else : message = ( '\nAre you sure you want to install Chef version:' '{0} on node {1}?' . format ( version , env . host_string ) ) if not confirm ( message ) : abort ( 'Aborted by user' ) lib . print_header ( "Configuring Chef Solo on {0}" . format ( env . host_string ) ) if not __testing__ : solo . install ( version ) solo . configure ( ) # Build a basic node file if there isn't one already # with some properties from ohai with settings ( hide ( 'stdout' ) , warn_only = True ) : output = sudo ( 'ohai -l warn' ) if output . succeeded : try : ohai = json . loads ( output ) except ValueError : abort ( "Could not parse ohai's output" ":\n {0}" . format ( output ) ) node = { "run_list" : [ ] } for attribute in [ "ipaddress" , "platform" , "platform_family" , "platform_version" ] : if ohai . get ( attribute ) : node [ attribute ] = ohai [ attribute ] chef . save_config ( node )
Install chef - solo on a node
325
7
237,071
def plugin ( name ) : env . host_string = lib . get_env_host_string ( ) plug = lib . import_plugin ( name ) lib . print_header ( "Executing plugin '{0}' on " "{1}" . format ( name , env . host_string ) ) node = lib . get_node ( env . host_string ) if node == { 'run_list' : [ ] } : node [ 'name' ] = env . host_string plug . execute ( node ) print ( "Finished executing plugin" )
Executes the selected plugin Plugins are expected to be found in the kitchen s plugins directory
120
18
237,072
def list_envs ( ) : for env in lib . get_environments ( ) : margin_left = lib . get_margin ( len ( env [ 'name' ] ) ) print ( "{0}{1}{2}" . format ( env [ 'name' ] , margin_left , env . get ( 'description' , '(no description)' ) ) )
List all environments
78
3
237,073
def list_nodes_with_tag ( tag ) : lib . print_nodes ( lib . get_nodes_with_tag ( tag , env . chef_environment , littlechef . include_guests ) )
Show all nodes which have assigned a given tag
49
9
237,074
def list_recipes ( ) : for recipe in lib . get_recipes ( ) : margin_left = lib . get_margin ( len ( recipe [ 'name' ] ) ) print ( "{0}{1}{2}" . format ( recipe [ 'name' ] , margin_left , recipe [ 'description' ] ) )
Show a list of all available recipes
71
7
237,075
def list_roles ( ) : for role in lib . get_roles ( ) : margin_left = lib . get_margin ( len ( role [ 'fullname' ] ) ) print ( "{0}{1}{2}" . format ( role [ 'fullname' ] , margin_left , role . get ( 'description' , '(no description)' ) ) )
Show a list of all available roles
80
7
237,076
def _check_appliances ( ) : filenames = os . listdir ( os . getcwd ( ) ) missing = [ ] for dirname in [ 'nodes' , 'environments' , 'roles' , 'cookbooks' , 'data_bags' ] : if ( dirname not in filenames ) or ( not os . path . isdir ( dirname ) ) : missing . append ( dirname ) return ( not bool ( missing ) ) , missing
Looks around and return True or False based on whether we are in a kitchen
105
15
237,077
def create_ticket_str ( self , prefix = None ) : if not prefix : prefix = self . model . TICKET_PREFIX return "%s-%d-%s" % ( prefix , int ( time . time ( ) ) , get_random_string ( length = self . model . TICKET_RAND_LEN ) )
Generate a sufficiently opaque ticket string to ensure the ticket is not guessable . If a prefix is provided prepend it to the string .
77
28
237,078
def validate_ticket ( self , ticket , service , renew = False , require_https = False ) : if not ticket : raise InvalidRequest ( "No ticket string provided" ) if not self . model . TICKET_RE . match ( ticket ) : raise InvalidTicket ( "Ticket string %s is invalid" % ticket ) try : t = self . get ( ticket = ticket ) except self . model . DoesNotExist : raise InvalidTicket ( "Ticket %s does not exist" % ticket ) if t . is_consumed ( ) : raise InvalidTicket ( "%s %s has already been used" % ( t . name , ticket ) ) if t . is_expired ( ) : raise InvalidTicket ( "%s %s has expired" % ( t . name , ticket ) ) if not service : raise InvalidRequest ( "No service identifier provided" ) if require_https and not is_scheme_https ( service ) : raise InvalidService ( "Service %s is not HTTPS" % service ) if not service_allowed ( service ) : raise InvalidService ( "Service %s is not a valid %s URL" % ( service , t . name ) ) try : if not match_service ( t . service , service ) : raise InvalidService ( "%s %s for service %s is invalid for " "service %s" % ( t . name , ticket , t . service , service ) ) except AttributeError : pass try : if renew and not t . is_primary ( ) : raise InvalidTicket ( "%s %s was not issued via primary " "credentials" % ( t . name , ticket ) ) except AttributeError : pass logger . debug ( "Validated %s %s" % ( t . name , ticket ) ) return t
Given a ticket string and service identifier validate the corresponding Ticket . If validation succeeds return the Ticket . If validation fails raise an appropriate error .
383
27
237,079
def delete_invalid_tickets ( self ) : for ticket in self . filter ( Q ( consumed__isnull = False ) | Q ( expires__lte = now ( ) ) ) . order_by ( '-expires' ) : try : ticket . delete ( ) except models . ProtectedError : pass
Delete consumed or expired Ticket s that are not referenced by other Ticket s . Invalid tickets are no longer valid for authentication and can be safely deleted .
68
29
237,080
def consume_tickets ( self , user ) : for ticket in self . filter ( user = user , consumed__isnull = True , expires__gt = now ( ) ) : ticket . consume ( )
Consume all valid Ticket s for a specified user . This is run when the user logs out to ensure all issued tickets are no longer valid for future authentication attempts .
43
33
237,081
def request_sign_out ( self , user ) : session = Session ( ) for ticket in self . filter ( user = user , consumed__gte = user . last_login ) : ticket . request_sign_out ( session = session )
Send a single logout request to each service accessed by a specified user . This is called at logout when single logout is enabled .
52
28
237,082
def request_sign_out ( self , session = requests ) : if logout_allowed ( self . service ) : request = SingleSignOutRequest ( context = { 'ticket' : self } ) url = get_logout_url ( self . service ) or self . service session . post ( url , data = { 'logoutRequest' : request . render_content ( ) } ) logger . info ( "Single sign-out request sent to %s" % url )
Send a POST request to the ServiceTicket s logout URL to request sign - out .
101
19
237,083
def validate_callback ( self , service , pgturl , pgtid , pgtiou ) : if not proxy_allowed ( service ) : raise UnauthorizedServiceProxy ( "%s is not authorized to use proxy authentication" % service ) if not is_scheme_https ( pgturl ) : raise InvalidProxyCallback ( "Proxy callback %s is not HTTPS" % pgturl ) if not proxy_callback_allowed ( service , pgturl ) : raise InvalidProxyCallback ( "%s is not an authorized proxy callback URL" % pgturl ) # Verify that the SSL certificate is valid verify = os . environ . get ( 'REQUESTS_CA_BUNDLE' , True ) try : requests . get ( pgturl , verify = verify , timeout = 5 ) except requests . exceptions . SSLError : raise InvalidProxyCallback ( "SSL certificate validation failed for proxy callback %s" % pgturl ) except requests . exceptions . RequestException as e : raise InvalidProxyCallback ( e ) # Callback certificate appears valid, so send the ticket strings pgturl = add_query_params ( pgturl , { 'pgtId' : pgtid , 'pgtIou' : pgtiou } ) try : response = requests . get ( pgturl , verify = verify , timeout = 5 ) except requests . exceptions . RequestException as e : raise InvalidProxyCallback ( e ) try : response . raise_for_status ( ) except requests . exceptions . HTTPError as e : raise InvalidProxyCallback ( "Proxy callback %s returned %s" % ( pgturl , e ) )
Verify the provided proxy callback URL .
349
8
237,084
def _get_backends ( ) : backends = [ ] backend_paths = getattr ( settings , 'MAMA_CAS_SERVICE_BACKENDS' , [ 'mama_cas.services.backends.SettingsBackend' ] ) for backend_path in backend_paths : backend = import_string ( backend_path ) ( ) backends . append ( backend ) return backends
Retrieve the list of configured service backends .
89
10
237,085
def _is_allowed ( attr , * args ) : for backend in _get_backends ( ) : try : if getattr ( backend , attr ) ( * args ) : return True except AttributeError : raise NotImplementedError ( "%s.%s.%s() not implemented" % ( backend . __class__ . __module__ , backend . __class__ . __name__ , attr ) ) return False
Test if a given attribute is allowed according to the current set of configured service backends .
94
18
237,086
def _is_valid_service_url ( url ) : valid_services = getattr ( settings , 'MAMA_CAS_VALID_SERVICES' , ( ) ) if not valid_services : return True warnings . warn ( 'The MAMA_CAS_VALID_SERVICES setting is deprecated. Services ' 'should be configured using MAMA_CAS_SERVICES.' , DeprecationWarning ) for service in [ re . compile ( s ) for s in valid_services ] : if service . match ( url ) : return True return False
Access services list from MAMA_CAS_VALID_SERVICES .
123
17
237,087
def get_backend_path ( service ) : for backend in _get_backends ( ) : try : if backend . service_allowed ( service ) : return "%s.%s" % ( backend . __class__ . __module__ , backend . __class__ . __name__ ) except AttributeError : raise NotImplementedError ( "%s.%s.service_allowed() not implemented" % ( backend . __class__ . __module__ , backend . __class__ . __name__ ) ) return None
Return the dotted path of the matching backend .
113
9
237,088
def get_callbacks ( service ) : callbacks = list ( getattr ( settings , 'MAMA_CAS_ATTRIBUTE_CALLBACKS' , [ ] ) ) if callbacks : warnings . warn ( 'The MAMA_CAS_ATTRIBUTE_CALLBACKS setting is deprecated. Service callbacks ' 'should be configured using MAMA_CAS_SERVICES.' , DeprecationWarning ) for backend in _get_backends ( ) : try : callbacks . extend ( backend . get_callbacks ( service ) ) except AttributeError : raise NotImplementedError ( "%s.%s.get_callbacks() not implemented" % ( backend . __class__ . __module__ , backend . __class__ . __name__ ) ) return callbacks
Get configured callbacks list for a given service identifier .
175
11
237,089
def get_logout_url ( service ) : for backend in _get_backends ( ) : try : return backend . get_logout_url ( service ) except AttributeError : raise NotImplementedError ( "%s.%s.get_logout_url() not implemented" % ( backend . __class__ . __module__ , backend . __class__ . __name__ ) ) return None
Get the configured logout URL for a given service identifier if any .
89
14
237,090
def logout_allowed(service):
    """Decide whether *service* should be sent a single-logout request."""
    if hasattr(settings, 'MAMA_CAS_SERVICES'):
        # New-style configuration delegates the decision to service backends.
        return _is_allowed('logout_allowed', service)
    legacy_setting = 'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT'
    if hasattr(settings, legacy_setting):
        warnings.warn('The MAMA_CAS_ENABLE_SINGLE_SIGN_OUT setting is deprecated. SLO '
                      'should be configured using MAMA_CAS_SERVICES.', DeprecationWarning)
    return getattr(settings, legacy_setting, False)
Check if a given service identifier should be sent a logout request .
146
14
237,091
def proxy_callback_allowed(service, pgturl):
    """Check whether *pgturl* is an acceptable proxy callback for *service*."""
    if not hasattr(settings, 'MAMA_CAS_SERVICES'):
        # Legacy configuration: fall back to the deprecated URL-pattern check.
        return _is_valid_service_url(service)
    return _is_allowed('proxy_callback_allowed', service, pgturl)
Check if a given proxy callback is allowed for the given service identifier .
65
14
237,092
def clean(self):
    """Authenticate the submitted credentials and check the account is active.

    On success the authenticated User is stored as ``self.user`` so the view
    can access it; on failure a ValidationError is raised.
    """
    username = self.cleaned_data.get('username')
    password = self.cleaned_data.get('password')
    if not (username and password):
        # Missing values were already reported by field-level validation.
        return self.cleaned_data
    try:
        self.user = authenticate(request=self.request, username=username, password=password)
    except Exception:
        logger.exception("Error authenticating %s" % username)
        raise forms.ValidationError(_('Internal error while authenticating user'))
    if self.user is None:
        logger.warning("Failed authentication for %s" % username)
        raise forms.ValidationError(_('The username or password is not correct'))
    if not self.user.is_active:
        logger.warning("User account %s is disabled" % username)
        raise forms.ValidationError(_('This user account is disabled'))
    return self.cleaned_data
Pass the provided username and password to the active authentication backends and verify the user account is not disabled . If authentication succeeds the User object is assigned to the form so it can be accessed in the view .
212
41
237,093
def ns(self, prefix, tag):
    """Build a namespace-qualified name for *tag* from the URI mapped to *prefix*."""
    uri = self.prefixes[prefix]
    return etree.QName(uri, tag)
Given a prefix and an XML tag output the qualified name for proper namespace handling on output .
27
18
237,094
def validate_service_ticket(service, ticket, pgturl=None, renew=False, require_https=False):
    """Validate a service ticket string.

    Returns a (ServiceTicket, attributes, ProxyGrantingTicket-or-None)
    triple; validation failures propagate from the ticket manager.
    """
    logger.debug("Service validation request received for %s" % ticket)
    # Proxy tickets must be rejected here: /serviceValidate handles STs only.
    if ticket and ticket.startswith(ProxyTicket.TICKET_PREFIX):
        raise InvalidTicketSpec('Proxy tickets cannot be validated with /serviceValidate')
    st = ServiceTicket.objects.validate_ticket(
        ticket, service, renew=renew, require_https=require_https)
    attributes = get_attributes(st.user, st.service)
    pgt = None
    if pgturl is not None:
        logger.debug("Proxy-granting ticket request received for %s" % pgturl)
        pgt = ProxyGrantingTicket.objects.create_ticket(
            service, pgturl, user=st.user, granted_by_st=st)
    return st, attributes, pgt
Validate a service ticket string . Return a triplet containing a ServiceTicket and an optional ProxyGrantingTicket or a ValidationError if ticket validation failed .
216
34
237,095
def validate_proxy_ticket(service, ticket, pgturl=None):
    """Validate a proxy ticket string.

    Returns (ProxyTicket, attributes, ProxyGrantingTicket-or-None, proxies),
    where *proxies* lists the services that proxied authentication, in
    reverse order of traversal.
    """
    logger.debug("Proxy validation request received for %s" % ticket)
    pt = ProxyTicket.objects.validate_ticket(ticket, service)
    attributes = get_attributes(pt.user, pt.service)
    # Walk the granting chain to record every service that proxied
    # authentication, most recent first.
    proxies = []
    node = pt
    while node:
        proxies.append(node.service)
        node = node.granted_by_pgt.granted_by_pt
    pgt = None
    if pgturl is not None:
        logger.debug("Proxy-granting ticket request received for %s" % pgturl)
        pgt = ProxyGrantingTicket.objects.create_ticket(
            service, pgturl, user=pt.user, granted_by_pt=pt)
    return pt, attributes, pgt, proxies
Validate a proxy ticket string . Return a 4 - tuple containing a ProxyTicket an optional ProxyGrantingTicket and a list of proxies through which authentication proceeded or a ValidationError if ticket validation failed .
226
43
237,096
def validate_proxy_granting_ticket(pgt, target_service):
    """Validate a PGT string and issue a ProxyTicket for *target_service*.

    Validation failures propagate from the ticket manager.
    """
    logger.debug("Proxy ticket request received for %s using %s" % (target_service, pgt))
    granting = ProxyGrantingTicket.objects.validate_ticket(pgt, target_service)
    return ProxyTicket.objects.create_ticket(
        service=target_service, user=granting.user, granted_by_pgt=granting)
Validate a proxy granting ticket string . Return an ordered pair containing a ProxyTicket or a ValidationError if ticket validation failed .
103
27
237,097
def get_attributes(user, service):
    """Merge user attributes produced by every configured callback for *service*."""
    merged = {}
    for path in get_callbacks(service):
        # Callbacks are configured as dotted paths; resolve, then invoke.
        merged.update(import_string(path)(user, service))
    return merged
Return a dictionary of user attributes from the set of configured callback functions .
47
14
237,098
def logout_user(request):
    """End the current user's single sign-on session and consume their tickets."""
    user = request.user
    logger.debug("Logout request received for %s" % user)
    if is_authenticated(user):
        # Invalidate all outstanding tickets of every kind for this user.
        for manager in (ServiceTicket.objects, ProxyTicket.objects,
                        ProxyGrantingTicket.objects):
            manager.consume_tickets(user)
        # Fire single-logout requests toward the services that were used.
        ServiceTicket.objects.request_sign_out(user)
        logger.info("Single sign-on session ended for %s" % user)
        logout(request)
        messages.success(request, _('You have been successfully logged out'))
End a single sign - on session for the current user .
144
12
237,099
def user_name_attributes(user, service):
    """Return the user's name-related attributes: username, full and short name."""
    return {
        'username': user.get_username(),
        'full_name': user.get_full_name(),
        'short_name': user.get_short_name(),
    }
Return all available user name related fields and methods .
69
10