idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
25,700
def filtered(self, step_names):
    """Return a copy of this graph restricted to the given step names."""
    filtered_dag = self.dag.filter(step_names)
    return Graph(steps=self.steps, dag=filtered_dag)
Returns a filtered version of this graph .
25,701
def execute(self, *args, **kwargs):
    """Walk every step, raising PlanFailed if any step ends up FAILED."""
    self.walk(*args, **kwargs)
    failures = [s for s in self.steps if s.status == FAILED]
    if failures:
        raise PlanFailed(failures)
Walks each step in the underlying graph and raises an exception if any of the steps fail .
25,702
def walk(self, walker):
    """Walk each step in the underlying graph in topological order.

    A step is marked failed (and not run) when any of its direct
    dependencies did not complete successfully.
    """
    def visit(step):
        # Fail fast if any direct dependency did not finish ok.
        for dependency in self.graph.downstream(step.name):
            if not dependency.ok:
                step.set_status(FailedStatus("dependency has failed"))
                return step.ok
        return step.run()

    return self.graph.walk(walker, visit)
Walks each step in the underlying graph in topological order .
25,703
def create(self, value):
    """Create the troposphere type(s) from the given config value.

    Args:
        value: dict of parameters (or, when many, a list of such dicts;
            for resource types, a dict of title to parameters).

    Returns:
        None when optional and empty, a single instance, or a list of
        instances when this type accepts many.

    Raises:
        ValueError: if the value's shape doesn't match the type.
    """
    if self._optional and (value is None or len(value) == 0):
        return None

    if hasattr(self._type, 'resource_type'):
        # Resource types are specified as {title: parameters}.
        if not isinstance(value, dict):
            raise ValueError("Resources must be specified as a dict of "
                             "title to parameters")
        if not self._many and len(value) > 1:
            raise ValueError("Only one resource can be provided for this "
                             "TroposphereType variable")
        result = [self._type.from_dict(title, v)
                  for title, v in value.items()]
    else:
        if self._many:
            result = [self._type.from_dict(None, v) for v in value]
        elif not isinstance(value, dict):
            # BUG FIX: original implicit concatenation produced the
            # garbled message "...non-resourcetype must be...".
            raise ValueError("TroposphereType for a single non-resource "
                             "type must be specified as a dict of "
                             "parameters")
        else:
            result = [self._type.from_dict(None, value)]

    if self._validate:
        for v in result:
            v._validate_props()

    return result[0] if not self._many else result
Create the troposphere type from the value .
25,704
def _lookup_key_parse ( table_keys ) : regex_matcher = '\[([^\]]+)]' valid_dynamodb_datatypes = [ 'M' , 'S' , 'N' , 'L' ] clean_table_keys = [ ] new_keys = [ ] for key in table_keys : match = re . search ( regex_matcher , key ) if match : if match . group ( 1 ) in valid_dynamodb_datatypes : match_val = str ( match . group ( 1 ) ) key = key . replace ( match . group ( 0 ) , '' ) new_keys . append ( { match_val : key } ) clean_table_keys . append ( key ) else : raise ValueError ( ( 'Stacker does not support looking up the datatype: {}' ) . format ( str ( match . group ( 1 ) ) ) ) else : new_keys . append ( { 'S' : key } ) clean_table_keys . append ( key ) key_dict = { } key_dict [ 'new_keys' ] = new_keys key_dict [ 'clean_table_keys' ] = clean_table_keys return key_dict
Parse the dynamodb lookup keys, extracting any [TYPE] datatype annotations and returning the parsed keys alongside the cleaned key names.
25,705
def _build_projection_expression ( clean_table_keys ) : projection_expression = '' for key in clean_table_keys [ : - 1 ] : projection_expression += ( '{},' ) . format ( key ) projection_expression += clean_table_keys [ - 1 ] return projection_expression
Given cleaned up keys this will return a projection expression for the dynamodb lookup .
25,706
def _convert_ddb_list_to_list ( conversion_list ) : ret_list = [ ] for v in conversion_list : for v1 in v : ret_list . append ( v [ v1 ] ) return ret_list
Given a dynamodb list it will return a python list without the dynamodb datatypes
25,707
def handle(cls, value, **kwargs):
    """Get a value from a dynamodb table.

    Expected lookup format (parsed below):
    ``[<region>:]<table>@<partition_key>:<key>[.<attr>...]`` where each
    key segment may carry a ``[TYPE]`` datatype annotation.
    """
    value = read_value_from_path(value)
    table_info = None
    table_keys = None
    region = None
    table_name = None
    # Split "<table_info>@<keys>"; the table part may carry a region.
    if '@' in value:
        table_info, table_keys = value.split('@', 1)
        if ':' in table_info:
            region, table_name = table_info.split(':', 1)
        else:
            table_name = table_info
    else:
        raise ValueError('Please make sure to include a tablename')

    if not table_name:
        raise ValueError('Please make sure to include a dynamodb table '
                         'name')

    # First segment is the partition key name, the rest is the
    # dot-separated attribute path into the record.
    table_lookup, table_keys = table_keys.split(':', 1)
    table_keys = table_keys.split('.')
    key_dict = _lookup_key_parse(table_keys)
    new_keys = key_dict['new_keys']
    clean_table_keys = key_dict['clean_table_keys']
    projection_expression = _build_projection_expression(clean_table_keys)
    dynamodb = get_session(region).client('dynamodb')
    try:
        response = dynamodb.get_item(
            TableName=table_name,
            Key={table_lookup: new_keys[0]},
            ProjectionExpression=projection_expression
        )
    except ClientError as e:
        # Re-raise AWS errors as ValueError with lookup context.
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            raise ValueError(
                'Cannot find the dynamodb table: {}'.format(table_name))
        elif e.response['Error']['Code'] == 'ValidationException':
            raise ValueError(
                'No dynamodb record matched the partition key: '
                '{}'.format(table_lookup))
        else:
            raise ValueError('The dynamodb lookup {} had an error: '
                             '{}'.format(value, e))
    if 'Item' in response:
        # Walk the remaining key path into the returned item.
        return (_get_val_from_ddb_data(response['Item'], new_keys[1:]))
    else:
        raise ValueError(
            'The dynamodb record could not be found using the following '
            'key: {}'.format(new_keys[0]))
Get a value from a dynamodb table
25,708
def plan(description, stack_action, context, tail=None, reverse=False):
    """Build a graph-based plan from the stacks and targets in a context."""
    def target_fn(*args, **kwargs):
        # Targets have no work of their own; they complete immediately.
        return COMPLETE

    steps = [Step(stack, fn=stack_action, watch_func=tail)
             for stack in context.get_stacks()]
    for target in context.get_targets():
        steps.append(Step(target, fn=target_fn))

    return build_plan(
        description=description,
        graph=build_graph(steps),
        targets=context.stack_names,
        reverse=reverse)
A simple helper that builds a graph based plan from a set of stacks .
25,709
def stack_template_key_name(blueprint):
    """Return the S3 key name under which a blueprint's template is stored."""
    name = blueprint.name
    fqn = blueprint.context.get_fqn(name)
    return "stack_templates/%s/%s-%s.json" % (fqn, name, blueprint.version)
Given a blueprint produce an appropriate key name .
25,710
def stack_template_url(bucket_name, blueprint, endpoint):
    """Produce an S3 url for the given blueprint's template."""
    key_name = stack_template_key_name(blueprint)
    return "%s/%s/%s" % (endpoint, bucket_name, key_name)
Produces an s3 url for a given blueprint .
25,711
def ensure_cfn_bucket(self):
    """Ensure the CloudFormation template bucket exists, if one is set."""
    if not self.bucket_name:
        return
    ensure_s3_bucket(self.s3_conn, self.bucket_name, self.bucket_region)
The CloudFormation bucket where templates will be stored .
25,712
def s3_stack_push(self, blueprint, force=False):
    """Push the rendered blueprint's template to S3.

    Skips the upload when the object already exists, unless force is
    set. Returns the template URL either way.
    """
    key_name = stack_template_key_name(blueprint)
    template_url = self.stack_template_url(blueprint)

    try:
        head = self.s3_conn.head_object(Bucket=self.bucket_name,
                                        Key=key_name)
        template_exists = head is not None
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != '404':
            raise
        template_exists = False

    if template_exists and not force:
        logger.debug("Cloudformation template %s already exists.",
                     template_url)
        return template_url

    self.s3_conn.put_object(Bucket=self.bucket_name,
                            Key=key_name,
                            Body=blueprint.rendered,
                            ServerSideEncryption='AES256',
                            ACL='bucket-owner-full-control')
    logger.debug("Blueprint %s pushed to %s.", blueprint.name, template_url)
    return template_url
Pushes the rendered blueprint's template to S3.
25,713
def _zip_files(files, root):
    """Generate an in-memory ZIP from files (paths relative to root).

    Permissions inside the archive are normalized to 0755 (executable)
    or 0644 so the payload is reproducible across checkouts.

    Returns a (contents, content_hash) tuple.
    """
    zip_data = StringIO()
    with ZipFile(zip_data, 'w', ZIP_DEFLATED) as zip_file:
        for fname in files:
            zip_file.write(os.path.join(root, fname), fname)

        # Normalize the unix permission bits stored in the archive.
        for zip_entry in zip_file.filelist:
            perms = (zip_entry.external_attr & ZIP_PERMS_MASK) >> 16
            new_perms = 0o755 if perms & stat.S_IXUSR != 0 else 0o644
            if new_perms != perms:
                logger.debug("lambda: fixing perms: %s: %o => %o",
                             zip_entry.filename, perms, new_perms)
                zip_entry.external_attr = (
                    (zip_entry.external_attr & ~ZIP_PERMS_MASK) |
                    (new_perms << 16))

    contents = zip_data.getvalue()
    zip_data.close()
    return contents, _calculate_hash(files, root)
Generates a ZIP file in - memory from a list of files .
25,714
def _calculate_hash ( files , root ) : file_hash = hashlib . md5 ( ) for fname in sorted ( files ) : f = os . path . join ( root , fname ) file_hash . update ( ( fname + "\0" ) . encode ( ) ) with open ( f , "rb" ) as fd : for chunk in iter ( lambda : fd . read ( 4096 ) , "" ) : if not chunk : break file_hash . update ( chunk ) file_hash . update ( "\0" . encode ( ) ) return file_hash . hexdigest ( )
Returns a hash of all of the given files at the given root .
25,715
def _find_files(root, includes, excludes, follow_symlinks):
    """Yield relative paths under root matching include/exclude globs."""
    base = os.path.abspath(root)
    matches = formic.FileSet(
        directory=base,
        include=includes,
        exclude=excludes,
        symlinks=follow_symlinks,
    )
    for filename in matches.qualified_files(absolute=False):
        yield filename
List files inside a directory based on include and exclude rules .
25,716
def _zip_from_file_patterns(root, includes, excludes, follow_symlinks):
    """Generate an in-memory ZIP from include/exclude search patterns."""
    logger.info('lambda: base directory: %s', root)

    files = list(_find_files(root, includes, excludes, follow_symlinks))
    if not files:
        raise RuntimeError('Empty list of files for Lambda payload. Check '
                           'your include/exclude options for errors.')

    logger.info('lambda: adding %d files:', len(files))
    for fname in files:
        logger.debug('lambda: + %s', fname)

    return _zip_files(files, root)
Generates a ZIP file in - memory from file search patterns .
25,717
def _head_object(s3_conn, bucket, key):
    """Return HEAD metadata for an S3 object, or None when it's missing."""
    try:
        return s3_conn.head_object(Bucket=bucket, Key=key)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != '404':
            raise
        return None
Retrieve information about an object in S3 if it exists .
25,718
def _upload_code(s3_conn, bucket, prefix, name, contents, content_hash,
                 payload_acl):
    """Upload a Lambda ZIP payload to S3, skipping identical objects.

    The content hash is baked into the key, so an object that already
    exists under the key must have the same contents.
    """
    logger.debug('lambda: ZIP hash: %s', content_hash)
    key = '{}lambda-{}-{}.zip'.format(prefix, name, content_hash)

    if _head_object(s3_conn, bucket, key):
        logger.info('lambda: object %s already exists, not uploading', key)
    else:
        logger.info('lambda: uploading object %s', key)
        s3_conn.put_object(Bucket=bucket, Key=key, Body=contents,
                           ContentType='application/zip',
                           ACL=payload_acl)

    return Code(S3Bucket=bucket, S3Key=key)
Upload a ZIP file to S3 for use by Lambda .
25,719
def _check_pattern_list ( patterns , key , default = None ) : if not patterns : return default if isinstance ( patterns , basestring ) : return [ patterns ] if isinstance ( patterns , list ) : if all ( isinstance ( p , basestring ) for p in patterns ) : return patterns raise ValueError ( "Invalid file patterns in key '{}': must be a string or " 'list of strings' . format ( key ) )
Validates file search patterns from user configuration .
25,720
def _upload_function(s3_conn, bucket, prefix, name, options, follow_symlinks,
                     payload_acl):
    """Build one Lambda payload from its config and upload it to S3."""
    try:
        root = os.path.expanduser(options['path'])
    except KeyError as e:
        raise ValueError(
            "missing required property '{}' in function '{}'".format(
                e.args[0], name))

    includes = _check_pattern_list(options.get('include'), 'include',
                                   default=['**'])
    excludes = _check_pattern_list(options.get('exclude'), 'exclude',
                                   default=[])

    logger.debug('lambda: processing function %s', name)

    # Relative paths are resolved against the stacker config directory.
    if not os.path.isabs(root):
        root = os.path.abspath(os.path.join(get_config_directory(), root))

    zip_contents, content_hash = _zip_from_file_patterns(
        root, includes, excludes, follow_symlinks)

    return _upload_code(s3_conn, bucket, prefix, name, zip_contents,
                        content_hash, payload_acl)
Builds a Lambda payload from user configuration and uploads it to S3 .
25,721
def select_bucket_region(custom_bucket, hook_region, stacker_bucket_region,
                         provider_region):
    """Return the region to use when uploading lambda payloads.

    A custom bucket uses the hook's region; otherwise the stacker
    bucket region wins. The provider region is the final fallback.
    """
    region = hook_region if custom_bucket else stacker_bucket_region
    return region or provider_region
Returns the appropriate region to use when uploading functions .
25,722
def upload_lambda_functions(context, provider, **kwargs):
    """Build Lambda payloads from user config and upload them to S3."""
    custom_bucket = kwargs.get('bucket')
    if custom_bucket:
        bucket_name = custom_bucket
        logger.info("lambda: using custom bucket: %s", bucket_name)
    else:
        bucket_name = context.bucket_name
        logger.info("lambda: using default bucket from stacker: %s",
                    bucket_name)

    custom_bucket_region = kwargs.get("bucket_region")
    if not custom_bucket and custom_bucket_region:
        raise ValueError("Cannot specify `bucket_region` without specifying "
                         "`bucket`.")

    bucket_region = select_bucket_region(
        custom_bucket,
        custom_bucket_region,
        context.config.stacker_bucket_region,
        provider.region
    )

    follow_symlinks = kwargs.get('follow_symlinks', False)
    if not isinstance(follow_symlinks, bool):
        raise ValueError('follow_symlinks option must be a boolean')

    payload_acl = kwargs.get('payload_acl', 'private')

    session = get_session(bucket_region)
    s3_client = session.client('s3')
    ensure_s3_bucket(s3_client, bucket_name, bucket_region)

    prefix = kwargs.get('prefix', '')

    results = {}
    for name, options in kwargs['functions'].items():
        results[name] = _upload_function(s3_client, bucket_name, prefix,
                                         name, options, follow_symlinks,
                                         payload_acl)
    return results
Builds Lambda payloads from user configuration and uploads them to S3 .
25,723
def handle(cls, value, **kwargs):
    """Decrypt a (possibly region-prefixed) base64 KMS ciphertext."""
    value = read_value_from_path(value)

    region = None
    if "@" in value:
        region, value = value.split("@", 1)

    kms = get_session(region).client('kms')
    blob = codecs.decode(value.encode('utf-8'), 'base64')
    return kms.decrypt(CiphertextBlob=blob)["Plaintext"]
Decrypt the specified value with a master key in KMS .
25,724
def diff_dictionaries(old_dict, new_dict):
    """Diff two flat dictionaries.

    Returns [changes, output] where changes counts added, removed and
    modified keys, and output is a key-sorted list of DictValue entries
    (unmodified keys are included in output but not counted).
    """
    old_set = set(old_dict)
    new_set = set(new_dict)

    changes = 0
    output = []

    for key in new_set - old_set:
        changes += 1
        output.append(DictValue(key, None, new_dict[key]))

    for key in old_set - new_set:
        changes += 1
        output.append(DictValue(key, old_dict[key], None))

    for key in old_set & new_set:
        output.append(DictValue(key, old_dict[key], new_dict[key]))
        if str(old_dict[key]) != str(new_dict[key]):
            changes += 1

    output.sort(key=attrgetter("key"))
    return [changes, output]
Diffs two single dimension dictionaries
25,725
def diff_parameters(old_params, new_params):
    """Compare old vs. new parameters; return the diff, or [] if equal."""
    changes, diff = diff_dictionaries(old_params, new_params)
    if changes == 0:
        return []
    return diff
Compares the old vs . new parameters and returns a diff
25,726
def normalize_json(template):
    """Normalize a CloudFormation template into newline-terminated lines."""
    obj = parse_cloudformation_template(template)
    json_str = json.dumps(
        obj, sort_keys=True, indent=4, default=str, separators=(',', ': '),
    )
    return [line + "\n" for line in json_str.split("\n")]
Normalize our template for diffing .
25,727
def changes(self):
    """Return diff-style lines describing this value's old/new state."""
    status = self.status()
    if status is self.ADDED:
        return [self.formatter % ('+', self.key, self.new_value)]
    if status is self.REMOVED:
        return [self.formatter % ('-', self.key, self.old_value)]
    if status is self.MODIFIED:
        return [self.formatter % ('-', self.key, self.old_value),
                self.formatter % ('+', self.key, self.new_value)]
    if status is self.UNMODIFIED:
        return [self.formatter % (' ', self.key, self.old_value)]
    return []
Returns a list of changes to represent the diff between old and new value .
25,728
def _diff_stack(self, stack, **kwargs):
    """Diff a stack's rendered template/parameters against CloudFormation."""
    if self.cancel.wait(0):
        return INTERRUPTED

    if not build.should_submit(stack):
        return NotSubmittedStatus()

    if not build.should_update(stack):
        return NotUpdatedStatus()

    provider = self.build_provider(stack)
    provider_stack = provider.get_stack(stack.fqn)

    # Deployed template/parameters, if the stack already exists.
    try:
        [old_template, old_params] = provider.get_stack_info(
            provider_stack)
    except exceptions.StackDoesNotExist:
        old_template = None
        old_params = {}

    stack.resolve(self.context, provider)
    parameters = self.build_parameters(stack)
    new_params = dict()
    for p in parameters:
        new_params[p['ParameterKey']] = p['ParameterValue']
    new_template = stack.blueprint.rendered
    new_stack = normalize_json(new_template)

    output = ["============== Stack: %s ==============" % (stack.name,)]

    if not old_template:
        # No deployed stack: the entire new template is the diff.
        output.extend(self._build_new_template(new_stack, parameters))
    else:
        old_template = parse_cloudformation_template(old_template)
        if isinstance(old_template, str):
            # The parsed template can itself still be a string
            # (double-encoded); parse a second time to get a dict.
            old_template = parse_cloudformation_template(old_template)
        old_stack = normalize_json(
            json.dumps(old_template,
                       sort_keys=True,
                       indent=4,
                       default=str))
        output.extend(build_stack_changes(stack.name, new_stack, old_stack,
                                          new_params, old_params))
    ui.info('\n' + '\n'.join(output))

    stack.set_outputs(provider.get_output_dict(provider_stack))

    return COMPLETE
Handles the diffing a stack in CloudFormation vs our config
25,729
def each_step(graph):
    """Yield (step, direct_dependencies) in reverse topological order."""
    ordering = graph.topological_sort()
    ordering.reverse()

    for step in ordering:
        yield step, graph.downstream(step.name)
Returns an iterator that yields each step and its direct dependencies.
25,730
def dot_format(out, graph, name="digraph"):
    """Write the graph to *out* in graphviz dot format."""
    out.write("digraph %s {\n" % name)
    for step, deps in each_step(graph):
        for dep in deps:
            out.write("  \"%s\" -> \"%s\";\n" % (step, dep))
    out.write("}\n")
Outputs the graph using the graphviz dot format .
25,731
def json_format(out, graph):
    """Write the graph to *out* as machine-readable JSON."""
    steps = {}
    for step, deps in each_step(graph):
        steps[step.name] = {"deps": [dep.name for dep in deps]}

    json.dump({"steps": steps}, out, indent=4)
    out.write("\n")
Outputs the graph in a machine readable JSON format .
25,732
def run(self, format=None, reduce=False, *args, **kwargs):
    """Generate the underlying graph and print it in the chosen format."""
    plan = self._generate_plan()
    if reduce:
        # Drop redundant edges to simplify the rendered graph.
        plan.graph.transitive_reduction()

    formatter = FORMATTERS[format]
    formatter(sys.stdout, plan.graph)
    sys.stdout.flush()
Generates the underlying graph and prints it .
25,733
def get_cert_contents(kwargs):
    """Build IAM server-certificate upload parameters from cert files.

    Prompts for any path not supplied in kwargs; entering "skip" (or a
    blank line) leaves that part out. Each path may also be a file-like
    object exposing read().
    """
    paths = {
        "certificate": kwargs.get("path_to_certificate"),
        "private_key": kwargs.get("path_to_private_key"),
        "chain": kwargs.get("path_to_chain"),
    }

    for key, value in paths.items():
        if value is not None:
            continue
        path = input("Path to %s (skip): " % (key,))
        if path == "skip" or not path.strip():
            continue
        paths[key] = path

    parameters = {
        "ServerCertificateName": kwargs.get("cert_name"),
    }

    # Maps each local key to the IAM upload-server-certificate parameter.
    param_names = {
        "certificate": "CertificateBody",
        "private_key": "PrivateKey",
        "chain": "CertificateChain",
    }

    for key, path in paths.items():
        if not path:
            continue
        # File-like objects have read(); plain path strings are opened.
        try:
            contents = path.read()
        except AttributeError:
            with open(utils.full_path(path)) as read_file:
                contents = read_file.read()
        parameters[param_names[key]] = contents

    return parameters
Builds parameters with server cert file contents .
25,734
def get_template_path(filename):
    """Find a raw template in the working directory or on sys.path.

    Returns the absolute path, or None when the file cannot be found.
    """
    if os.path.isfile(filename):
        return os.path.abspath(filename)

    for entry in sys.path:
        candidate = os.path.join(entry, filename)
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)

    return None
Find raw template in working directory or in sys . path .
25,735
def create_clusters(provider, context, **kwargs):
    """Create the ECS clusters named in the hook's "clusters" argument."""
    conn = get_session(provider.region).client('ecs')

    try:
        clusters = kwargs["clusters"]
    except KeyError:
        logger.error("setup_clusters hook missing \"clusters\" argument")
        return False

    # A single cluster may be given as a bare string.
    if isinstance(clusters, basestring):
        clusters = [clusters]

    cluster_info = {}
    for cluster in clusters:
        logger.debug("Creating ECS cluster: %s", cluster)
        r = conn.create_cluster(clusterName=cluster)
        cluster_info[r["cluster"]["clusterName"]] = r

    return {"clusters": cluster_info}
Creates ECS clusters .
25,736
def build_parameter(name, properties):
    """Build a troposphere Parameter with the given properties.

    Args:
        name: the parameter's logical name.
        properties: dict of parameter properties ("type" plus any of
            the keys in PARAMETER_PROPERTIES).

    Returns:
        troposphere.Parameter
    """
    p = Parameter(name, Type=properties.get("type"))
    # BUG FIX: the loop variable was named `name`, shadowing the
    # function parameter; renamed for clarity (behavior unchanged,
    # since `name` was not used after Parameter construction).
    for prop_name, attr in PARAMETER_PROPERTIES.items():
        if prop_name in properties:
            setattr(p, attr, properties[prop_name])
    return p
Builds a troposphere Parameter with the given properties .
25,737
def validate_variable_type(var_name, var_type, value):
    """Ensure a variable value matches its declared type.

    CFNType values are wrapped in a CFNParameter; TroposphereType
    values are instantiated via create(); anything else must already
    be an instance of var_type.
    """
    if isinstance(var_type, CFNType):
        return CFNParameter(name=var_name, value=value)

    if isinstance(var_type, TroposphereType):
        try:
            return var_type.create(value)
        except Exception as exc:
            name = "{}.create".format(var_type.resource_name)
            raise ValidatorError(var_name, name, value, exc)

    if not isinstance(value, var_type):
        raise ValueError(
            "Value for variable %s must be of type %s. Actual "
            "type: %s." % (var_name, var_type, type(value)))
    return value
Ensures the value is the correct variable type .
25,738
def validate_allowed_values(allowed_values, value):
    """Check a value against a variable's allowed_values list.

    An empty/missing allowed_values list permits anything, and
    CFNParameter values are validated by CloudFormation itself.
    """
    if not allowed_values:
        return True
    if isinstance(value, CFNParameter):
        return True
    return value in allowed_values
Support a variable defining which values it allows .
25,739
def parse_user_data(variables, raw_user_data, blueprint_name):
    """Render a user_data template with the given variables.

    Args:
        variables: dict of blueprint variable values; CFNParameter
            values are converted via to_parameter_value().
        raw_user_data: the template text (string.Template syntax).
        blueprint_name: used for error reporting.

    Raises:
        InvalidUserdataPlaceholder: malformed ${...} placeholder.
        MissingVariable: placeholder with no matching variable.
    """
    variable_values = {}
    for key, value in variables.items():
        # BUG FIX: use isinstance() instead of an exact type()
        # comparison so CFNParameter subclasses are converted too.
        if isinstance(value, CFNParameter):
            variable_values[key] = value.to_parameter_value()
        else:
            variable_values[key] = value

    template = string.Template(raw_user_data)

    res = ""
    try:
        res = template.substitute(variable_values)
    except ValueError as exp:
        raise InvalidUserdataPlaceholder(blueprint_name, exp.args[0])
    except KeyError as key:
        raise MissingVariable(blueprint_name, key)

    return res
Parse the given user data and renders it as a template
25,740
def get_parameter_definitions(self):
    """Return CloudFormation parameter definitions for CFNType variables."""
    output = {}
    for var_name, attrs in self.defined_variables().items():
        var_type = attrs.get("type")
        if not isinstance(var_type, CFNType):
            continue
        cfn_attrs = copy.deepcopy(attrs)
        cfn_attrs["type"] = var_type.parameter_type
        output[var_name] = cfn_attrs
    return output
Get the parameter definitions to submit to CloudFormation .
25,741
def get_required_parameter_definitions(self):
    """Return all template parameters that lack a default value."""
    return {name: attrs
            for name, attrs in self.get_parameter_definitions().items()
            if "Default" not in attrs}
Returns all template parameters that do not have a default value .
25,742
def setup_parameters(self):
    """Add this blueprint's CloudFormation parameters to the template."""
    t = self.template
    parameters = self.get_parameter_definitions()

    if not parameters:
        logger.debug("No parameters defined.")
        return

    for name, attrs in parameters.items():
        t.add_parameter(build_parameter(name, attrs))
Add any CloudFormation parameters to the template
25,743
def render_template(self):
    """Render the blueprint to a (version, template_json) tuple.

    The version is the first 8 hex chars of the rendered body's md5.
    """
    self.import_mappings()
    self.create_template()
    if self.description:
        self.set_template_description(self.description)
    self.setup_parameters()

    rendered = self.template.to_json(indent=self.context.template_indent)
    version = hashlib.md5(rendered.encode()).hexdigest()[:8]
    return version, rendered
Render the Blueprint to a CloudFormation template
25,744
def to_json(self, variables=None):
    """Render the blueprint and return the template as JSON.

    Any defined parameter not covered by *variables* is filled with a
    placeholder value so rendering can proceed.
    """
    variables_to_resolve = []
    if variables:
        variables_to_resolve = [Variable(k, v)
                                for k, v in variables.items()]

    for k in self.get_parameter_definitions():
        if not variables or k not in variables:
            # Dummy value for any parameter not supplied.
            variables_to_resolve.append(Variable(k, 'unused_value'))

    self.resolve_variables(variables_to_resolve)
    return self.render_template()[1]
Render the blueprint and return the template in json form .
25,745
def read_user_data(self, user_data_path):
    """Read a user_data file and render it with this blueprint's variables."""
    raw_user_data = read_value_from_path(user_data_path)
    return parse_user_data(self.get_variables(), raw_user_data, self.name)
Reads and parses a user_data file .
25,746
def add_output(self, name, value):
    """Add an Output with the given name and value to the template."""
    output = Output(name, Value=value)
    self.template.add_output(output)
Simple helper for adding outputs .
25,747
def setup_logging(verbosity, formats=None):
    """Configure root logging from a verbosity level and optional formats.

    formats may override the "info", "color" and "debug" format strings.
    """
    if formats is None:
        formats = {}

    log_level = logging.INFO
    log_format = formats.get("info", INFO_FORMAT)

    if sys.stdout.isatty():
        log_format = formats.get("color", COLOR_FORMAT)

    if verbosity > 0:
        log_level = logging.DEBUG
        log_format = formats.get("debug", DEBUG_FORMAT)

    if verbosity < 2:
        # botocore is extremely chatty at DEBUG/INFO.
        logging.getLogger("botocore").setLevel(logging.CRITICAL)

    hdlr = logging.StreamHandler()
    hdlr.setFormatter(ColorFormatter(log_format, ISO_8601))
    logging.root.addHandler(hdlr)
    logging.root.setLevel(log_level)
Configure a proper logger based on verbosity and optional log formats .
25,748
def _gather_variables(stack_def):
    """Convert a stack definition's variables into Variable objects."""
    values = copy.deepcopy(stack_def.variables or {})
    return [Variable(key, value) for key, value in values.items()]
Merges context provided & stack defined variables .
25,749
def tags(self):
    """Return global context tags merged with stack-specific overrides."""
    stack_tags = self.definition.tags or {}
    merged = dict(self.context.tags)
    merged.update(stack_tags)
    return merged
Returns the tags that should be set on this stack . Includes both the global tags as well as any stack specific tags or overrides .
25,750
def resolve(self, context, provider):
    """Resolve the Stack variables.

    Variables are resolved against the given context/provider first,
    then handed to the blueprint for its own resolution pass.
    """
    resolve_variables(self.variables, context, provider)
    self.blueprint.resolve_variables(self.variables)
Resolve the Stack variables .
25,751
def _parameterize_string(raw):
    """Replace ${...} placeholders in a string with CloudFormation Refs.

    Returns a GenericHelperFn wrapping either the raw string (when no
    placeholder is present) or an Fn::Join of literal pieces and Refs.
    """
    pieces = []
    cursor = 0
    for match in _PARAMETER_PATTERN.finditer(raw):
        pieces.append(raw[cursor:match.start()])
        pieces.append({u"Ref": match.group(1)})
        cursor = match.end()

    if not pieces:
        # No placeholders at all: wrap the raw string unchanged.
        return GenericHelperFn(raw)

    pieces.append(raw[cursor:])
    return GenericHelperFn({u"Fn::Join": [u"", pieces]})
Substitute placeholders in a string using CloudFormation references
25,752
def parameterized_codec(raw, b64):
    """Parameterize a string, optionally Base64-encoding the result."""
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8')

    result = _parameterize_string(raw)
    if b64:
        return Base64(result.data)
    return result
Parameterize a string possibly encoding it as Base64 afterwards
25,753
def _parameterize_obj(obj):
    """Recursively parameterize every string in a JSON-like structure."""
    if isinstance(obj, Mapping):
        return {key: _parameterize_obj(value)
                for key, value in obj.items()}
    if isinstance(obj, bytes):
        return _parameterize_string(obj.decode('utf8'))
    if isinstance(obj, str):
        return _parameterize_string(obj)
    if isinstance(obj, Sequence):
        return [_parameterize_obj(item) for item in obj]
    return obj
Recursively parameterize all strings contained in an object .
25,754
def handle(cls, value, **kwargs):
    """Translate a "<codec>:<path>" value into decoded file contents."""
    try:
        codec, path = value.split(":", 1)
    except ValueError:
        raise TypeError(
            "File value must be of the format"
            " \"<codec>:<path>\" (got %s)" % (value)
        )

    contents = read_value_from_path(path)
    return CODECS[codec](contents)
Translate a filename into the file contents .
25,755
def handle(cls, value, **kwargs):
    """Look up an environment variable, raising ValueError if unset."""
    value = read_value_from_path(value)

    if value in os.environ:
        return os.environ[value]
    raise ValueError('EnvVar "{}" does not exist'.format(value))
Retrieve an environment variable .
25,756
def ask_for_approval(full_changeset=None, params_diff=None,
                     include_verbose=False):
    """Prompt the user to approve executing a change set.

    "v" (when offered) logs the full changeset and re-prompts; anything
    other than "y" cancels execution.
    """
    approval_options = ['y', 'n']
    if include_verbose:
        approval_options.append('v')

    prompt = "Execute the above changes? [{}] ".format(
        '/'.join(approval_options))
    approve = ui.ask(prompt).lower()

    if include_verbose and approve == "v":
        if params_diff:
            logger.info(
                "Full changeset:\n\n%s\n%s",
                format_params_diff(params_diff),
                yaml.safe_dump(full_changeset),
            )
        else:
            logger.info(
                "Full changeset:\n%s",
                yaml.safe_dump(full_changeset),
            )
        # Re-prompt (without the verbose option this time).
        return ask_for_approval()
    elif approve != "y":
        raise exceptions.CancelExecution
Prompt the user for approval to execute a change set .
25,757
def output_summary(fqn, action, changeset, params_diff,
                   replacements_only=False):
    """Log a summary of the changeset, replacements listed separately."""
    replacements = []
    changes = []
    for change in changeset:
        resource = change['ResourceChange']
        summary = '- %s %s (%s)' % (
            resource['Action'],
            resource['LogicalResourceId'],
            resource['ResourceType'],
        )
        if resource.get('Replacement') == 'True':
            replacements.append(summary)
        else:
            changes.append(summary)

    summary = ''
    if params_diff:
        summary += summarize_params_diff(params_diff)
    if replacements:
        if not replacements_only:
            summary += 'Replacements:\n'
        summary += '\n'.join(replacements)
    if changes:
        if summary:
            summary += '\n'
        summary += 'Changes:\n%s' % ('\n'.join(changes))
    logger.info('%s %s:\n%s', fqn, action, summary)
Log a summary of the changeset .
25,758
def wait_till_change_set_complete(cfn_client, change_set_id, try_count=25,
                                  sleep_time=.5, max_sleep=3):
    """Poll a changeset until it reaches a terminal state.

    Polls up to try_count times with exponential backoff capped at
    max_sleep seconds; raises ChangesetDidNotStabilize on timeout.
    """
    complete = False
    response = None
    for _ in range(try_count):
        response = cfn_client.describe_change_set(
            ChangeSetName=change_set_id,
        )
        complete = response["Status"] in ("FAILED", "CREATE_COMPLETE")
        if complete:
            break
        if sleep_time == max_sleep:
            logger.debug("Still waiting on changeset for another %s seconds",
                         sleep_time)
        time.sleep(sleep_time)
        sleep_time = min(sleep_time * 2, max_sleep)

    if not complete:
        raise exceptions.ChangesetDidNotStabilize(change_set_id)
    return response
Checks state of a changeset returning when it is in a complete state .
25,759
def check_tags_contain(actual, expected):
    """Check that every expected AWS tag appears in actual.

    Tags are lists of {"Key": ..., "Value": ...} dicts.
    """
    def as_pairs(tags):
        return set((tag["Key"], tag["Value"]) for tag in tags)

    return as_pairs(actual) >= as_pairs(expected)
Check if a set of AWS resource tags is contained in another
25,760
def generate_cloudformation_args(stack_name, parameters, tags, template,
                                 capabilities=DEFAULT_CAPABILITIES,
                                 change_set_type=None,
                                 service_role=None,
                                 stack_policy=None,
                                 change_set_name=None):
    """Generate keyword args for common CloudFormation API interactions."""
    args = {
        "StackName": stack_name,
        "Parameters": parameters,
        "Tags": tags,
        "Capabilities": capabilities,
    }

    if service_role:
        args["RoleARN"] = service_role

    if change_set_name:
        args["ChangeSetName"] = change_set_name

    if change_set_type:
        args["ChangeSetType"] = change_set_type

    if template.url:
        args["TemplateURL"] = template.url
    else:
        args["TemplateBody"] = template.body

    # Stack policies aren't valid on change set calls.
    if not change_set_name:
        args.update(generate_stack_policy_args(stack_policy))

    return args
Used to generate the args for common cloudformation API interactions .
25,761
def generate_stack_policy_args(stack_policy=None):
    """Convert a stack policy object into CloudFormation keyword args."""
    if not stack_policy:
        return {}

    logger.debug("Stack has a stack policy")
    if stack_policy.url:
        # S3-hosted stack policies aren't supported yet.
        raise NotImplementedError

    return {"StackPolicyBody": stack_policy.body}
Converts a stack policy object into keyword args .
25,762
def build(self, region=None, profile=None):
    """Get or create a memoized Provider for the (region, profile) pair."""
    with self.lock:
        key = "{}-{}".format(profile, region)
        try:
            provider = self.providers[key]
        except KeyError:
            msg = "Missed memoized lookup ({}), creating new AWS Provider."
            logger.debug(msg.format(key))
            if not region:
                region = self.region
            # Memoize the freshly built provider under the same key.
            self.providers[key] = Provider(
                get_session(region=region, profile=profile),
                region=region,
                **self.kwargs
            )
            provider = self.providers[key]
        return provider
Get or create the provider for the given region and profile .
25,763
def get_rollback_status_reason(self, stack_name):
    """Return the reason attached to the stack's latest rollback event."""
    # Prefer an update-time rollback (events fetched without reversing).
    for item in self.get_events(stack_name, False):
        if item["ResourceStatus"] == "UPDATE_ROLLBACK_IN_PROGRESS":
            return item["ResourceStatusReason"]
    # Fall back to the create-time rollback event.
    event = next((item for item in self.get_events(stack_name)
                  if item["ResourceStatus"] == "ROLLBACK_IN_PROGRESS"), None)
    # NOTE(review): assumes a ROLLBACK_IN_PROGRESS event exists at this
    # point; raises TypeError otherwise -- confirm callers only invoke
    # this after a rollback actually occurred.
    return event["ResourceStatusReason"]
Process events and returns latest roll back reason
25,764
def create_stack(self, fqn, template, parameters, tags,
                 force_change_set=False, stack_policy=None, **kwargs):
    """Create a new CloudFormation stack.

    With force_change_set the stack is created through a CREATE change
    set and executed immediately; otherwise a plain CreateStack call is
    issued, falling back to an S3-uploaded template when CloudFormation
    rejects the TemplateURL.
    """
    logger.debug("Attempting to create stack %s:.", fqn)
    logger.debug(" parameters: %s", parameters)
    logger.debug(" tags: %s", tags)
    if template.url:
        logger.debug(" template_url: %s", template.url)
    else:
        logger.debug(" no template url, uploading template directly.")
    if force_change_set:
        logger.debug("force_change_set set to True, creating stack with "
                     "changeset.")
        _changes, change_set_id = create_change_set(
            self.cloudformation, fqn, template, parameters, tags, 'CREATE',
            service_role=self.service_role, **kwargs)
        self.cloudformation.execute_change_set(ChangeSetName=change_set_id)
        return
    args = generate_cloudformation_args(
        fqn, parameters, tags, template,
        service_role=self.service_role,
        stack_policy=stack_policy,
    )
    try:
        self.cloudformation.create_stack(**args)
    except botocore.exceptions.ClientError as e:
        # This specific TemplateURL complaint can mean the bucket isn't
        # directly reachable; retry through the S3 fallback path.
        if e.response['Error']['Message'] != ('TemplateURL must reference '
                                              'a valid S3 object to which '
                                              'you have access.'):
            raise
        s3_fallback(fqn, template, parameters, tags,
                    self.cloudformation.create_stack, self.service_role)
Create a new Cloudformation stack .
25,765
def select_update_method(self, force_interactive, force_change_set):
    """Pick the stack-update strategy based on interactivity flags.

    Interactive mode (provider-wide or forced) wins; otherwise a forced
    change set selects the non-interactive changeset path; the plain
    UpdateStack path is the default.
    """
    if self.interactive or force_interactive:
        return self.interactive_update_stack
    if force_change_set:
        return self.noninteractive_changeset_update
    return self.default_update_stack
Select the correct update method when updating a stack .
25,766
def prepare_stack_for_update(self, stack, tags):
    """Prepare a stack for updating, possibly destroying a failed one.

    Returns True when the stack can be updated in place, False when it
    was destroyed (or is already gone) so the caller must re-create it.
    Raises StackUpdateBadStatus when the stack cannot safely be handled.
    """
    if self.is_stack_destroyed(stack):
        return False
    if self.is_stack_completed(stack):
        return True

    stack_name = self.get_stack_name(stack)
    stack_status = self.get_stack_status(stack)

    if self.is_stack_in_progress(stack):
        raise exceptions.StackUpdateBadStatus(
            stack_name, stack_status, 'Update already in-progress')

    if not self.is_stack_recreatable(stack):
        raise exceptions.StackUpdateBadStatus(
            stack_name, stack_status, 'Unsupported state for re-creation')

    if not self.recreate_failed:
        raise exceptions.StackUpdateBadStatus(
            stack_name, stack_status,
            'Stack re-creation is disabled. Run stacker again with the '
            '--recreate-failed option to force it to be deleted and '
            'created from scratch.')

    # Refuse to destroy a stack whose tags don't match ours -- it was
    # probably not created by stacker.
    stack_tags = self.get_stack_tags(stack)
    if not check_tags_contain(stack_tags, tags):
        raise exceptions.StackUpdateBadStatus(
            stack_name, stack_status,
            'Tags differ from current configuration, possibly not created '
            'with stacker')

    if self.interactive:
        # FIX: message previously read "will IRREVERSIBLY DESTROYED"
        # (missing "be").
        sys.stdout.write(
            'The "%s" stack is in a failed state (%s).\n'
            'It cannot be updated, but it can be deleted and re-created.\n'
            'All its current resources will be IRREVERSIBLY DESTROYED.\n'
            'Proceed carefully!\n\n' % (stack_name, stack_status))
        sys.stdout.flush()
        ask_for_approval(include_verbose=False)

    # logger.warn is a deprecated alias of logger.warning.
    logger.warning('Destroying stack "%s" for re-creation', stack_name)
    self.destroy_stack(stack)
    return False
Prepare a stack for updating
25,767
def update_stack(self, fqn, template, old_parameters, parameters, tags,
                 force_interactive=False, force_change_set=False,
                 stack_policy=None, **kwargs):
    """Update a CloudFormation stack via the selected update method."""
    logger.debug("Attempting to update stack %s:", fqn)
    logger.debug(" parameters: %s", parameters)
    logger.debug(" tags: %s", tags)
    if template.url:
        logger.debug(" template_url: %s", template.url)
    else:
        logger.debug(" no template url, uploading template directly.")
    update_method = self.select_update_method(force_interactive,
                                              force_change_set)
    return update_method(fqn, template, old_parameters, parameters,
                         stack_policy=stack_policy, tags=tags, **kwargs)
Update a Cloudformation stack .
25,768
def deal_with_changeset_stack_policy(self, fqn, stack_policy):
    """Set a stack policy when using change sets; no-op without one.

    Change sets cannot carry a stack policy, so it has to be applied
    with a separate SetStackPolicy call.
    """
    if not stack_policy:
        return
    kwargs = generate_stack_policy_args(stack_policy)
    kwargs["StackName"] = fqn
    logger.debug("Setting stack policy on %s.", fqn)
    self.cloudformation.set_stack_policy(**kwargs)
Set a stack policy when using changesets .
25,769
def interactive_update_stack(self, fqn, template, old_parameters, parameters,
                             stack_policy, tags, **kwargs):
    """Update a stack interactively: diff, ask for approval, execute."""
    logger.debug("Using interactive provider mode for %s.", fqn)
    changes, change_set_id = create_change_set(
        self.cloudformation, fqn, template, parameters, tags, 'UPDATE',
        service_role=self.service_role, **kwargs)

    old_parameters_as_dict = self.params_as_dict(old_parameters)
    # Parameters reusing their previous value carry no ParameterValue;
    # fill those in from the old parameters so the diff is meaningful.
    new_parameters_as_dict = self.params_as_dict(
        [x if 'ParameterValue' in x else
         {'ParameterKey': x['ParameterKey'],
          'ParameterValue': old_parameters_as_dict[x['ParameterKey']]}
         for x in parameters]
    )
    params_diff = diff_parameters(old_parameters_as_dict,
                                  new_parameters_as_dict)

    action = "replacements" if self.replacements_only else "changes"
    full_changeset = changes
    if self.replacements_only:
        changes = requires_replacement(changes)

    if changes or params_diff:
        # Hold the UI lock so the summary and prompt aren't interleaved
        # with other threads' output.
        ui.lock()
        try:
            output_summary(fqn, action, changes, params_diff,
                           replacements_only=self.replacements_only)
            ask_for_approval(
                full_changeset=full_changeset,
                params_diff=params_diff,
                include_verbose=True,
            )
        finally:
            ui.unlock()

    self.deal_with_changeset_stack_policy(fqn, stack_policy)
    self.cloudformation.execute_change_set(ChangeSetName=change_set_id)
Update a Cloudformation stack in interactive mode .
25,770
def noninteractive_changeset_update(self, fqn, template, old_parameters,
                                    parameters, stack_policy, tags,
                                    **kwargs):
    """Update a stack through a change set without prompting the user."""
    # FIX: log message previously misspelled "noninterative".
    logger.debug("Using noninteractive changeset provider mode "
                 "for %s.", fqn)
    _changes, change_set_id = create_change_set(
        self.cloudformation, fqn, template, parameters, tags, 'UPDATE',
        service_role=self.service_role, **kwargs)
    self.deal_with_changeset_stack_policy(fqn, stack_policy)
    self.cloudformation.execute_change_set(ChangeSetName=change_set_id)
Update a Cloudformation stack using a change set .
25,771
def default_update_stack(self, fqn, template, old_parameters, parameters,
                         tags, stack_policy=None, **kwargs):
    """Update a stack with a plain UpdateStack call.

    Raises StackDidNotChange when CloudFormation reports nothing to do;
    falls back to the S3 upload path on the TemplateURL-access error.
    """
    logger.debug("Using default provider mode for %s.", fqn)
    args = generate_cloudformation_args(
        fqn, parameters, tags, template,
        service_role=self.service_role,
        stack_policy=stack_policy,
    )
    try:
        self.cloudformation.update_stack(**args)
    except botocore.exceptions.ClientError as e:
        if "No updates are to be performed." in str(e):
            logger.debug("Stack %s did not change, not updating.", fqn)
            raise exceptions.StackDidNotChange
        if e.response['Error']['Message'] == ('TemplateURL must reference '
                                              'a valid S3 object to which '
                                              'you have access.'):
            s3_fallback(fqn, template, parameters, tags,
                        self.cloudformation.update_stack, self.service_role)
        else:
            raise
Update a Cloudformation stack in default mode .
25,772
def get_stack_info(self, stack):
    """Return [template_json, parameters_dict] for the live stack.

    Raises StackDoesNotExist when CloudFormation reports the stack is
    gone; any other ClientError propagates.
    """
    stack_name = stack['StackId']
    try:
        template = self.cloudformation.get_template(
            StackName=stack_name)['TemplateBody']
    except botocore.exceptions.ClientError as e:
        if "does not exist" not in str(e):
            raise
        raise exceptions.StackDoesNotExist(stack_name)
    parameters = self.params_as_dict(stack.get('Parameters', []))
    return [json.dumps(template), parameters]
Get the template and parameters of the stack currently in AWS
25,773
def get_session(region, profile=None):
    """Create a boto3 session wired to the shared credential cache.

    Falls back to the module default profile when none is given, and
    routes assume-role MFA prompts through the UI helper.
    """
    if profile is None:
        logger.debug("No AWS profile explicitly provided. "
                     "Falling back to default.")
        profile = default_profile
    logger.debug("Building session using profile \"%s\" in region \"%s\""
                 % (profile, region))
    session = boto3.Session(region_name=region, profile_name=profile)
    # Share one credential cache across sessions so assume-role creds
    # are reused, and prompt via ui.getpass to avoid garbled output.
    credential_provider = session._session.get_component(
        'credential_provider')
    assume_role = credential_provider.get_provider('assume-role')
    assume_role.cache = credential_cache
    assume_role._prompter = ui.getpass
    return session
Creates a boto3 session with a cache
25,774
def register_lookup_handler(lookup_type, handler_or_path):
    """Register a lookup handler under the given type name.

    handler_or_path may be a handler class/callable or a dotted import
    path to one. Old-style (non-class) handlers trigger a deprecation
    warning.
    """
    handler = handler_or_path
    if isinstance(handler_or_path, basestring):
        handler = load_object_from_string(handler_or_path)
    LOOKUP_HANDLERS[lookup_type] = handler
    # FIX: `type(handler) != type` misses classes with custom metaclasses;
    # isinstance() is the robust "is this a class?" check.
    if not isinstance(handler, type):
        logger = logging.getLogger(__name__)
        logger.warning("Registering lookup `%s`: Please upgrade to use the "
                       "new style of Lookups." % lookup_type)
        warnings.warn(
            "Lookup `%s`: Please upgrade to use the new style of Lookups"
            "." % lookup_type,
            DeprecationWarning,
            stacklevel=2,
        )
Register a lookup handler .
25,775
def resolve_lookups(variable, context, provider):
    """Resolve every lookup attached to a variable.

    Returns a dict of lookup -> resolved value. Unknown lookup types
    raise UnknownLookupType; handler failures are wrapped in
    FailedVariableLookup so the error names the variable.
    """
    resolved_lookups = {}
    for lookup in variable.lookups:
        try:
            handler = LOOKUP_HANDLERS[lookup.type]
        except KeyError:
            raise UnknownLookupType(lookup)
        try:
            resolved_lookups[lookup] = handler(
                value=lookup.input,
                context=context,
                provider=provider,
            )
        except Exception as e:
            raise FailedVariableLookup(variable.name, lookup, e)
    return resolved_lookups
Resolve a set of lookups .
25,776
def handle(cls, value, **kwargs):
    """Return an environment value, falling back to an inline default.

    `value` is "<env_var>::<default value>"; the context environment is
    consulted first.
    """
    if "::" not in value:
        raise ValueError("Invalid value for default: %s. Must be in "
                         "<env_var>::<default value> format." % value)
    env_var_name, default_val = value.split("::", 1)
    env = kwargs['context'].environment
    return env.get(env_var_name, default_val)
Use a value from the environment or fall back to a default if the environment doesn t contain the variable .
25,777
def extract_lookups_from_string(value):
    """Return the set of Lookups embedded in a single string."""
    lookups = set()
    for match in LOOKUP_REGEX.finditer(value):
        groups = match.groupdict()
        lookups.add(Lookup(groups["type"], groups["input"],
                           match.groups()[0]))
    return lookups
Extract any lookups within a string .
25,778
def extract_lookups(value):
    """Recursively collect lookups from strings, lists and dicts."""
    lookups = set()
    if isinstance(value, basestring):
        lookups |= extract_lookups_from_string(value)
    elif isinstance(value, list):
        for item in value:
            lookups |= extract_lookups(item)
    elif isinstance(value, dict):
        for item in value.values():
            lookups |= extract_lookups(item)
    return lookups
Recursively extracts any stack lookups within the data structure .
25,779
def add_node(self, node_name):
    """Add a new node with no edges; KeyError if it already exists."""
    if node_name in self.graph:
        raise KeyError('node %s already exists' % node_name)
    self.graph[node_name] = set()
Add a node if it does not exist yet or error out .
25,780
def transpose(self):
    """Return a new DAG with every edge reversed."""
    transposed = DAG()
    # Add all nodes first so edges can be attached in any order.
    for node in self.graph:
        transposed.add_node(node)
    for node, edges in self.graph.items():
        for edge in edges:
            transposed.add_edge(edge, node)
    return transposed
Builds a new graph with the edges reversed .
25,781
def walk(self, walk_func):
    """Visit every node in reverse topological order, serially.

    Useful when each operation depends on its predecessors having
    completed; no parallelism is involved.
    """
    for node in reversed(self.topological_sort()):
        walk_func(node)
Walks each node of the graph in reverse topological order. This can be used to perform a set of operations where the next operation depends on the previous operation. It's important to note that walking happens serially and is not parallelized.
25,782
def transitive_reduction(self):
    """Remove edges implied by longer paths, keeping reachability.

    Mutates self.graph in place so it has as few edges as possible
    while every node can still reach the same set of nodes.
    """
    # Enumerate every edge as a 2-element path.
    paths = [[node, edge]
             for node, edges in self.graph.items()
             for edge in edges]
    # Repeatedly join paths end-to-start until no new path appears.
    while True:
        extended = []
        for left in paths:
            for right in paths:
                if left[-1] != right[0]:
                    continue
                candidate = left + right[1:]
                if candidate not in paths:
                    extended.append(candidate)
        if not extended:
            break
        paths += extended
    # A direct edge (start, end) is redundant when some longer path
    # also joins start to end.
    redundant = {(p[0], p[-1]) for p in paths if len(p) != 2}
    for node, edges in self.graph.items():
        self.graph[node] = edges - {e for n, e in redundant if n == node}
Performs a transitive reduction on the DAG . The transitive reduction of a graph is a graph with as few edges as possible with the same reachability as the original graph .
25,783
def rename_edges(self, old_node_name, new_node_name):
    """Repoint every reference to old_node_name at new_node_name.

    Renames the node's own key and updates any edge sets that point at
    it.
    """
    graph = self.graph
    # FIX: iterate over a snapshot -- the original iterated
    # graph.items() while inserting/deleting keys, which raises
    # RuntimeError on Python 3.
    for node, edges in list(graph.items()):
        if node == old_node_name:
            graph[new_node_name] = copy(edges)
            del graph[old_node_name]
        elif old_node_name in edges:
            edges.remove(old_node_name)
            edges.add(new_node_name)
Change references to a node in existing edges .
25,784
def downstream(self, node):
    """Return the list of nodes this node has edges towards."""
    if node not in self.graph:
        raise KeyError('node %s is not in graph' % node)
    return list(self.graph[node])
Returns a list of all nodes this node has edges towards .
25,785
def filter(self, nodes):
    """Return a new DAG limited to `nodes` plus everything they reach."""
    filtered_dag = DAG()
    # Seed with the requested nodes and all of their downstream deps.
    for node in nodes:
        filtered_dag.add_node_if_not_exists(node)
        for dep in self.all_downstreams(node):
            filtered_dag.add_node_if_not_exists(dep)
    # Copy the edge sets of every retained node.
    for node, edges in self.graph.items():
        if node in filtered_dag.graph:
            filtered_dag.graph[node] = edges
    return filtered_dag
Returns a new DAG with only the given nodes and their dependencies .
25,786
def topological_sort(self):
    """Return the nodes in topological order (Kahn's algorithm).

    Raises ValueError when the graph contains a cycle.
    """
    graph = self.graph
    in_degree = {node: 0 for node in graph}
    for node in graph:
        for dep in graph[node]:
            in_degree[dep] += 1

    # FIFO queue of nodes with no remaining incoming edges.
    queue = deque(node for node in in_degree if in_degree[node] == 0)
    ordered = []
    while queue:
        node = queue.popleft()
        ordered.append(node)
        # Sorted for a deterministic ordering among peers.
        for dep in sorted(graph[node]):
            in_degree[dep] -= 1
            if in_degree[dep] == 0:
                queue.append(dep)

    if len(ordered) != len(graph):
        raise ValueError('graph is not acyclic')
    return ordered
Returns a topological ordering of the DAG .
25,787
def walk(self, dag, walk_func):
    """Walk the DAG with one thread per node, in dependency order.

    walk_func(node) only runs after all of the node's downstream
    dependencies have finished; concurrency is bounded by the
    semaphore.
    """
    nodes = dag.topological_sort()
    nodes.reverse()

    threads = {}

    def wait_for(names):
        # Join with a timeout so signals (e.g. Ctrl-C) stay deliverable.
        for name in names:
            thread = threads[name]
            while thread.is_alive():
                threads[name].join(0.5)

    for node in nodes:
        def fn(n, deps):
            if deps:
                logger.debug("%s waiting for %s to complete",
                             n, ", ".join(deps))
                wait_for(deps)
            logger.debug("%s starting", n)
            self.semaphore.acquire()
            try:
                return walk_func(n)
            finally:
                self.semaphore.release()

        deps = dag.all_downstreams(node)
        threads[node] = Thread(target=fn, args=(node, deps), name=node)

    for node in nodes:
        threads[node].start()
    wait_for(nodes)
Walks each node of the graph in parallel if it can . The walk_func is only called when the nodes dependencies have been satisfied
25,788
def cf_tokenize(s):
    """Split UserData into parts, expanding CloudFormation helper calls.

    Plain text parts pass through unchanged; parts matching a helper
    invocation are replaced with the helper's rendered data.
    """
    tokens = []
    for part in split_re.split(s):
        cf_func = replace_re.search(part)
        if cf_func:
            args = [a.strip("'\" ")
                    for a in cf_func.group("args").split(",")]
            tokens.append(HELPERS[cf_func.group("helper")](*args).data)
        else:
            tokens.append(part)
    return tokens
Parses UserData for Cloudformation helper functions .
25,789
def handle(cls, value, **kwargs):
    """Split text on a delimiter: "<delimiter>::<text>" -> list."""
    if "::" not in value:
        raise ValueError("Invalid value for split: %s. Must be in "
                         "<delimiter>::<text> format." % value)
    delimiter, text = value.split("::", 1)
    return text.split(delimiter)
Split the supplied string on the given delimiter providing a list .
25,790
def should_update(stack):
    """Decide whether a stack should be submitted for updates.

    Locked stacks are skipped unless they appear in the --force list.
    """
    if not stack.locked:
        return True
    if stack.force:
        logger.debug("Stack %s locked, but is in --force list.",
                     stack.name)
        return True
    logger.debug("Stack %s locked and not in --force list. "
                 "Refusing to update.", stack.name)
    return False
Tests whether a stack should be submitted for updates to CF .
25,791
def _resolve_parameters ( parameters , blueprint ) : params = { } param_defs = blueprint . get_parameter_definitions ( ) for key , value in parameters . items ( ) : if key not in param_defs : logger . debug ( "Blueprint %s does not use parameter %s." , blueprint . name , key ) continue if value is None : logger . debug ( "Got None value for parameter %s, not submitting it " "to cloudformation, default value should be used." , key ) continue if isinstance ( value , bool ) : logger . debug ( "Converting parameter %s boolean \"%s\" to string." , key , value ) value = str ( value ) . lower ( ) params [ key ] = value return params
Resolves CloudFormation Parameters for a given blueprint .
25,792
def _handle_missing_parameters ( parameter_values , all_params , required_params , existing_stack = None ) : missing_params = list ( set ( all_params ) - set ( parameter_values . keys ( ) ) ) if existing_stack and 'Parameters' in existing_stack : stack_parameters = [ p [ "ParameterKey" ] for p in existing_stack [ "Parameters" ] ] for p in missing_params : if p in stack_parameters : logger . debug ( "Using previous value for parameter %s from existing " "stack" , p ) parameter_values [ p ] = UsePreviousParameterValue final_missing = list ( set ( required_params ) - set ( parameter_values . keys ( ) ) ) if final_missing : raise MissingParameterException ( final_missing ) return list ( parameter_values . items ( ) )
Handles any missing parameters .
25,793
def build_parameters(self, stack, provider_stack=None):
    """Build the CloudFormation Parameters list for a stack."""
    resolved = _resolve_parameters(stack.parameter_values, stack.blueprint)
    required_parameters = list(stack.required_parameter_definitions)
    all_parameters = list(stack.all_parameter_definitions)
    parameters = _handle_missing_parameters(resolved, all_parameters,
                                            required_parameters,
                                            provider_stack)

    def as_param(key, value):
        # UsePreviousParameterValue is a sentinel meaning "keep what the
        # stack already has".
        if value is UsePreviousParameterValue:
            return {"ParameterKey": key, "UsePreviousValue": True}
        return {"ParameterKey": key, "ParameterValue": str(value)}

    return [as_param(key, value) for key, value in parameters]
Builds the CloudFormation Parameters for our stack .
25,794
def _template(self, blueprint):
    """Wrap the blueprint as a Template: S3 URL if a bucket is set,
    otherwise the rendered body inline."""
    if self.bucket_name:
        return Template(url=self.s3_stack_push(blueprint))
    return Template(body=blueprint.rendered)
Generates a suitable template based on whether or not an S3 bucket is set .
25,795
def create_domain(provider, context, **kwargs):
    """Create (or look up) a route53 hosted zone for the given domain.

    Returns {"domain": ..., "zone_id": ...} on success, False when no
    domain was supplied.
    """
    session = get_session(provider.region)
    client = session.client("route53")
    domain = kwargs.get("domain")
    if not domain:
        logger.error("domain argument or BaseDomain variable not provided.")
        return False
    return {"domain": domain,
            "zone_id": create_route53_zone(client, domain)}
Create a domain within route53 .
25,796
def info(self, *args, **kwargs):
    """Emit logger.info while holding the UI lock (blocks until owned)."""
    self.lock()
    try:
        # Forward everything to the module logger under the lock so
        # concurrent threads don't interleave output.
        return logger.info(*args, **kwargs)
    finally:
        self.unlock()
Logs the line of the current thread owns the underlying lock or blocks .
25,797
def camel_to_snake(name):
    """Convert a CamelCase identifier to snake_case."""
    # First pass: split before Capital+lowercase runs (handles acronyms
    # like HTTPServer); second pass: split lowercase/digit-to-Capital.
    partial = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", partial).lower()
Converts CamelCase to snake_case .
25,798
def get_hosted_zone_by_name(client, zone_name):
    """Return the id of an existing zone matching zone_name, else None."""
    paginator = client.get_paginator("list_hosted_zones")
    for page in paginator.paginate():
        for zone in page["HostedZones"]:
            if zone["Name"] == zone_name:
                return parse_zone_id(zone["Id"])
    return None
Get the zone id of an existing zone by name .
25,799
def get_or_create_hosted_zone(client, zone_name):
    """Return the id of an existing zone, creating it when absent."""
    zone_id = get_hosted_zone_by_name(client, zone_name)
    if zone_id:
        return zone_id
    logger.debug("Zone %s does not exist, creating.", zone_name)
    # CallerReference must be unique per create request.
    reference = uuid.uuid4().hex
    response = client.create_hosted_zone(Name=zone_name,
                                         CallerReference=reference)
    return parse_zone_id(response["HostedZone"]["Id"])
Get the Id of an existing zone or create it .