docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Populates the Table with a list of tuples of strings.
Args:
matrix (list): list of iterables of strings (lists or something else).
Items in the matrix have to correspond to a key for the children.
|
def define_grid(self, matrix):
    """Define the CSS grid-template-areas from a matrix of area names.

    Args:
        matrix (list): iterable of iterables of strings; each inner
            iterable becomes one quoted row of the grid template.
    """
    quoted_rows = ["'%s'" % ' '.join(row) for row in matrix]
    self.style['grid-template-areas'] = ''.join(quoted_rows)
| 170,649
|
Sets the size value for each column
Args:
values (iterable of int or str): values are treated as percentage.
|
def set_column_sizes(self, values):
    """Set the size of each grid column.

    Args:
        values (iterable of int or str): values are treated as
            percentages; a trailing '%' is appended when missing.
    """
    sizes = []
    for value in values:
        text = str(value)
        sizes.append(text if text.endswith('%') else text + '%')
    self.style['grid-template-columns'] = ' '.join(sizes)
| 170,651
|
Sets the gap value between columns
Args:
value (int or str): gap value (i.e. 10 or "10px")
|
def set_column_gap(self, value):
    """Set the gap between grid columns.

    Args:
        value (int or str): gap value (i.e. 10 or "10px").
    """
    # Append 'px' unconditionally, then collapse the duplicate suffix
    # when the caller already supplied one.
    normalized = (str(value) + 'px').replace('pxpx', 'px')
    self.style['grid-column-gap'] = normalized
| 170,652
|
Sets the gap value between rows
Args:
value (int or str): gap value (i.e. 10 or "10px")
|
def set_row_gap(self, value):
    """Set the gap between grid rows.

    Args:
        value (int or str): gap value (i.e. 10 or "10px").
    """
    # Append 'px' unconditionally, then collapse the duplicate suffix
    # when the caller already supplied one.
    normalized = (str(value) + 'px').replace('pxpx', 'px')
    self.style['grid-row-gap'] = normalized
| 170,653
|
It allows to add child widgets to this.
The key allows to access the specific child in this way widget.children[key].
The key have to be numeric and determines the children order in the layout.
Args:
value (Widget): Child instance to be appended.
key (str): Unique identifier for the child. If key.isdigit()==True '0' '1'.. the value determines the order
in the layout
|
def append(self, value, key=''):
    """Add one or more child widgets to this container.

    The child becomes accessible as ``widget.children[key]``. A numeric
    key (``key.isdigit()``) also determines the child's order in the
    layout via the CSS ``order`` property.

    Args:
        value (Widget, list, tuple or dict): child instance(s) to append.
            For a dict, each entry's key is used as the child key.
        key (str): unique identifier for the child. Ignored for
            iterable *value*.

    Returns:
        The key (or list/view of keys) under which the child(ren) were
        stored.

    Raises:
        ValueError: if a single *value* is not a Widget instance.
    """
    # Bulk case: recurse per element; dicts reuse their own keys.
    if type(value) in (list, tuple, dict):
        if type(value)==dict:
            for k in value.keys():
                self.append(value[k], k)
            return value.keys()
        keys = []
        for child in value:
            keys.append( self.append(child) )
        return keys
    key = str(key)
    if not isinstance(value, Widget):
        raise ValueError('value should be a Widget (otherwise use add_child(key,other)')
    # Absolute positioning hints conflict with flow layout; drop them.
    if 'left' in value.style.keys():
        del value.style['left']
    if 'right' in value.style.keys():
        del value.style['right']
    if not 'order' in value.style.keys():
        value.style.update({'position':'static', 'order':'-1'})
    # A digit key doubles as an explicit layout order.
    if key.isdigit():
        value.style['order'] = key
    # Fall back to the widget's own identifier when no key was given.
    key = value.identifier if key == '' else key
    self.add_child(key, value)
    return key
| 170,655
|
Sets the text content.
Args:
text (str): The string content that have to be appended as standard child identified by the key 'text'
|
def set_value(self, text):
    """Set the text content, stripping newlines in single-line mode.

    Args:
        text (str): content appended as standard child keyed 'text'.
    """
    cleaned = text.replace('\n', '') if self.single_line else text
    self.set_text(cleaned)
| 170,664
|
Called when the user changes the TextInput content.
With single_line=True it fires in case of focus lost and Enter key pressed.
With single_line=False it fires at each key released.
Args:
new_value (str): the new string content of the TextInput.
|
def onchange(self, new_value):
    """Fired when the user changes the TextInput content.

    With single_line=True it fires on focus loss and Enter key press;
    with single_line=False it fires at each key release.

    Args:
        new_value (str): the new string content of the TextInput.

    Returns:
        tuple: (new_value,) forwarded to event listeners.
    """
    # Suspend refresh so storing the value does not echo an update
    # straight back to the client.
    self.disable_refresh()
    self.set_value(new_value)
    self.enable_refresh()
    return (new_value,)
| 170,665
|
Adds a field to the dialog together with a descriptive label and a unique identifier.
Note: You can access to the fields content calling the function GenericDialog.get_field(key).
Args:
key (str): The unique identifier for the field.
label_description (str): The string content of the description label.
field (Widget): The instance of the field Widget. It can be for example a TextInput or maybe
a custom widget.
|
def add_field_with_label(self, key, label_description, field):
    """Add a field plus a descriptive label under a unique identifier.

    The field content is later reachable via
    GenericDialog.get_field(key).

    Args:
        key (str): unique identifier for the field.
        label_description (str): text for the description label.
        field (Widget): the field widget instance (e.g. a TextInput or
            a custom widget).
    """
    self.inputs[key] = field
    description = Label(label_description)
    description.style['margin'] = '0px 5px'
    description.style['min-width'] = '30%'
    row = HBox()
    row.style.update({'justify-content': 'space-between',
                      'overflow': 'auto', 'padding': '3px'})
    row.append(description, key='lbl' + key)
    row.append(self.inputs[key], key=key)
    self.container.append(row, key=key)
| 170,668
|
Adds a field to the dialog with a unique identifier.
Note: You can access to the fields content calling the function GenericDialog.get_field(key).
Args:
key (str): The unique identifier for the field.
field (Widget): The widget to be added to the dialog, TextInput or any Widget for example.
|
def add_field(self, key, field):
    """Add a field to the dialog under a unique identifier.

    The field content is later reachable via
    GenericDialog.get_field(key).

    Args:
        key (str): unique identifier for the field.
        field (Widget): the widget to add (a TextInput or any Widget).
    """
    self.inputs[key] = field
    row = HBox()
    row.style.update({'justify-content': 'space-between',
                      'overflow': 'auto', 'padding': '3px'})
    row.append(self.inputs[key], key=key)
    self.container.append(row, key=key)
| 170,669
|
Populates the ListView with a string list.
Args:
items (list): list of strings to fill the widget with.
|
def new_from_list(cls, items, **kwargs):
    """Build a ListView pre-populated from a list of strings.

    Args:
        items (list): strings to fill the widget with.

    Returns:
        ListView: the newly created, populated instance.
    """
    instance = cls(**kwargs)
    for text in items:
        instance.append(ListItem(text))
    return instance
| 170,674
|
Appends child items to the ListView. The items are accessible by list.children[key].
Args:
value (ListItem, or iterable of ListItems): The child to be appended. In case of a dictionary,
each item's key is used as 'key' param for the single append.
key (str): The unique string identifier for the child. Ignored in case of iterable 'value'
param.
|
def append(self, value, key=''):
    """Append child items to the ListView.

    Items become accessible via ``list.children[key]``. Plain strings
    are wrapped in a ListItem first.

    Args:
        value (ListItem, str, or iterable of ListItems): the child(ren)
            to append. For a dict, each item's key is used as the 'key'
            param of the single append.
        key (str): unique string identifier for the child. Ignored for
            iterable *value*.

    Returns:
        The key (or keys) assigned by the base-class append.
    """
    # Accept both str and unicode (py2/py3 compatible check).
    if isinstance(value, type('')) or isinstance(value, type(u'')):
        value = ListItem(value)
    keys = super(ListView, self).append(value, key=key)
    # Wire selection handling on every newly added item.
    if type(value) in (list, tuple, dict):
        for k in keys:
            if not self.EVENT_ONCLICK in self.children[k].attributes:
                self.children[k].onclick.connect(self.onselection)
            self.children[k].attributes['selected'] = False
    else:
        # if an event listener is already set for the added item, it will not generate a selection event
        if not self.EVENT_ONCLICK in value.attributes:
            value.onclick.connect(self.onselection)
        value.attributes['selected'] = False
    return keys
| 170,675
|
Selects an item by its key.
Args:
key (str): The unique string identifier of the item that have to be selected.
|
def select_by_key(self, key):
    """Select the child item identified by *key*.

    Any previous selection is cleared first; an unknown key leaves
    nothing selected.

    Args:
        key (str): unique string identifier of the item to select.
    """
    self._selected_key = None
    self._selected_item = None
    for child in self.children.values():
        child.attributes['selected'] = False
    try:
        chosen = self.children[key]
    except KeyError:
        return
    chosen.attributes['selected'] = True
    self._selected_key = key
    self._selected_item = chosen
| 170,678
|
Selects an item by the text content of the child.
Args:
value (str): Text content of the item that have to be selected.
|
def select_by_value(self, value):
    """Select the item whose text content equals *value*.

    Args:
        value (str): text content of the item to select.
    """
    self._selected_key = None
    self._selected_item = None
    for child_key, child in self.children.items():
        child.attributes['selected'] = False
        if child.get_value() == value:
            self._selected_key = child_key
            self._selected_item = child
            child.attributes['selected'] = True
| 170,679
|
Selects an item by its unique string identifier.
Args:
key (str): Unique string identifier of the DropDownItem that have to be selected.
|
def select_by_key(self, key):
    """Select a DropDownItem by its unique string identifier.

    Args:
        key (str): identifier of the DropDownItem to select.
    """
    # Clear the 'selected' marker everywhere before marking the
    # requested child.
    for child in self.children.values():
        child.attributes.pop('selected', None)
    chosen = self.children[key]
    chosen.attributes['selected'] = 'selected'
    self._selected_key = key
    self._selected_item = chosen
| 170,685
|
Selects a DropDownItem by means of the contained text.
Args:
value (str): Textual content of the DropDownItem that have to be selected.
|
def select_by_value(self, value):
    """Select the DropDownItem whose text content equals *value*.

    Args:
        value (str): textual content of the DropDownItem to select.
    """
    self._selected_key = None
    self._selected_item = None
    for child_key, child in self.children.items():
        if child.get_text() != value:
            # Non-matching items lose any stale selection marker.
            child.attributes.pop('selected', None)
            continue
        child.attributes['selected'] = 'selected'
        self._selected_key = child_key
        self._selected_item = child
| 170,686
|
Populates the Table with a list of tuples of strings.
Args:
content (list): list of tuples of strings. Each tuple is a row.
fill_title (bool): if true, the first tuple in the list will
be set as title
|
def new_from_list(cls, content, fill_title=True, **kwargs):
    """Create a Table populated from a list of row tuples.

    Args:
        content (list): list of tuples of strings; each tuple is a row.
        fill_title (bool): when True, the first tuple becomes the title
            row.

    Returns:
        Table: the populated instance.
    """
    table = cls(**kwargs)
    table.append_from_list(content, fill_title)
    return table
| 170,690
|
Appends rows created from the data contained in the provided
list of tuples of strings. The first tuple of the list can be
set as table title.
Args:
content (list): list of tuples of strings. Each tuple is a row.
fill_title (bool): if true, the first tuple in the list will
be set as title.
|
def append_from_list(self, content, fill_title=False):
    """Append rows built from a list of tuples of strings.

    Args:
        content (list): list of tuples of strings; each tuple is a row.
        fill_title (bool): when True, the first tuple is rendered with
            TableTitle cells instead of TableItem cells.
    """
    for row_index, row in enumerate(content):
        tr = TableRow()
        # The first row becomes the title row when requested.
        cell_class = TableTitle if (fill_title and row_index == 0) else TableItem
        for column_index, item in enumerate(row):
            tr.append(cell_class(item), str(column_index))
        self.append(tr, str(row_index))
| 170,691
|
Returns the TableItem instance at row, column coordinates
Args:
row (int): zero based index
column (int): zero based index
|
def item_at(self, row, column):
    """Return the TableItem at the given coordinates.

    Args:
        row (int): zero based row index.
        column (int): zero based column index.

    Returns:
        TableItem: the cell stored at (row, column).
    """
    row_widget = self.children[str(row)]
    return row_widget.children[str(column)]
| 170,694
|
Returns table_item's (row, column) coordinates.
Returns None in case of item not found.
Args:
table_item (TableItem): an item instance
|
def item_coords(self, table_item):
    """Return the (row, column) coordinates of *table_item*.

    Args:
        table_item (TableItem): the item instance to locate.

    Returns:
        tuple or None: (row, column) as ints, or None when not found.
    """
    for row_key, row in self.children.items():
        for item_key, item in row.children.items():
            if item == table_item:
                return (int(row_key), int(item_key))
    return None
| 170,695
|
Sets the table row count.
Args:
count (int): number of rows
|
def set_row_count(self, count):
    """Grow or shrink the table to exactly *count* rows.

    New rows are filled with one empty cell per current column; for an
    editable table, each new cell is wired to on_item_changed.

    Args:
        count (int): desired number of rows.
    """
    current_row_count = self.row_count()
    current_column_count = self.column_count()
    if count > current_row_count:
        # Choose the cell class once; editable tables need cells that
        # emit change events.
        cl = TableEditableItem if self._editable else TableItem
        for i in range(current_row_count, count):
            tr = TableRow()
            for c in range(0, current_column_count):
                tr.append(cl(), str(c))
                if self._editable:
                    # Bind the cell's coordinates into the callback.
                    tr.children[str(c)].onchange.connect(
                        self.on_item_changed, int(i), int(c))
            self.append(tr, str(i))
        self._update_first_row()
    elif count < current_row_count:
        # Drop trailing rows; keys are the stringified row indices.
        for i in range(count, current_row_count):
            self.remove_child(self.children[str(i)])
| 170,696
|
Sets the table column count.
Args:
count (int): number of columns
|
def set_column_count(self, count):
    """Grow or shrink every row of the table to exactly *count* columns.

    Args:
        count (int): desired number of columns.
    """
    current_row_count = self.row_count()
    current_column_count = self.column_count()
    if count > current_column_count:
        # Choose the cell class once; editable tables need cells that
        # emit change events.
        cl = TableEditableItem if self._editable else TableItem
        for r_key in self.children.keys():
            row = self.children[r_key]
            for i in range(current_column_count, count):
                row.append(cl(), str(i))
                if self._editable:
                    # Bind the cell's coordinates into the callback.
                    row.children[str(i)].onchange.connect(
                        self.on_item_changed, int(r_key), int(i))
        self._update_first_row()
    elif count < current_column_count:
        # Drop trailing cells from every row.
        for row in self.children.values():
            for i in range(count, current_column_count):
                row.remove_child(row.children[str(i)])
    self._column_count = count
| 170,697
|
Event for the item change.
Args:
emitter (TableWidget): The emitter of the event.
item (TableItem): The TableItem instance.
new_value (str): New text content.
row (int): row index.
column (int): column index.
|
def on_item_changed(self, item, new_value, row, column):
    """Event fired when a table cell's content changes.

    Args:
        item (TableItem): the cell instance that changed.
        new_value (str): the new text content.
        row (int): row index of the cell.
        column (int): column index of the cell.

    Returns:
        tuple: (item, new_value, row, column) forwarded to listeners.
    """
    return item, new_value, row, column
| 170,698
|
Sets the origin and size of the viewbox, describing a virtual view area.
Args:
x (int): x coordinate of the viewbox origin
y (int): y coordinate of the viewbox origin
w (int): width of the viewbox
h (int): height of the viewbox
|
def set_viewbox(self, x, y, w, h):
    """Describe the virtual view area of the SVG via its viewbox.

    Args:
        x (int): x coordinate of the viewbox origin.
        y (int): y coordinate of the viewbox origin.
        w (int): width of the viewbox.
        h (int): height of the viewbox.
    """
    self.attributes['viewBox'] = ' '.join(str(v) for v in (x, y, w, h))
    self.attributes['preserveAspectRatio'] = 'none'
| 170,737
|
Sets the shape position.
Args:
x (int): the x coordinate
y (int): the y coordinate
|
def set_position(self, x, y):
    """Move the shape to the given coordinates.

    Args:
        x (int): the x coordinate.
        y (int): the y coordinate.
    """
    for attr_name, attr_value in (('x', x), ('y', y)):
        self.attributes[attr_name] = str(attr_value)
| 170,739
|
Sets the rectangle size.
Args:
w (int): width of the rectangle
h (int): height of the rectangle
|
def set_size(self, w, h):
    """Resize the rectangle.

    Args:
        w (int): width of the rectangle.
        h (int): height of the rectangle.
    """
    for attr_name, attr_value in (('width', w), ('height', h)):
        self.attributes[attr_name] = str(attr_value)
| 170,742
|
Make the columns in df categorical
Parameters:
-----------
categories: dict
Of the form {str: list},
where the key the column name and the value is
the ordered category list
|
def _ordered_categories(df, categories):
for col, cats in categories.items():
df[col] = df[col].astype(CategoricalDtype(cats, ordered=True))
return df
| 171,324
|
Given a list of variables, resolve all of them.
Args:
variables (list of :class:`stacker.variables.Variable`): list of
variables
context (:class:`stacker.context.Context`): stacker context
provider (:class:`stacker.provider.base.BaseProvider`): subclass of the
base provider
|
def resolve_variables(variables, context, provider):
    """Resolve every variable in *variables*.

    Args:
        variables (list of :class:`stacker.variables.Variable`): the
            variables to resolve.
        context (:class:`stacker.context.Context`): stacker context.
        provider (:class:`stacker.provider.base.BaseProvider`): subclass
            of the base provider.
    """
    for var in variables:
        var.resolve(context, provider)
| 171,424
|
Recursively resolve any lookups with the Variable.
Args:
context (:class:`stacker.context.Context`): Current context for
building the stack
provider (:class:`stacker.provider.base.BaseProvider`): subclass of
the base provider
|
def resolve(self, context, provider):
    """Recursively resolve any lookups within this Variable.

    Args:
        context (:class:`stacker.context.Context`): current context for
            building the stack.
        provider (:class:`stacker.provider.base.BaseProvider`): subclass
            of the base provider.

    Raises:
        FailedVariableLookup: when an inner lookup fails, with this
            variable's name attached for better diagnostics.
    """
    try:
        self._value.resolve(context, provider)
    except FailedLookup as lookup_error:
        raise FailedVariableLookup(self.name, lookup_error.lookup,
                                   lookup_error.error)
| 171,427
|
Builds a graph of steps.
Args:
steps (list): a list of :class:`Step` objects to execute.
|
def build_graph(steps):
    """Build a dependency graph from a list of steps.

    Args:
        steps (list): :class:`Step` objects to execute.

    Returns:
        Graph: graph with one node per step and one edge per declared
        dependency.
    """
    graph = Graph()
    # Register every node first so edges can reference any step.
    for step in steps:
        graph.add_step(step)
    for step in steps:
        for dependency in step.requires:
            graph.connect(step.name, dependency)
        for dependent in step.required_by:
            graph.connect(dependent, step.name)
    return graph
| 171,445
|
Sets the current step's status.
Args:
status (:class:`Status <Status>` object): The status to set the
step to.
|
def set_status(self, status):
    """Update this step's status, timestamp the change, and log it.

    Args:
        status (:class:`Status <Status>` object): the status to set the
            step to.
    """
    # No-op when the status object is unchanged (identity check, as in
    # the original implementation).
    if status is self.status:
        return
    logger.debug("Setting %s state to %s.", self.stack.name,
                 status.name)
    self.status = status
    self.last_updated = time.time()
    if self.stack.logging:
        log_step(self)
| 171,449
|
Print an outline of the actions the plan is going to take.
The outline will represent the rough ordering of the steps that will be
taken.
Args:
level (int, optional): a valid log level that should be used to log
the outline
message (str, optional): a message that will be logged to
the user after the outline has been logged.
|
def outline(self, level=logging.INFO, message=""):
    """Log an outline of the actions the plan is going to take.

    The outline represents the rough ordering of the steps.

    Args:
        level (int, optional): a valid log level used for the outline.
        message (str, optional): a message logged after the outline.
    """
    logger.log(level, "Plan \"%s\":", self.description)
    for number, step in enumerate(self.steps, start=1):
        logger.log(
            level,
            "  - step: %s: target: \"%s\", action: \"%s\"",
            number,
            step.name,
            step.fn.__name__,
        )
    if message:
        logger.log(level, message)
| 171,459
|
Walks each step in the underlying graph, in topological order.
Args:
walker (func): a walker function to be passed to
:class:`stacker.dag.DAG` to walk the graph.
|
def walk(self, walker):
    """Walk each step in the underlying graph, in topological order.

    Args:
        walker (func): a walker function to be passed to
            :class:`stacker.dag.DAG` to walk the graph.

    Returns:
        The result of the underlying graph walk.
    """
    def walk_func(step):
        # Before we execute the step, we need to ensure that it's
        # transitive dependencies are all in an "ok" state. If not, we
        # won't execute this step.
        for dep in self.graph.downstream(step.name):
            if not dep.ok:
                step.set_status(FailedStatus("dependency has failed"))
                return step.ok
        return step.run()
    return self.graph.walk(walker, walk_func)
| 171,462
|
Create the troposphere type from the value.
Args:
value (Union[dict, list]): A dictionary or list of dictionaries
(see class documentation for details) to use as parameters to
create the Troposphere type instance.
Each dictionary will be passed to the `from_dict` method of the
type.
Returns:
Union[list, type]: Returns the value converted to the troposphere
type
|
def create(self, value):
    """Create the troposphere type(s) from the given value.

    Args:
        value (Union[dict, list]): a dictionary or list of dictionaries
            (see class documentation for details) used as parameters for
            the type's `from_dict` constructor.

    Returns:
        Union[list, type, None]: the value converted to the troposphere
        type; None when the type is optional and no value was given.

    Raises:
        ValueError: when the value's shape does not match the type.
    """
    # Explicitly check with len such that non-sequence types throw.
    if self._optional and (value is None or len(value) == 0):
        return None
    if hasattr(self._type, 'resource_type'):
        # Our type is a resource, so ensure we have a dict of title to
        # parameters
        if not isinstance(value, dict):
            raise ValueError("Resources must be specified as a dict of "
                             "title to parameters")
        if not self._many and len(value) > 1:
            raise ValueError("Only one resource can be provided for this "
                             "TroposphereType variable")
        result = [
            self._type.from_dict(title, v) for title, v in value.items()
        ]
    else:
        # Our type is for properties, not a resource, so don't use
        # titles
        if self._many:
            result = [self._type.from_dict(None, v) for v in value]
        elif not isinstance(value, dict):
            # Bug fix: a space was missing between "non-resource" and
            # "type", producing the garbled message "non-resourcetype".
            raise ValueError("TroposphereType for a single non-resource "
                             "type must be specified as a dict of "
                             "parameters")
        else:
            result = [self._type.from_dict(None, value)]
    if self._validate:
        for v in result:
            v._validate_props()
    return result[0] if not self._many else result
| 171,464
|
Given cleaned up keys, this will return a projection expression for
the dynamodb lookup.
Args:
clean_table_keys (dict): keys without the data types attached
Returns:
str: A projection expression for the dynamodb lookup.
|
def _build_projection_expression(clean_table_keys):
projection_expression = ''
for key in clean_table_keys[:-1]:
projection_expression += ('{},').format(key)
projection_expression += clean_table_keys[-1]
return projection_expression
| 171,468
|
Given a dictionary of dynamodb data (including the datatypes) and a
properly structured keylist, it will return the value of the lookup
Args:
data (dict): the raw dynamodb data
keylist(list): a list of keys to lookup. This must include the
datatype
Returns:
various: It returns the value from the dynamodb record, and casts it
to a matching python datatype
|
def _get_val_from_ddb_data(data, keylist):
    """Extract a value from raw DynamoDB data using a typed key list.

    Args:
        data (dict): the raw dynamodb data (including datatype tags).
        keylist (list): keys to look up, each a single-entry dict of
            {datatype: key-name}; the datatype must be included.

    Returns:
        The value from the dynamodb record, cast to a matching python
        datatype (list, int, or str).
    """
    # Tracks the datatype tag ('S', 'N', 'L', ...) of the value reached
    # by the previous key; None on the first hop.
    next_type = None
    # iterate through the keylist to find the matching key/datatype
    for k in keylist:
        for k1 in k:
            if next_type is None:
                data = data[k[k1]]
            else:
                # Descend through the datatype wrapper before applying
                # the next key.
                temp_dict = data[next_type]
                data = temp_dict[k[k1]]
            next_type = k1
    if next_type == 'L':
        # if type is list, convert it to a list and return
        return _convert_ddb_list_to_list(data[next_type])
    if next_type == 'N':
        # TODO: handle various types of 'number' datatypes, (e.g. int, double)
        # if a number, convert to an int and return
        return int(data[next_type])
    # else, just assume its a string and return
    return str(data[next_type])
| 171,469
|
Given a dynamodb list, it will return a python list without the dynamodb
datatypes
Args:
conversion_list (dict): a dynamodb list which includes the
datatypes
Returns:
list: Returns a sanitized list without the dynamodb datatypes
|
def _convert_ddb_list_to_list(conversion_list):
ret_list = []
for v in conversion_list:
for v1 in v:
ret_list.append(v[v1])
return ret_list
| 171,470
|
Given a blueprint, produce an appropriate key name.
Args:
blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
object to create the key from.
Returns:
string: Key name resulting from blueprint.
|
def stack_template_key_name(blueprint):
    """Produce the S3 key name for a blueprint's rendered template.

    Args:
        blueprint (:class:`stacker.blueprints.base.Blueprint`): the
            blueprint object to create the key from.

    Returns:
        str: key name resulting from the blueprint.
    """
    name = blueprint.name
    fqn = blueprint.context.get_fqn(name)
    return "stack_templates/%s/%s-%s.json" % (fqn, name, blueprint.version)
| 171,474
|
Produces an s3 url for a given blueprint.
Args:
bucket_name (string): The name of the S3 bucket where the resulting
templates are stored.
blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
object to create the URL to.
endpoint (string): The s3 endpoint used for the bucket.
Returns:
string: S3 URL.
|
def stack_template_url(bucket_name, blueprint, endpoint):
    """Produce the S3 URL for a given blueprint's template.

    Args:
        bucket_name (str): name of the S3 bucket where the resulting
            templates are stored.
        blueprint (:class:`stacker.blueprints.base.Blueprint`): the
            blueprint object to create the URL to.
        endpoint (str): the S3 endpoint used for the bucket.

    Returns:
        str: the full S3 URL.
    """
    return "/".join((endpoint, bucket_name,
                     stack_template_key_name(blueprint)))
| 171,475
|
Returns a hash of all of the given files at the given root.
Args:
files (list[str]): file names to include in the hash calculation,
relative to ``root``.
root (str): base directory to analyze files in.
Returns:
str: A hash of the hashes of the given files.
|
def _calculate_hash(files, root):
file_hash = hashlib.md5()
for fname in sorted(files):
f = os.path.join(root, fname)
file_hash.update((fname + "\0").encode())
with open(f, "rb") as fd:
for chunk in iter(lambda: fd.read(4096), ""):
if not chunk:
break
file_hash.update(chunk)
file_hash.update("\0".encode())
return file_hash.hexdigest()
| 171,484
|
Diffs two single dimension dictionaries
Returns the number of changes and an unordered list
expressing the common entries and changes.
Args:
old_dict(dict): old dictionary
new_dict(dict): new dictionary
Returns: list()
int: number of changed records
list: [DictValue]
|
def diff_dictionaries(old_dict, new_dict):
    """Diff two single-dimension dictionaries.

    Args:
        old_dict (dict): old dictionary.
        new_dict (dict): new dictionary.

    Returns:
        list: [changes, output] where *changes* is the number of
        added/removed/modified keys and *output* is a key-sorted list
        of DictValue entries covering every key from either dict.
    """
    old_keys = set(old_dict)
    new_keys = set(new_dict)
    changes = 0
    output = []
    for key in new_keys - old_keys:
        changes += 1
        output.append(DictValue(key, None, new_dict[key]))
    for key in old_keys - new_keys:
        changes += 1
        output.append(DictValue(key, old_dict[key], None))
    for key in old_keys & new_keys:
        output.append(DictValue(key, old_dict[key], new_dict[key]))
        # Compare the string forms, mirroring how values are rendered.
        if str(old_dict[key]) != str(new_dict[key]):
            changes += 1
    output.sort(key=attrgetter("key"))
    return [changes, output]
| 171,500
|
Handles the formatting of differences in parameters.
Args:
parameter_diff (list): A list of DictValues detailing the
differences between two dicts returned by
:func:`stacker.actions.diff.diff_dictionaries`
Returns:
string: A formatted string that represents a parameter diff
|
def format_params_diff(parameter_diff):
    """Handle the formatting of differences in parameters.

    Args:
        parameter_diff (list): a list of DictValues detailing the
            differences between two dicts returned by
            :func:`stacker.actions.diff.diff_dictionaries`.

    Returns:
        str: a formatted string that represents a parameter diff.
    """
    params_output = '\n'.join([line for v in parameter_diff
                               for line in v.changes()])
    # NOTE(review): the format-template string before the '%' operator
    # is missing here (likely lost in extraction) -- this line is not
    # valid Python as written; restore the original template literal.
    return % params_output
| 171,501
|
Compares the old vs. new parameters and returns a "diff"
If there are no changes, we return an empty list.
Args:
old_params(dict): old parameters
new_params(dict): new parameters
Returns:
list: A list of differences
|
def diff_parameters(old_params, new_params):
    """Compare the old vs. new parameters and return a "diff".

    Args:
        old_params (dict): old parameters.
        new_params (dict): new parameters.

    Returns:
        list: a list of differences; empty when nothing changed.
    """
    changes, diff = diff_dictionaries(old_params, new_params)
    return diff if changes else []
| 171,502
|
Normalize our template for diffing.
Args:
template(str): string representing the template
Returns:
list: json representation of the parameters
|
def normalize_json(template):
    """Normalize a template into a list of lines for diffing.

    Args:
        template (str): string representing the template.

    Returns:
        list: the template re-serialized as sorted, indented JSON, one
        newline-terminated line per list entry.
    """
    obj = parse_cloudformation_template(template)
    json_str = json.dumps(
        obj, sort_keys=True, indent=4, default=str, separators=(',', ': '),
    )
    return [line + "\n" for line in json_str.split("\n")]
| 171,503
|
Used to create the ecsServiceRole, which has to be named exactly that
currently, so cannot be created via CloudFormation. See:
http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded.
|
def create_ecs_service_role(provider, context, **kwargs):
    """Create the ecsServiceRole IAM role and attach its policy.

    The role has to be named exactly "ecsServiceRole" currently, so it
    cannot be created via CloudFormation. See:
    http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance.
        context (:class:`stacker.context.Context`): context instance.

    Returns:
        bool: whether or not the hook succeeded.
    """
    role_name = kwargs.get("role_name", "ecsServiceRole")
    client = get_session(provider.region).client('iam')
    try:
        client.create_role(
            RoleName=role_name,
            AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json()
        )
    except ClientError as e:
        # The role already existing is fine; anything else is fatal.
        if "already exists" in str(e):
            pass
        else:
            raise
    policy = Policy(
        Statement=[
            Statement(
                Effect=Allow,
                Resource=["*"],
                Action=[ecs.CreateCluster, ecs.DeregisterContainerInstance,
                        ecs.DiscoverPollEndpoint, ecs.Poll,
                        ecs.Action("Submit*")]
            )
        ])
    # put_role_policy is idempotent, so no existence check is needed.
    client.put_role_policy(
        RoleName=role_name,
        PolicyName="AmazonEC2ContainerServiceRolePolicy",
        PolicyDocument=policy.to_json()
    )
    return True
| 171,516
|
Builds parameters with server cert file contents.
Args:
kwargs(dict): The keyword args passed to ensure_server_cert_exists,
optionally containing the paths to the cert, key and chain files.
Returns:
dict: A dictionary containing the appropriate parameters to supply to
upload_server_certificate. An empty dictionary if there is a
problem.
|
def get_cert_contents(kwargs):
    """Build upload_server_certificate parameters from cert files.

    Args:
        kwargs (dict): keyword args passed to ensure_server_cert_exists,
            optionally containing the paths to the cert, key and chain
            files.

    Returns:
        dict: parameters suitable for upload_server_certificate; paths
        the user skips are simply omitted.
    """
    paths = {
        "certificate": kwargs.get("path_to_certificate"),
        "private_key": kwargs.get("path_to_private_key"),
        "chain": kwargs.get("path_to_chain"),
    }
    # Interactively prompt for any path not supplied in kwargs.
    for key, value in paths.items():
        if value is not None:
            continue
        path = input("Path to %s (skip): " % (key,))
        if path == "skip" or not path.strip():
            continue
        paths[key] = path
    parameters = {
        "ServerCertificateName": kwargs.get("cert_name"),
    }
    for key, path in paths.items():
        if not path:
            continue
        # Allow passing of file like object for tests
        try:
            contents = path.read()
        except AttributeError:
            with open(utils.full_path(path)) as read_file:
                contents = read_file.read()
        if key == "certificate":
            parameters["CertificateBody"] = contents
        elif key == "private_key":
            parameters["PrivateKey"] = contents
        elif key == "chain":
            parameters["CertificateChain"] = contents
    return parameters
| 171,518
|
Find raw template in working directory or in sys.path.
template_path from config may refer to templates colocated with the Stacker
config, or files in remote package_sources. Here, we emulate python module
loading to find the path to the template.
Args:
filename (str): Template filename.
Returns:
Optional[str]: Path to file, or None if no file found
|
def get_template_path(filename):
    """Find a raw template in the working directory or on sys.path.

    template_path from config may refer to templates colocated with the
    Stacker config, or files in remote package_sources. Here, we emulate
    python module loading to find the path to the template.

    Args:
        filename (str): template filename.

    Returns:
        Optional[str]: absolute path to the file, or None if not found.
    """
    if os.path.isfile(filename):
        return os.path.abspath(filename)
    for search_dir in sys.path:
        candidate = os.path.join(search_dir, filename)
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    return None
| 171,520
|
Resolve the values of the blueprint variables.
This will resolve the values of the template parameters with values
from the env file, the config, and any lookups resolved. The
resolution is run twice, in case the blueprint is jinja2 templated
and requires provided variables to render.
Args:
provided_variables (list of :class:`stacker.variables.Variable`):
list of provided variables
|
def resolve_variables(self, provided_variables):
    """Resolve the values of the blueprint variables.

    This resolves the template parameters with values from the env
    file, the config, and any lookups resolved. The resolution is run
    twice, in case the blueprint is jinja2 templated and requires
    provided variables to render.

    Args:
        provided_variables (list of :class:`stacker.variables.Variable`):
            list of provided variables.
    """
    variable_dict = dict((var.name, var) for var in provided_variables)

    def resolve_pass(names):
        # One resolution pass: map each requested name to its resolved
        # value, skipping names that resolve to None.
        resolved = {}
        for var_name in names:
            value = resolve_variable(
                variable_dict.get(var_name),
                self.name
            )
            if value is not None:
                resolved[var_name] = value
        return resolved

    # Pass 1: resolve everything the caller provided so the (possibly
    # jinja2-templated) blueprint can render.
    self.resolved_variables = resolve_pass(variable_dict)
    # Pass 2: re-resolve according to the blueprint's defined variables.
    # get_parameter_definitions() must run AFTER pass 1, since rendering
    # may depend on the pass-1 resolution.
    defined_variables = self.get_parameter_definitions()
    self.resolved_variables = resolve_pass(defined_variables)
| 171,523
|
Creates ECS clusters.
Expects a "clusters" argument, which should contain a list of cluster
names to create.
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded.
|
def create_clusters(provider, context, **kwargs):
    """Create the ECS clusters named in the "clusters" keyword argument.

    Expects a "clusters" argument, which should contain a list of
    cluster names to create (a single name is also accepted).

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance.
        context (:class:`stacker.context.Context`): context instance.

    Returns:
        dict or bool: {"clusters": {name: response}} on success, False
        when the required "clusters" argument is missing.
    """
    conn = get_session(provider.region).client('ecs')
    if "clusters" not in kwargs:
        logger.error("setup_clusters hook missing \"clusters\" argument")
        return False
    clusters = kwargs["clusters"]
    # Accept a bare cluster name as a convenience.
    if isinstance(clusters, basestring):
        clusters = [clusters]
    cluster_info = {}
    for cluster in clusters:
        logger.debug("Creating ECS cluster: %s", cluster)
        response = conn.create_cluster(clusterName=cluster)
        cluster_info[response["cluster"]["clusterName"]] = response
    return {"clusters": cluster_info}
| 171,526
|
Fetch an output from the designated stack.
Args:
value (str): string with the following format:
<stack_name>::<output_name>, ie. some-stack::SomeOutput
context (:class:`stacker.context.Context`): stacker context
Returns:
str: output from the specified stack
|
def handle(cls, value, context=None, **kwargs):
    """Fetch an output from the designated stack.

    Args:
        value (str): string with the format
            "<stack_name>::<output_name>", ie. some-stack::SomeOutput.
        context (:class:`stacker.context.Context`): stacker context.

    Returns:
        str: output from the specified stack.

    Raises:
        ValueError: if no context was supplied.
    """
    if context is None:
        raise ValueError('Context is required')
    parsed = deconstruct(value)
    stack = context.get_stack(parsed.stack_name)
    return stack.outputs[parsed.output_name]
| 171,531
|
Builds a troposphere Parameter with the given properties.
Args:
name (string): The name of the parameter.
properties (dict): Contains the properties that will be applied to the
parameter. See:
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html
Returns:
:class:`troposphere.Parameter`: The created parameter object.
|
def build_parameter(name, properties):
    """Build a troposphere Parameter with the given properties.

    Args:
        name (str): the name of the parameter.
        properties (dict): the properties that will be applied to the
            parameter. See:
            http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html

    Returns:
        :class:`troposphere.Parameter`: the created parameter object.
    """
    param = Parameter(name, Type=properties.get("type"))
    # Use a distinct loop variable -- the original shadowed the `name`
    # argument, which was confusing (though harmless after the
    # Parameter was constructed).
    for prop_name, attr in PARAMETER_PROPERTIES.items():
        if prop_name in properties:
            setattr(param, attr, properties[prop_name])
    return param
| 171,533
|
Support a variable defining which values it allows.
Args:
allowed_values (Optional[list]): A list of allowed values from the
variable definition
value (obj): The object representing the value provided for the
variable
Returns:
bool: Boolean for whether or not the value is valid.
|
def validate_allowed_values(allowed_values, value):
    """Check a value against a variable's allowed_values list.

    Args:
        allowed_values (Optional[list]): allowed values from the
            variable definition; a falsy value means "anything goes".
        value (obj): the object representing the value provided for the
            variable.

    Returns:
        bool: whether or not the value is valid.
    """
    if not allowed_values:
        return True
    # ignore CFNParameter, troposphere handles these for us
    if isinstance(value, CFNParameter):
        return True
    return value in allowed_values
| 171,535
|
Wrapper around a value to indicate a CloudFormation Parameter.
Args:
name (str): the name of the CloudFormation Parameter
value (str, list, int or bool): the value we're going to submit as
a CloudFormation Parameter.
|
def __init__(self, name, value):
    """Wrap a value to indicate a CloudFormation Parameter.

    Args:
        name (str): the name of the CloudFormation Parameter.
        value (str, list, int or bool): the value we're going to submit
            as a CloudFormation Parameter; bools and ints are converted
            to their string forms.

    Raises:
        ValueError: when the value is not one of the accepted types.
    """
    acceptable = False
    for candidate_type in (basestring, bool, list, int):
        if not isinstance(value, candidate_type):
            continue
        acceptable = True
        # bool is checked before int, so True/False hit this branch
        # rather than the int one.
        if candidate_type == bool:
            logger.debug("Converting parameter %s boolean '%s' "
                         "to string.", name, value)
            value = str(value).lower()
            break
        if candidate_type == int:
            logger.debug("Converting parameter %s integer '%s' "
                         "to string.", name, value)
            value = str(value)
            break
    if not acceptable:
        raise ValueError(
            "CFNParameter (%s) value must be one of %s got: %s" % (
                name, "str, int, bool, or list", value))
    self.name = name
    self.value = value
| 171,538
|
Resolve the values of the blueprint variables.
This will resolve the values of the `VARIABLES` with values from the
env file, the config, and any lookups resolved.
Args:
provided_variables (list of :class:`stacker.variables.Variable`):
list of provided variables
|
def resolve_variables(self, provided_variables):
    """Resolve the values of the blueprint's `VARIABLES`.

    This resolves the values with values from the env file, the config,
    and any lookups resolved.

    Args:
        provided_variables (list of :class:`stacker.variables.Variable`):
            list of provided variables.
    """
    provided = dict((var.name, var) for var in provided_variables)
    self.resolved_variables = {}
    for var_name, var_def in self.defined_variables().items():
        self.resolved_variables[var_name] = resolve_variable(
            var_name,
            var_def,
            provided.get(var_name),
            self.name
        )
| 171,545
|
Render the blueprint and return the template in json form.
Args:
variables (dict):
Optional dictionary providing/overriding variable values.
Returns:
str: the rendered CFN JSON template
|
def to_json(self, variables=None):
    """Render the blueprint and return the template in json form.

    Args:
        variables (dict): optional dictionary providing/overriding
            variable values.

    Returns:
        str: the rendered CFN JSON template.
    """
    variables = variables or {}
    variables_to_resolve = [Variable(key, value)
                            for key, value in variables.items()]
    for param_name in self.get_parameter_definitions():
        if param_name not in variables:
            # The provided value for a CFN parameter has no effect in this
            # context (generating the CFN template), so any string can be
            # provided for its value - just needs to be something
            variables_to_resolve.append(Variable(param_name, 'unused_value'))
    self.resolve_variables(variables_to_resolve)
    return self.render_template()[1]
| 171,548
|
Reads and parses a user_data file.
Args:
user_data_path (str):
path to the userdata file
Returns:
str: the parsed user data file
|
def read_user_data(self, user_data_path):
    """Read and parse a user_data file.

    Args:
        user_data_path (str): path to the userdata file.

    Returns:
        str: the parsed user data file.
    """
    raw = read_value_from_path(user_data_path)
    return parse_user_data(self.get_variables(), raw, self.name)
| 171,549
|
Simple helper for adding outputs.
Args:
name (str): The name of the output to create.
value (str): The value to put in the output.
|
def add_output(self, name, value):
    """Add an output to the template.

    Args:
        name (str): the name of the output to create.
        value (str): the value to put in the output.
    """
    output = Output(name, Value=value)
    self.template.add_output(output)
| 171,550
|
Configure a proper logger based on verbosity and optional log formats.
Args:
verbosity (int): 0, 1, 2
formats (dict): Optional, looks for `info`, `color`, and `debug` keys
which may override the associated default log formats.
|
def setup_logging(verbosity, formats=None):
    """Configure the root logger based on verbosity and log formats.

    Args:
        verbosity (int): 0, 1 or 2; higher is noisier.
        formats (dict): optional; `info`, `color`, and `debug` keys may
            override the associated default log formats.
    """
    formats = {} if formats is None else formats
    log_level = logging.INFO
    log_format = formats.get("info", INFO_FORMAT)
    # Use the colorized format only on an interactive terminal.
    if sys.stdout.isatty():
        log_format = formats.get("color", COLOR_FORMAT)
    if verbosity > 0:
        log_level = logging.DEBUG
        log_format = formats.get("debug", DEBUG_FORMAT)
    # Silence botocore chatter unless maximum verbosity was requested.
    if verbosity < 2:
        logging.getLogger("botocore").setLevel(logging.CRITICAL)
    hdlr = logging.StreamHandler()
    hdlr.setFormatter(ColorFormatter(log_format, ISO_8601))
    logging.root.addHandler(hdlr)
    logging.root.setLevel(log_level)
| 171,553
|
Resolve the Stack variables.
This resolves the Stack variables and then prepares the Blueprint for
rendering by passing the resolved variables to the Blueprint.
Args:
context (:class:`stacker.context.Context`): stacker context
provider (:class:`stacker.provider.base.BaseProvider`): subclass of
the base provider
|
def resolve(self, context, provider):
    """Resolve the Stack variables and prime the Blueprint.

    This resolves the Stack variables and then prepares the Blueprint
    for rendering by passing the resolved variables to it.

    Args:
        context (:class:`stacker.context.Context`): stacker context.
        provider (:class:`stacker.provider.base.BaseProvider`): subclass
            of the base provider.
    """
    resolve_variables(self.variables, context, provider)
    self.blueprint.resolve_variables(self.variables)
| 171,588
|
Parameterize a string, possibly encoding it as Base64 afterwards
Args:
raw (`str` | `bytes`): String to be processed. Byte strings will be
interpreted as UTF-8.
b64 (`bool`): Whether to wrap the output in a Base64 CloudFormation
call
Returns:
:class:`troposphere.AWSHelperFn`: output to be included in a
CloudFormation template.
|
def parameterized_codec(raw, b64):
    """Parameterize a string, possibly encoding it as Base64 afterwards.

    Args:
        raw (`str` | `bytes`): String to be processed. Byte strings will be
            interpreted as UTF-8.
        b64 (`bool`): Whether to wrap the output in a Base64 CloudFormation
            call.

    Returns:
        :class:`troposphere.AWSHelperFn`: output to be included in a
            CloudFormation template.
    """
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8')
    result = _parameterize_string(raw)
    # We want a raw JSON object (not a string) in the template, so the
    # GenericHelperFn wrapper from _parameterize_string is kept unless the
    # Base64 call takes its place.
    if b64:
        return Base64(result.data)
    return result
| 171,590
|
Returns a dict of key/values for the outputs for a given CF stack.
Args:
stack (dict): The stack object to get
outputs from.
Returns:
dict: A dictionary with key/values for each output on the stack.
|
def get_output_dict(stack):
    """Return a key/value mapping of the outputs of a given CF stack.

    Args:
        stack (dict): The stack object to get outputs from.

    Returns:
        dict: A dictionary with key/values for each output on the stack.
    """
    if 'Outputs' not in stack:
        return {}
    outputs = {}
    for entry in stack['Outputs']:
        key, val = entry['OutputKey'], entry['OutputValue']
        logger.debug("    %s %s: %s", stack['StackName'], key, val)
        outputs[key] = val
    return outputs
| 171,597
|
Prompt the user for approval to execute a change set.
Args:
full_changeset (list, optional): A list of the full changeset that will
be output if the user specifies verbose.
params_diff (list, optional): A list of DictValue detailing the
differences between two parameters returned by
:func:`stacker.actions.diff.diff_dictionaries`
include_verbose (bool, optional): Boolean for whether or not to include
the verbose option
|
def ask_for_approval(full_changeset=None, params_diff=None,
                     include_verbose=False):
    """Prompt the user for approval to execute a change set.

    Args:
        full_changeset (list, optional): A list of the full changeset that will
            be output if the user specifies verbose.
        params_diff (list, optional): A list of DictValue detailing the
            differences between two parameters returned by
            :func:`stacker.actions.diff.diff_dictionaries`
        include_verbose (bool, optional): Boolean for whether or not to include
            the verbose option.

    Raises:
        exceptions.CancelExecution: If the user does not approve.
    """
    approval_options = ['y', 'n']
    if include_verbose:
        approval_options.append('v')
    approve = ui.ask("Execute the above changes? [{}] ".format(
        '/'.join(approval_options))).lower()
    if include_verbose and approve == "v":
        if params_diff:
            logger.info(
                "Full changeset:\n\n%s\n%s",
                format_params_diff(params_diff),
                yaml.safe_dump(full_changeset),
            )
        else:
            logger.info(
                "Full changeset:\n%s",
                yaml.safe_dump(full_changeset),
            )
        # After printing the verbose changeset, re-prompt for the final
        # answer (this time without the verbose option).
        return ask_for_approval()
    elif approve != "y":
        raise exceptions.CancelExecution
| 171,599
|
Converts a stack policy object into keyword args.
Args:
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
Returns:
dict: A dictionary of keyword arguments to be used elsewhere.
|
def generate_stack_policy_args(stack_policy=None):
    """Convert a stack policy object into CloudFormation keyword args.

    Args:
        stack_policy (:class:`stacker.providers.base.Template`): A template
            object representing a stack policy.

    Returns:
        dict: A dictionary of keyword arguments to be used elsewhere.

    Raises:
        NotImplementedError: If the policy is URL-backed (S3 uploads of
            stack policies are not supported).
    """
    if not stack_policy:
        return {}
    logger.debug("Stack has a stack policy")
    if stack_policy.url:
        # stacker currently does not support uploading stack policies to
        # S3, so this will never get hit (unless your implementing S3
        # uploads, and then you're probably reading this comment about why
        # the exception below was raised :))
        #
        # args["StackPolicyURL"] = stack_policy.url
        raise NotImplementedError
    return {"StackPolicyBody": stack_policy.body}
| 171,606
|
Select the correct update method when updating a stack.
Args:
force_interactive (str): Whether or not to force interactive mode
no matter what mode the provider is in.
force_change_set (bool): Whether or not to force change set use.
Returns:
function: The correct object method to use when updating.
|
def select_update_method(self, force_interactive, force_change_set):
    """Select the correct update method when updating a stack.

    Args:
        force_interactive (str): Whether or not to force interactive mode
            no matter what mode the provider is in.
        force_change_set (bool): Whether or not to force change set use.

    Returns:
        function: The correct bound method to use when updating.
    """
    # Interactive mode wins over everything else.
    if self.interactive or force_interactive:
        return self.interactive_update_stack
    if force_change_set:
        return self.noninteractive_changeset_update
    return self.default_update_stack
| 171,617
|
Set a stack policy when using changesets.
ChangeSets don't allow you to set stack policies in the same call to
update them. This sets it before executing the changeset if the
stack policy is passed in.
Args:
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
|
def deal_with_changeset_stack_policy(self, fqn, stack_policy):
    """Set a stack policy before executing a changeset.

    ChangeSets don't allow you to set stack policies in the same call
    that updates them, so the policy (if any) is applied up front.

    Args:
        fqn (str): Fully qualified name of the stack.
        stack_policy (:class:`stacker.providers.base.Template`): A template
            object representing a stack policy.
    """
    if not stack_policy:
        return
    kwargs = generate_stack_policy_args(stack_policy)
    kwargs["StackName"] = fqn
    logger.debug("Setting stack policy on %s.", fqn)
    self.cloudformation.set_stack_policy(**kwargs)
| 171,620
|
Creates a boto3 session with a cache
Args:
region (str): The region for the session
profile (str): The profile for the session
Returns:
:class:`boto3.session.Session`: A boto3 session with
credential caching
|
def get_session(region, profile=None):
    """Create a boto3 session with credential caching.

    Args:
        region (str): The region for the session.
        profile (str): The profile for the session; falls back to the
            module-level default profile when not given.

    Returns:
        :class:`boto3.session.Session`: A boto3 session with
            credential caching.
    """
    if profile is None:
        logger.debug("No AWS profile explicitly provided. "
                     "Falling back to default.")
        profile = default_profile
    logger.debug("Building session using profile \"%s\" in region \"%s\""
                 % (profile, region))
    session = boto3.Session(region_name=region, profile_name=profile)
    # Install a shared credential cache and an interactive MFA prompter on
    # the assume-role provider so repeated calls don't re-prompt.
    # NOTE(review): session._session and provider._prompter are private
    # botocore/boto3 APIs -- may break across library versions.
    c = session._session.get_component('credential_provider')
    provider = c.get_provider('assume-role')
    provider.cache = credential_cache
    provider._prompter = ui.getpass
    return session
| 171,627
|
Register a lookup handler.
Args:
lookup_type (str): Name to register the handler under
handler_or_path (OneOf[func, str]): a function or a path to a handler
|
def register_lookup_handler(lookup_type, handler_or_path):
    """Register a lookup handler.

    Args:
        lookup_type (str): Name to register the handler under.
        handler_or_path (OneOf[func, str]): a function or a path to a handler.
    """
    handler = handler_or_path
    if isinstance(handler_or_path, basestring):
        handler = load_object_from_string(handler_or_path)
    LOOKUP_HANDLERS[lookup_type] = handler
    # New-style handlers are classes; plain functions are the deprecated
    # old-style interface. isinstance() also recognizes classes built with
    # a custom metaclass, which the previous `type(handler) != type`
    # comparison misclassified as old-style.
    if not isinstance(handler, type):
        # Handler is not a new-style handler
        logger = logging.getLogger(__name__)
        logger.warning("Registering lookup `%s`: Please upgrade to use the "
                       "new style of Lookups." % lookup_type)
        warnings.warn(
            # For some reason, this does not show up...
            # Leaving it in anyway
            "Lookup `%s`: Please upgrade to use the new style of Lookups"
            "." % lookup_type,
            DeprecationWarning,
            stacklevel=2,
        )
| 171,637
|
Resolve a set of lookups.
Args:
variable (:class:`stacker.variables.Variable`): The variable resolving
its lookups.
context (:class:`stacker.context.Context`): stacker context
provider (:class:`stacker.provider.base.BaseProvider`): subclass of the
base provider
Returns:
dict: dict of Lookup -> resolved value
|
def resolve_lookups(variable, context, provider):
    """Resolve a set of lookups.

    Args:
        variable (:class:`stacker.variables.Variable`): The variable resolving
            its lookups.
        context (:class:`stacker.context.Context`): stacker context
        provider (:class:`stacker.provider.base.BaseProvider`): subclass of the
            base provider

    Returns:
        dict: dict of Lookup -> resolved value

    Raises:
        UnknownLookupType: If no handler is registered for a lookup's type.
        FailedVariableLookup: If a handler raises while resolving.
    """
    resolved = {}
    for lookup in variable.lookups:
        if lookup.type not in LOOKUP_HANDLERS:
            raise UnknownLookupType(lookup)
        handler = LOOKUP_HANDLERS[lookup.type]
        try:
            resolved[lookup] = handler(
                value=lookup.input,
                context=context,
                provider=provider,
            )
        except Exception as e:
            # Wrap any handler failure so callers know which variable and
            # lookup were involved.
            raise FailedVariableLookup(variable.name, lookup, e)
    return resolved
| 171,638
|
Extract any lookups within a string.
Args:
value (str): string value we're extracting lookups from
Returns:
set: set of :class:`stacker.lookups.Lookup` found, if any
|
def extract_lookups_from_string(value):
    """Extract any lookups within a string.

    Args:
        value (str): string value we're extracting lookups from

    Returns:
        set: set of :class:`stacker.lookups.Lookup` found, if any
    """
    found = set()
    for match in LOOKUP_REGEX.finditer(value):
        info = match.groupdict()
        raw = match.groups()[0]
        found.add(Lookup(info["type"], info["input"], raw))
    return found
| 171,644
|
Recursively extracts any stack lookups within the data structure.
Args:
value (one of str, list, dict): a structure that contains lookups to
output values
Returns:
set: set of lookups found, if any
|
def extract_lookups(value):
    """Recursively extract any lookups within the data structure.

    Args:
        value (one of str, list, dict): a structure that contains lookups to
            output values

    Returns:
        set: set of lookups, if any
    """
    found = set()
    if isinstance(value, basestring):
        found |= extract_lookups_from_string(value)
    elif isinstance(value, list):
        for item in value:
            found |= extract_lookups(item)
    elif isinstance(value, dict):
        for item in value.values():
            found |= extract_lookups(item)
    return found
| 171,645
|
Add a node if it does not exist yet, or error out.
Args:
node_name (str): The unique name of the node to add.
Raises:
KeyError: Raised if a node with the same name already exist in the
graph
|
def add_node(self, node_name):
    """Add a node if it does not exist yet, or error out.

    Args:
        node_name (str): The unique name of the node to add.

    Raises:
        KeyError: Raised if a node with the same name already exists in
            the graph.
    """
    if node_name in self.graph:
        raise KeyError('node %s already exists' % node_name)
    # A node starts with no outgoing edges.
    self.graph[node_name] = set()
| 171,646
|
Deletes this node and all edges referencing it.
Args:
node_name (str): The name of the node to delete.
Raises:
KeyError: Raised if the node does not exist in the graph.
|
def delete_node(self, node_name):
    """Delete this node and all edges referencing it.

    Args:
        node_name (str): The name of the node to delete.

    Raises:
        KeyError: Raised if the node does not exist in the graph.
    """
    graph = self.graph
    if node_name not in graph:
        raise KeyError('node %s does not exist' % node_name)
    del graph[node_name]
    # Scrub the node from every remaining edge set.
    for edges in graph.values():
        edges.discard(node_name)
| 171,647
|
Add an edge (dependency) between the specified nodes.
Args:
ind_node (str): The independent node to add an edge to.
dep_node (str): The dependent node that has a dependency on the
ind_node.
Raises:
KeyError: Either the ind_node, or dep_node do not exist.
DAGValidationError: Raised if the resulting graph is invalid.
|
def add_edge(self, ind_node, dep_node):
    """Add an edge (dependency) between the specified nodes.

    Args:
        ind_node (str): The independent node to add an edge to.
        dep_node (str): The dependent node that has a dependency on the
            ind_node.

    Raises:
        KeyError: Either the ind_node, or dep_node do not exist.
        DAGValidationError: Raised if the resulting graph is invalid.
    """
    graph = self.graph
    if ind_node not in graph:
        raise KeyError('independent node %s does not exist' % ind_node)
    if dep_node not in graph:
        raise KeyError('dependent node %s does not exist' % dep_node)
    # Trial-add the edge on a deep copy first so the real graph is only
    # mutated when the result still validates (e.g. stays acyclic).
    test_graph = deepcopy(graph)
    test_graph[ind_node].add(dep_node)
    test_dag = DAG()
    test_dag.graph = test_graph
    is_valid, message = test_dag.validate()
    if is_valid:
        graph[ind_node].add(dep_node)
    else:
        raise DAGValidationError(message)
| 171,648
|
Delete an edge from the graph.
Args:
ind_node (str): The independent node to delete an edge from.
dep_node (str): The dependent node that has a dependency on the
ind_node.
Raises:
KeyError: Raised when the edge doesn't already exist.
|
def delete_edge(self, ind_node, dep_node):
    """Delete an edge from the graph.

    Args:
        ind_node (str): The independent node to delete an edge from.
        dep_node (str): The dependent node that has a dependency on the
            ind_node.

    Raises:
        KeyError: Raised when the edge doesn't already exist.
    """
    edges = self.graph.get(ind_node, [])
    if dep_node not in edges:
        raise KeyError(
            "No edge exists between %s and %s." % (ind_node, dep_node)
        )
    edges.remove(dep_node)
| 171,649
|
Walks each node of the graph in reverse topological order.
This can be used to perform a set of operations, where the next
operation depends on the previous operation. It's important to note
that walking happens serially, and is not paralellized.
Args:
walk_func (:class:`types.FunctionType`): The function to be called
on each node of the graph.
|
def walk(self, walk_func):
    """Walk each node of the graph in reverse topological order.

    This can be used to perform a set of operations, where the next
    operation depends on the previous operation. Walking happens
    serially, and is not parallelized.

    Args:
        walk_func (:class:`types.FunctionType`): The function to be called
            on each node of the graph.
    """
    # Reverse topological order starts with nodes that have no
    # dependencies, so each node is visited after everything it needs.
    for node in reversed(self.topological_sort()):
        walk_func(node)
| 171,651
|
Change references to a node in existing edges.
Args:
old_node_name (str): The old name for the node.
new_node_name (str): The new name for the node.
|
def rename_edges(self, old_node_name, new_node_name):
    """Change references to a node in existing edges.

    Args:
        old_node_name (str): The old name for the node.
        new_node_name (str): The new name for the node.
    """
    graph = self.graph
    # Iterate over a snapshot: the loop both deletes and inserts keys,
    # which is unsafe on a live dict view in Python 3 (can raise
    # RuntimeError or skip/repeat entries after a rehash).
    for node, edges in list(graph.items()):
        if node == old_node_name:
            graph[new_node_name] = copy(edges)
            del graph[old_node_name]
        else:
            if old_node_name in edges:
                edges.remove(old_node_name)
                edges.add(new_node_name)
| 171,653
|
Returns a list of all nodes this node has edges towards.
Args:
node (str): The node whose downstream nodes you want to find.
Returns:
list: A list of nodes that are immediately downstream from the
node.
|
def downstream(self, node):
    """Return a list of all nodes this node has edges towards.

    Args:
        node (str): The node whose downstream nodes you want to find.

    Returns:
        list: A list of nodes that are immediately downstream from the
            node.

    Raises:
        KeyError: If the node is not in the graph.
    """
    try:
        return list(self.graph[node])
    except KeyError:
        raise KeyError('node %s is not in graph' % node)
| 171,654
|
Returns a list of all nodes ultimately downstream
of the given node in the dependency graph, in
topological order.
Args:
node (str): The node whose downstream nodes you want to find.
Returns:
list: A list of nodes that are downstream from the node.
|
def all_downstreams(self, node):
    """Return all nodes ultimately downstream of the given node.

    Args:
        node (str): The node whose downstream nodes you want to find.

    Returns:
        list: A list of nodes that are downstream from the node, in
            topological order.
    """
    # Breadth-style frontier scan: to_visit grows as new nodes are found.
    to_visit = [node]
    seen = set()
    idx = 0
    while idx < len(to_visit):
        for child in self.downstream(to_visit[idx]):
            if child not in seen:
                seen.add(child)
                to_visit.append(child)
        idx += 1
    # Report the discovered nodes in topological order.
    return [
        candidate for candidate in self.topological_sort()
        if candidate in seen
    ]
| 171,655
|
Returns a new DAG with only the given nodes and their
dependencies.
Args:
nodes (list): The nodes you are interested in.
Returns:
:class:`stacker.dag.DAG`: The filtered graph.
|
def filter(self, nodes):
    """Return a new DAG with only the given nodes and their dependencies.

    Args:
        nodes (list): The nodes you are interested in.

    Returns:
        :class:`stacker.dag.DAG`: The filtered graph.
    """
    filtered = DAG()
    # Register each requested node plus everything reachable from it.
    for node in nodes:
        filtered.add_node_if_not_exists(node)
        for downstream_node in self.all_downstreams(node):
            filtered.add_node_if_not_exists(downstream_node)
    # Copy the edge sets of every node that survived the filter.
    for node, edges in self.graph.items():
        if node in filtered.graph:
            filtered.graph[node] = edges
    return filtered
| 171,656
|
Reset the graph and build it from the passed dictionary.
The dictionary takes the form of {node_name: [directed edges]}
Args:
graph_dict (dict): The dictionary used to create the graph.
Raises:
TypeError: Raised if the value of items in the dict are not lists.
|
def from_dict(self, graph_dict):
    """Reset the graph and build it from the passed dictionary.

    The dictionary takes the form of {node_name: [directed edges]}

    Args:
        graph_dict (dict): The dictionary used to create the graph.

    Raises:
        TypeError: Raised if the value of items in the dict are not lists.
    """
    self.reset_graph()
    for new_node in graph_dict:
        self.add_node(new_node)
    # `collections.Iterable` was removed in Python 3.10; prefer the abc
    # module when it exists while remaining Python 2 compatible.
    iterable_type = getattr(collections, "abc", collections).Iterable
    for ind_node, dep_nodes in graph_dict.items():
        if not isinstance(dep_nodes, iterable_type):
            raise TypeError('%s: dict values must be lists' % ind_node)
        for dep_node in dep_nodes:
            self.add_edge(ind_node, dep_node)
| 171,657
|
Fetch an output from the designated stack.
Args:
value (str): string with the following format:
<stack_name>::<output_name>, ie. some-stack::SomeOutput
provider (:class:`stacker.provider.base.BaseProvider`): subclass of
the base provider
Returns:
str: output from the specified stack
|
def handle(cls, value, provider=None, **kwargs):
    """Fetch an output from the designated stack.

    Args:
        value (str): string with the following format:
            <stack_name>::<output_name>, ie. some-stack::SomeOutput
        provider (:class:`stacker.provider.base.BaseProvider`): subclass of
            the base provider

    Returns:
        str: output from the specified stack

    Raises:
        ValueError: If no provider is supplied.
    """
    if provider is None:
        raise ValueError('Provider is required')
    parsed = deconstruct(value)
    return provider.get_output(parsed.stack_name, parsed.output_name)
| 171,662
|
Tests whether a stack should be submitted for updates to CF.
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be updated, return True.
|
def should_update(stack):
    """Test whether a stack should be submitted for updates to CF.

    Args:
        stack (:class:`stacker.stack.Stack`): The stack object to check.

    Returns:
        bool: If the stack should be updated, return True.
    """
    # Unlocked stacks are always eligible for updates.
    if not stack.locked:
        return True
    if stack.force:
        logger.debug("Stack %s locked, but is in --force "
                     "list.", stack.name)
        return True
    logger.debug("Stack %s locked and not in --force list. "
                 "Refusing to update.", stack.name)
    return False
| 171,668
|
Tests whether a stack should be submitted to CF for update/create
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be submitted, return True.
|
def should_submit(stack):
    """Test whether a stack should be submitted to CF for update/create.

    Args:
        stack (:class:`stacker.stack.Stack`): The stack object to check.

    Returns:
        bool: If the stack should be submitted, return True.
    """
    if not stack.enabled:
        logger.debug("Stack %s is not enabled. Skipping.", stack.name)
        return False
    return True
| 171,669
|
Builds the CloudFormation Parameters for our stack.
Args:
stack (:class:`stacker.stack.Stack`): A stacker stack
provider_stack (dict): An optional Stacker provider object
Returns:
dict: The parameters for the given stack
|
def build_parameters(self, stack, provider_stack=None):
    """Build the CloudFormation Parameters for our stack.

    Args:
        stack (:class:`stacker.stack.Stack`): A stacker stack
        provider_stack (dict): An optional Stacker provider object

    Returns:
        list: CloudFormation parameter dicts for the given stack.
    """
    resolved = _resolve_parameters(stack.parameter_values, stack.blueprint)
    required = list(stack.required_parameter_definitions)
    all_params = list(stack.all_parameter_definitions)
    parameters = _handle_missing_parameters(resolved, all_params, required,
                                            provider_stack)
    param_list = []
    for key, value in parameters:
        entry = {"ParameterKey": key}
        # The sentinel tells CloudFormation to keep the previous value
        # instead of receiving a new one.
        if value is UsePreviousParameterValue:
            entry["UsePreviousValue"] = True
        else:
            entry["ParameterValue"] = str(value)
        param_list.append(entry)
    return param_list
| 171,673
|
Create a domain within route53.
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: a dict with the ``domain`` and its route53 ``zone_id`` on success,
or False when no domain was provided.
|
def create_domain(provider, context, **kwargs):
    """Create a domain within route53.

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance
        context (:class:`stacker.context.Context`): context instance

    Returns:
        False when no domain was provided; otherwise a dict with the
        ``domain`` and its route53 ``zone_id``.
    """
    client = get_session(provider.region).client("route53")
    domain = kwargs.get("domain")
    if not domain:
        logger.error("domain argument or BaseDomain variable not provided.")
        return False
    zone_id = create_route53_zone(client, domain)
    return {"domain": domain, "zone_id": zone_id}
| 171,680
|
Converts CamelCase to snake_case.
Args:
name (string): The name to convert from CamelCase to snake_case.
Returns:
string: Converted string.
|
def camel_to_snake(name):
    """Convert CamelCase to snake_case.

    Args:
        name (string): The name to convert from CamelCase to snake_case.

    Returns:
        string: Converted string.
    """
    # First split before an uppercase letter that starts a lowercase run,
    # then before any single uppercase following a lowercase/digit.
    partial = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", partial).lower()
| 171,682
|
Get the zone id of an existing zone by name.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
string: The Id of the Hosted Zone.
|
def get_hosted_zone_by_name(client, zone_name):
    """Get the zone id of an existing zone by name.

    Args:
        client (:class:`botocore.client.Route53`): The connection used to
            interact with Route53's API.
        zone_name (string): The name of the DNS hosted zone to create.

    Returns:
        string: The Id of the Hosted Zone, or None if no zone matches.
    """
    paginator = client.get_paginator("list_hosted_zones")
    for page in paginator.paginate():
        for zone in page["HostedZones"]:
            if zone["Name"] == zone_name:
                return parse_zone_id(zone["Id"])
    return None
| 171,683
|
Get the Id of an existing zone, or create it.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
string: The Id of the Hosted Zone.
|
def get_or_create_hosted_zone(client, zone_name):
    """Get the Id of an existing zone, or create it.

    Args:
        client (:class:`botocore.client.Route53`): The connection used to
            interact with Route53's API.
        zone_name (string): The name of the DNS hosted zone to create.

    Returns:
        string: The Id of the Hosted Zone.
    """
    existing_id = get_hosted_zone_by_name(client, zone_name)
    if existing_id:
        return existing_id
    logger.debug("Zone %s does not exist, creating.", zone_name)
    # Route53 requires a unique caller reference per create request.
    response = client.create_hosted_zone(Name=zone_name,
                                         CallerReference=uuid.uuid4().hex)
    return parse_zone_id(response["HostedZone"]["Id"])
| 171,684
|
Gets the SOA record for zone_name from zone_id.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_id (string): The AWS Route53 zone id of the hosted zone to query.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
:class:`stacker.util.SOARecord`: An object representing the parsed SOA
record returned from AWS Route53.
|
def get_soa_record(client, zone_id, zone_name):
    """Get the SOA record for zone_name from zone_id.

    Args:
        client (:class:`botocore.client.Route53`): The connection used to
            interact with Route53's API.
        zone_id (string): The AWS Route53 zone id of the hosted zone to query.
        zone_name (string): The name of the DNS hosted zone to create.

    Returns:
        :class:`stacker.util.SOARecord`: An object representing the parsed SOA
            record returned from AWS Route53.
    """
    # MaxItems="1" combined with the SOA start record narrows the listing
    # to exactly the zone's SOA entry.
    response = client.list_resource_record_sets(
        HostedZoneId=zone_id,
        StartRecordName=zone_name,
        StartRecordType="SOA",
        MaxItems="1",
    )
    return SOARecord(response["ResourceRecordSets"][0])
| 171,685
|
Creates the given zone_name if it doesn't already exist.
Also sets the SOA negative caching TTL to something short (300 seconds).
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
string: The zone id returned from AWS for the existing, or newly
created zone.
|
def create_route53_zone(client, zone_name):
    """Create the given zone_name if it doesn't already exist.

    Also sets the SOA negative caching TTL to something short (300
    seconds).

    Args:
        client (:class:`botocore.client.Route53`): The connection used to
            interact with Route53's API.
        zone_name (string): The name of the DNS hosted zone to create.

    Returns:
        string: The zone id returned from AWS for the existing, or newly
            created zone.
    """
    # Route53 zone names are always fully qualified (trailing dot).
    if not zone_name.endswith("."):
        zone_name += "."
    zone_id = get_or_create_hosted_zone(client, zone_name)
    old_soa = get_soa_record(client, zone_id, zone_name)
    # If the negative cache value is already 300, don't update it.
    if old_soa.text.min_ttl == "300":
        return zone_id
    new_soa = copy.deepcopy(old_soa)
    logger.debug("Updating negative caching value on zone %s to 300.",
                 zone_name)
    new_soa.text.min_ttl = "300"
    # UPSERT the SOA record with the reduced negative-caching TTL while
    # preserving the original record TTL.
    client.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Comment": "Update SOA min_ttl to 300.",
            "Changes": [
                {
                    "Action": "UPSERT",
                    "ResourceRecordSet": {
                        "Name": zone_name,
                        "Type": "SOA",
                        "TTL": old_soa.ttl,
                        "ResourceRecords": [
                            {
                                "Value": str(new_soa.text)
                            }
                        ]
                    }
                },
            ]
        }
    )
    return zone_id
| 171,686
|
Provides yaml.load alternative with preserved dictionary order.
Args:
stream (string): YAML string to load.
loader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe
load.
Returns:
OrderedDict: Parsed YAML.
|
def yaml_to_ordered_dict(stream, loader=yaml.SafeLoader):
    """Provide a yaml.load alternative with preserved dictionary order.

    Args:
        stream (string): YAML string to load.
        loader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe
            load.

    Returns:
        OrderedDict: Parsed YAML.
    """
    class OrderedUniqueLoader(loader):
        """Loader that keeps mapping order and rejects duplicate keys for
        a small set of stacker-specific "keywords"."""

        # keys which require no duplicate siblings.
        NO_DUPE_SIBLINGS = ["stacks", "class_path"]
        # keys which require no duplicate children keys.
        NO_DUPE_CHILDREN = ["stacks"]

        def _error_mapping_on_dupe(self, node, node_name):
            """Raise a ConstructorError if the mapping has duplicate keys."""
            if isinstance(node, MappingNode):
                mapping = {}
                for n in node.value:
                    a = n[0]
                    b = mapping.get(a.value, None)
                    if b:
                        msg = "{} mapping cannot have duplicate keys {} {}"
                        raise ConstructorError(
                            msg.format(node_name, b.start_mark, a.start_mark)
                        )
                    mapping[a.value] = a

        def _validate_mapping(self, node, deep=False):
            """Construct a mapping as an OrderedDict, enforcing hashable
            keys and (where required) uniqueness."""
            if not isinstance(node, MappingNode):
                raise ConstructorError(
                    None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
            mapping = OrderedDict()
            for key_node, value_node in node.value:
                key = self.construct_object(key_node, deep=deep)
                try:
                    hash(key)
                except TypeError as exc:
                    raise ConstructorError(
                        "while constructing a mapping", node.start_mark,
                        "found unhashable key (%s)" % exc, key_node.start_mark
                    )
                # prevent duplicate sibling keys for certain "keywords".
                if key in mapping and key in self.NO_DUPE_SIBLINGS:
                    msg = "{} key cannot have duplicate siblings {} {}"
                    raise ConstructorError(
                        msg.format(key, node.start_mark, key_node.start_mark)
                    )
                if key in self.NO_DUPE_CHILDREN:
                    # prevent duplicate children keys for this mapping.
                    self._error_mapping_on_dupe(value_node, key_node.value)
                value = self.construct_object(value_node, deep=deep)
                mapping[key] = value
            return mapping

        def construct_mapping(self, node, deep=False):
            """Route all mapping construction through the validating,
            order-preserving implementation."""
            if isinstance(node, MappingNode):
                self.flatten_mapping(node)
            return self._validate_mapping(node, deep=deep)

        def construct_yaml_map(self, node):
            """Yield an OrderedDict for YAML map nodes (two-step, as PyYAML
            requires, to support recursive references)."""
            data = OrderedDict()
            yield data
            value = self.construct_mapping(node)
            data.update(value)

    OrderedUniqueLoader.add_constructor(
        u'tag:yaml.org,2002:map', OrderedUniqueLoader.construct_yaml_map,
    )
    return yaml.load(stream, OrderedUniqueLoader)
| 171,689
|
Ensure an s3 bucket exists, if it does not then create it.
Args:
s3_client (:class:`botocore.client.Client`): An s3 client used to
verify and create the bucket.
bucket_name (str): The bucket being checked/created.
bucket_region (str, optional): The region to create the bucket in. If
not provided, will be determined by s3_client's region.
|
def ensure_s3_bucket(s3_client, bucket_name, bucket_region):
    """Ensure an s3 bucket exists, if it does not then create it.

    Args:
        s3_client (:class:`botocore.client.Client`): An s3 client used to
            verify and create the bucket.
        bucket_name (str): The bucket being checked/created.
        bucket_region (str, optional): The region to create the bucket in. If
            not provided, will be determined by s3_client's region.

    Raises:
        botocore.exceptions.ClientError: Re-raised for any failure other
            than the expected "Not Found" (which triggers creation).
    """
    try:
        s3_client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Message'] == "Not Found":
            # Bucket is missing: create it, pinning the region via a
            # LocationConstraint when one is required.
            logger.debug("Creating bucket %s.", bucket_name)
            create_args = {"Bucket": bucket_name}
            location_constraint = s3_bucket_location_constraint(
                bucket_region
            )
            if location_constraint:
                create_args["CreateBucketConfiguration"] = {
                    "LocationConstraint": location_constraint
                }
            s3_client.create_bucket(**create_args)
        elif e.response['Error']['Message'] == "Forbidden":
            logger.exception("Access denied for bucket %s. Did " +
                             "you remember to use a globally unique name?",
                             bucket_name)
            raise
        else:
            # NOTE(review): this error came from head_bucket (not
            # create_bucket) despite the message wording.
            logger.exception("Error creating bucket %s. Error %s",
                             bucket_name, e.response)
            raise
| 171,693
|
Process a config's defined package sources.
Args:
sources (dict): Package sources from Stacker config dictionary
stacker_cache_dir (string): Path where remote sources will be
cached.
|
def __init__(self, sources, stacker_cache_dir=None):
    """Process a config's defined package sources.

    Args:
        sources (dict): Package sources from Stacker config dictionary.
        stacker_cache_dir (string): Path where remote sources will be
            cached; defaults to ~/.stacker.
    """
    if not stacker_cache_dir:
        stacker_cache_dir = os.path.expanduser("~/.stacker")
    self.stacker_cache_dir = stacker_cache_dir
    self.package_cache_dir = os.path.join(stacker_cache_dir, 'packages')
    self.sources = sources
    self.configs_to_merge = []
    self.create_cache_directories()
| 171,699
|
Make a local path available to current stacker config.
Args:
config (dict): 'local' path config dictionary
|
def fetch_local_package(self, config):
    """Make a local path available to current stacker config.

    Args:
        config (dict): 'local' path config dictionary
    """
    # Local packages resolve relative to the current working directory,
    # so treat cwd as the cache root and the source path as the package.
    cwd = os.getcwd()
    self.update_paths_and_config(config=config,
                                 pkg_dir_name=config['source'],
                                 pkg_cache_dir=cwd)
| 171,702
|
Make a remote S3 archive available for local use.
Args:
config (dict): s3 config dictionary
|
def fetch_s3_package(self, config):
    """Make a remote S3 archive available for local use.

    Args:
        config (dict): s3 config dictionary (expects at least 'bucket'
            and 'key'; optional 'use_latest' and 'requester_pays').
    """
    # The archive type is inferred from the object key's suffix.
    extractor_map = {'.tar.gz': TarGzipExtractor,
                     '.tar': TarExtractor,
                     '.zip': ZipExtractor}
    extractor = None
    for suffix, klass in extractor_map.items():
        if config['key'].endswith(suffix):
            extractor = klass()
            logger.debug("Using extractor %s for S3 object \"%s\" in "
                         "bucket %s.",
                         klass.__name__,
                         config['key'],
                         config['bucket'])
            dir_name = self.sanitize_uri_path(
                "s3-%s-%s" % (config['bucket'],
                              config['key'][:-len(suffix)])
            )
            break

    if extractor is None:
        raise ValueError(
            "Archive type could not be determined for S3 object \"%s\" "
            "in bucket %s." % (config['key'], config['bucket'])
        )

    session = get_session(region=None)
    extra_s3_args = {}
    if config.get('requester_pays', False):
        extra_s3_args['RequestPayer'] = 'requester'

    # We can skip downloading the archive if it's already been cached
    if config.get('use_latest', True):
        try:
            # LastModified should always be returned in UTC, but it doesn't
            # hurt to explicitly convert it to UTC again just in case
            modified_date = session.client('s3').head_object(
                Bucket=config['bucket'],
                Key=config['key'],
                **extra_s3_args
            )['LastModified'].astimezone(dateutil.tz.tzutc())
        except botocore.exceptions.ClientError as client_error:
            logger.error("Error checking modified date of "
                         "s3://%s/%s : %s",
                         config['bucket'],
                         config['key'],
                         client_error)
            sys.exit(1)
        # The timestamp becomes part of the cache directory name so a
        # newer object results in a fresh download.
        dir_name += "-%s" % modified_date.strftime(self.ISO8601_FORMAT)
    cached_dir_path = os.path.join(self.package_cache_dir, dir_name)
    if not os.path.isdir(cached_dir_path):
        logger.debug("Remote package s3://%s/%s does not appear to have "
                     "been previously downloaded - starting download and "
                     "extraction to %s",
                     config['bucket'],
                     config['key'],
                     cached_dir_path)
        # Download and extract into a temp dir first so a failed run
        # never leaves a half-populated cache entry behind.
        tmp_dir = tempfile.mkdtemp(prefix='stacker')
        tmp_package_path = os.path.join(tmp_dir, dir_name)
        try:
            extractor.set_archive(os.path.join(tmp_dir, dir_name))
            logger.debug("Starting remote package download from S3 to %s "
                         "with extra S3 options \"%s\"",
                         extractor.archive,
                         str(extra_s3_args))
            session.resource('s3').Bucket(config['bucket']).download_file(
                config['key'],
                extractor.archive,
                ExtraArgs=extra_s3_args
            )
            logger.debug("Download complete; extracting downloaded "
                         "package to %s",
                         tmp_package_path)
            extractor.extract(tmp_package_path)
            logger.debug("Moving extracted package directory %s to the "
                         "Stacker cache at %s",
                         dir_name,
                         self.package_cache_dir)
            shutil.move(tmp_package_path, self.package_cache_dir)
        finally:
            shutil.rmtree(tmp_dir)
    else:
        logger.debug("Remote package s3://%s/%s appears to have "
                     "been previously downloaded to %s -- bypassing "
                     "download",
                     config['bucket'],
                     config['key'],
                     cached_dir_path)

    # Update sys.path & merge in remote configs (if necessary)
    self.update_paths_and_config(config=config,
                                 pkg_dir_name=dir_name)
| 171,703
|
Make a remote git repository available for local use.
Args:
config (dict): git config dictionary
|
def fetch_git_package(self, config):
    """Make a remote git repository available for local use.

    Args:
        config (dict): git config dictionary
    """
    # only loading git here when needed to avoid load errors on systems
    # without git installed
    from git import Repo

    ref = self.determine_git_ref(config)
    dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref)
    cached_dir_path = os.path.join(self.package_cache_dir, dir_name)

    # We can skip cloning the repo if it's already been cached
    if not os.path.isdir(cached_dir_path):
        logger.debug("Remote repo %s does not appear to have been "
                     "previously downloaded - starting clone to %s",
                     config['uri'],
                     cached_dir_path)
        # Clone into a temp dir first so a failed clone never leaves a
        # half-populated cache entry behind.
        tmp_dir = tempfile.mkdtemp(prefix='stacker')
        try:
            tmp_repo_path = os.path.join(tmp_dir, dir_name)
            with Repo.clone_from(config['uri'], tmp_repo_path) as repo:
                # Detach to the resolved ref and hard-reset the work tree
                # so the checkout matches that revision exactly.
                repo.head.reference = ref
                repo.head.reset(index=True, working_tree=True)
            shutil.move(tmp_repo_path, self.package_cache_dir)
        finally:
            shutil.rmtree(tmp_dir)
    else:
        logger.debug("Remote repo %s appears to have been previously "
                     "cloned to %s -- bypassing download",
                     config['uri'],
                     cached_dir_path)

    # Update sys.path & merge in remote configs (if necessary)
    self.update_paths_and_config(config=config,
                                 pkg_dir_name=dir_name)
| 171,704
|
Handle remote source defined sys.paths & configs.
Args:
config (dict): package source config dictionary
pkg_dir_name (string): directory name of the stacker archive
pkg_cache_dir (string): fully qualified path to stacker cache
cache directory
|
def update_paths_and_config(self, config, pkg_dir_name,
                            pkg_cache_dir=None):
    """Handle remote source defined sys.paths & configs.

    Args:
        config (dict): package source config dictionary
        pkg_dir_name (string): directory name of the stacker archive
        pkg_cache_dir (string): fully qualified path to stacker cache
            cache directory
    """
    if pkg_cache_dir is None:
        pkg_cache_dir = self.package_cache_dir
    cached_dir_path = os.path.join(pkg_cache_dir, pkg_dir_name)

    # Add the package root (or its listed subdirectories) to sys.path.
    paths = config.get('paths')
    if paths:
        for rel_path in paths:
            full_path = os.path.join(cached_dir_path, rel_path)
            logger.debug("Appending \"%s\" to python sys.path",
                         full_path)
            sys.path.append(full_path)
    else:
        sys.path.append(cached_dir_path)

    # Queue any remote config yamls for merging into the main config.
    for config_filename in config.get('configs') or []:
        self.configs_to_merge.append(os.path.join(cached_dir_path,
                                                  config_filename))
| 171,705
|
Determine the latest commit id for a given ref.
Args:
uri (string): git URI
ref (string): git ref
Returns:
str: A commit id
|
def git_ls_remote(self, uri, ref):
    """Determine the latest commit id for a given ref.

    Args:
        uri (string): git URI
        ref (string): git ref

    Returns:
        str: A commit id

    Raises:
        ValueError: If the ref cannot be found in the remote repo.
    """
    logger.debug("Invoking git to retrieve commit id for repo %s...", uri)
    output = subprocess.check_output(['git',
                                      'ls-remote',
                                      uri,
                                      ref])
    # ls-remote output is "<commit-id>\t<ref>" per matching ref; no tab
    # means no match was found.
    if b"\t" not in output:
        raise ValueError("Ref \"%s\" not found for repo %s." % (ref, uri))
    commit_id = output.split(b"\t")[0]
    logger.debug("Matching commit id found: %s", commit_id)
    return commit_id
| 171,706
|
Determine the ref to be used for 'git checkout'.
Args:
config (dict): git config dictionary
Returns:
str: A commit id or tag name
|
def determine_git_ref(self, config):
    """Determine the ref to be used for 'git checkout'.

    Args:
        config (dict): git config dictionary

    Returns:
        str: A commit id or tag name

    Raises:
        ImportError: If more than one of commit/tag/branch is specified.
    """
    # Reject configs that specify more than one revision selector, since
    # precedence between them would be ambiguous.
    specified = [key for key in ('commit', 'tag', 'branch')
                 if config.get(key)]
    if len(specified) > 1:
        raise ImportError("Fetching remote git sources failed: "
                          "conflicting revisions (e.g. 'commit', 'tag', "
                          "'branch') specified for a package source")

    if config.get('commit'):
        ref = config['commit']
    elif config.get('tag'):
        ref = config['tag']
    else:
        # No fixed point in time was given; ask the remote repo for the
        # commit id to use.
        ref = self.git_ls_remote(
            config['uri'],
            self.determine_git_ls_remote_ref(config)
        )
    # `git ls-remote` yields bytes on Python 3; normalize to str.
    if sys.version_info[0] > 2 and isinstance(ref, bytes):
        return ref.decode()
    return ref
| 171,707
|
Take a git URI and ref and converts it to a directory safe path.
Args:
uri (string): git URI
(e.g. git@github.com:foo/bar.git)
ref (string): optional git ref to be appended to the path
Returns:
str: Directory name for the supplied uri
|
def sanitize_git_path(self, uri, ref=None):
    """Take a git URI and ref and convert it to a directory-safe path.

    Args:
        uri (string): git URI
            (e.g. git@github.com:foo/bar.git)
        ref (string): optional git ref to be appended to the path

    Returns:
        str: Directory name for the supplied uri
    """
    # Strip the conventional ".git" suffix before sanitizing.
    base = uri[:-4] if uri.endswith('.git') else uri
    dir_name = self.sanitize_uri_path(base)
    if ref is not None:
        dir_name = "%s-%s" % (dir_name, ref)
    return dir_name
| 171,708
|
Set hook data for the given key.
Args:
key(str): The key to store the hook data in.
data(:class:`collections.Mapping`): A dictionary of data to store,
as returned from a hook.
|
def set_hook_data(self, key, data):
    """Set hook data for the given key.

    Args:
        key (str): The key to store the hook data in.
        data (:class:`collections.Mapping`): A dictionary of data to store,
            as returned from a hook.

    Raises:
        ValueError: If `data` is not a Mapping.
        KeyError: If hook data already exists for `key`.
    """
    # `collections.Mapping` was removed in Python 3.10; prefer the abc
    # module when it exists while remaining Python 2 compatible.
    mapping_type = getattr(collections, "abc", collections).Mapping
    if not isinstance(data, mapping_type):
        raise ValueError("Hook (key: %s) data must be an instance of "
                         "collections.Mapping (a dictionary for "
                         "example)." % key)
    if key in self.hook_data:
        # The original passed `key` as a second exception argument
        # (logging style, via a trailing comma); format it into the
        # message instead.
        raise KeyError("Hook data for key %s already exists, each hook "
                       "must have a unique data_key." % key)
    self.hook_data[key] = data
| 171,728
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.