docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
remove cluster and data stuff
Args:
cluster_id - cluster identity
|
def remove(self, cluster_id):
    """Remove a cluster from storage and release its resources.

    Args:
        cluster_id: identity of the cluster to remove.
    """
    removed = self._storage.pop(cluster_id)
    removed.cleanup()
| 661,759
|
Checks if the secret string used in the authentication attempt
matches the "known" secret string. Some mechanisms will override this
method to control how this comparison is made.
Args:
secret: The secret string to compare against what was used in the
authentication attempt.
Returns:
True if the given secret matches the authentication attempt.
|
def check_secret(self, secret):
    """Return True if ``secret`` matches the secret used in the
    authentication attempt.

    Uses a constant-time comparison where available.
    """
    compare = getattr(hmac, 'compare_digest', None)
    if compare is None:  # pragma: no cover - ancient Python without compare_digest
        return secret == self.secret
    return compare(secret, self.secret)
| 662,113
|
Like :meth:`.get`, but only mechanisms inheriting
:class:`ServerMechanism` will be returned.
Args:
name: The SASL mechanism name.
Returns:
The mechanism object or ``None``
|
def get_server(self, name):
    """Like :meth:`.get`, but only return :class:`ServerMechanism` objects.

    Args:
        name: The SASL mechanism name.
    Returns:
        The mechanism object or ``None``.
    """
    found = self.get(name)
    if isinstance(found, ServerMechanism):
        return found
    return None
| 662,122
|
Like :meth:`.get`, but only mechanisms inheriting
:class:`ClientMechanism` will be returned.
Args:
name: The SASL mechanism name.
Returns:
The mechanism object or ``None``
|
def get_client(self, name):
    """Like :meth:`.get`, but only return :class:`ClientMechanism` objects.

    Args:
        name: The SASL mechanism name.
    Returns:
        The mechanism object or ``None``.
    """
    found = self.get(name)
    if isinstance(found, ClientMechanism):
        return found
    return None
| 662,123
|
Do a shallow initialization of an object
Arguments:
- row<dict>: dict of data like depth=1, i.e. many_refs are only ids
|
def initialize_object(B, res, row):
    """Do a shallow initialization of an object from ``row``.

    Args:
        B: NOTE(review): immediately rebound to ``get_backend()`` below, so
            the passed-in value is never used — confirm whether callers
            rely on supplying a backend here.
        res: resource whose concrete backend table is operated on.
        row (dict): data of depth 1, i.e. many_refs may be ids only.

    Returns:
        (obj, fetched, dangling): the fetched-or-new object, a dict of
        already-fetched subrows keyed by resource then pk, and a dict of
        id-only ("dangling") refs keyed by resource.
    """
    B = get_backend()
    field_groups = FieldGroups(B.get_concrete(res))
    # Fetch the existing object by id, or instantiate a fresh concrete row
    try:
        obj = B.get_object(B.get_concrete(res), row['id'])
    except B.object_missing_error(B.get_concrete(res)):
        tbl = B.get_concrete(res)
        obj = tbl()
    # Set attributes, refs
    for fname, field in field_groups['scalars'].items():
        value = row.get(fname, getattr(obj, fname, None))
        value = B.convert_field(obj.__class__, fname, value)
        setattr(obj, fname, value)
    # _debug('res, row: %s, %s', res, row)
    # Already-fetched, and id-only refs
    fetched, dangling = defaultdict(dict), defaultdict(set)
    # To handle subrows that might be shallow (id) or deep (dict)
    def _handle_subrow(R, subrow):
        if isinstance(subrow, dict):
            pk = subrow['id']
            fetched[R][pk] = subrow
        else:
            pk = subrow
            dangling[R].add(pk)
        return pk
    for fname, field in field_groups['one_refs'].items():
        fieldres = _field_resource(B, B.get_concrete(res), fname)
        key = field.column
        subrow = row.get(key)
        if subrow is None: # e.g. use "org" if "org_id" is missing
            key = fname
            subrow = row[key]
        pk = _handle_subrow(fieldres, subrow)
        setattr(obj, key, pk)
    for fname, field in field_groups['many_refs'].items():
        fieldres = _field_resource(B, B.get_concrete(res), fname)
        # NOTE(review): ``pks`` is built only for _handle_subrow's side
        # effects on fetched/dangling and is otherwise unused.
        pks = [
            _handle_subrow(fieldres, subrow) for subrow in row.get(fname, [])
        ]
    return obj, fetched, dangling
| 662,314
|
Write config values to a file.
Arguments:
- conf_dir<str>: path to output directory
- codec<str>: output field format
- backup_existing<bool>: if a config file exists,
make a copy before overwriting
|
def write_config(data, conf_dir=DEFAULT_CONFIG_DIR, codec="yaml",
                 backup_existing=False):
    """Write config values to a file.

    Args:
        data: the config data to serialize.
        conf_dir (str): path to the output directory (``~`` is expanded).
        codec (str): output field format; falsy values fall back to 'yaml'.
        backup_existing (bool): if a config file exists, make a copy
            (``config.<ext>.bak``) before overwriting.
    """
    if not codec:
        codec = 'yaml'
    codec = munge.get_codec(codec)()
    conf_dir = os.path.expanduser(conf_dir)
    if not os.path.exists(conf_dir):
        # makedirs (rather than mkdir) also creates missing parents
        os.makedirs(conf_dir)
    # Check for existing file, back up if necessary
    outpath = os.path.join(conf_dir, 'config.' + codec.extensions[0])
    if backup_existing and os.path.exists(outpath):
        os.rename(outpath, outpath + '.bak')
    # Context manager guarantees the handle is closed even if dump() raises;
    # the original leaked the file object returned by open().
    with open(outpath, 'w') as outfile:
        codec.dump(data, outfile)
| 662,321
|
Utility function to recursively prompt for config values
Arguments:
- defaults<dict>: default values used for empty inputs
- path<str>: path to prepend to config keys (eg. "path.keyname")
|
def prompt_config(sch, defaults=None, path=None):
    """Recursively prompt the user for config values.

    Args:
        sch: schema object whose ``attributes()`` are prompted for.
        defaults (dict): default values used for empty inputs.
        path (str): path prepended to config keys (e.g. "path.keyname").

    Returns:
        The prompted values validated via ``sch.validate()``.
    """
    # Normalize once up front instead of re-checking on every iteration
    if defaults is None:
        defaults = {}
    out = {}
    for name, attr in sch.attributes():
        fullpath = '{}.{}'.format(path, name) if path else name
        default = defaults.get(name)
        if isinstance(attr, _schema.Schema):
            # recurse on sub-schema
            value = prompt_config(attr, defaults=default, path=fullpath)
        else:
            # fall back to the schema default, then to an empty string
            if default is None:
                default = attr.default
            if default is None:
                default = ''
            value = prompt(fullpath, default)
        out[name] = value
    return sch.validate(out)
| 662,322
|
Returns welcome message of FTP server.
Parameters:
- connId(optional) - connection identifier. By default equals 'default'
|
def get_welcome(self, connId='default'):
    """Return the welcome message of the FTP server.

    Args:
        connId: connection identifier (default ``'default'``).
    Raises:
        FtpLibraryError: wrapping any ftplib error.
    """
    connection = self.__getConnection(connId)
    try:
        welcome = "" + connection.getwelcome()
    except ftplib.all_errors as e:
        raise FtpLibraryError(str(e))
    if self.printOutput:
        logger.info(welcome)
    return welcome
| 662,509
|
Returns list of raw lines returned as contents of current directory.
Parameters:
- connId(optional) - connection identifier. By default equals 'default'
|
def dir(self, connId='default'):
    """Return the raw listing lines for the current directory.

    Args:
        connId: connection identifier (default ``'default'``).
    Raises:
        FtpLibraryError: wrapping any ftplib error.
    """
    connection = self.__getConnection(connId)
    lines = []
    try:
        connection.dir(lines.append)
        listing = "".join(str(line) + "\n" for line in lines)
    except ftplib.all_errors as e:
        raise FtpLibraryError(str(e))
    if self.printOutput:
        logger.info(listing)
    return lines
| 662,510
|
Returns list of files (and/or directories) of current directory.
Parameters:
- connId(optional) - connection identifier. By default equals 'default'
|
def dir_names(self, connId='default'):
    """Return the names of files/directories in the current directory.

    Args:
        connId: connection identifier (default ``'default'``).
    Returns:
        list of names; best-effort — an empty list on any FTP error.
    """
    thisConn = self.__getConnection(connId)
    try:
        return thisConn.nlst()
    except ftplib.all_errors:
        # Narrowed from a bare ``except:`` that also swallowed
        # KeyboardInterrupt/SystemExit; an FTP failure yields [].
        return []
| 662,511
|
Sends any command to FTP server. Returns server output.
Parameters:
- command - any valid command to be sent (invalid will result in exception).
- connId(optional) - connection identifier. By default equals 'default'
Example:
| send cmd | HELP |
|
def send_cmd(self, command, connId='default'):
    """Send an arbitrary command to the FTP server and return its output.

    Args:
        command: any valid command (invalid ones raise).
        connId: connection identifier (default ``'default'``).
    Raises:
        FtpLibraryError: wrapping any ftplib error.
    """
    connection = self.__getConnection(connId)
    try:
        response = "" + str(connection.sendcmd(command))
    except ftplib.all_errors as e:
        raise FtpLibraryError(str(e))
    if self.printOutput:
        logger.info(response)
    return response
| 662,515
|
Closes FTP connection. Returns None.
Parameters:
- connId(optional) - connection identifier. By default equals 'default'
|
def ftp_close(self, connId='default'):
    """Close the FTP connection identified by ``connId``. Returns None.

    Args:
        connId: connection identifier (default ``'default'``).
    Raises:
        FtpLibraryError: if neither a polite quit() nor a forced close()
            succeeds.
    """
    thisConn = self.__getConnection(connId)
    try:
        # Politely end the session first
        thisConn.quit()
        self.__removeConnection(connId)
    except Exception as e:
        # quit() failed (e.g. connection already dropped): force-close the
        # socket instead; only an ftplib error here is surfaced to callers
        try:
            thisConn.close()
            self.__removeConnection(connId)
        except ftplib.all_errors as x:
            raise FtpLibraryError(str(x))
| 662,516
|
Gets the thread.local data (dict) for a given namespace.
Args:
namespace (string): The namespace, or key, of the data dict.
Returns:
(dict)
|
def data(self, namespace):
    """Return the thread-local data dict for ``namespace``, creating an
    empty one on first access.

    Args:
        namespace (string): the namespace, or key, of the data dict
            (must be truthy).
    Returns:
        (dict)
    """
    assert namespace
    try:
        return self._data[namespace]
    except KeyError:
        fresh = {}
        self._data[namespace] = fresh
        return fresh
| 664,488
|
Creates a request cache with the provided namespace.
Args:
namespace (string): (optional) uses 'default' if not provided.
|
def __init__(self, namespace=None):
    """Create a request cache scoped to the provided namespace.

    Args:
        namespace (string): (optional) uses the default namespace when
            omitted; explicitly passing the default namespace's own name
            is disallowed.

    NOTE: the guard below is an ``assert`` and is stripped under
    ``python -O``; it documents intent rather than enforcing it.
    """
    assert namespace != DEFAULT_REQUEST_CACHE_NAMESPACE,\
        'Optional namespace can not be {}.'.format(DEFAULT_REQUEST_CACHE_NAMESPACE)
    self.namespace = namespace or DEFAULT_REQUEST_CACHE_NAMESPACE
| 664,489
|
Retrieves a CachedResponse for the provided key.
Args:
key (string)
Returns:
A CachedResponse with is_found status and value.
|
def get_cached_response(self, key):
    """Retrieve a CachedResponse for the provided key.

    Args:
        key (string)
    Returns:
        A CachedResponse with is_found status and value.
    """
    sentinel = _CACHE_MISS
    cached = self.data.get(key, sentinel)
    return CachedResponse(cached is not sentinel, key, cached)
| 664,490
|
Retrieves a CachedResponse for the provided key.
Args:
key (string)
Returns:
A CachedResponse with is_found status and value.
|
def get_cached_response(cls, key):
    """Retrieve a CachedResponse for the provided key.

    Checks the request cache first, then falls back to the django cache,
    promoting django hits into the request cache.

    Args:
        key (string)
    Returns:
        A CachedResponse with is_found status and value.
    """
    response = DEFAULT_REQUEST_CACHE.get_cached_response(key)
    if response.is_found:
        return response
    django_response = cls._get_cached_response_from_django_cache(key)
    cls._set_request_cache_if_django_cache_hit(key, django_response)
    return django_response
| 664,491
|
Caches the value for the provided key in both the request cache and the
django cache.
Args:
key (string)
value (object)
django_cache_timeout (int): (Optional) Timeout used to determine
if and for how long to cache in the django cache. A timeout of
0 will skip the django cache. If timeout is provided, use that
timeout for the key; otherwise use the default cache timeout.
|
def set_all_tiers(key, value, django_cache_timeout=DEFAULT_TIMEOUT):
    """Cache ``value`` under ``key`` in both the request cache and the
    django cache.

    Args:
        key (string)
        value (object)
        django_cache_timeout (int): (Optional) timeout for the django
            cache; per the caching contract, a timeout of 0 skips the
            django cache.
    """
    DEFAULT_REQUEST_CACHE.set(key, value)
    django_cache.set(key, value, django_cache_timeout)
| 664,492
|
Retrieves a CachedResponse for the given key from the django cache.
If the request was set to force cache misses, then this will always
return a cache miss response.
Args:
key (string)
Returns:
A CachedResponse with is_found status and value.
|
def _get_cached_response_from_django_cache(key):
    """Retrieve a CachedResponse for ``key`` from the django cache.

    If the request was set to force cache misses, this always returns a
    cache-miss response.

    Args:
        key (string)
    Returns:
        A CachedResponse with is_found status and value.
    """
    if TieredCache._should_force_django_cache_miss():
        return CachedResponse(is_found=False, key=key, value=None)
    sentinel = _CACHE_MISS
    cached = django_cache.get(key, sentinel)
    return CachedResponse(cached is not sentinel, key, cached)
| 664,493
|
Sets the value in the request cache if the django cached response was a hit.
Args:
key (string)
django_cached_response (CachedResponse)
|
def _set_request_cache_if_django_cache_hit(key, django_cached_response):
    """Copy a django-cache hit into the request cache; misses are ignored.

    Args:
        key (string)
        django_cached_response (CachedResponse): response from the django
            cache; stored only when ``is_found`` is True.
    """
    if django_cached_response.is_found:
        DEFAULT_REQUEST_CACHE.set(key, django_cached_response.value)
| 664,494
|
Creates a cached response object.
Args:
is_found (bool): True if the key was found in the cache, False
otherwise.
key (string): The key originally used to retrieve the value.
value (object)
|
def __init__(self, is_found, key, value):
    """Create a cached response object.

    Args:
        is_found (bool): True if the key was found in the cache, False
            otherwise.
        key (string): The key originally used to retrieve the value.
        value (object): stored only when ``is_found`` is True; on a cache
            miss the instance has no ``value`` attribute, so accessing it
            raises AttributeError (fail-loud by design, presumably —
            confirm with callers).
    """
    self.key = key
    self.is_found = is_found
    if self.is_found:
        self.value = value
| 664,497
|
A Sniffer that merges packets into a stream
Params:
``iface`` The interface in which to listen
``port`` The TCP port that we care about
``stream_handler`` The callback for each stream
``offline`` Path to a pcap file
``ip`` A list of IPs that we care about
|
def __init__(self, iface, port, stream_handler=None, offline=None, ip=None):
    """Create a sniffer that merges packets into streams.

    Args:
        iface: the interface on which to listen.
        port: the TCP port that we care about.
        stream_handler: callback invoked for each stream.
        offline: path to a pcap file (instead of live capture).
        ip: list of IPs that we care about (defaults to an empty list).
    """
    super(Sniffer, self).__init__()
    # Thread.setDaemon() is deprecated (removed in Python 3.13);
    # assigning the ``daemon`` attribute is the supported equivalent.
    self.daemon = True
    self._iface = iface
    self._port = port
    self._offline = offline
    self._ip = ip if ip else []
    self._queue = deque()  # TODO: maxlen?
    self._dispatcher = Dispatcher(self._queue)
    self._dispatcher.add_handler(stream_handler)
    self._wants_stop = False
    # NOTE: the thread starts itself from __init__
    self.start()
| 665,391
|
Sample from the prior distribution over datasets
Args:
size (Optional[int]): The number of samples to draw.
Returns:
array[n] or array[size, n]: The samples from the prior
distribution over datasets.
|
def sample(self, size=None):
    """Sample from the prior distribution over datasets.

    Args:
        size (Optional[int]): The number of samples to draw; ``None``
            draws a single sample.
    Returns:
        array[n] or array[size, n]: The samples from the prior
        distribution over datasets, evaluated at ``self._t``.
    """
    self._recompute()
    # Draw standard-normal noise, one column per requested sample
    if size is None:
        n = np.random.randn(len(self._t))
    else:
        n = np.random.randn(len(self._t), size)
    # Correlate the noise via the solver (presumably the Cholesky factor
    # of the kernel matrix — confirm against the solver's dot_L contract)
    n = self.solver.dot_L(n)
    if size is None:
        return self.mean.get_value(self._t) + n[:, 0]
    return self.mean.get_value(self._t)[None, :] + n.T
| 665,606
|
Compute the value of the term for an array of lags
Args:
tau (array[...]): An array of lags where the term should be
evaluated.
Returns:
The value of the term for each ``tau``. This will have the same
shape as ``tau``.
|
def get_value(self, tau):
    """Compute the value of the term for an array of lags.

    Args:
        tau (array[...]): An array of lags where the term should be
            evaluated.
    Returns:
        The value of the term for each ``tau``, with the same shape
        as ``tau``.
    """
    tau = np.asarray(tau)
    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
     beta_complex_real, beta_complex_imag) = self.coefficients
    kernel = get_kernel_value(
        alpha_real, beta_real,
        alpha_complex_real, alpha_complex_imag,
        beta_complex_real, beta_complex_imag,
        tau.flatten(),
    )
    return np.asarray(kernel).reshape(tau.shape)
| 665,608
|
Compute the PSD of the term for an array of angular frequencies
Args:
omega (array[...]): An array of frequencies where the PSD should
be evaluated.
Returns:
The value of the PSD for each ``omega``. This will have the same
shape as ``omega``.
|
def get_psd(self, omega):
    """Compute the PSD of the term for an array of angular frequencies.

    Args:
        omega (array[...]): An array of frequencies where the PSD should
            be evaluated.
    Returns:
        The value of the PSD for each ``omega``, with the same shape
        as ``omega``.
    """
    w = np.asarray(omega)
    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
     beta_complex_real, beta_complex_imag) = self.coefficients
    p = get_psd_value(
        alpha_real, beta_real,
        alpha_complex_real, alpha_complex_imag,
        beta_complex_real, beta_complex_imag,
        w.flatten(),
    )
    # Wrap in asarray (as the sibling get_value does) so reshape works
    # even if the backend returns a plain sequence
    return np.asarray(p).reshape(w.shape)
| 665,609
|
Return if the spreadsheet has a worksheet with the given id.
Args:
id (int): numeric id of the worksheet
Returns:
bool: ``True`` if such a worksheet is present else ``False``
Raises:
TypeError: if ``id`` is not an ``int``
|
def __contains__(self, id):
    """Return whether the spreadsheet has a worksheet with the given id.

    Args:
        id (int): numeric id of the worksheet
    Returns:
        bool: ``True`` if such a worksheet is present else ``False``
    Raises:
        TypeError: if ``id`` is not an ``int``
    """
    if isinstance(id, int):
        return id in self._map
    raise TypeError(id)
| 666,571
|
Return the worksheet with the given id.
Args:
id: numeric id of the worksheet
Returns:
WorkSheet: contained worksheet object
Raises:
TypeError: if ``id`` is not an ``int``
KeyError: if the spreadsheet has no worksheet with the given ``id``
|
def __getitem__(self, id):
    """Return the worksheet with the given id.

    Args:
        id: numeric id of the worksheet
    Returns:
        WorkSheet: contained worksheet object
    Raises:
        TypeError: if ``id`` is not an ``int``
        KeyError: if the spreadsheet has no worksheet with the given ``id``
    """
    if isinstance(id, int):
        return self._map[id]
    raise TypeError(id)
| 666,572
|
Return the first worksheet with the given title.
Args:
title(str): title/name of the worksheet to return
Returns:
WorkSheet: contained worksheet object
Raises:
KeyError: if the spreadsheet has no worksheet with the given ``title``
|
def find(self, title):
    """Return the first worksheet with the given title.

    Args:
        title (str): title/name of the worksheet to return
    Returns:
        WorkSheet: contained worksheet object
    Raises:
        KeyError: if the spreadsheet has no worksheet with the given
            ``title``
    """
    try:
        return self._titles[title][0]
    except KeyError:
        raise KeyError(title)
| 666,573
|
Return a list of worksheets with the given title.
Args:
title(str): title/name of the worksheets to return, or ``None`` for all
Returns:
list: list of contained worksheet instances (possibly empty)
|
def findall(self, title=None):
    """Return a list of worksheets with the given title.

    Args:
        title (str): title/name of the worksheets to return, or ``None``
            for all
    Returns:
        list: list of contained worksheet instances (possibly empty)
    """
    if title is None:
        return list(self._sheets)
    return list(self._titles.get(title, []))
| 666,574
|
Return a list of contained worksheet titles.
Args:
unique (bool): drop duplicates
Returns:
list: list of titles/name strings
|
def titles(self, unique=False):
    """Return a list of contained worksheet titles.

    Args:
        unique (bool): drop duplicates
    Returns:
        list: list of title/name strings
    """
    all_titles = [sheet.title for sheet in self._items]
    if unique:
        return tools.uniqued(all_titles)
    return all_titles
| 666,578
|
Return the value(s) of the given cell(s).
Args:
index (str): cell/row/col index ('A1', '2', 'B') or slice ('A1':'C3')
Returns:
value (cell), list(col, row), or nested list (two-dimensional slice)
Raises:
TypeError: if ``index`` is not a string or slice of strings
ValueError: if ``index`` cannot be parsed
IndexError: if ``index`` is out of range
|
def __getitem__(self, index):
    """Return the value(s) of the given cell(s).

    Args:
        index (str): cell/row/col index ('A1', '2', 'B') or slice
            ('A1':'C3')
    Returns:
        value (cell), list (col/row), or nested list (two-dimensional
        slice)
    Raises:
        TypeError: if ``index`` is not a string or slice of strings
        ValueError: if ``index`` cannot be parsed
        IndexError: if ``index`` is out of range
    """
    # Coordinates.from_string handles parsing/validation of the index form
    getter = coordinates.Coordinates.from_string(index)
    return getter(self._values)
| 666,584
|
Return the value at the given cell position.
Args:
row (int): zero-based row number
col (int): zero-based column number
Returns:
cell value
Raises:
TypeError: if ``row`` or ``col`` is not an ``int``
IndexError: if the position is out of range
|
def at(self, row, col):
    """Return the value at the given cell position.

    Args:
        row (int): zero-based row number
        col (int): zero-based column number
    Returns:
        cell value
    Raises:
        TypeError: if ``row`` or ``col`` is not an ``int``
        IndexError: if the position is out of range
    """
    if isinstance(row, int) and isinstance(col, int):
        return self._values[row][col]
    raise TypeError(row, col)
| 666,585
|
Return a nested list with the worksheet values.
Args:
column_major (bool): as list of columns (default list of rows)
Returns:
list: list of lists with values
|
def values(self, column_major=False):
    """Return a nested list with the worksheet values.

    Args:
        column_major (bool): as list of columns (default list of rows)
    Returns:
        list: list of lists with values (rows are copies)
    """
    if column_major:
        return [list(column) for column in zip(*self._values)]
    return [list(row) for row in self._values]
| 666,586
|
r"""Return a pandas DataFrame loaded from the worksheet data.
Args:
\**kwargs: passed to ``pandas.read_csv()`` (e.g. ``header``, ``index_col``)
Returns:
pandas.DataFrame: new ``DataFrame`` instance
|
def to_frame(self, **kwargs):
    r"""Return a pandas DataFrame loaded from the worksheet data.

    The stray bare ``r`` expression that previously followed the
    signature (residue of a raw-docstring prefix, and a NameError at
    runtime) has been folded back into this raw docstring.

    Args:
        \**kwargs: passed to ``pandas.read_csv()`` (e.g. ``header``,
            ``index_col``)
    Returns:
        pandas.DataFrame: new ``DataFrame`` instance named after the
        worksheet title
    """
    df = export.write_dataframe(self._values, **kwargs)
    df.name = self.title
    return df
| 666,588
|
Return a spreadsheet collection making OAuth 2.0 credentials.
Args:
secrets (str): location of secrets file (default: ``%r``)
storage (str): location of storage file (default: ``%r``)
scopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``)
no_webserver (bool): URL/code prompt instead of webbrowser auth
Returns:
Sheets: new Sheets instance with OAuth 2.0 credentials
|
def from_files(cls, secrets=None, storage=None, scopes=None, no_webserver=False):
    """Return a spreadsheet collection, creating OAuth 2.0 credentials.

    Args:
        secrets (str): location of the client-secrets file
        storage (str): location of the credential-storage file
        scopes: scope URL(s) or ``'read'`` or ``'write'``
        no_webserver (bool): URL/code prompt instead of webbrowser auth
    Returns:
        Sheets: new instance built from the obtained credentials
    """
    creds = oauth2.get_credentials(scopes, secrets, storage, no_webserver)
    return cls(creds)
| 666,589
|
Return if there is a spreadsheet with the given id.
Args:
id (str): unique alphanumeric id of the spreadsheet
Returns:
bool: ``True`` if it can be fetched else ``False``
|
def __contains__(self, id):
    """Return whether there is a spreadsheet with the given id.

    Args:
        id (str): unique alphanumeric id of the spreadsheet
    Returns:
        bool: ``True`` if it can be fetched else ``False``
    """
    try:
        backend.spreadsheet(self._sheets, id)
        return True
    except KeyError:
        return False
| 666,591
|
Fetch and return the spreadsheet with the given id.
Args:
id (str): unique alphanumeric id of the spreadsheet
Returns:
SpreadSheet: new SpreadSheet instance
Raises:
KeyError: if no spreadsheet with the given ``id`` is found
|
def __getitem__(self, id):
    """Fetch and return the spreadsheet with the given id.

    Args:
        id (str): unique alphanumeric id of the spreadsheet
    Returns:
        SpreadSheet: new SpreadSheet instance
    Raises:
        KeyError: if no spreadsheet with the given ``id`` is found
    """
    # ``collection[:]`` returns every spreadsheet as a list
    if id == slice(None, None):
        return list(self)
    response = backend.spreadsheet(self._sheets, id)
    spreadsheet = models.SpreadSheet._from_response(response, self._sheets)
    spreadsheet._api = self
    return spreadsheet
| 666,592
|
Fetch and return the spreadsheet with the given id or url.
Args:
id_or_url (str): unique alphanumeric id or URL of the spreadsheet
Returns:
New SpreadSheet instance or given default if none is found
Raises:
ValueError: if an URL is given from which no id could be extracted
|
def get(self, id_or_url, default=None):
    """Fetch and return the spreadsheet with the given id or url.

    Args:
        id_or_url (str): unique alphanumeric id or URL of the spreadsheet
        default: value returned when no spreadsheet is found
    Returns:
        New SpreadSheet instance or the given default if none is found
    Raises:
        ValueError: if a URL is given from which no id could be extracted
    """
    spreadsheet_id = id_or_url
    if '/' in id_or_url:
        # looks like a URL: extract the id (may raise ValueError)
        spreadsheet_id = urls.SheetUrl.from_string(id_or_url).id
    try:
        return self[spreadsheet_id]
    except KeyError:
        return default
| 666,593
|
Fetch and return the first spreadsheet with the given title.
Args:
title(str): title/name of the spreadsheet to return
Returns:
SpreadSheet: new SpreadSheet instance
Raises:
KeyError: if no spreadsheet with the given ``title`` is found
|
def find(self, title):
    """Fetch and return the first spreadsheet with the given title.

    Args:
        title (str): title/name of the spreadsheet to return
    Returns:
        SpreadSheet: new SpreadSheet instance
    Raises:
        KeyError: if no spreadsheet with the given ``title`` is found
    """
    for id, _ in backend.iterfiles(self._drive, name=title):
        return self[id]
    raise KeyError(title)
| 666,594
|
Fetch and return a list of spreadsheets with the given title.
Args:
title(str): title/name of the spreadsheets to return, or ``None`` for all
Returns:
list: list of new SpreadSheet instances (possibly empty)
|
def findall(self, title=None):
    """Fetch and return a list of spreadsheets with the given title.

    Args:
        title (str): title/name of the spreadsheets to return, or
            ``None`` for all
    Returns:
        list: list of new SpreadSheet instances (possibly empty)
    """
    if title is None:
        return list(self)
    matches = backend.iterfiles(self._drive, name=title)
    return [self[id] for id, _ in matches]
| 666,595
|
Return a list of all available spreadsheet titles.
Args:
unique (bool): drop duplicates
Returns:
list: list of title/name strings
|
def titles(self, unique=False):
    """Return a list of all available spreadsheet titles.

    Args:
        unique (bool): drop duplicates
    Returns:
        list: list of title/name strings
    """
    found = (title for _, title in self.iterfiles())
    if unique:
        return tools.uniqued(found)
    return list(found)
| 666,596
|
Recursive dict merge.
Inspired by :meth:`dict.update`, instead of updating only top-level
keys, dict_merge recurses down into dicts nested to an arbitrary depth,
updating keys. The ``merge_dct`` is merged into ``dct``.
From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
Arguments:
dct: dict onto which the merge is executed
merge_dct: dct merged into dct
|
def _dict_merge(dct, merge_dct):
    """Recursive dict merge (in place).

    Inspired by :meth:`dict.update`: instead of updating only top-level
    keys, recurses into dicts nested to an arbitrary depth, updating keys.
    ``merge_dct`` is merged into ``dct``.
    From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9

    Args:
        dct: dict onto which the merge is executed (mutated).
        merge_dct: dict merged into ``dct``.
    """
    # ``collections.Mapping`` was removed in Python 3.10; prefer
    # ``collections.abc.Mapping``, falling back for Python 2.
    mapping_cls = (collections.abc.Mapping if hasattr(collections, 'abc')
                   else collections.Mapping)
    for key, value in merge_dct.items():
        if (key in dct and isinstance(dct[key], dict)
                and isinstance(value, mapping_cls)):
            # both sides are mappings: recurse instead of overwriting
            _dict_merge(dct[key], value)
        else:
            dct[key] = value
| 666,853
|
Convert a Schema Object to a Python object.
Args:
schema: An ``OrderedDict`` representing the schema object.
|
def _parse_schema(schema, method):
    """Convert a Schema Object to an example Python object.

    Args:
        schema: An ``OrderedDict`` representing the schema object.
        method: when truthy, properties marked ``readOnly`` are elided
            (represented by the ``_READONLY_PROPERTY`` sentinel).

    Returns:
        A representative Python value for the schema: an OrderedDict for
        objects, a list for arrays, the first enum member, or a mapped
        example value for scalar types.
    """
    if method and schema.get('readOnly', False):
        return _READONLY_PROPERTY
    # allOf: Must be valid against all of the subschemas
    if 'allOf' in schema:
        schema_ = copy.deepcopy(schema['allOf'][0])
        for x in schema['allOf'][1:]:
            _dict_merge(schema_, x)
        return _parse_schema(schema_, method)
    # anyOf: Must be valid against any of the subschemas
    # TODO(stephenfin): Handle anyOf
    # oneOf: Must be valid against exactly one of the subschemas
    if 'oneOf' in schema:
        # we only show the first one since we can't show everything
        return _parse_schema(schema['oneOf'][0], method)
    if 'enum' in schema:
        # we only show the first one since we can't show everything
        return schema['enum'][0]
    schema_type = schema.get('type', 'object')
    if schema_type == 'array':
        # special case oneOf so that we can show examples for all possible
        # combinations
        if 'oneOf' in schema['items']:
            return [
                _parse_schema(x, method) for x in schema['items']['oneOf']]
        return [_parse_schema(schema['items'], method)]
    if schema_type == 'object':
        # an object whose every property is readOnly collapses to the sentinel
        if method and all(v.get('readOnly', False)
                          for v in schema['properties'].values()):
            return _READONLY_PROPERTY
        results = []
        for name, prop in schema.get('properties', {}).items():
            result = _parse_schema(prop, method)
            if result != _READONLY_PROPERTY:
                results.append((name, result))
        return collections.OrderedDict(results)
    # scalar types: map (type, format) to an example value
    if (schema_type, schema.get('format')) in _TYPE_MAPPING:
        return _TYPE_MAPPING[(schema_type, schema.get('format'))]
    return _TYPE_MAPPING[(schema_type, None)]
| 666,854
|
Does this version key allow 'latest' as an option (e.g. "latest AMI" makes sense and is allowed)
Args:
version_key_name: the version key to check for "allow_latest"
Returns:
True if the version key allows latest, False if it does not
Raises:
ValueError if the key was not found
|
def allows_latest(self, version_key_name):
    """Report whether a version key allows 'latest' as an option
    (e.g. "latest AMI" makes sense and is allowed).

    Args:
        version_key_name: the version key to check for "allow_latest"
    Returns:
        The key's "allow_latest" value (True if it allows latest).
    Raises:
        RuntimeError: if the key was not found or lacks "allow_latest"
    """
    keys = self.version_keys()
    # dict.has_key() was removed in Python 3; ``in`` works everywhere
    if version_key_name not in keys:
        raise RuntimeError("service registry doesn't have a version key entry for: {}".format(version_key_name))
    if "allow_latest" not in keys[version_key_name]:
        raise RuntimeError("service registry key {} doesn't have an 'allow_latest' value".format(
            version_key_name))
    return keys[version_key_name]["allow_latest"]
| 666,891
|
Decorator for ef plugin classes. Any wrapped classes should contain a run() method which executes the plugin code.
Args:
service_name (str): The name of the service being extended.
Example:
@ef_plugin('ef-generate')
class NewRelicPlugin(object):
def run(self):
exec_code()
|
def ef_plugin(service_name):
    """Class decorator for ef plugin classes.

    Wrapped classes should contain a ``run()`` method which executes the
    plugin code.

    Args:
        service_name (str): The name of the service being extended.

    Example:
        @ef_plugin('ef-generate')
        class NewRelicPlugin(object):
            def run(self):
                exec_code()
    """
    def class_rebuilder(cls):
        # Subclass the wrapped class so isinstance checks on it still hold
        class EFPlugin(cls):
            def __init__(self, context, clients):
                self.service = service_name
                self.context = context
                self.clients = clients
                # separate inner instance used as an attribute fallback
                self.oInstance = cls()
            def __getattribute__(self, s):
                # Prefer attributes found on the wrapper (or inherited);
                # on AttributeError fall through to the wrapped instance
                try:
                    x = super(EFPlugin, self).__getattribute__(s)
                except AttributeError:
                    pass
                else:
                    return x
                return self.oInstance.__getattribute__(s)
        return EFPlugin
    return class_rebuilder
| 666,911
|
Executes all loaded plugins designated for the service calling the function.
Args:
context_obj (obj:EFContext): The EFContext object created by the service.
boto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()
|
def run_plugins(context_obj, boto3_clients):
    """Execute all loaded plugins designated for the service calling the
    function.

    Args:
        context_obj (obj:EFContext): The EFContext object created by the
            service.
        boto3_clients (dict): Dictionary of boto3 clients created by
            ef_utils.create_aws_clients()
    """
    def print_if_verbose(message):
        if context_obj.verbose:
            print(message)
    # The invoking script's basename (minus .py) selects which plugins run
    service_name = os.path.basename(sys.argv[0]).replace(".py", "")
    try:
        import plugins
    except ImportError:
        print_if_verbose("no plugins detected.")
        return
    else:
        # Walk each package under plugins/, import its modules, and look
        # for the "EFPlugin" wrapper class produced by the @ef_plugin
        # decorator
        for plugin_importer, plugin_name, plugin_ispkg in pkgutil.iter_modules(plugins.__path__):
            if plugin_ispkg:
                plugin_package = importlib.import_module("plugins.{}".format(plugin_name))
                for importer, modname, ispkg in pkgutil.iter_modules(plugin_package.__path__):
                    plugin_module = importlib.import_module("plugins.{}.{}".format(plugin_name, modname))
                    for name, obj in inspect.getmembers(plugin_module):
                        if inspect.isclass(obj) and obj.__name__ == "EFPlugin":
                            plugin_class = getattr(plugin_module, name)
                            plugin_instance = plugin_class(context=context_obj, clients=boto3_clients)
                            # only run plugins registered for this service
                            if plugin_instance.service == service_name:
                                print_if_verbose("plugin '{}' loaded".format(plugin_name))
                                if not context_obj.commit:
                                    print_if_verbose("dryrun: skipping plugin execution.")
                                else:
                                    try:
                                        plugin_instance.run()
                                    except AttributeError:
                                        print("error executing plugin '{}'".format(modname))
| 666,912
|
Generate a random secret consisting of mixed-case letters and numbers
Args:
length (int): Length of the generated password
Returns:
a randomly generated secret string
Raises:
None
|
def generate_secret(length=32):
    """Generate a random secret consisting of mixed-case letters and
    numbers.

    Args:
        length (int): Length of the generated secret
    Returns:
        a randomly generated secret string
    """
    # The previous implementation iterated os.urandom() and called
    # ord(byte), which raises TypeError on Python 3 (bytes iterate as
    # ints), and its int(len(alphabet) * byte / 256.0) scaling biased the
    # distribution.  secrets.choice() is a CSPRNG with uniform selection.
    import secrets
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))
| 666,943
|
Generate a parameter file with its secrets encrypted in KMS
Args:
file_path (string): Path to the parameter file to be encrypted
pattern (string): Pattern to do fuzzy string matching
service (string): Service to use KMS key to encrypt file
environment (string): Environment to encrypt values
clients (dict): KMS AWS client that has been instantiated
Returns:
None
Raises:
IOError: If the file does not exist
|
def generate_secret_file(file_path, pattern, service, environment, clients):
    """Rewrite a parameter file with its matching secrets encrypted in KMS.

    Args:
        file_path (string): Path to the parameter file to be encrypted
        pattern (string): Pattern to do fuzzy string matching
        service (string): Service whose KMS key encrypts the values
        environment (string): Environment whose values are encrypted
        clients (dict): instantiated AWS clients; must include 'kms'
    Returns:
        None (the file is rewritten in place only if something changed)
    Raises:
        IOError: If the file does not exist
    """
    changed = False
    with open(file_path) as json_file:
        # preserve key order so the rewrite is a minimal diff
        data = json.load(json_file, object_pairs_hook=OrderedDict)
    try:
        for key, value in data["params"][environment].items():
            if pattern in key:
                # skip values that already carry the KMS decrypt marker
                if "aws:kms:decrypt" in value:
                    print("Found match, key {} but value is encrypted already; skipping...".format(key))
                else:
                    print("Found match, encrypting key {}".format(key))
                    encrypted_password = ef_utils.kms_encrypt(clients['kms'], service, environment, value)
                    data["params"][environment][key] = format_secret(encrypted_password)
                    changed = True
    except KeyError:
        ef_utils.fail("Error env: {} does not exist in parameters file".format(environment))
    if changed:
        with open(file_path, "w") as encrypted_file:
            json.dump(data, encrypted_file, indent=2, separators=(',', ': '))
            # Writing new line here so it conforms to WG14 N1256 5.1.1.1 (so github doesn't complain)
            encrypted_file.write("\n")
| 666,944
|
Sets context.env, context.env_short, and context.account_alias if env is valid
For envs of the form "global.<account>" and "mgmt.<account_alias>",
env is captured as "global" or "mgmt" and account_alias is parsed
out of the full env rather than looked up
Args:
value: the fully-qualified env value
Raises:
ValueError if env is not valid
|
def env(self, value):
    """Set context.env, context.env_short, and context.account_alias if
    ``value`` is a valid env.

    For envs of the form "global.<account_alias>" and
    "mgmt.<account_alias>", env is captured as "global" or "mgmt" and the
    account alias is parsed out of the full env rather than looked up.

    Args:
        value: the fully-qualified env value
    Raises:
        ValueError: if env is not valid (raised by the validators)
    """
    env_valid(value)
    self._env_full = value
    if value.find(".") == -1:
        # plain environment, e.g. prod, staging, proto<n>
        self._env = value
        self._account_alias = get_account_alias(value)
    else:
        # "<env>.<account_alias>" form, e.g. global.ellationeng or mgmt.ellationeng
        self._env, self._account_alias = value.split(".")
        # since we extracted an env, must reconfirm that it's legit
        global_env_valid(self._env)
    self._env_short = get_env_short(value)
| 667,002
|
Sets service registry object in context, doesn't check it
Args:
sr: EFServiceRegistry object
|
def service_registry(self, sr):
    """Set the service registry object on the context (contents are not
    validated).

    Args:
        sr: EFServiceRegistry object
    Raises:
        TypeError: if ``sr`` is not an EFServiceRegistry
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # subclasses, which is backward compatible for existing callers
    if not isinstance(sr, EFServiceRegistry):
        raise TypeError("sr value must be type 'EFServiceRegistry'")
    self._service_registry = sr
| 667,003
|
Sets the current account id
Args:
value: current account id (string)
Returns:
None
|
def account_id(self, value):
    """Set the current account id.

    Args:
        value: current account id (string)
    Returns:
        None
    Raises:
        TypeError: if ``value`` is not a string
    """
    if not isinstance(value, str):
        # message previously said "commit value" — a copy/paste error
        raise TypeError("account_id value must be string")
    self._account_id = value
| 667,004
|
Get AWS client if it exists (must have been formerly stored with set_aws_clients)
If client_id is not provided, returns the dictionary of all clients
Args:
client_id: label for the client, e.g. 'ec2'; omit to get a dictionary of all clients
Returns:
aws client if found, or None if not
|
def aws_client(self, client_id=None):
    """Return a stored AWS client (set earlier via set_aws_clients).

    Args:
        client_id: label for the client, e.g. 'ec2'; omit to get the
            dictionary of all clients
    Returns:
        the aws client if found, the full dict when ``client_id`` is
        omitted, or None if not found
    """
    if client_id is None:
        return self._aws_clients
    # dict.has_key() was removed in Python 3; ``in`` works everywhere
    if self._aws_clients is not None and client_id in self._aws_clients:
        return self._aws_clients[client_id]
    return None
| 667,005
|
Stash a dictionary of AWS clients in the context object
Args:
clients: dictionary of clients
|
def set_aws_clients(self, clients):
    """Stash a dictionary of AWS clients in the context object.

    Args:
        clients: dictionary of clients
    Raises:
        TypeError: if ``clients`` is not a dict
    """
    if type(clients) is dict:
        self._aws_clients = clients
    else:
        raise TypeError("clients must be a dict")
| 667,006
|
Set the key for the current context.
Args:
context: a populated EFVersionContext object
|
def validate_context(context):
    """Validate an EFVersionContext against the service registry and
    version-key config.

    Args:
        context: a populated EFVersionContext object
    Returns:
        True if the context passes all checks (fail() is invoked otherwise)
    """
    # Service must exist in service registry
    if not context.service_registry.service_record(context.service_name):
        fail("service: {} not found in service registry: {}".format(
            context.service_name, context.service_registry.filespec))
    service_type = context.service_registry.service_record(context.service_name)["type"]
    # Key must be valid
    if context.key not in EFConfig.VERSION_KEYS:
        fail("invalid key: {}; see VERSION_KEYS in ef_config for supported keys".format(context.key))
    # Lookup allowed key for service type
    if "allowed_types" in EFConfig.VERSION_KEYS[context.key] and \
        service_type not in EFConfig.VERSION_KEYS[context.key]["allowed_types"]:
        fail("service_type: {} is not allowed for key {}; see VERSION_KEYS[KEY]['allowed_types']"
             "in ef_config and validate service registry entry".format(service_type, context.key))
    return True
| 667,011
|
Is the AMI in service the same as the AMI marked current in the version records?
This tool won't update records unless the world state is coherent.
Args:
context: a populated EFVersionContext object
Returns:
True if ok to proceed
Raises:
RuntimeError if not ok to proceed
|
def precheck_ami_id(context):
    """Check that the AMI in service matches the AMI marked current in the
    version records; records are only updated when world state is coherent.

    Args:
        context: a populated EFVersionContext object
    Returns:
        True if ok to proceed
    Raises:
        RuntimeError: if not ok to proceed
    """
    # get the current AMI
    key = "{}/{}".format(context.env, context.service_name)
    print_if_verbose("precheck_ami_id with key: {}".format(key))
    current_ami = context.versionresolver.lookup("ami-id,{}".format(key))
    print_if_verbose("ami found: {}".format(current_ami))
    # If bootstrapping (this will be the first entry in the version history)
    # then we can't check it vs. running version
    if current_ami is None:
        print_if_verbose("precheck passed without check because current AMI is None")
        return True
    # Otherwise perform a consistency check
    # 1. get IDs of instances running the AMI - will find instances in all environments
    instances_running_ami = context.aws_client("ec2").describe_instances(
        Filters=[{
            'Name': 'image-id',
            'Values': [current_ami]
        }]
    )["Reservations"]
    if instances_running_ami:
        instances_running_ami = [resv["Instances"][0]["InstanceId"] for resv in instances_running_ami]
    print_if_verbose("instances running ami {}:\n{}".format(current_ami, repr(instances_running_ami)))
    # 2. Get IDs of instances running as <context.env>-<context.service_name>
    env_service = "{}-{}".format(context.env, context.service_name)
    instances_running_as_env_service = context.aws_client("ec2").describe_instances(
        Filters=[{
            'Name': 'iam-instance-profile.arn',
            'Values': ["arn:aws:iam::*:instance-profile/{}-{}".format(context.env, context.service_name)]
        }]
    )["Reservations"]
    if instances_running_as_env_service:
        instances_running_as_env_service = \
            [resv["Instances"][0]["InstanceId"] for resv in instances_running_as_env_service]
    print_if_verbose("instances running as {}".format(env_service))
    print_if_verbose(repr(instances_running_as_env_service))
    # 3. Instances running as env-service should be a subset of instances running the AMI
    for instance_id in instances_running_as_env_service:
        if instance_id not in instances_running_ami:
            raise RuntimeError("Instance: {} not running expected ami: {}".format(instance_id, current_ami))
    # Check passed - all is well
    return True
| 667,012
|
Is the dist in service the same as the dist marked current in the version records?
This tool won't update records unless the world state is coherent.
Args:
context: a populated EFVersionContext object
Returns:
True if ok to proceed
Raises:
RuntimeError if not ok to proceed
|
def precheck_dist_hash(context):
    """Check that the dist in service matches the dist marked current in
    the version records; records are only updated when world state is
    coherent.

    NOTE(review): uses ``urllib2``, which is Python 2 only — confirm the
    runtime before porting this module to Python 3.

    Args:
        context: a populated EFVersionContext object
    Returns:
        True if ok to proceed
    Raises:
        RuntimeError: if not ok to proceed
        IOError: if the in-service dist hash cannot be fetched over HTTP
    """
    # get the current dist-hash
    key = "{}/{}/dist-hash".format(context.service_name, context.env)
    print_if_verbose("precheck_dist_hash with key: {}".format(key))
    try:
        current_dist_hash = Version(context.aws_client("s3").get_object(
            Bucket=EFConfig.S3_VERSION_BUCKET,
            Key=key
        ))
        print_if_verbose("dist-hash found: {}".format(current_dist_hash.value))
    except ClientError as error:
        if error.response["Error"]["Code"] == "NoSuchKey":
            # If bootstrapping (this will be the first entry in the version history)
            # then we can't check it vs. current version, thus we cannot get the key
            print_if_verbose("precheck passed without check because current dist-hash is None")
            return True
        else:
            fail("Exception while prechecking dist_hash for {} {}: {}".format(context.service_name, context.env, error))
    # Otherwise perform a consistency check
    # 1. get dist version in service for environment
    try:
        response = urllib2.urlopen(current_dist_hash.location, None, 5)
        if response.getcode() != 200:
            raise IOError("Non-200 response " + str(response.getcode()) + " reading " + current_dist_hash.location)
        dist_hash_in_service = response.read().strip()
    except urllib2.URLError as error:
        raise IOError("URLError in http_get_dist_version: " + repr(error))
    # 2. dist version in service should be the same as "current" dist version
    if dist_hash_in_service != current_dist_hash.value:
        raise RuntimeError("{} dist-hash in service: {} but expected dist-hash: {}"
                           .format(key, dist_hash_in_service, current_dist_hash.value))
    # Check passed - all is well
    return True
| 667,013
|
calls a function named "precheck_<key>" where <key> is context_key with '-' changed to '_'
(e.g. "precheck_ami_id")
Checking function should return True if OK, or raise RuntimeError w/ message if not
Args:
context: a populated EFVersionContext object
Returns:
True if the precheck passed, or if there was no precheck function for context.key
Raises:
RuntimeError if precheck failed, with explanatory message
|
def precheck(context):
  """Dispatch to a key-specific precheck function, if one is defined.

  Looks up "precheck_<key>" in module globals, where <key> is context.key
  with '-' changed to '_' (e.g. "precheck_ami_id").

  Args:
    context: a populated EFVersionContext object

  Returns:
    True if the precheck passed, if prechecks are disabled, or if no
    precheck function exists for context.key

  Raises:
    RuntimeError: propagated from the precheck function when it fails
  """
  if context.noprecheck:
    return True
  checker = globals().get("precheck_" + context.key.replace("-", "_"))
  if checker is not None and isfunction(checker):
    return checker(context)
  return True
| 667,014
|
Get the latest version that matches the provided ami-id
Args:
context: a populated EFVersionContext object
value: the value of the version to look for
|
def get_version_by_value(context, value):
  """Return the first recorded version whose value matches 'value'.

  Args:
    context: a populated EFVersionContext object
    value: the version value to search the history for

  Exits via fail() when no version in the history matches.
  """
  for candidate in get_versions(context):
    if candidate.value == value:
      return candidate
  fail("Didn't find a matching version for: "
       "{}:{} in env/service: {}/{}".format(
       context.key, value,
       context.env, context.service_name))
| 667,018
|
Roll back by finding the most recent "stable" tagged version, and putting it again, so that
it's the new "current" version.
Args:
context: a populated EFVersionContext object
|
def cmd_rollback(context):
  """Re-publish the most recent version tagged "stable" so that it becomes
  the new "current" version.

  Args:
    context: a populated EFVersionContext object

  Exits via fail() when no single stable version can be found.
  """
  stable_versions = get_versions(context, return_stable=True)
  if len(stable_versions) != 1:
    fail("Didn't find a version marked stable for key: {} in env/service: {}/{}".format(
      context.key, context.env, context.service_name))
  stable = stable_versions[0]
  # Copy the stable record's fields onto the context, then re-set it as current
  context.value = stable.value
  context.commit_hash = stable.commit_hash
  context.build_number = stable.build_number
  context.location = stable.location
  context.stable = True
  cmd_set(context)
| 667,019
|
Roll back by finding a specific version in the history of the service and
putting it as the new current version.
Args:
context: a populated EFVersionContext object
|
def cmd_rollback_to(context):
  """Re-publish a specific historical version (context.rollback_to) so that
  it becomes the new "current" version.

  Args:
    context: a populated EFVersionContext object
  """
  target = get_version_by_value(context, context.rollback_to)
  # Copy the historical record's fields onto the context, then re-set as current
  context.value = target.value
  context.commit_hash = target.commit_hash
  context.build_number = target.build_number
  context.location = target.location
  context.stable = True
  cmd_set(context)
| 667,020
|
Get the most recent AMI ID for a service
Args:
context: a populated EFVersionContext object
Returns:
ImageId or None if no images exist or on error
|
def _getlatest_ami_id(context):
  """Get the most recent AMI ID for a service.

  Args:
    context: a populated EFVersionContext object

  Returns:
    ImageId of the newest matching private AMI, or None if no images exist
    or the describe call failed
  """
  try:
    response = context.aws_client("ec2").describe_images(
      Filters=[
        {"Name": "is-public", "Values": ["false"]},
        {"Name": "name", "Values": [context.service_name + EFConfig.AMI_SUFFIX + "*"]}
      ])
  except Exception:
    # was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt;
    # preserve the "return None on any API error" contract for real exceptions only
    return None
  images = response["Images"]
  if not images:
    return None
  # Newest image first by creation date
  return sorted(images, key=itemgetter('CreationDate'), reverse=True)[0]["ImageId"]
| 667,021
|
Set the new "current" value for a key.
If the existing current version and the new version have identical /value/ and /status,
then nothing is written, to avoid stacking up redundant entries in the version table.
Args:
context: a populated EFVersionContext object
|
def cmd_set(context):
  """Set the new "current" value for a key in the S3 version registry.

  If the existing current version and the new version have identical value
  and status, nothing is written, to avoid stacking up redundant entries.

  Args:
    context: a populated EFVersionContext object

  Raises:
    RuntimeError: if context.value is '=latest' and no lookup function exists
  """
  # If key value is a special symbol, see if this env allows it
  if context.value in EFConfig.SPECIAL_VERSIONS and context.env_short not in EFConfig.SPECIAL_VERSION_ENVS:
    fail("special version: {} not allowed in env: {}".format(context.value, context.env_short))
  # If key value is a special symbol, the record cannot be marked "stable"
  if context.value in EFConfig.SPECIAL_VERSIONS and context.stable:
    fail("special versions such as: {} cannot be marked 'stable'".format(context.value))
  # Resolve any references: '=prod'/'=staging' copy another env's current value
  if context.value == "=prod":
    context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "prod", context.service_name))
  elif context.value == "=staging":
    context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "staging", context.service_name))
  elif context.value == "=latest":
    if not EFConfig.VERSION_KEYS[context.key]["allow_latest"]:
      fail("=latest cannot be used with key: {}".format(context.key))
    # Dispatch to a key-specific lookup, e.g. _getlatest_ami_id
    func_name = "_getlatest_" + context.key.replace("-", "_")
    if func_name in globals() and isfunction(globals()[func_name]):
      context.value = globals()[func_name](context)
    else:
      raise RuntimeError("{} version for {}/{} is '=latest' but can't look up because method not found: {}".format(
        context.key, context.env, context.service_name, func_name))
  # precheck to confirm coherent world state before attempting set - whatever that means for the current key type
  try:
    precheck(context)
  except Exception as e:
    # NOTE(review): e.message is Python-2-only (removed in py3); str(e) would be portable
    fail("Precheck failed: {}".format(e.message))
  s3_key = "{}/{}/{}".format(context.service_name, context.env, context.key)
  s3_version_status = EFConfig.S3_VERSION_STATUS_STABLE if context.stable else EFConfig.S3_VERSION_STATUS_UNDEFINED
  # If the set would put a value and status that are the same as the existing 'current' value/status, don't do it
  context.limit = 1
  current_version = get_versions(context)
  # If there is no 'current version' it's ok, just means the set will write the first entry
  if len(current_version) == 1 and current_version[0].status == s3_version_status and \
     current_version[0].value == context.value:
    print("Version not written because current version and new version have identical value and status: {} {}"
          .format(current_version[0].value, current_version[0].status))
    return
  if not context.commit:
    # Dry run: report what would have been written and stop
    print("=== DRY RUN ===\nUse --commit to set value\n=== DRY RUN ===")
    print("would set key: {} with value: {} {} {} {} {}".format(
      s3_key, context.value, context.build_number, context.commit_hash, context.location, s3_version_status))
  else:
    # Write the new current version; history is kept via S3 object versioning
    # on the bucket (presumably -- the bucket config is not visible here)
    context.aws_client("s3").put_object(
      ACL='bucket-owner-full-control',
      Body=context.value,
      Bucket=EFConfig.S3_VERSION_BUCKET,
      ContentEncoding=EFConfig.S3_VERSION_CONTENT_ENCODING,
      Key=s3_key,
      Metadata={
        EFConfig.S3_VERSION_BUILDNUMBER_KEY: context.build_number,
        EFConfig.S3_VERSION_COMMITHASH_KEY: context.commit_hash,
        EFConfig.S3_VERSION_LOCATION_KEY: context.location,
        EFConfig.S3_VERSION_MODIFIEDBY_KEY: context.aws_client("sts").get_caller_identity()["Arn"],
        EFConfig.S3_VERSION_STATUS_KEY: s3_version_status
      },
      StorageClass='STANDARD'
    )
    print("set key: {} with value: {} {} {} {} {}".format(
      s3_key, context.value, context.build_number, context.commit_hash, context.location, s3_version_status))
| 667,022
|
Hierarchically searches for 'symbol' in the parameters blob if there is one (would have
been retrieved by 'load()'). Order is: default, <env_short>, <env>
Args:
symbol: the key to resolve
Returns:
Hierarchically resolved value for 'symbol' in the environment set by the constructor,
or None if a match is not found or there are no parameters
|
def get_value(self, symbol):
  """Hierarchically resolve 'symbol' from the loaded parameters blob.

  Lookup order (later wins): "default", then <env_short>, then <env>.

  Args:
    symbol: the key to resolve

  Returns:
    the resolved value (lists are joined into one newline-delimited string),
    or None when there is no match or no parameters were loaded
  """
  if not self.parameters:
    return None
  result = None
  # Later sections override earlier ones. The <env> lookup is redundant
  # when env_short == env, but it's also cheap.
  for section in ("default", self.env_short, self.env):
    if section in self.parameters and symbol in self.parameters[section]:
      result = self.parameters[section][symbol]
  # Convert any list of items into a single \n-delimited string
  if isinstance(result, list):
    result = "\n".join(result)
  return result
| 667,034
|
Fetch AWS metadata from http://169.254.169.254/latest/meta-data/<metadata_path>
ARGS:
metadata_path - the optional path and required key to the EC2 metadata (e.g. "instance-id")
RETURN:
response content on success
RAISE:
URLError if there was a problem reading metadata
|
def http_get_metadata(metadata_path, timeout=__HTTP_DEFAULT_TIMEOUT_SEC):
  """Fetch AWS metadata from http://169.254.169.254/latest/meta-data/<metadata_path>.

  Args:
    metadata_path: the optional path and required key to the EC2 metadata
      (e.g. "instance-id")
    timeout: seconds to wait for the metadata service

  Returns:
    response content on success

  Raises:
    IOError: if there was a problem reading metadata (wraps URLError too)
  """
  full_path = __METADATA_PREFIX + metadata_path
  try:
    response = urllib2.urlopen(full_path, None, timeout)
  except urllib2.URLError as error:
    raise IOError("URLError in http_get_metadata: " + repr(error))
  # A non-200 IOError is not a URLError, so raising here matches the original
  # behavior of raising inside the try block.
  if response.getcode() != 200:
    raise IOError("Non-200 response " + str(response.getcode()) + " reading " + full_path)
  return response.read()
| 667,056
|
Given an env, return <account_alias> if env is valid
Args:
env: an environment, such as "prod", "staging", "proto<N>", "mgmt.<account_alias>"
Returns:
the alias of the AWS account that holds the env
Raises:
ValueError if env is misformatted or doesn't name a known environment
|
def get_account_alias(env):
  """Given an env, return <account_alias> if env is valid.

  Args:
    env: an environment, such as "prod", "staging", "proto<N>", "mgmt.<account_alias>"

  Returns:
    the alias of the AWS account that holds the env

  Raises:
    ValueError: if env is misformatted or doesn't name a known environment
  """
  env_valid(env)
  if "." in env:
    # Global env of the form "env.<account_alias>" (e.g. "mgmt.<account_alias>")
    _base, alias = env.split(".")
    return alias
  # Ordinary env, possibly a proto env ending with a digit that is stripped to look up the alias
  env_short = env.strip(".0123456789")
  if env_short not in EFConfig.ENV_ACCOUNT_MAP:
    raise ValueError("generic env: {} has no entry in ENV_ACCOUNT_MAP of ef_site_config.py".format(env_short))
  return EFConfig.ENV_ACCOUNT_MAP[env_short]
| 667,063
|
Given an env, return <env_short> if env is valid
Args:
env: an environment, such as "prod", "staging", "proto<N>", "mgmt.<account_alias>"
Returns:
the shortname of the env, such as "prod", "staging", "proto", "mgmt"
Raises:
ValueError if env is misformatted or doesn't name a known environment
|
def get_env_short(env):
  """Given an env, return <env_short> if env is valid.

  Args:
    env: an environment, such as "prod", "staging", "proto<N>", "mgmt.<account_alias>"

  Returns:
    the shortname of the env, such as "prod", "staging", "proto", "mgmt"

  Raises:
    ValueError: if env is misformatted or doesn't name a known environment
  """
  env_valid(env)
  if "." in env:
    # Global env of the form "<env_short>.<account_alias>"
    shortname, _alias = env.split(".")
    return shortname
  # Strip any proto-env trailing digits
  return env.strip(".0123456789")
| 667,064
|
Given an env, determine if it's valid
Args:
env: the env to check
Returns:
True if the env is valid
Raises:
ValueError with message if the env is not valid
|
def env_valid(env):
  """Check whether 'env' names a known environment.

  Args:
    env: the env to check

  Returns:
    True if the env is valid

  Raises:
    ValueError: with message if the env is not valid
  """
  if env in EFConfig.ENV_LIST:
    return True
  raise ValueError("unknown env: {}; env must be one of: ".format(env) + ", ".join(EFConfig.ENV_LIST))
| 667,065
|
Given an env, determine if it's a valid "global" or "mgmt" env as listed in EFConfig
Args:
env: the env to check
Returns:
True if the env is a valid global env in EFConfig
Raises:
ValueError with message if the env is not valid
|
def global_env_valid(env):
  """Check whether 'env' is a valid "global" or "mgmt" env as listed in EFConfig.

  Args:
    env: the env to check

  Returns:
    True if the env is a valid account-scoped (global) env

  Raises:
    ValueError: with message if the env is not valid
  """
  if env in EFConfig.ACCOUNT_SCOPED_ENVS:
    return True
  raise ValueError("Invalid global env: {}; global envs are: {}".format(env, EFConfig.ACCOUNT_SCOPED_ENVS))
| 667,066
|
Decrypt kms-encrypted string
Args:
kms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients.
secret (string): base64 encoded value to be decrypted
Returns:
the decrypted plaintext value
Raises:
SystemExit(1): If there is an error with the boto3 decryption call (ex. malformed secret)
|
def kms_decrypt(kms_client, secret):
  """Decrypt a kms-encrypted, base64-encoded string.

  Args:
    kms_client (boto3 kms client object): Instantiated kms client object,
      usually created through create_aws_clients.
    secret (string): base64 encoded value to be decrypted

  Returns:
    the decrypted plaintext

  Exits via fail() (SystemExit) when the input is malformed base64 or the
  boto3 decrypt call is rejected.
  """
  try:
    plaintext = kms_client.decrypt(CiphertextBlob=base64.b64decode(secret))['Plaintext']
  except TypeError:
    fail("Malformed base64 string data")
  except ClientError as error:
    code = error.response["Error"]["Code"]
    if code == "InvalidCiphertextException":
      fail("The decrypt request was rejected because the specified ciphertext \
      has been corrupted or is otherwise invalid.", error)
    elif code == "NotFoundException":
      fail("The decrypt request was rejected because the specified entity or resource could not be found.", error)
    else:
      fail("boto3 exception occurred while performing kms decrypt operation.", error)
  return plaintext
| 667,068
|
Obtain the full key arn based on the key alias provided
Args:
kms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients.
alias (string): alias of key, example alias/proto0-evs-drm.
Returns:
string of the full key arn
|
def kms_key_arn(kms_client, alias):
  """Obtain the full key arn for the given key alias.

  Args:
    kms_client (boto3 kms client object): Instantiated kms client object,
      usually created through create_aws_clients.
    alias (string): alias of key, example alias/proto0-evs-drm.

  Returns:
    string of the full key arn

  Raises:
    RuntimeError: if the key description could not be fetched
  """
  try:
    key_arn = kms_client.describe_key(KeyId=alias)["KeyMetadata"]["Arn"]
  except ClientError as error:
    raise RuntimeError("Failed to obtain key arn for alias {}, error: {}".format(alias, error.response["Error"]["Message"]))
  return key_arn
| 667,069
|
Checks for existence of a parameters file against supported suffixes and returns the parameters file path if found
Args:
template_full_path: full filepath for template file
Returns:
filename of parameters file if it exists
|
def get_template_parameters_file(template_full_path):
  """Check for a parameters file matching the template path against all
  supported suffixes, and return the parameters file path if one exists.

  Args:
    template_full_path: full filepath for template file

  Returns:
    filename of parameters file if it exists, otherwise None
  """
  # Hoisted out of the loop: the base path is the same for every suffix
  parameters_base = template_full_path.replace("/templates", "/parameters")
  for suffix in EFConfig.PARAMETER_FILE_SUFFIXES:
    parameters_file = parameters_base + suffix
    if exists(parameters_file):
      return parameters_file
    # (removed a redundant 'else: continue' -- the loop continues anyway)
  return None
| 667,070
|
Checks for existence of a parameters object in S3 against supported suffixes and returns the parameters file key if found
Args:
template_key: S3 key for template file. omit bucket.
s3_resource: a boto3 s3 resource
Returns:
filename of parameters file if it exists
|
def get_template_parameters_s3(template_key, s3_resource):
  """Check S3 for a parameters object matching the template key against all
  supported suffixes, and return the parameters key if one exists.

  Args:
    template_key: S3 key for template file. omit bucket.
    s3_resource: a boto3 s3 resource

  Returns:
    S3 key of the parameters object if it exists, otherwise None
  """
  base_key = template_key.replace("/templates", "/parameters")
  for suffix in EFConfig.PARAMETER_FILE_SUFFIXES:
    candidate = base_key + suffix
    try:
      # get() raises ClientError when the object is missing or unreadable
      s3_resource.Object(EFConfig.S3_CONFIG_BUCKET, candidate).get()
    except ClientError:
      continue
    return candidate
  return None
| 667,071
|
Return:
the ID of a single subnet or default/None if no match
Args:
lookup: the friendly name of the subnet to look up (subnet-<env>-a or subnet-<env>-b)
default: the optional value to return if lookup failed; returns None if not set
|
def ec2_subnet_subnet_id(self, lookup, default=None):
  """Look up a single subnet ID by its friendly Name tag.

  Args:
    lookup: the friendly name of the subnet to look up (subnet-<env>-a or subnet-<env>-b)
    default: the optional value to return if lookup failed; returns None if not set

  Returns:
    the ID of the first matching subnet, or default/None if no match
  """
  matches = EFAwsResolver.__CLIENTS["ec2"].describe_subnets(Filters=[{
    'Name': 'tag:Name',
    'Values': [lookup]
  }])["Subnets"]
  return matches[0]["SubnetId"] if matches else default
| 667,083
|
Create security groups as needed; name and number created depend on service_type
Args:
env: the environment the SG will be created in
service_name: name of the service in service registry
service_type: service registry service type: 'aws_ec2', 'aws_lambda', 'aws_security_group', or 'http_service'
|
def conditionally_create_security_groups(env, service_name, service_type):
  """Create security groups as needed; name and number created depend on service_type.

  Args:
    env: the environment the SG will be created in
    service_name: name of the service in service registry
    service_type: service registry service type: 'aws_ec2', 'aws_lambda',
      'aws_security_group', or 'http_service'
  """
  if service_type not in SG_SERVICE_TYPES:
    print_if_verbose("not eligible for security group(s); service type: {}".format(service_type))
    return
  target_name = "{}-{}".format(env, service_name)
  # Each service type implies a fixed set of SG names
  if service_type == "aws_ec2":
    sg_names = ["{}-ec2".format(target_name)]
  elif service_type == "aws_lambda":
    sg_names = ["{}-lambda".format(target_name)]
  elif service_type == "http_service":
    sg_names = [
      "{}-ec2".format(target_name),
      "{}-elb".format(target_name)
    ]
  elif service_type == "aws_security_group":
    sg_names = [target_name]
  else:
    fail("Unexpected service_type: {} when creating security group for: {}".format(service_type, target_name))
  for sg_name in sg_names:
    if not AWS_RESOLVER.ec2_security_group_security_group_id(sg_name):
      vpc_name = "vpc-{}".format(env)
      print("Create security group: {} in vpc: {}".format(sg_name, vpc_name))
      vpc = AWS_RESOLVER.ec2_vpc_vpc_id(vpc_name)
      if not vpc:
        fail("Error: could not get VPC by name: {}".format(vpc_name))
      # create security group (only when committing; dry runs stop here)
      if CONTEXT.commit:
        try:
          new_sg = CLIENTS["ec2"].create_security_group(GroupName=sg_name, VpcId=vpc, Description=sg_name)
        except Exception:
          # was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt pass through
          fail("Exception creating security group named: {} in VpcId: {}".format(sg_name, vpc_name), sys.exc_info())
        # print inside the commit branch: new_sg is only bound when committing
        print(new_sg["GroupId"])
    else:
      print_if_verbose("security group already exists: {}".format(sg_name))
| 667,106
|
If 'aws_managed_policies' key lists the names of AWS managed policies to bind to the role,
attach them to the role
Args:
role_name: name of the role to attach the policies to
sr_entry: service registry entry
|
def conditionally_attach_managed_policies(role_name, sr_entry):
  """Attach the AWS managed policies named in the 'aws_managed_policies' key
  of the service registry entry to the role, if eligible.

  Args:
    role_name: name of the role to attach the policies to
    sr_entry: service registry entry
  """
  service_type = sr_entry['type']
  if not (service_type in SERVICE_TYPE_ROLE and "aws_managed_policies" in sr_entry):
    print_if_verbose("not eligible for policies; service_type: {} is not valid for policies "
                     "or no 'aws_managed_policies' key in service registry for this role".format(service_type))
    return
  for policy_name in sr_entry['aws_managed_policies']:
    print_if_verbose("loading policy: {} for role: {}".format(policy_name, role_name))
    if CONTEXT.commit:
      try:
        CLIENTS["iam"].attach_role_policy(RoleName=role_name, PolicyArn='arn:aws:iam::aws:policy/' + policy_name)
      except Exception:
        # was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt pass through
        fail("Exception putting policy: {} onto role: {}".format(policy_name, role_name), sys.exc_info())
| 667,109
|
If 'policies' key lists the filename prefixes of policies to bind to the role,
load them from the expected path and inline them onto the role
Args:
role_name: name of the role to attach the policies to
sr_entry: service registry entry
|
def conditionally_inline_policies(role_name, sr_entry):
  """Load the policies named in the 'policies' key of the service registry
  entry from the expected path and inline them onto the role, if eligible.

  Args:
    role_name: name of the role to attach the policies to
    sr_entry: service registry entry
  """
  service_type = sr_entry['type']
  if not (service_type in SERVICE_TYPE_ROLE and "policies" in sr_entry):
    print_if_verbose("not eligible for policies; service_type: {} is not valid for policies "
                     "or no 'policies' key in service registry for this role".format(service_type))
    return
  for policy_name in sr_entry['policies']:
    print_if_verbose("loading policy: {} for role: {}".format(policy_name, role_name))
    try:
      policy_document = resolve_policy_document(policy_name)
    except Exception:
      # was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt pass through
      fail("Exception loading policy: {} for role: {}".format(policy_name, role_name), sys.exc_info())
    # inline the policy onto the role
    if CONTEXT.commit:
      try:
        CLIENTS["iam"].put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy_document)
      except Exception:
        # was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt pass through
        fail("Exception putting policy: {} onto role: {}".format(policy_name, role_name), sys.exc_info())
| 667,110
|
Create KMS Master Key for encryption/decryption of sensitive values in cf templates and latebind configs
Args:
role_name: name of the role that kms key is being created for; it will be given decrypt privileges.
service_type: service registry service type: 'aws_ec2', 'aws_fixture', 'aws_lambda', or 'http_service'
|
def conditionally_create_kms_key(role_name, service_type):
  """Create a KMS Master Key for encryption/decryption of sensitive values in
  cf templates and latebind configs, if eligible for this service type.

  Args:
    role_name: name of the role that kms key is being created for; it will be
      given decrypt privileges.
    service_type: service registry service type: 'aws_ec2', 'aws_fixture',
      'aws_lambda', or 'http_service'
  """
  if service_type not in KMS_SERVICE_TYPES:
    print_if_verbose("not eligible for kms; service_type: {} is not valid for kms".format(service_type))
    return
  # Converting all periods to underscores because they are invalid in KMS alias names
  key_alias = role_name.replace('.', '_')
  try:
    kms_key = CLIENTS["kms"].describe_key(KeyId='alias/{}'.format(key_alias))
  except ClientError as error:
    if error.response['Error']['Code'] == 'NotFoundException':
      # No existing key; fall through to creation below
      kms_key = None
    else:
      fail("Exception describing KMS key: {} {}".format(role_name, error))
  # NOTE(review): the key-policy JSON string literals on the next lines appear
  # to have been stripped during extraction ('= + CONTEXT.account_id +' is not
  # valid Python) -- restore the policy document templates from the original source.
  if service_type == "aws_fixture":
    kms_key_policy = + CONTEXT.account_id +
  else:
    formatted_principal = '"AWS": "arn:aws:iam::{}:role/{}"'.format(CONTEXT.account_id, role_name)
    kms_key_policy = + CONTEXT.account_id + + formatted_principal + + CONTEXT.account_id + + CONTEXT.account_id +
  if not kms_key:
    print("Create KMS key: {}".format(key_alias))
    if CONTEXT.commit:
      # Create KMS Master Key. Due to AWS eventual consistency a newly created IAM role may not be
      # immediately visible to KMS. Retrying up to 5 times (25 seconds) to account for this behavior.
      create_key_failures = 0
      while create_key_failures <= 5:
        try:
          new_kms_key = CLIENTS["kms"].create_key(
            Policy=kms_key_policy,
            Description='Master Key for {}'.format(role_name)
          )
          break
        except ClientError as error:
          # MalformedPolicyDocument is what KMS returns while the role is not yet visible
          if error.response['Error']['Code'] == 'MalformedPolicyDocumentException':
            if create_key_failures == 5:
              fail("Exception creating kms key: {} {}".format(role_name, error))
            else:
              create_key_failures += 1
              time.sleep(5)
          else:
            fail("Exception creating kms key: {} {}".format(role_name, error))
      # Assign key an alias. This is used for all future references to it (rather than the key ARN)
      try:
        CLIENTS["kms"].create_alias(
          AliasName='alias/{}'.format(key_alias),
          TargetKeyId=new_kms_key['KeyMetadata']['KeyId']
        )
      except ClientError as error:
        fail("Exception creating alias for kms key: {} {}".format(role_name, error))
  else:
    print_if_verbose("KMS key already exists: {}".format(key_alias))
| 667,111
|
Given a context containing path to template, env, and service:
merge config into template and output the result to stdout
Args:
context: a populated context object
|
def merge_files(context):
  """Merge config parameters into a template and emit the result to stdout.

  Given a context containing path to template, env, and service: resolves
  symbols, optionally lints the rendered output, and prints it.

  Args:
    context: a populated context object

  Raises:
    IOError: if the template or parameters file cannot be read
    RuntimeError: if symbols remain unresolved after rendering
  """
  resolver = EFTemplateResolver(
    profile=context.profile,
    region=context.region,
    env=context.env,
    service=context.service
  )
  try:
    with open(context.template_path, 'r') as f:
      template_body = f.read()
      # NOTE(review): explicit close() inside 'with' is redundant; the
      # context manager already closes the file
      f.close()
  except IOError as error:
    raise IOError("Error loading template file: {} {}".format(context.template_path, repr(error)))
  if context.no_params is False:
    try:
      with open(context.param_path, 'r') as f:
        param_body = f.read()
        f.close()
    except IOError as error:
      raise IOError("Error loading param file: {} {}".format(context.param_path, repr(error)))
    dest = yaml.safe_load(param_body)["dest"]
    # if 'dest' for the current object contains an 'environments' list, check it
    if "environments" in dest:
      if not resolver.resolved["ENV_SHORT"] in dest["environments"]:
        print("Environment: {} not enabled for {}".format(resolver.resolved["ENV_SHORT"], context.template_path))
        return
    # Process the template_body - apply context + parameters
    resolver.load(template_body, param_body)
  else:
    resolver.load(template_body)
  rendered_body = resolver.render()
  if not resolver.resolved_ok():
    raise RuntimeError("Couldn't resolve all symbols; template has leftover {{ or }}: {}".format(resolver.unresolved_symbols()))
  if context.lint:
    # Lint strategy depends on the template's file extension
    if context.template_path.endswith(".json"):
      try:
        json.loads(rendered_body, strict=False)
        print("JSON passed linting process.")
      except ValueError as e:
        fail("JSON failed linting process.", e)
    elif context.template_path.endswith((".yml", ".yaml")):
      conf = yamllint_config.YamlLintConfig(content='extends: relaxed')
      lint_output = yamllinter.run(rendered_body, conf)
      # Only error-level findings fail the lint; warnings are ignored
      lint_level = 'error'
      lint_errors = [issue for issue in lint_output if issue.level == lint_level]
      if lint_errors:
        split_body = rendered_body.splitlines()
        for error in lint_errors:
          print(error)
          # printing line - 1 because lists start at 0, but files at 1
          print("\t", split_body[error.line - 1])
        fail("YAML failed linting process.")
  if context.verbose:
    print(context)
    if context.no_params:
      print('no_params flag set to true!')
      print('Inline template resolution based on external symbol lookup only and no destination for file write.\n')
    else:
      # Report what a real write would do (paths, perms, ownership)
      dir_path = normpath(dirname(dest["path"]))
      print("make directories: {} {}".format(dir_path, dest["dir_perm"]))
      print("chmod file to: " + dest["file_perm"])
      user, group = dest["user_group"].split(":")
      print("chown last directory in path to user: {}, group: {}".format(user, group))
      print("chown file to user: {}, group: {}\n".format(user, group))
    print("template body:\n{}\nrendered body:\n{}\n".format(template_body, rendered_body))
  elif context.silent:
    print("Config template rendered successfully.")
  else:
    print(rendered_body)
| 667,143
|
Loads JSON from a config file
Args:
json_filespec: path/to/file.json
Returns:
a dict made from the JSON read, if successful
Raises:
IOError if the file could not be opened
ValueError if the JSON could not be read successfully
RuntimeError if something else went wrong
|
def load_json(json_filespec):
  """Load JSON from a config file.

  Args:
    json_filespec: path/to/file.json

  Returns:
    a dict made from the JSON read, if successful

  Raises:
    IOError: if the file could not be opened
    ValueError: if the JSON could not be read successfully
  """
  # 'with' guarantees the handle is closed even if json.load raises;
  # the previous version leaked the handle on a parse error
  with open(json_filespec) as json_fh:
    return json.load(json_fh)
| 667,147
|
Return request headers for fiss.
Inserts FISS as the User-Agent.
Initializes __SESSION if it hasn't been set.
Args:
headers (dict): Include additional headers as key-value pairs
|
def _fiss_agent_header(headers=None):
  """Return request headers for fiss, inserting FISS as the User-Agent.

  Initializes the shared session if it hasn't been set.

  Args:
    headers (dict): Include additional headers as key-value pairs
  """
  _set_session()
  merged = {"User-Agent" : FISS_USER_AGENT}
  if headers is not None:
    merged.update(headers)
  return merged
| 668,215
|
List the entity types present in a workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
Swagger:
https://api.firecloud.org/#!/Entities/getEntityTypes
|
def list_entity_types(namespace, workspace):
  """List the entity types present in a workspace.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name

  Swagger:
    https://api.firecloud.org/#!/Entities/getEntityTypes
  """
  hdrs = _fiss_agent_header({"Content-type": "application/json"})
  return __get("workspaces/{0}/{1}/entities".format(namespace, workspace),
               headers=hdrs)
| 668,220
|
Upload entities from tab-delimited string.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
entity_data (str): TSV string describing entities
Swagger:
https://api.firecloud.org/#!/Entities/importEntities
|
def upload_entities(namespace, workspace, entity_data):
  """Upload entities from a tab-delimited string.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    entity_data (str): TSV string describing entities

  Swagger:
    https://api.firecloud.org/#!/Entities/importEntities
  """
  hdrs = _fiss_agent_header({
    'Content-type': "application/x-www-form-urlencoded"
  })
  return __post("workspaces/{0}/{1}/importEntities".format(namespace, workspace),
                headers=hdrs, data=urlencode({"entities" : entity_data}))
| 668,221
|
List entities of given type in a workspace.
Response content will be in JSON format.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
etype (str): Entity type
Swagger:
https://api.firecloud.org/#!/Entities/getEntities
|
def get_entities(namespace, workspace, etype):
  """List entities of a given type in a workspace (JSON response).

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    etype (str): Entity type

  Swagger:
    https://api.firecloud.org/#!/Entities/getEntities
  """
  return __get("workspaces/{0}/{1}/entities/{2}".format(namespace, workspace,
                                                        etype))
| 668,224
|
List entities of given type in a workspace as a TSV.
Identical to get_entities(), but the response is a TSV.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
etype (str): Entity type
Swagger:
https://api.firecloud.org/#!/Entities/browserDownloadEntitiesTSV
|
def get_entities_tsv(namespace, workspace, etype):
  """List entities of a given type in a workspace as a TSV.

  Identical to get_entities(), but the response is a TSV.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    etype (str): Entity type

  Swagger:
    https://api.firecloud.org/#!/Entities/browserDownloadEntitiesTSV
  """
  return __get("workspaces/{0}/{1}/entities/{2}/tsv".format(
    namespace, workspace, etype))
| 668,225
|
Request entity information.
Gets entity metadata and attributes.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
etype (str): Entity type
ename (str): The entity's unique id
Swagger:
https://api.firecloud.org/#!/Entities/getEntity
|
def get_entity(namespace, workspace, etype, ename):
  """Request entity information (metadata and attributes).

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    etype (str): Entity type
    ename (str): The entity's unique id

  Swagger:
    https://api.firecloud.org/#!/Entities/getEntity
  """
  return __get("workspaces/{0}/{1}/entities/{2}/{3}".format(
    namespace, workspace, etype, ename))
| 668,226
|
Delete entities in a workspace.
Note: This action is not reversible. Be careful!
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
json_body:
[
{
"entityType": "string",
"entityName": "string"
}
]
Swagger:
https://api.firecloud.org/#!/Entities/deleteEntities
|
def delete_entities(namespace, workspace, json_body):
  """Delete entities in a workspace.

  Note: This action is not reversible. Be careful!

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    json_body (list): list of {"entityType": ..., "entityName": ...} dicts

  Swagger:
    https://api.firecloud.org/#!/Entities/deleteEntities
  """
  return __post("workspaces/{0}/{1}/entities/delete".format(namespace, workspace),
                json=json_body)
| 668,227
|
Delete entities in a workspace.
Note: This action is not reversible. Be careful!
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
etype (str): Entity type
ename (str, or iterable of str): unique entity id(s)
Swagger:
https://api.firecloud.org/#!/Entities/deleteEntities
|
def delete_entity_type(namespace, workspace, etype, ename):
  """Delete one or more entities of a single type in a workspace.

  Note: This action is not reversible. Be careful!

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    etype (str): Entity type
    ename (str, or iterable of str): unique entity id(s)

  Raises:
    TypeError: if ename is neither a string nor an iterable

  Swagger:
    https://api.firecloud.org/#!/Entities/deleteEntities
  """
  uri = "workspaces/{0}/{1}/entities/delete".format(namespace, workspace)
  # Check string_types first: strings are also Iterable, and a bare string
  # means one entity, not one entity per character.
  if isinstance(ename, string_types):
    body = [{"entityType":etype, "entityName":ename}]
  elif isinstance(ename, Iterable):
    body = [{"entityType":etype, "entityName":i} for i in ename]
  else:
    # Previously fell through and raised NameError on 'body'; fail clearly instead
    raise TypeError("ename must be a string or an iterable of strings")
  return __post(uri, json=body)
| 668,228
|
Paginated version of get_entities_with_type.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
Swagger:
https://api.firecloud.org/#!/Entities/entityQuery
|
def get_entities_query(namespace, workspace, etype, page=1,
                       page_size=100, sort_direction="asc",
                       filter_terms=None):
  """Paginated version of get_entities_with_type.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    etype (str): Entity type
    page (int): page number to fetch
    page_size (int): entities per page
    sort_direction (str): "asc" or "desc"
    filter_terms (str): optional filter expression

  Swagger:
    https://api.firecloud.org/#!/Entities/entityQuery
  """
  query = {
    "page"          : page,
    "pageSize"      : page_size,
    "sortDirection" : sort_direction
  }
  if filter_terms:
    query['filterTerms'] = filter_terms
  return __get("workspaces/{0}/{1}/entityQuery/{2}".format(
    namespace, workspace, etype), params=query)
| 668,229
|
Update entity attributes in a workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
etype (str): Entity type
ename (str): Entity name
updates (list(dict)): List of updates to entity from _attr_set, e.g.
Swagger:
https://api.firecloud.org/#!/Entities/update_entity
|
def update_entity(namespace, workspace, etype, ename, updates):
  """Update entity attributes in a workspace.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    etype (str): Entity type
    ename (str): Entity name
    updates (list(dict)): List of updates to entity from _attr_set, e.g.

  Swagger:
    https://api.firecloud.org/#!/Entities/update_entity
  """
  hdrs = _fiss_agent_header({"Content-type": "application/json"})
  full_uri = "{0}workspaces/{1}/{2}/entities/{3}/{4}".format(
    fcconfig.root_url, namespace, workspace, etype, ename)
  # FIXME: create __patch method, akin to __get, __delete etc
  return __SESSION.patch(full_uri, headers=hdrs, json=updates)
| 668,230
|
List method configurations in workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
Swagger:
https://api.firecloud.org/#!/Method_Configurations/listWorkspaceMethodConfigs
DUPLICATE: https://api.firecloud.org/#!/Workspaces/listWorkspaceMethodConfigs
|
def list_workspace_configs(namespace, workspace, allRepos=False):
  """List method configurations in a workspace.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    allRepos (bool): include configs from all repositories

  Swagger:
    https://api.firecloud.org/#!/Method_Configurations/listWorkspaceMethodConfigs
    DUPLICATE: https://api.firecloud.org/#!/Workspaces/listWorkspaceMethodConfigs
  """
  return __get("workspaces/{0}/{1}/methodconfigs".format(namespace, workspace),
               params={'allRepos': allRepos})
| 668,231
|
Create method configuration in workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
body (json) : a filled-in JSON object for the new method config
(e.g. see return value of get_workspace_config)
Swagger:
https://api.firecloud.org/#!/Method_Configurations/postWorkspaceMethodConfig
DUPLICATE: https://api.firecloud.org/#!/Workspaces/postWorkspaceMethodConfig
|
def create_workspace_config(namespace, workspace, body):
  """Create a method configuration in a workspace.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    body (json): a filled-in JSON object for the new method config, with
      keys such as namespace, name, rootEntityType, inputs, outputs and
      prerequisites (e.g. see return value of get_workspace_config)

  Swagger:
    https://api.firecloud.org/#!/Method_Configurations/postWorkspaceMethodConfig
    DUPLICATE: https://api.firecloud.org/#!/Workspaces/postWorkspaceMethodConfig
  """
  return __post("workspaces/{0}/{1}/methodconfigs".format(namespace, workspace),
                json=body)
| 668,232
|
Delete method configuration in workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
mnamespace (str): Method namespace
method (str): Method name
Swagger:
https://api.firecloud.org/#!/Method_Configurations/deleteWorkspaceMethodConfig
|
def delete_workspace_config(namespace, workspace, cnamespace, config):
  """Delete a method configuration in a workspace.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    cnamespace (str): Method namespace
    config (str): Method name

  Swagger:
    https://api.firecloud.org/#!/Method_Configurations/deleteWorkspaceMethodConfig
  """
  return __delete("workspaces/{0}/{1}/method_configs/{2}/{3}".format(
    namespace, workspace, cnamespace, config))
| 668,233
|
Get method configuration in workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
cnamespace (str): Config namespace
config (str): Config name
Swagger:
https://api.firecloud.org/#!/Method_Configurations/getWorkspaceMethodConfig
|
def get_workspace_config(namespace, workspace, cnamespace, config):
  """Get a method configuration in a workspace.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    cnamespace (str): Config namespace
    config (str): Config name

  Swagger:
    https://api.firecloud.org/#!/Method_Configurations/getWorkspaceMethodConfig
  """
  return __get("workspaces/{0}/{1}/method_configs/{2}/{3}".format(
    namespace, workspace, cnamespace, config))
| 668,234
|
Add or overwrite method configuration in workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
cnamespace (str): Configuration namespace
configname (str): Configuration name
body (json): new body (definition) of the method config
Swagger:
https://api.firecloud.org/#!/Method_Configurations/overwriteWorkspaceMethodConfig
|
def overwrite_workspace_config(namespace, workspace, cnamespace, configname, body):
  """Add or overwrite a method configuration in a workspace.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    cnamespace (str): Configuration namespace
    configname (str): Configuration name
    body (json): new body (definition) of the method config

  Swagger:
    https://api.firecloud.org/#!/Method_Configurations/overwriteWorkspaceMethodConfig
  """
  hdrs = _fiss_agent_header({"Content-type": "application/json"})
  return __put("workspaces/{0}/{1}/method_configs/{2}/{3}".format(
    namespace, workspace, cnamespace, configname), headers=hdrs, json=body)
| 668,235
|
Update method configuration in workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
cnamespace (str): Configuration namespace
configname (str): Configuration name
body (json): new body (definition) of the method config
Swagger:
https://api.firecloud.org/#!/Method_Configurations/updateWorkspaceMethodConfig
|
def update_workspace_config(namespace, workspace, cnamespace, configname, body):
  """Update a method configuration in a workspace.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    cnamespace (str): Configuration namespace
    configname (str): Configuration name
    body (json): new body (definition) of the method config

  Swagger:
    https://api.firecloud.org/#!/Method_Configurations/updateWorkspaceMethodConfig
  """
  return __post("workspaces/{0}/{1}/method_configs/{2}/{3}".format(
    namespace, workspace, cnamespace, configname), json=body)
| 668,236
|
Get syntax validation for a configuration.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
cnamespace (str): Configuration namespace
config (str): Configuration name
Swagger:
https://api.firecloud.org/#!/Method_Configurations/validate_method_configuration
|
def validate_config(namespace, workspace, cnamespace, config):
  """Get syntax validation for a configuration.

  Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    cnamespace (str): Configuration namespace
    config (str): Configuration name

  Swagger:
    https://api.firecloud.org/#!/Method_Configurations/validate_method_configuration
  """
  return __get("workspaces/{0}/{1}/method_configs/{2}/{3}/validate".format(
    namespace, workspace, cnamespace, config))
| 668,237
|
Rename a method configuration in a workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
mnamespace (str): Config namespace
config (str): Config name
new_namespace (str): Updated config namespace
new_name (str): Updated method name
Swagger:
https://api.firecloud.org/#!/Method_Configurations/renameWorkspaceMethodConfig
|
def rename_workspace_config(namespace, workspace, cnamespace, config,
                            new_namespace, new_name):
    """Rename a method configuration in a workspace.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): workspace name
        cnamespace (str): current configuration namespace
        config (str): current configuration name
        new_namespace (str): updated configuration namespace
        new_name (str): updated configuration name

    Swagger:
        https://api.firecloud.org/#!/Method_Configurations/renameWorkspaceMethodConfig
    """
    # FireCloud insists the enclosing workspace be echoed back in the
    # payload (reason unknown, but the API rejects the call without it).
    payload = {
        "namespace": new_namespace,
        "name": new_name,
        "workspaceName": {
            "namespace": namespace,
            "name": workspace,
        },
    }
    endpoint = "workspaces/{0}/{1}/method_configs/{2}/{3}/rename".format(
        namespace, workspace, cnamespace, config)
    return __post(endpoint, json=payload)
| 668,238
|
List method(s) in the methods repository.
Args:
namespace (str): Method Repository namespace
name (str): method name
snapshotId (int): method snapshot ID
Swagger:
https://api.firecloud.org/#!/Method_Repository/listMethodRepositoryMethods
|
def list_repository_methods(namespace=None, name=None, snapshotId=None):
    """List method(s) in the methods repository.

    Any argument left as None is omitted from the query, so the server
    applies no filter on that field.

    Args:
        namespace (str): Method Repository namespace
        name (str): method name
        snapshotId (int): method snapshot ID

    Swagger:
        https://api.firecloud.org/#!/Method_Repository/listMethodRepositoryMethods
    """
    # Build the query parameters explicitly rather than harvesting locals():
    # the locals() idiom silently breaks if any temporary variable is ever
    # introduced in this function.
    candidates = {
        "namespace": namespace,
        "name": name,
        "snapshotId": snapshotId,
    }
    params = {k: v for k, v in candidates.items() if v is not None}
    return __get("methods", params=params)
| 668,241
|
Get the configuration template for a method.
The method should exist in the methods repository.
Args:
namespace (str): Method's namespace
method (str): method name
version (int): snapshot_id of the method
Swagger:
https://api.firecloud.org/#!/Method_Repository/createMethodTemplate
|
def get_config_template(namespace, method, version):
    """Get the configuration template for a method.

    The method should exist in the methods repository.

    Args:
        namespace (str): method's namespace
        method (str): method name
        version (int): snapshot_id of the method

    Swagger:
        https://api.firecloud.org/#!/Method_Repository/createMethodTemplate
    """
    payload = {
        "methodNamespace": namespace,
        "methodName": method,
        # Coerce so int-convertible strings are accepted as well.
        "methodVersion": int(version),
    }
    return __post("template", json=payload)
| 668,242
|
Get a description of the inputs and outputs for a method.
The method should exist in the methods repository.
Args:
namespace (str): Methods namespace
method (str): method name
snapshot_id (int): snapshot_id of the method
Swagger:
https://api.firecloud.org/#!/Method_Repository/getMethodIO
|
def get_inputs_outputs(namespace, method, snapshot_id):
    """Get a description of the inputs and outputs for a method.

    The method should exist in the methods repository.

    Args:
        namespace (str): method's namespace
        method (str): method name
        snapshot_id (int): snapshot_id of the method

    Swagger:
        https://api.firecloud.org/#!/Method_Repository/getMethodIO
    """
    body = {
        "methodNamespace" : namespace,
        "methodName" : method,
        # Coerce for consistency with get_config_template(), which accepts
        # int-convertible strings for the snapshot version.
        "methodVersion" : int(snapshot_id)
    }
    return __post("inputsOutputs", json=body)
| 668,243
|
Get a method configuration from the methods repository.
Args:
namespace (str): Methods namespace
config (str): config name
snapshot_id (int): snapshot_id of the method
Swagger:
https://api.firecloud.org/#!/Method_Repository/getMethodRepositoryConfiguration
|
def get_repository_config(namespace, config, snapshot_id):
    """Get a method configuration from the methods repository.

    Args:
        namespace (str): configuration namespace
        config (str): configuration name
        snapshot_id (int): snapshot_id of the configuration

    Swagger:
        https://api.firecloud.org/#!/Method_Repository/getMethodRepositoryConfiguration
    """
    endpoint = "configurations/{0}/{1}/{2}".format(namespace, config,
                                                   snapshot_id)
    return __get(endpoint)
| 668,244
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.