repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.paths
|
python
|
def paths(self):
    '''
    Return an iterator over the (relative path, path item object) pairs
    declared in the swagger file's Paths Object.

    :raises ValueError: when the Paths Object is empty, or when any
        relative path does not begin with '/'.
    '''
    path_objects = self._cfg.get('paths')
    if not path_objects:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    for relative_path in path_objects:
        if not relative_path.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(relative_path))
    return six.iteritems(path_objects)
|
returns an iterator for the relative resource paths specified in the swagger file
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L910-L920
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
# Top-level fields allowed in a Swagger 2.0 (OpenAPI 2.0) document.
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS (swagger allows x- prefixed extension fields)
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods.
# REQUEST_TEMPLATE is an Apache Velocity (VTL) mapping template that packs
# headers, query string, path params, API Gateway context, body and stage
# variables into a single JSON document handed to the lambda integration.
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
# OPTIONS methods use a MOCK integration that always answers 200.
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert the stackTrace part of the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, so it is not possible to create an error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below. It is also used to find/match rest apis previously created by this state.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    Wrapper around a single Swagger Parameter Object exposing the fields
    that boto_apigateway cares about: location, name and schema.
    '''
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        Return the 'in' field of the parameter object; only the values
        in LOCATIONS are supported.
        '''
        where = self._paramdict.get('in')
        if where not in _Swagger.SwaggerParameter.LOCATIONS:
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(where))
        return where

    @property
    def name(self):
        '''
        Return the AWS-style request parameter name
        (method.request.<location>.<name>) for header/query/path
        parameters, or None for body parameters.
        '''
        param_name = self._paramdict.get('name')
        if not param_name:
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
        where = self.location
        if where == 'header':
            return 'method.request.header.{0}'.format(param_name)
        if where == 'query':
            return 'method.request.querystring.{0}'.format(param_name)
        if where == 'path':
            return 'method.request.path.{0}'.format(param_name)
        return None

    @property
    def schema(self):
        '''
        Return the model name referenced by a body parameter's schema;
        None for non-body parameters.
        '''
        if self.location != 'body':
            return None
        schema_obj = self._paramdict.get('schema')
        if not schema_obj:
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        if '$ref' not in schema_obj:
            raise ValueError(('Body parameter must have a JSON reference '
                              'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
        return schema_obj.get('$ref').split('/')[-1]
class SwaggerMethodResponse(object):
    '''
    Wrapper around a Swagger Method Response Object exposing the schema
    reference and the headers dictionary.
    '''
    def __init__(self, r):
        self._r = r

    @property
    def schema(self):
        '''
        Return the model name referenced by this response's schema, or
        None when no schema is present.  A schema without a $ref is an
        error since AWS requires model references.
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        Return the headers dictionary of the response object, or {} when
        no headers are defined.
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
'''
Load and validate the given swagger file and resolve the id of any
existing AWS rest api matching api_name.

api_name
    name of the rest api to operate on
stage_name
    name of the deployment stage
lambda_funcname_format
    format string used to derive lambda function names
swagger_file_path
    path to the swagger definition file (YAML or JSON)
error_response_template / response_template
    user supplied integration response templates (may be None)
common_aws_args
    region/key/keyid/profile arguments passed through to boto modules

:raises IOError: when swagger_file_path does not point to an existing file
:raises ValueError: when validation of the swagger content fails
'''
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
# filled in later, when the rest api / deployment is created or resolved
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
# the hash covers the swagger file and both templates, so a change
# to any of them is detected as a new deployment
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration.

paths
    iterable of (path, path item object) pairs (as returned by self.paths)
mods
    the definitions (models) dictionary from the swagger file

Every 4xx/5xx response of every operation must reference a model of type
object that defines an errorMessage property, matching the AWS lambda
error convention.  Raises ValueError on the first violation found.
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
# skip vendor extensions and anything that is not an operation
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
# model name is the last segment of e.g. '#/definitions/Error'
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check; it verifies that the
    parsed swagger dictionary has no unknown top-level fields, that all
    fields required by this state are present, that the swagger version
    is supported, and that error responses follow the expected model
    conventions.

    :raises ValueError: on any validation failure.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))
    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))
    # (removed a leftover debug statement that logged the type of the bound
    # ``self._models`` method; it produced no useful information)
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    The md5 hash computed over the swagger file and the response
    templates at construction time.
    '''
    return self._md5_filehash
@property
def info(self):
    '''
    The swagger Info Object as a dictionary.

    :raises ValueError: when the info object is missing or empty.
    '''
    info_obj = self._cfg.get('info')
    if info_obj:
        return info_obj
    raise ValueError('Info Object has no values')
@property
def info_json(self):
    '''
    The swagger Info Object rendered as a pretty printed JSON string.
    '''
    info_dict = self.info
    return _dict_to_json_pretty(info_dict)
@property
def rest_api_name(self):
    '''
    The name of the rest api this object operates on (the api_name the
    state was invoked with).
    '''
    return self._api_name
@property
def rest_api_version(self):
    '''
    The version string from the swagger Info Object.

    :raises ValueError: when the version field is missing or empty.
    '''
    api_version = self.info.get('version')
    if not api_version:
        raise ValueError('Missing version value in Info Object')
    return api_version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
    '''
    Generator yielding (model_name, schema) tuples in dependency order,
    so models referenced by other models are produced first (as required
    when creating them on AWS).
    '''
    remaining = self._build_all_dependencies()
    model_name = self._get_model_without_dependencies(remaining)
    while model_name:
        yield (model_name, self._models().get(model_name))
        model_name = self._get_model_without_dependencies(remaining)
@property
def basePath(self):
    '''
    The basePath field as defined in the swagger file ('' when absent).

    Note: the original code stacked two ``@property`` decorators, which
    wraps a property object in another property and makes attribute
    access fail with ``TypeError: 'property' object is not callable``;
    a single decorator is the intended form.
    '''
    return self._cfg.get('basePath', '')
@property
def restApiId(self):
    '''
    The AWS-assigned id of the rest api ('' until created/resolved).
    '''
    return self._restApiId

@restApiId.setter
def restApiId(self, restApiId):
    '''
    Record the rest api id returned by AWS when the api is created or
    resolved.
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    The unique deployment description (see deployment_label) rendered as
    pretty printed JSON.
    '''
    label = self.deployment_label
    return _dict_to_json_pretty(label)
@property
def deployment_label(self):
    '''
    Dictionary uniquely describing this deployment (mainly used as the
    stage description): swagger info object, api name, swagger file name
    and its md5 checksum.
    '''
    return {'swagger_info_object': self.info,
            'api_name': self.rest_api_name,
            'swagger_file': os.path.basename(self._swagger_file),
            'swagger_file_md5sum': self.md5_filehash}
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
'''
Helper function to find whether there are other stages still associated with
the given deployment (used to decide if a deployment can be deleted safely).

:return: True when at least one stage is still attached to deploymentId.
'''
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
return bool(stages)
def no_more_deployments_remain(self):
'''
Helper function to find whether any deployment of this rest api still has
stages associated with it.

:return: True when no deployment has an associated stage.
'''
no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
# a single deployment with stages is enough to answer False
if stages:
no_more_deployments = False
break
return no_more_deployments
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently
associated with ('' when the stage does not exist).
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label (the deployment's description)
that the stage_name is currently associated with.

:return: the description string, or None when no deployment is found.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment
label for this Swagger object based on the given api_name and swagger_file.

:return: the matching deployment id, or '' when none matches.
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
# deployments created by this state carry deployment_label_json
# as their description
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
def overwrite_stage_variables(self, ret, stage_variables):
'''
Overwrite the configured stage_name's stage variables with the given
stage_variables.

ret
    a dictionary for returning status to Saltstack; on failure
    result/abort/comment are set, on success the change is logged.
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
'''
Helper method to associate the stage_name to self._deploymentId and make
this the current deployment: the stage is created if it does not exist,
otherwise its variables are overwritten, then the deployment is activated.

:return: the activate_api_deployment result dict, or
    {'set': False, 'error': ...} when stage creation/update fails.
'''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if not stage:
# stage does not exist yet; create it pointing at the deployment
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
description=stage_desc_json,
variables=stage_variables,
**self._common_aws_args)
if not stage.get('stage'):
return {'set': False, 'error': stage.get('error')}
else:
# overwrite the stage variables
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not overwrite.get('stage'):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
**self._common_aws_args)
def _resolve_api_id(self):
'''
Resolve and store the Api Id matching the given api_name and the hardcoded
_Swagger.AWS_API_DESCRIPTION as the api description.

:raises ValueError: when more than one api matches.
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
# NOTE(review): the message interpolates self.info_json although the
# lookup above matched on AWS_API_DESCRIPTION — presumably intentional
# to show the user's info object; confirm.
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to
the given stage_name has no other stages associated with it, the deployment
will be removed as well.

ret
    a dictionary for returning status to Saltstack
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
def verify_api(self, ret):
'''
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.

If yes, returns abort with comment indicating already at desired state.
If not and there is previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label (by setting ret['publish']).
'''
if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if deployed_label_json == self.deployment_label_json:
# stage already points at a deployment with the desired label
ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
ret['current'] = True
return ret
else:
# look for an existing deployment carrying the desired label
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
def publish_api(self, ret, stage_variables):
'''
this method ties the given stage_name to a deployment matching the given
swagger_file: either reassociating the stage with an existing deployment
(when verify_api found one) or creating a new deployment.

ret
    a dictionary for returning status to Saltstack
stage_variables
    stage variables to apply to the stage
'''
stage_desc = dict()
stage_desc['current_deployment_label'] = self.deployment_label
stage_desc_json = _dict_to_json_pretty(stage_desc)
if self._deploymentId:
# just do a reassociate of stage_name to an already existing deployment
res = self._set_current_deployment(stage_desc_json, stage_variables)
if not res.get('set'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'publish_api (reassociate deployment, set stage_variables)',
res.get('response'))
else:
# no deployment existed for the given swagger_file for this Swagger object
res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
stageDescription=stage_desc_json,
description=self.deployment_label_json,
variables=stage_variables,
**self._common_aws_args)
if not res.get('created'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
return ret
def _cleanup_api(self):
'''
Helper method to clean up resources and models if we detected a change in
the swagger file for a stage.

:return: {'deleted': True} on success, otherwise the failing delete result.
'''
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
# skip the first resource — presumably the root '/' resource, which
# cannot be deleted (NOTE(review): confirm ordering guarantee) — and
# delete children before parents by reversing the list
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
path=resource.get('path'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
modelName=model.get('name'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
return {'deleted': True}
def deploy_api(self, ret):
    '''
    Create the top level rest api in AWS apigateway, or — when an api
    already exists for this swagger file — clean up its resources and
    models so they can be re-deployed.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed message typo: 'restAreId' -> 'restApiId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret
    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)
    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret
    self.restApiId = response.get('restapi', {}).get('id')
    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
'''
Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

ret
    a dictionary for returning status to Saltstack
'''
exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if exists_response.get('exists'):
if __opts__['test']:
# test mode: report what would happen, abort without changes
ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
ret['result'] = None
ret['abort'] = True
return ret
delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not delete_api_response.get('deleted'):
ret['result'] = False
ret['abort'] = True
if 'error' in delete_api_response:
ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
return ret
ret = _log_changes(ret, 'delete_api', delete_api_response)
else:
ret['comment'] = ('api already absent for swagger file: '
'{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
'''
Helper function to build the list of models the given object schema is referencing.
'''
dep_models_list = []
if obj_schema:
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
ref_obj_model = ref.split("/")[-1]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
# need to walk each property object
properties = obj_schema.get('properties')
if properties:
for _, prop_obj_schema in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model defined in the swagger file to the list of model
    names it references.
    '''
    return {model: self._build_dependent_model_list(schema)
            for model, schema in six.iteritems(self._models())}
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
'''
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

ret
    a dictionary for returning status to Saltstack
'''
for model, schema in self.models():
# add in a few attributes into the model schema that AWS expects
# _schema = schema.copy()
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
'title': '{0} Schema'.format(model)})
# check to see if model already exists, aws has 2 default models [Empty, Error]
# which may need update with data from swagger file
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
modelName=model,
**self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = (
__salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
modelName=model,
schema=_dict_to_json_pretty(_schema),
**self._common_aws_args))
if not update_model_schema_response.get('updated'):
ret['result'] = False
ret['abort'] = True
if 'error' in update_model_schema_response:
ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
update_model_schema_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = (
__salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
modelDescription=model,
schema=_dict_to_json_pretty(_schema),
contentType='application/json',
**self._common_aws_args))
if not create_model_response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in create_model_response:
ret['comment'] = ('Failed to create model {0}, schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
create_model_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
'''
Helper Method to construct the lambda uri for use in method integration.

:raises ValueError: when the lambda function cannot be found.
'''
profile = self._common_aws_args.get('profile')
region = self._common_aws_args.get('region')
lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if lambda_region != apigw_region:
if not lambda_desc.get('function'):
# try look up in the same region as the apigateway as well if previous lookup failed
# NOTE(review): this retry call is byte-identical to the first lookup
# (same kwargs); presumably it was meant to target the apigateway
# region explicitly — confirm against boto_lambda.describe_function.
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if not lambda_desc.get('function'):
raise ValueError('Could not find lambda function {0} in '
'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
lambda_arn = lambda_desc.get('function').get('FunctionArn')
# the integration uri always targets the apigateway's region
lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
'/functions/{1}/invocations'.format(apigw_region, lambda_arn))
return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Build the request-side configuration for a method: request params,
    request models, request templates and integration type.  OPTIONS
    methods get a MOCK integration with the options template; all other
    methods use the AWS lambda integration template.
    '''
    params = {}
    models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            swagger_param = _Swagger.SwaggerParameter(param)
            if swagger_param.name:
                params[swagger_param.name] = True
            if swagger_param.schema:
                models['application/json'] = swagger_param.schema
    is_options = (method_name == 'options')
    return {'params': params,
            'models': models,
            'request_templates': _Swagger.REQUEST_OPTION_TEMPLATE if is_options else _Swagger.REQUEST_TEMPLATE,
            'integration_type': "MOCK" if is_options else "AWS"}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_name
        lower-cased swagger operation name (e.g. 'get', 'options')

    method_response
        a _Swagger.SwaggerMethodResponse wrapper around the swagger
        response object for this status code

    httpStatus
        the response status code, as a string
    '''
    method_response_models = {}
    # default selection pattern matches anything; overridden below when the
    # response schema supplies its own 'pattern'
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        # headers are exposed to AWS as method.response.header.<name>
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map the header to its declared default value, or the wildcard '*'
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # 'options' methods are MOCK integrations and need no lambda backend URI
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    # every operation must declare its responses so that status codes,
    # headers and body models can be wired to integration responses
    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        # ensure the (possibly nested) resource path exists in the rest api
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # only deploy real swagger operations; skip vendor extensions
            # and other non-operation keys under the path item
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.deployment_label
|
python
|
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
|
this property returns the deployment label dictionary (mainly used by
stage description)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L953-L965
| null |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object

    Wraps one entry of an operation's ``parameters`` list and maps its
    fields onto the naming conventions AWS Api Gateway expects.
    '''
    # parameter locations supported here; other swagger locations
    # (e.g. 'formData') are rejected
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # paramdict: the raw parameter dictionary from the swagger file
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object

        Raises ValueError when the 'in' field is not one of LOCATIONS.
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object

        Non-body parameters are rewritten to AWS's
        ``method.request.<location>.<name>`` form; body parameters have
        no mapped request-parameter name and yield None.
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object

        Only meaningful for body parameters; other locations return None.
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Thin wrapper around a Swagger Method Response Object exposing the
    referenced schema name and the declared response headers.
    '''
    def __init__(self, r):
        # raw method response dictionary from the swagger file
        self._r = r

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger method response object
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        # last path component of '#/definitions/<Name>' is the model name
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        returns the headers dictionary in the method response object
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
    '''
    Checks if the lambda function name format contains only known elements

    The format string may only reference the substitutable keys
    ``stage``, ``api``, ``resource`` and ``method``.

    :return: True on success, ValueError raised on error
    '''
    try:
        if self._lambda_funcname_format:
            # dry-run the format string against every known key; an
            # unknown key, a positional field, or a malformed spec makes
            # str.format raise
            known_kwargs = dict(stage='',
                                api='',
                                resource='',
                                method='')
            self._lambda_funcname_format.format(**known_kwargs)
        return True
    except Exception:
        # any formatting failure is reported as a configuration error
        raise ValueError('Invalid lambda_funcname_format {0}. Please review '
                         'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    Raises ValueError for any invalid/missing field or unsupported version.
    '''
    # check for any invalid fields for Swagger Object V2; vendor
    # extensions ('x-...') are always allowed
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # removed a leftover debug statement that logged type(self._models),
    # the type object of a bound method -- it carried no information.
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
yield (model, self._models().get(model))
@property
def paths(self):
    '''
    Iterator over (path, path-item) pairs from the swagger file's Paths
    Object; every relative resource path must begin with '/'.
    '''
    path_objects = self._cfg.get('paths')
    if not path_objects:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    invalid = [p for p in path_objects if not p.startswith('/')]
    if invalid:
        raise ValueError('Path object {0} should start with /. Please fix it'.format(invalid[0]))
    return six.iteritems(path_objects)
@property
def basePath(self):
'''
returns the base path field as defined in the swagger file
'''
basePath = self._cfg.get('basePath', '')
return basePath
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    this property returns the deployment label dictionary (mainly used by
    stage description)
    '''
    # NOTE(review): the @property decorator here was orphaned -- its
    # method body (deployment_label, referenced by deployment_label_json)
    # was missing, so the decorator wrongly applied to
    # _one_or_more_stages_remain, which takes an argument and would break
    # as a property.  The deployment_label body is restored.
    label = dict()
    label['swagger_info_object'] = self.info
    label['api_name'] = self.rest_api_name
    label['swagger_file'] = os.path.basename(self._swagger_file)
    label['swagger_file_md5sum'] = self.md5_filehash
    return label

# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated with a deployment
    '''
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    return bool(stages)
def no_more_deployments_remain(self):
'''
Helper function to find whether there are deployments left with stages associated
'''
no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
if stages:
no_more_deployments = False
break
return no_more_deployments
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently assocaited with.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
def overwrite_stage_variables(self, ret, stage_variables):
'''
overwrite the given stage_name's stage variables with the given stage_variables
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
'''
Helper method to associate the stage_name to the given deploymentId and make this current
'''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if not stage:
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
description=stage_desc_json,
variables=stage_variables,
**self._common_aws_args)
if not stage.get('stage'):
return {'set': False, 'error': stage.get('error')}
else:
# overwrite the stage variables
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not overwrite.get('stage'):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
**self._common_aws_args)
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
def verify_api(self, ret):
'''
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
If yes, returns abort with comment indicating already at desired state.
If not and there is previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.
'''
if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if deployed_label_json == self.deployment_label_json:
ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
ret['current'] = True
return ret
else:
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
def publish_api(self, ret, stage_variables):
'''
this method tie the given stage_name to a deployment matching the given swagger_file
'''
stage_desc = dict()
stage_desc['current_deployment_label'] = self.deployment_label
stage_desc_json = _dict_to_json_pretty(stage_desc)
if self._deploymentId:
# just do a reassociate of stage_name to an already existing deployment
res = self._set_current_deployment(stage_desc_json, stage_variables)
if not res.get('set'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'publish_api (reassociate deployment, set stage_variables)',
res.get('response'))
else:
# no deployment existed for the given swagger_file for this Swagger object
res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
stageDescription=stage_desc_json,
description=self.deployment_label_json,
variables=stage_variables,
**self._common_aws_args)
if not res.get('created'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
return ret
def _cleanup_api(self):
'''
Helper method to clean up resources and models if we detected a change in the swagger file
for a stage
'''
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
path=resource.get('path'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
modelName=model.get('name'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
return {'deleted': True}
def deploy_api(self, ret):
'''
this method create the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
'''
Method to delete a Rest Api named defined in the swagger file's Info Object's title value.
ret
a dictionary for returning status to Saltstack
'''
exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if exists_response.get('exists'):
if __opts__['test']:
ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
ret['result'] = None
ret['abort'] = True
return ret
delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not delete_api_response.get('deleted'):
ret['result'] = False
ret['abort'] = True
if 'error' in delete_api_response:
ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
return ret
ret = _log_changes(ret, 'delete_api', delete_api_response)
else:
ret['comment'] = ('api already absent for swagger file: '
'{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
'''
Helper function to build the list of models the given object schema is referencing.
'''
dep_models_list = []
if obj_schema:
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
ref_obj_model = ref.split("/")[-1]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
# need to walk each property object
properties = obj_schema.get('properties')
if properties:
for _, prop_obj_schema in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    Models are created in dependency order (see models()); an existing
    model is updated in place, otherwise it is created.  On any failure,
    ret is marked result=False/abort=True and returned immediately.

    ret
        a dictionary for returning status to Saltstack
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration

    Resolves the effective lambda and apigateway regions, looks up the
    lambda function's ARN, and returns the
    ``arn:aws:apigateway:...:lambda:path/.../invocations`` integration uri.
    Raises ValueError when the function cannot be found.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    # NOTE(review): this retry call is byte-identical to the first lookup --
    # _common_aws_args pins the region, so the intended "look up in the
    # apigateway's region" fallback presumably relied on a region override
    # that is not passed here; confirm against boto_lambda.describe_function.
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Build the request-side configuration for one method: request
    parameters, request models, the request mapping template and the
    integration type.  OPTIONS methods get the CORS mock
    template/integration; everything else uses the standard AWS template.
    '''
    params = {}
    models = {}
    for param in method_data.get('parameters', []):
        swagger_param = _Swagger.SwaggerParameter(param)
        if swagger_param.name:
            params[swagger_param.name] = True
        if swagger_param.schema:
            models['application/json'] = swagger_param.schema

    is_options = (method_name == 'options')
    return {'params': params,
            'models': models,
            'request_templates': (_Swagger.REQUEST_OPTION_TEMPLATE
                                  if is_options else _Swagger.REQUEST_TEMPLATE),
            'integration_type': 'MOCK' if is_options else 'AWS'}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
'''
Helper function to construct the method response params, models, and integration_params
values needed to configure method response integration/mappings.
'''
method_response_models = {}
method_response_pattern = '.*'
if method_response.schema:
method_response_models['application/json'] = method_response.schema
method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
method_response_params = {}
method_integration_response_params = {}
for header in method_response.headers:
response_header = 'method.response.header.{0}'.format(header)
method_response_params[response_header] = False
header_data = method_response.headers.get(header)
method_integration_response_params[response_header] = (
"'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
response_templates = self._get_response_template(method_name, httpStatus)
return {'params': method_response_params,
'models': method_response_models,
'integration_params': method_integration_response_params,
'pattern': method_response_pattern,
'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # options methods integrate with MOCK, so no lambda uri is needed
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    For each path a resource is created, then every swagger operation under
    that path is deployed via _deploy_method.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # non-operation keys (e.g. vendor extensions) are skipped
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._one_or_more_stages_remain
|
python
|
def _one_or_more_stages_remain(self, deploymentId):
    '''
    True when at least one stage is still attached to the given deployment.
    '''
    remaining = __salt__['boto_apigateway.describe_api_stages'](
        restApiId=self.restApiId,
        deploymentId=deploymentId,
        **self._common_aws_args).get('stages')
    return bool(remaining)
|
Helper function to find whether there are other stages still associated with a deployment
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L968-L975
| null |
class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''

    # fields allowed in a Swagger 2.0 top-level object
    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)

    # VENDOR SPECIFIC FIELD PATTERNS ('x-' extension fields are always accepted)
    VENDOR_EXT_PATTERN = re.compile('^x-')

    # JSON_SCHEMA_REF declared on every model schema uploaded to AWS
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object

    Wraps one entry of a swagger operation's ``parameters`` list and
    exposes the pieces API Gateway needs (location, mapped name, schema).
    '''

    # supported parameter locations ('formData' is not handled)
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object

        Raises ValueError for any unsupported location.
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object

        header/query/path names are returned in API Gateway's
        ``method.request.<location>.<name>`` form; body parameters have no
        mapped name (None is returned).  A missing name raises ValueError.
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object

        Only body parameters carry a schema; the schema must be a $ref to a
        model under /definitions (a ValueError is raised otherwise).
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Thin wrapper over a swagger Method Response Object providing typed
    access to its schema reference and headers.
    '''

    def __init__(self, r):
        self._r = r

    @property
    def schema(self):
        '''
        Name of the referenced model schema, or None when the response
        declares no schema.  A schema without a $ref is rejected.
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        Headers dictionary of the response object ({} when absent).
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load and validate the swagger file and resolve the existing AWS rest
    api id (if any) for ``api_name``.

    Raises IOError for a bad swagger file path, ValueError for an invalid
    swagger document or lambda_funcname_format.
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # the hash covers the swagger file plus both templates so any
            # template change also triggers a redeployment
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

        self._validate_swagger_file()

    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration

    Every 4xx/5xx response must reference (via $ref) a model under
    /definitions that is an object with an errorMessage property; a
    ValueError is raised on the first violation.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue

                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    Raises ValueError on the first violation found.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # (removed stray debug statement ``log.info(type(self._models))`` which
    # only logged the constant type of a bound method)
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file (plus response templates),
    computed at construction time
    '''
    return self._md5_filehash

@property
def info(self):
    '''
    returns the swagger info object as a dictionary

    Raises ValueError when the info object is missing or empty.
    '''
    info = self._cfg.get('info')
    if not info:
        raise ValueError('Info Object has no values')
    return info

@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)

@property
def rest_api_name(self):
    '''
    returns the name of the api (as passed to the constructor)
    '''
    return self._api_name

@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object

    Raises ValueError when no version is defined.
    '''
    version = self.info.get('version')
    if not version:
        raise ValueError('Missing version value in Info Object')

    return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
    '''
    Generator yielding (model_name, schema) tuples ordered so that every
    model is produced after all models it depends on (AWS requires
    referenced models to already exist).
    '''
    remaining = self._build_all_dependencies()
    model = self._get_model_without_dependencies(remaining)
    while model:
        yield (model, self._models().get(model))
        model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
    '''
    returns an iterator for the relative resource paths specified in the swagger file

    Every path must begin with '/'; a ValueError is raised otherwise, or
    when the Paths Object is missing/empty.
    '''
    paths = self._cfg.get('paths')
    if not paths:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    for path in paths:
        if not path.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
    return six.iteritems(paths)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file ('' when absent)
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath

@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    ('' until resolved/assigned)
    '''
    return self._restApiId

@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId

@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    return _dict_to_json_pretty(self.deployment_label)

@property
def deployment_label(self):
    '''
    this property returns the deployment label dictionary (mainly used by
    stage description)

    The label identifies a deployment by api name, swagger file name and
    its md5 hash, so an unchanged swagger file maps to the same deployment.
    '''
    label = dict()

    label['swagger_info_object'] = self.info
    label['api_name'] = self.rest_api_name
    label['swagger_file'] = os.path.basename(self._swagger_file)
    label['swagger_file_md5sum'] = self.md5_filehash

    return label
# methods to interact with boto_apigateway execution modules
def no_more_deployments_remain(self):
    '''
    Helper function to find whether there are deployments left with stages associated

    Returns True only when no deployment of this rest api has any stage
    still attached to it.
    '''
    no_more_deployments = True
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            deploymentId = deployment.get('id')
            stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('stages')
            if stages:
                no_more_deployments = False
                break

    return no_more_deployments
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently associated with.
    Returns '' when the stage does not exist.
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId

def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label that the stage_name is currently associated with.
    Returns None when no such deployment exists.
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None

def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id matching the desired deployment label for
    this Swagger object based on the given api_name, swagger_file.
    Returns '' when no deployment carries the desired label.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            if deployment.get('description') == self.deployment_label_json:
                return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given stage_variables

    On failure ret is marked result=False/abort=True with the error as the
    comment; on success the resulting stage is logged into ret's changes.
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current

    Creates the stage if it does not exist; otherwise overwrites its stage
    variables, then activates self._deploymentId on the stage.  Returns
    {'set': False, 'error': ...} on failure, otherwise the activation
    result.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
    as the api description

    The id is stored on self.restApiId (left unchanged when no api matches).
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            # NOTE(review): the lookup filters on AWS_API_DESCRIPTION but the
            # message prints self.info_json -- confirm which was intended
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                # NOTE(review): the success comment is only set when other
                # stages remain on the deployment -- confirm this placement
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label (ret['publish'] is set to True).
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                # just needs to reassociate the stage to the target deployment
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    Tie the configured stage_name to a deployment matching this swagger file.

    ret
        a dictionary for returning status to Saltstack

    stage_variables
        dictionary of stage variables to set on the stage
    '''
    # the stage description records the deployment label the stage points at
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))

    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage.

    Returns {'deleted': True} on success, or the first failing execution-module
    result otherwise.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first entry (presumably the root '/' resource -- TODO confirm
        # the ordering guarantee of describe_api_resources) and delete deepest
        # paths first by reversing the list
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    Create the top level rest api in AWS apigateway, or, when a rest api id is
    already associated with this object, clean up its existing resources and
    models so the swagger definition can be re-applied.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed message typo: was 'restAreId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    # remember the new api id for the subsequent deploy_models/deploy_resources calls
    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Delete the Rest Api matching this object's api name (and the salt-provisioned
    description marker), if it exists.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        if __opts__['test']:
            # salt test mode: report what would happen without deleting anything
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

    return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Recurses through array items, '$ref' references and object properties and
    returns a de-duplicated list of referenced model names.
    '''
    dep_models_list = []

    if obj_schema:
        # NOTE(review): this writes a default 'type' of 'object' back into the
        # passed-in schema dict, i.e. it mutates the in-memory swagger
        # definitions as a side effect -- confirm this is intended.
        obj_schema['type'] = obj_schema.get('type', 'object')

        if obj_schema['type'] == 'array':
            # arrays depend on whatever their item schema depends on
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # a direct reference: depend on the referenced model and on
                # everything that model itself references
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    # de-duplicate (order is not preserved)
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model defined in the swagger file to the list of model names its
    schema references.
    '''
    return {model: self._build_dependent_model_list(schema)
            for model, schema in six.iteritems(self._models())}
def _get_model_without_dependencies(self, models_dict):
    '''
    Helper function to find the next model that should be created.

    models_dict maps model name -> list of outstanding dependencies.  The chosen
    model (one with an empty dependency list) is removed from models_dict and
    from every other model's dependency list, i.e. models_dict is mutated in
    place.  Returns None when models_dict is empty.
    '''
    next_model = None
    if not models_dict:
        return next_model

    for model, dependencies in six.iteritems(models_dict):
        if dependencies == []:
            next_model = model
            break

    if next_model is None:
        # every remaining model still depends on something undefined -> cycle
        # or missing definition
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))

    # remove the model from other dependencies before returning
    models_dict.pop(next_model)
    for model, dep_list in six.iteritems(models_dict):
        if next_model in dep_list:
            dep_list.remove(next_model)

    return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    # models() yields (name, schema) in dependency order, so referenced models
    # always exist on AWS before the models that reference them
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration.

    lambda_name
        name of the lambda function to resolve to an ARN

    lambda_region
        preferred region of the lambda function; if the function is not found
        there, the api gateway's own region is tried as a fallback.

    Raises ValueError if the function cannot be found in either region.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    # look in the lambda function's own region first (previously both lookups
    # used identical arguments, so the documented fallback could never find
    # anything the first call had missed)
    lambda_args = dict(self._common_aws_args, region=lambda_region)
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **lambda_args)

    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            apigw_args = dict(self._common_aws_args, region=apigw_region)
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **apigw_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Build the request parameters, request models, request templates and
    integration type needed to configure a method's request
    integration/mappings.
    '''
    method_params = {}
    method_models = {}
    for param in method_data.get('parameters', []):
        p = _Swagger.SwaggerParameter(param)
        if p.name:
            method_params[p.name] = True
        if p.schema:
            method_models['application/json'] = p.schema

    # options methods are mocked (CORS preflight); everything else integrates
    # with lambda via the AWS integration type
    if method_name == 'options':
        request_templates = _Swagger.REQUEST_OPTION_TEMPLATE
        integration_type = "MOCK"
    else:
        request_templates = _Swagger.REQUEST_TEMPLATE
        integration_type = "AWS"

    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.
    '''
    method_response_models = {}
    # default selection pattern when the response has no schema
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map each header to its declared default value (quoted for AWS), or
        # to the literal '*' when no default is given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # options methods use a MOCK integration, so no lambda uri is needed
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        # create a method response and a matching integration response for
        # every status code declared in the swagger file
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)

        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        # keys in the path item object that are not http operations
        # (e.g. vendor extensions) are skipped
        for method, method_data in six.iteritems(pathData):
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.no_more_deployments_remain
|
python
|
def no_more_deployments_remain(self):
    '''
    Return True when no deployment of this rest api still has any stage
    associated with it.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    for deployment in deployments or []:
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deployment.get('id'),
                                                                 **self._common_aws_args).get('stages')
        if stages:
            # at least one stage still points at this deployment
            return False
    return True
|
Helper function to find whether there are deployments left with stages associated
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L977-L994
| null |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    Wrapper around a Swagger Parameter Object exposing the pieces
    boto_apigateway needs: location, AWS request parameter name, and the
    referenced schema name.
    '''
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object
        '''
        _location = self._paramdict.get('in')
        if _location not in self.LOCATIONS:
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
        return _location

    @property
    def name(self):
        '''
        AWS method request parameter name for this swagger parameter, or None
        for body parameters.
        '''
        _name = self._paramdict.get('name')
        if not _name:
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
        # body parameters carry no request parameter name on AWS
        prefixes = {'header': 'method.request.header.{0}',
                    'query': 'method.request.querystring.{0}',
                    'path': 'method.request.path.{0}'}
        template = prefixes.get(self.location)
        if template:
            return template.format(_name)
        return None

    @property
    def schema(self):
        '''
        name of the model referenced by a body parameter's schema; None for
        non-body parameters.
        '''
        if self.location != 'body':
            return None
        _schema = self._paramdict.get('schema')
        if not _schema:
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        if '$ref' not in _schema:
            raise ValueError(('Body parameter must have a JSON reference '
                              'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
        return _schema.get('$ref').split('/')[-1]
class SwaggerMethodResponse(object):
    '''
    Wrapper around a Swagger Method Response Object exposing the referenced
    schema name and the response headers.
    '''
    def __init__(self, r):
        self._r = r

    @property
    def schema(self):
        '''
        name of the model referenced by this response's schema, or None when
        no schema is present.
        '''
        _schema = self._r.get('schema')
        if not _schema:
            return None
        if '$ref' not in _schema:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(_schema)))
        return _schema.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        headers dictionary of the method response object (empty dict when absent)
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    # name of the rest api; used as-is, not taken from the swagger title
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    # populated later by deploy_api / _resolve_api_id
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

        # NOTE(review): when swagger_file_path is None, self._cfg and
        # self._swagger_file are never set, so most properties of this object
        # will raise AttributeError -- confirm callers only pass None for
        # flows that avoid those properties.
        self._validate_swagger_file()

    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration.

    Every 4xx/5xx response must reference (via '$ref') a model defined under
    /definitions that is an object with an 'errorMessage' property.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue

                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        # message previously rendered without a space after the comma
        raise ValueError('Unsupported Swagger version: {0}, '
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # removed stray debug statement that logged type(self._models) -- it
    # always logged "<class 'method'>" and carried no information

    # enforce the error-response model conventions documented in
    # _validate_error_response_model
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    md5 hash computed (in __init__) over the swagger file contents plus the
    error/response templates; used to label deployments.
    '''
    return self._md5_filehash
@property
def info(self):
    '''
    returns the swagger info object as a dictionary

    Raises ValueError when the info object is missing or empty.
    '''
    info = self._cfg.get('info')
    if not info:
        raise ValueError('Info Object has no values')
    return info
@property
def info_json(self):
    '''
    returns the swagger info object serialized as a pretty printed json string
    '''
    return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
    '''
    returns the name of the api as supplied by the caller (api_name),
    not the swagger info title
    '''
    return self._api_name
@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object

    Raises ValueError when the version field is missing.
    '''
    version = self.info.get('version')
    if not version:
        raise ValueError('Missing version value in Info Object')

    return version
def _models(self):
    '''
    returns the definitions dictionary (model name -> schema) from the
    swagger file
    '''
    models = self._cfg.get('definitions')
    if not models:
        raise ValueError('Definitions Object has no values, You need to define them in your swagger file')

    return models
def models(self):
    '''
    generator to return the tuple of model and its schema to create on aws,
    ordered so that a model is only yielded after every model it references
    (via $ref) has been yielded
    '''
    model_dict = self._build_all_dependencies()
    while True:
        # pops the next dependency-free model from model_dict; None when done
        model = self._get_model_without_dependencies(model_dict)
        if not model:
            break
        yield (model, self._models().get(model))
@property
def paths(self):
    '''
    returns an iterator of (path, path item object) pairs specified in the
    swagger file

    Raises ValueError when no paths are defined or when a path does not
    start with '/'.
    '''
    paths = self._cfg.get('paths')
    if not paths:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    for path in paths:
        if not path.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
    return six.iteritems(paths)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file
    (empty string when absent)
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath
@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    (empty string until deploy_api or _resolve_api_id sets it)
    '''
    return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    returns the unique deployment label (see deployment_label) serialized as
    a pretty printed json string; used as the deployment description on AWS
    '''
    return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    Dictionary uniquely describing a deployment of this swagger file
    (swagger info, api name, file name and file md5); mainly used as the
    stage description.
    '''
    return {'swagger_info_object': self.info,
            'api_name': self.rest_api_name,
            'swagger_file': os.path.basename(self._swagger_file),
            'swagger_file_md5sum': self.md5_filehash}
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Return True when at least one stage is still attached to *deploymentId*.
    '''
    remaining = __salt__['boto_apigateway.describe_api_stages'](
        restApiId=self.restApiId,
        deploymentId=deploymentId,
        **self._common_aws_args).get('stages')
    return bool(remaining)
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently
    associated with.

    Returns '' when the stage does not exist (or has no deployment).
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId
def _get_current_deployment_label(self):
    '''
    Return the description (label) of the deployment currently bound to the
    configured stage, or None when no such deployment exists.
    '''
    current_id = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](
        restApiId=self.restApiId,
        deploymentId=current_id,
        **self._common_aws_args).get('deployment')
    return deployment.get('description') if deployment else None
def _get_desired_deployment_id(self):
    '''
    Return the id of the first existing deployment whose description matches
    this object's deployment label (api_name + swagger file), or '' when none
    matches.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](
        restApiId=self.restApiId,
        **self._common_aws_args).get('deployments') or []
    matching = [d.get('id') for d in deployments
                if d.get('description') == self.deployment_label_json]
    return matching[0] if matching else ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    Overwrite the configured stage's variables with *stage_variables*.

    ret
        a dictionary for returning status to Saltstack; marked failed/aborted
        when the overwrite call errors, otherwise updated with the change log.
    stage_variables
        dict of stage variable name -> value
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                   stageName=self._stage_name,
                                                                   variables=stage_variables,
                                                                   **self._common_aws_args)
    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Bind the configured stage to ``self._deploymentId`` and make it current.

    Creates the stage (with description and variables) when it does not yet
    exist; otherwise overwrites its stage variables first.  Returns the result
    of activate_api_deployment on success, or ``{'set': False, 'error': ...}``
    when stage creation / variable overwrite fails.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        # stage does not exist yet: create it bound to the target deployment
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # stage already exists: overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}
    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Look up and store the rest api id matching our api name and the canonical
    AWS_API_DESCRIPTION.  Raises ValueError when more than one api matches.
    '''
    matching = __salt__['boto_apigateway.describe_apis'](
        name=self.rest_api_name,
        description=_Swagger.AWS_API_DESCRIPTION,
        **self._common_aws_args).get('restapi')
    if matching:
        if len(matching) > 1:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
        self.restApiId = matching[0].get('id')
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        # stage exists: delete the stage first
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
                else:
                    ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            # stage is already on the desired deployment: nothing to do
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # not current: if the desired deployment already exists, publish
            # only needs to re-point the stage at it
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        stage variables to apply to the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)
    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage

    Returns ``{'deleted': True}`` on success, otherwise the first failing
    delete result.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first entry (presumably the root '/' resource — TODO confirm
        # ordering guarantee of describe_api_resources) and delete the rest
        # deepest-first so children go before their parents
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    return {'deleted': True}
def deploy_api(self, ret):
    '''
    Create (or reset) the top level rest api in AWS apigateway.

    When the rest api already exists, its resources and models are wiped via
    _cleanup_api so the swagger file can be re-applied from a clean slate;
    otherwise a new rest api is created and its id recorded on this object.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in user-facing message: was 'restAreId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret
    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)
    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret
    self.restApiId = response.get('restapi', {}).get('id')
    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    Honors Salt's test mode (``__opts__['test']``) by only reporting what
    would be deleted.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret
        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret
        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
    return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Recurses through array items, $ref targets and object properties; returns
    a de-duplicated list of referenced model names (order not guaranteed).
    '''
    dep_models_list = []
    if obj_schema:
        # default a missing 'type' to 'object' (mutates the schema in place)
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # referenced model depends on its own references too
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model name in the swagger definitions to the list of model names
    its schema references.
    '''
    dependency_map = {}
    for model_name, model_schema in six.iteritems(self._models()):
        dependency_map[model_name] = self._build_dependent_model_list(model_schema)
    return dependency_map
def _get_model_without_dependencies(self, models_dict):
    '''
    Helper function to find the next model that should be created: the first
    one whose dependency list is empty.  The chosen model is removed from
    *models_dict* and from every remaining model's dependency list.

    Returns None when *models_dict* is empty; raises ValueError when no model
    is dependency-free (incomplete or cyclic definitions).
    '''
    next_model = None
    if not models_dict:
        return next_model
    for model, dependencies in six.iteritems(models_dict):
        if dependencies == []:
            next_model = model
            break
    if next_model is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))
    # remove the model from other dependencies before returning
    models_dict.pop(next_model)
    for model, dep_list in six.iteritems(models_dict):
        if next_model in dep_list:
            dep_list.remove(next_model)
    return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})
        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)
        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))
            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', create_model_response)
    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper method to construct the lambda uri for use in method integration.

    Looks the function up in the resolved lambda region first and, when that
    region differs from the apigateway region, falls back to the apigateway
    region.  Raises ValueError when the function is found in neither.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')
    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
    # BUGFIX: the original issued two *identical* lookups, so the documented
    # fallback to the apigateway region could never return a different result.
    # The first lookup now explicitly targets the lambda region.
    lambda_args = dict(self._common_aws_args, region=lambda_region)
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **lambda_args)
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models, request_templates and
    integration_type values needed to configure method request integration/mappings.

    method_name
        lowercased swagger operation name ('get', 'options', ...)
    method_data
        the operation object for this method from the swagger file
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                method_params[p.name] = True
            if p.schema:
                method_models['application/json'] = p.schema
    # OPTIONS methods get a MOCK integration (used to enable CORS); all others
    # integrate with AWS lambda
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"
    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Depth-first collect every value stored under a 'pattern' key anywhere in
    the (possibly nested) dict *o*.  Non-dict input yields an empty list.
    '''
    found = []
    if not isinstance(o, dict):
        return found
    for key, value in six.iteritems(o):
        if isinstance(value, dict):
            found.extend(self._find_patterns(value))
        elif key == 'pattern':
            found.append(value)
    return found
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    Return the first 'pattern' found in the named response model's schema,
    falling back to '.+' for http error status codes and '.*' otherwise.
    '''
    fallback = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    patterns = self._find_patterns(self._models().get(schema_name))
    if patterns:
        return patterns[0]
    return fallback
def _get_response_template(self, method_name, http_status):
    '''
    Select the integration response template: the error template for non-OPTIONS
    methods with a 4xx/5xx status, the success/options template otherwise.
    User-supplied templates take precedence over the class defaults.
    '''
    if method_name != 'options' and self._is_http_error_rescode(http_status):
        if self._error_response_template:
            return {'application/json': self._error_response_template}
        return self.RESPONSE_TEMPLATE
    if self._response_template:
        return {'application/json': self._response_template}
    return self.RESPONSE_OPTION_TEMPLATE
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_name
        lowercased operation name
    method_response
        a _Swagger.SwaggerMethodResponse wrapping the swagger response object
    httpStatus
        status code string this response is registered under
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map each header to its declared default, or '*' when none given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
    response_templates = self._get_response_template(method_name, httpStatus)
    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack
    resource_path
        the full resource path where the named method_name will be associated with.
    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'
    method_data
        the value dictionary for this method in the swagger definition file.
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)
    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'
    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_method', m)
    # OPTIONS methods are MOCK-integrated, so they need no lambda uri
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)
    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
    if 'responses' in method_data:
        # create a method response and an integration response for each
        # declared status code
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)
            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        # deploy every swagger operation under this path
        for method, method_data in six.iteritems(pathData):
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._get_current_deployment_id
|
python
|
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently assocaited with.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
|
Helper method to find the deployment id that the stage name is currently associated with.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L996-L1006
| null |
class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''
    # all field names permitted in a Swagger 2.0 top-level object
    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
    # VENDOR SPECIFIC FIELD PATTERNS (keys starting with 'x-')
    VENDOR_EXT_PATTERN = re.compile('^x-')
    # JSON_SCHEMA_REF
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object
    '''
    # parameter locations this state supports
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # raw swagger Parameter Object (a dict)
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object; raises ValueError
        for unsupported locations
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object

        Header/query/path parameters are mapped to the AWS
        ``method.request.<location>.<name>`` form; body parameters yield None.
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger
        parameter object; None for non-body parameters
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Helper class for Swagger Method Response Object
    '''
    def __init__(self, r):
        # raw swagger Response Object (a dict)
        self._r = r

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger
        method response object; None when the response declares no schema
        '''
        _schema = self._r.get('schema')
        if _schema:
            if '$ref' in _schema:
                return _schema.get('$ref').split('/')[-1]
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(_schema)))
        return None

    @property
    def headers(self):
        '''
        returns the headers dictionary in the method response object ({} when absent)
        '''
        _headers = self._r.get('headers', {})
        return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load and validate the swagger file and resolve any existing rest api id.

    api_name / stage_name
        target api and stage names
    lambda_funcname_format
        format pattern used to derive lambda function names
    swagger_file_path
        path to the swagger definition file (yaml)
    error_response_template / response_template
        optional user-supplied integration response templates
    common_aws_args
        region/key/profile arguments passed through to every boto call
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template
    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
    # NOTE(review): when swagger_file_path is None, _swagger_file/_cfg are never
    # set, so the validation below would fail with AttributeError — confirm all
    # callers pass a real path.
    self._validate_swagger_file()
    self._validate_lambda_funcname_format()
    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration.

    paths
        iterable of (path, path item object) pairs (see the ``paths`` property)
    mods
        dict of model name -> model schema (see ``_models``)

    Every 4xx/5xx response must reference (via $ref) a model of type
    'object' whose properties include 'errorMessage'.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # skip non-operation keys (vendor extensions, 'parameters', ...)
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue

                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    :raises ValueError: on unexpected, missing or unsupported fields
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # (removed a stray "log.info(type(self._models))" debug statement that
    # only ever logged the type of the bound method object)
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file (combined with the error/response
    templates; computed once in __init__)
    '''
    return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    # delegates to the module-level json pretty-printer helper
    return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
    '''
    returns the name of the api (the api_name given at construction time,
    not the swagger info title)
    '''
    return self._api_name
@property
def rest_api_version(self):
    '''
    Version string taken from the swagger info object.

    :raises ValueError: when the info object has no version field
    '''
    api_version = self.info.get('version')
    if not api_version:
        raise ValueError('Missing version value in Info Object')
    return api_version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
    '''
    generator to return the tuple of model and its schema to create on aws.

    Models are yielded in dependency order: a model is produced only after
    every model it references has been produced.
    '''
    # map of model name -> list of unresolved dependencies; consumed as we go
    model_dict = self._build_all_dependencies()
    while True:
        # pick the next model whose dependencies have all been yielded
        model = self._get_model_without_dependencies(model_dict)
        if not model:
            break
        yield (model, self._models().get(model))
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
@property
def basePath(self):
'''
returns the base path field as defined in the swagger file
'''
basePath = self._cfg.get('basePath', '')
return basePath
@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    ('' until one has been resolved or created)
    '''
    return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment (used to match existing deployments in
    _get_desired_deployment_id)
    '''
    return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    this property returns the deployment label dictionary (mainly used by
    stage description)

    The label combines the swagger info object, the api name, and the
    swagger file's basename and md5 hash, so any change to the file (or the
    templates folded into the hash) produces a distinct label.
    '''
    label = dict()

    label['swagger_info_object'] = self.info
    label['api_name'] = self.rest_api_name
    label['swagger_file'] = os.path.basename(self._swagger_file)
    label['swagger_file_md5sum'] = self.md5_filehash

    return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated with a deployment

    deploymentId
        id of the AWS api deployment to check
    '''
    # a deployment can only be deleted once no stage points at it
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    return bool(stages)
def no_more_deployments_remain(self):
    '''
    Helper function to find whether there are deployments left with stages associated

    :return: True when no deployment of this rest api has a stage attached
    '''
    no_more_deployments = True
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            deploymentId = deployment.get('id')
            stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('stages')
            # any stage still attached means deployments remain in use
            if stages:
                no_more_deployments = False
                break

    return no_more_deployments
def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label that the stage_name is currently associated with.

    :return: the deployment's description (the json deployment label) or
        None when no matching deployment exists
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None
def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id matching the desired deployment label for
    this Swagger object based on the given api_name, swagger_file

    :return: the matching deployment id, or '' when none matches
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            # deployment labels are stored in the 'description' field
            if deployment.get('description') == self.deployment_label_json:
                return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given stage_variables

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dictionary of stage variables to set on the stage
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)

    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current

    stage_desc_json
        pretty-printed json stage description to attach on stage creation
    stage_variables
        dictionary of stage variables to set on the stage

    :return: result dict from activate_api_deployment, or
        {'set': False, 'error': ...} when the stage could not be prepared
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        # stage does not exist yet; create it against the target deployment
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
    as the api description

    On a unique match the id is stored via the restApiId setter; no match
    leaves restApiId untouched.

    :raises ValueError: when more than one api matches
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # not at desired state; see whether a deployment with the desired
            # label already exists so publish_api can just reassociate it
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dictionary of stage variables to set on the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage

    :return: the first failing delete result, or {'deleted': True}
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first resource (presumably the root '/' -- confirm) and
        # delete the rest deepest-first
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    this method create the top level rest api in AWS apigateway

    ret
        a dictionary for returning status to Saltstack
    '''
    # if a rest api already exists (resolved in __init__), wipe its
    # resources and models so the new swagger content can be deployed
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in error message: 'restAreId' -> 'restApiId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        # honor salt's test=True mode: report intent, change nothing
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

    return ret
def _aws_model_ref_from_swagger_ref(self, r):
    '''
    Convert a swagger-local model reference (e.g. '#/definitions/Pet') into
    the full AWS apigateway model URL for this rest api.
    '''
    model_name = r.split('/')[-1]
    aws_ref_format = 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'
    return aws_ref_format.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Follows $ref entries (recursing into the referenced models' schemas),
    array item schemas, and object properties; returns a de-duplicated
    list of referenced model names.
    '''
    dep_models_list = []

    if obj_schema:
        # NOTE(review): this defaults and *writes back* the 'type' key onto
        # the schema dict loaded from the swagger file, so the stored config
        # (later serialized by deploy_models) gains the key -- looks
        # intentional, confirm before changing.
        obj_schema['type'] = obj_schema.get('type', 'object')

        if obj_schema['type'] == 'array':
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model defined in the swagger file to the list of model names
    its schema references (directly or through nested properties).
    '''
    return dict((model_name, self._build_dependent_model_list(schema))
                for model_name, schema in self._models().items())
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    # models() yields in dependency order, so referenced models exist first
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
    '''
    Derive the lambda function name for a resource path + http method from
    the configured lambda_funcname_format (naming rule documented in
    boto_apigateway.api_present): surrounding whitespace is stripped, curly
    braces removed, whitespace and '/' become underscores, the result is
    lowercased and runs of underscores are collapsed.
    '''
    name = self._lambda_funcname_format.format(stage=self._stage_name,
                                               api=self.rest_api_name,
                                               resource=resourcePath,
                                               method=httpMethod).strip()
    name = re.sub(r'{|}', '', name)
    name = re.sub(r'\s+|/', '_', name).lower()
    return re.sub(r'_+', '_', name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration

    lambda_name
        name of the lambda function to integrate with
    lambda_region
        preferred region for the lambda lookup; when the function is not
        found there, the api gateway's own region is tried as a fallback

    :raises ValueError: when the lambda function cannot be found in either region
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    # BUGFIX: previously both lookups used identical arguments (region came
    # from _common_aws_args in both), so the documented fallback to the api
    # gateway's region never actually happened.  Look up in the requested
    # lambda region first, then fall back to the apigateway region.
    lookup_args = dict(self._common_aws_args)
    lookup_args['region'] = lambda_region
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **lookup_args)

    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            lookup_args['region'] = apigw_region
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **lookup_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Build the request-side configuration for a swagger operation: request
    parameters, request models, request templates, and the integration type
    ('MOCK' for options/CORS, 'AWS' otherwise).

    method_name
        lower-cased operation name (e.g. 'get', 'options')
    method_data
        the operation's value dictionary from the swagger file
    '''
    request_params = {}
    request_models = {}
    for param_def in method_data.get('parameters', []):
        param = _Swagger.SwaggerParameter(param_def)
        if param.name:
            request_params[param.name] = True
        if param.schema:
            request_models['application/json'] = param.schema

    is_options = method_name == 'options'
    return {'params': request_params,
            'models': request_models,
            'request_templates': _Swagger.REQUEST_OPTION_TEMPLATE if is_options else _Swagger.REQUEST_TEMPLATE,
            'integration_type': 'MOCK' if is_options else 'AWS'}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    returns the pattern specified in a response schema

    When the model declares no 'pattern' anywhere, falls back to '.+' for
    4xx/5xx statuses and '.*' otherwise; when several patterns exist the
    first one found is used.
    '''
    defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    model = self._models().get(schema_name)
    patterns = self._find_patterns(model)
    return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
    '''
    Pick the integration response template for a method/status pair: the
    user-supplied template when one was configured, otherwise the class
    default.  The error template applies only to non-options methods
    returning a 4xx/5xx status.
    '''
    if method_name != 'options' and self._is_http_error_rescode(http_status):
        custom = self._error_response_template
        default = self.RESPONSE_TEMPLATE
    else:
        custom = self._response_template
        default = self.RESPONSE_OPTION_TEMPLATE
    return {'application/json': custom} if custom else default
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_name
        lower-cased operation name (e.g. 'get', 'options')
    method_response
        a _Swagger.SwaggerMethodResponse wrapping the swagger response object
    httpStatus
        the response status code as a string
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # use the declared default value when present, otherwise wildcard
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # MOCK integrations (options/CORS) do not point at a lambda function
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # only deploy keys that are actual swagger operations (skips
            # vendor extensions and the shared 'parameters' key)
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._get_current_deployment_label
|
python
|
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
|
Helper method to find the deployment label that the stage_name is currently associated with.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1008-L1018
|
[
"def _get_current_deployment_id(self):\n '''\n Helper method to find the deployment id that the stage name is currently assocaited with.\n '''\n deploymentId = ''\n stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,\n stageName=self._stage_name,\n **self._common_aws_args).get('stage')\n if stage:\n deploymentId = stage.get('deploymentId')\n return deploymentId\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
# Fields permitted on a Swagger Object (v2); anything else — other than
# x- vendor extensions — is rejected by _validate_swagger_file.
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                         'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                         'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES (http methods handled when walking path items)
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
# swagger spec versions this state knows how to handle
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS (x- prefixed vendor extensions)
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF stamped onto model schemas uploaded to AWS
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object.

    It maps a parameter's 'in' location onto the request-parameter naming
    convention AWS API Gateway expects (method.request.<location>.<name>)
    and extracts the referenced model name for body parameters.
    '''
    # parameter locations supported by this state ('formData' is not)
    LOCATIONS = ('body', 'query', 'header', 'path')
    def __init__(self, paramdict):
        # paramdict: the raw Parameter Object dictionary from the swagger file
        self._paramdict = paramdict
    @property
    def location(self):
        '''
        returns location in the swagger parameter object

        Raises ValueError for locations outside LOCATIONS.
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object

        header/query/path parameters are returned in AWS's
        'method.request.<location>.<name>' form; body parameters return
        None (their shape is described by the schema instead).
        Raises ValueError when the parameter has no name.
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            # body parameters carry no request-parameter name
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object

        Only meaningful for body parameters; other locations return None.
        Raises ValueError when a body parameter lacks a $ref schema.
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Thin wrapper around a Swagger Method Response Object exposing the
    pieces boto_apigateway needs: the referenced model name and headers.
    '''
    def __init__(self, r):
        # r is the raw response object dictionary from the swagger file
        self._r = r
    @property
    def schema(self):
        '''
        Name of the model referenced by this response's schema, or None
        when the response declares no schema. Raises ValueError when a
        schema is present but is not a JSON reference ($ref).
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        return schema_obj.get('$ref').split('/')[-1]
    @property
    def headers(self):
        '''
        Headers dictionary declared on this response ({} when absent).
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load and validate the swagger file, then resolve the rest api id.

    api_name
        name of the rest api on AWS
    stage_name
        name of the deployment stage to operate on
    lambda_funcname_format
        format string used to derive lambda function names (may reference
        stage, api, resource, method)
    swagger_file_path
        path to the swagger definition file (YAML or JSON)
    error_response_template / response_template
        optional user overrides for the integration response templates
    common_aws_args
        region/key/keyid/profile arguments passed through to boto calls

    Raises IOError for a bad file path, ValueError for invalid content.
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template
    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # hash covers the swagger file AND both templates, so a
            # template change also triggers a redeployment
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
    self._validate_swagger_file()
    self._validate_lambda_funcname_format()
    self._resolve_api_id()
def _is_http_error_rescode(self, code):
    '''
    Return True when *code* (a string) is an HTTP error status in the
    400-599 range, ignoring surrounding whitespace.
    '''
    error_code_pattern = r'^\s*[45]\d\d\s*$'
    return re.match(error_code_pattern, code) is not None
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration

    paths
        iterator of (path, path item object) pairs (see the paths property)
    mods
        the definitions mapping (model name -> schema)

    Raises ValueError when any 4xx/5xx response violates the convention:
    missing schema, schema without $ref, reference to an undefined model,
    non-object model, or a model lacking an errorMessage property.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue
            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue
                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))
                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]
                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)
                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))
                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
    '''
    Checks that the lambda function name format string only references the
    known substitutable keys (stage, api, resource, method).

    :return: True on success; raises ValueError when the format string
        references an unknown key or is otherwise malformed.
    '''
    try:
        if self._lambda_funcname_format:
            known_kwargs = dict(stage='',
                                api='',
                                resource='',
                                method='')
            self._lambda_funcname_format.format(**known_kwargs)
        # Fixed: an empty/absent format is valid too — previously this
        # path fell through and returned None despite the documented
        # contract of returning True on success.
        return True
    except Exception:
        # str.format can raise KeyError, IndexError or ValueError here;
        # all of them mean the same thing to the caller.
        raise ValueError('Invalid lambda_funcname_format {0}. Please review '
                         'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    Raises ValueError on any unexpected, missing, or unsupported field.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))
    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))
    # Removed stray debug statement `log.info(type(self._models))` — it
    # only ever logged the type of the bound method object.
    # validate the error-response convention for all 4xx/5xx responses
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file (also covers the response
    templates, see __init__); used to label deployments for change
    detection
    '''
    return self._md5_filehash
@property
def info(self):
    '''
    returns the swagger info object as a dictionary

    Raises ValueError when the info object is empty or missing.
    '''
    info = self._cfg.get('info')
    if not info:
        raise ValueError('Info Object has no values')
    return info
@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
    '''
    returns the name of the api
    '''
    return self._api_name
@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object

    Raises ValueError when the version is absent.
    '''
    version = self.info.get('version')
    if not version:
        raise ValueError('Missing version value in Info Object')
    return version
def _models(self):
    '''
    Return the 'definitions' mapping from the swagger file.
    Raises ValueError when no definitions are present.
    '''
    definitions = self._cfg.get('definitions')
    if definitions:
        return definitions
    raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
def models(self):
    '''
    Generator yielding (model_name, schema) tuples in dependency order,
    i.e. a model is only yielded once every model it references has been
    yielded — the order AWS requires them to be created in.
    '''
    # Fixed: look up the definitions once instead of calling
    # self._models() (which re-fetches and re-checks the mapping) on
    # every single yield.
    all_models = self._models()
    model_dict = self._build_all_dependencies()
    while True:
        model = self._get_model_without_dependencies(model_dict)
        if not model:
            break
        yield (model, all_models.get(model))
@property
def paths(self):
    '''
    Iterator over (path, path item object) pairs from the swagger file.

    Raises ValueError when no paths are defined or when a path does not
    begin with '/'.
    '''
    path_objects = self._cfg.get('paths')
    if not path_objects:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    invalid = [p for p in path_objects if not p.startswith('/')]
    if invalid:
        raise ValueError('Path object {0} should start with /. Please fix it'.format(invalid[0]))
    return six.iteritems(path_objects)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file
    ('' when absent)
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath
@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    '''
    return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    this property returns the deployment label dictionary (mainly used by
    stage description)

    The label uniquely identifies a deployment by the swagger info object,
    api name, swagger file name and the md5 hash of its content.
    '''
    label = dict()
    label['swagger_info_object'] = self.info
    label['api_name'] = self.rest_api_name
    label['swagger_file'] = os.path.basename(self._swagger_file)
    label['swagger_file_md5sum'] = self.md5_filehash
    return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated with a deployment

    Returns True when at least one stage still references *deploymentId*.
    '''
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    return bool(stages)
def no_more_deployments_remain(self):
    '''
    Return True when no deployment of this rest api has any stage still
    associated with it (i.e. everything is safe to remove).
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if not deployments:
        return True
    for deployment in deployments:
        # stop at the first deployment that still has a stage attached
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deployment.get('id'),
                                                                 **self._common_aws_args).get('stages')
        if stages:
            return False
    return True
def _get_current_deployment_id(self):
    '''
    Return the deployment id currently associated with this object's
    stage name, or '' when the stage does not exist.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](
        restApiId=self.restApiId,
        stageName=self._stage_name,
        **self._common_aws_args).get('stage')
    return stage.get('deploymentId') if stage else ''
def _get_desired_deployment_id(self):
    '''
    Return the id of the deployment whose description matches this
    object's deployment label (api name + swagger file hash), or '' when
    none matches.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](
        restApiId=self.restApiId,
        **self._common_aws_args).get('deployments') or []
    matching_ids = [d.get('id') for d in deployments
                    if d.get('description') == self.deployment_label_json]
    return matching_ids[0] if matching_ids else ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given stage_variables

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dictionary of variables to set on the stage (replaces existing ones)

    On failure, ret is marked failed/aborted with the boto error message.
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current

    Creates the stage if it does not exist yet; otherwise overwrites its
    stage variables and re-points it at self._deploymentId. Returns the
    execution-module result dict ({'set': False, 'error': ...} on failure).
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        # NOTE(review): success is detected via the 'stage' key here while
        # overwrite_stage_variables() checks 'overwrite' — confirm which
        # key the execution module actually returns.
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}
    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
    as the api description

    Sets self.restApiId when exactly one api matches; leaves it empty
    when none match; raises ValueError on multiple matches.
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                # NOTE(review): the success comment is only set when OTHER
                # stages remain on the deployment, not when the deployment
                # itself was removed — verify this is the intended placement.
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # a deployment with the desired label may already exist from a
            # previous run; if so, only a stage re-association is needed
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dictionary of stage variables to set on the published stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)
    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage

    Returns {'deleted': True} on success, otherwise the failing
    execution-module result.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # the first entry (presumably the root '/' resource) is kept;
        # the rest are deleted in reverse order so children go before
        # parents — TODO confirm describe_api_resources ordering.
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    return {'deleted': True}
def deploy_api(self, ret):
    '''
    this method create the top level rest api in AWS apigateway

    If the api already exists (restApiId resolved), its resources and
    models are wiped so the new swagger content can be deployed cleanly;
    otherwise the rest api is created and its id recorded.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in the failure message ('restAreId' -> 'restApiId')
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret
    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)
    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret
    self.restApiId = response.get('restapi', {}).get('id')
    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack

    Only apis carrying the salt-provisioned description are considered;
    honors Salt's test mode by reporting without deleting.
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret
        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret
        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
    return ret
def _aws_model_ref_from_swagger_ref(self, r):
    '''
    Translate a swagger '$ref' value into the full AWS API Gateway model
    URL for this rest api.
    '''
    model_name = r.rsplit('/', 1)[-1]
    return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(
        self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
    '''
    Return a copy of *schema* where every '$ref' value has been rewritten
    into the AWS API Gateway model URL form (applied recursively to all
    nested dictionaries).
    '''
    converted = {}
    for key, value in schema.items():
        if key == '$ref':
            value = self._aws_model_ref_from_swagger_ref(value)
        if isinstance(value, dict):
            converted[key] = self._update_schema_to_aws_notation(value)
        else:
            converted[key] = value
    return converted
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Returns a de-duplicated list of model names found via $ref anywhere
    in the schema (array items and nested properties are walked
    recursively).

    NOTE: mutates obj_schema in place by defaulting its 'type' to 'object'.
    '''
    dep_models_list = []
    if obj_schema:
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # the referenced model depends on its own references too
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map each model name to the list of model names its schema references.
    '''
    return dict((name, self._build_dependent_model_list(schema))
                for name, schema in six.iteritems(self._models()))
def _get_model_without_dependencies(self, models_dict):
    '''
    Helper function to find the next model that should be created

    Pops and returns a model with an empty dependency list from
    models_dict (also removing it from all remaining dependency lists),
    or None when models_dict is empty. Raises ValueError when every
    remaining model still has unresolved dependencies (i.e. a reference
    to an undefined model, or a dependency cycle).
    '''
    next_model = None
    if not models_dict:
        return next_model
    for model, dependencies in six.iteritems(models_dict):
        if dependencies == []:
            next_model = model
            break
    if next_model is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))
    # remove the model from other dependencies before returning
    models_dict.pop(next_model)
    for model, dep_list in six.iteritems(models_dict):
        if next_model in dep_list:
            dep_list.remove(next_model)
    return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack

    Models are created/updated in dependency order (see models()); their
    $refs are first rewritten into AWS model URLs.
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})
        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)
        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))
            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', create_model_response)
    return ret
def _lambda_name(self, resourcePath, httpMethod):
    '''
    Derive the lambda function name for a resource path / http method
    pair using the configured lambda_funcname_format (see the
    boto_apigateway.api_present docstring for the naming rules).
    '''
    raw = self._lambda_funcname_format.format(stage=self._stage_name,
                                              api=self.rest_api_name,
                                              resource=resourcePath,
                                              method=httpMethod).strip()
    # drop the curly braces that surround path parameters
    raw = re.sub(r'{|}', '', raw)
    # whitespace and path separators become underscores, lowercased
    raw = re.sub(r'\s+|/', '_', raw).lower()
    # finally collapse repeated underscores
    return re.sub(r'_+', '_', raw)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Construct the API Gateway integration uri for *lambda_name*.

    The function is looked up in *lambda_region* first; when that differs
    from the api gateway's region and the function is not found there,
    the lookup falls back to the api gateway's own region.

    Raises ValueError when the function cannot be found in either region.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')
    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
    # Fixed: the original retry re-issued describe_function with identical
    # arguments, so it could never find the function in the other region.
    # The lookup region is now overridden explicitly for both attempts.
    lookup_args = dict(self._common_aws_args)
    lookup_args['region'] = lambda_region
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **lookup_args)
    if lambda_region != apigw_region and not lambda_desc.get('function'):
        # try look up in the same region as the apigateway as well if previous lookup failed
        lookup_args['region'] = apigw_region
        lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **lookup_args)
    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models, request_templates and
    integration_type values needed to configure method request integration/mappings.

    method_name
        lower-cased http method name ('get', 'options', ...)
    method_data
        the method's operation object from the swagger file

    Returns a dict with 'params', 'models', 'request_templates' and
    'integration_type' keys. 'options' methods are wired as MOCK
    integrations (for CORS); everything else integrates with AWS lambda.
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                method_params[p.name] = True
            if p.schema:
                method_models['application/json'] = p.schema
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"
    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Recursively collect the values of every 'pattern' key found in the
    (possibly nested) dictionary *o*. Non-dict inputs yield [].
    '''
    found = []
    if not isinstance(o, dict):
        return found
    for key, value in six.iteritems(o):
        if isinstance(value, dict):
            found += self._find_patterns(value)
        elif key == 'pattern':
            found.append(value)
    return found
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    Return the first 'pattern' declared anywhere in the named model
    schema; when none is declared, '.+' is returned for error status
    codes and '.*' otherwise.
    '''
    fallback = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    found = self._find_patterns(self._models().get(schema_name))
    if found:
        return found[0]
    return fallback
def _get_response_template(self, method_name, http_status):
    '''
    Select the integration response template for a method/status pair.

    User-supplied templates win when present, otherwise the class
    defaults are used; error status codes (except on 'options' methods)
    get the error variant.
    '''
    is_error = method_name != 'options' and self._is_http_error_rescode(http_status)
    if is_error:
        if self._error_response_template:
            return {'application/json': self._error_response_template}
        return self.RESPONSE_TEMPLATE
    if self._response_template:
        return {'application/json': self._response_template}
    return self.RESPONSE_OPTION_TEMPLATE
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_name
        lower-cased http method name
    method_response
        a _Swagger.SwaggerMethodResponse wrapper for the response object
    httpStatus
        the response status code (string) this mapping applies to

    Returns a dict with 'params', 'models', 'integration_params',
    'pattern' and 'response_templates' keys.
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # a declared default becomes the mapped static header value,
        # otherwise '*' is used
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
    response_templates = self._get_response_template(method_name, httpStatus)
    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
    def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                       lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to create a method for the given resource path, along with its associated
        request and response integrations.

        ret
            a dictionary for returning status to Saltstack

        resource_path
            the full resource path where the named method_name will be associated with.

        method_name
            a string that is one of the following values: 'delete', 'get', 'head', 'options',
            'patch', 'post', 'put'

        method_data
            the value dictionary for this method in the swagger definition file.

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        method = self._parse_method_data(method_name.lower(), method_data)
        # for options method to enable CORS, api_key_required will be set to False always.
        # authorization_type will be set to 'NONE' always.
        if method_name.lower() == 'options':
            api_key_required = False
            authorization_type = 'NONE'
        # step 1: create the method itself on the resource
        m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                          resourcePath=resource_path,
                                                          httpMethod=method_name.upper(),
                                                          authorizationType=authorization_type,
                                                          apiKeyRequired=api_key_required,
                                                          requestParameters=method.get('params'),
                                                          requestModels=method.get('models'),
                                                          **self._common_aws_args)
        if not m.get('created'):
            ret = _log_error_and_abort(ret, m)
            return ret
        ret = _log_changes(ret, '_deploy_method.create_api_method', m)
        # OPTIONS methods are mocked (no lambda behind them), so no uri is needed
        lambda_uri = ""
        if method_name.lower() != 'options':
            lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                          lambda_region=lambda_region)
        # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
        # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
        # step 2: attach the request integration to the method
        integration = (
            __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                               resourcePath=resource_path,
                                                               httpMethod=method_name.upper(),
                                                               integrationType=method.get('integration_type'),
                                                               integrationHttpMethod='POST',
                                                               uri=lambda_uri,
                                                               credentials=lambda_integration_role,
                                                               requestTemplates=method.get('request_templates'),
                                                               **self._common_aws_args))
        if not integration.get('created'):
            ret = _log_error_and_abort(ret, integration)
            return ret
        ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
        if 'responses' in method_data:
            # step 3: for each declared status code, create the method response
            # and its matching integration response mapping
            for response, response_data in six.iteritems(method_data['responses']):
                httpStatus = str(response)  # future lint: disable=blacklisted-function
                method_response = self._parse_method_response(method_name.lower(),
                                                              _Swagger.SwaggerMethodResponse(response_data), httpStatus)
                mr = __salt__['boto_apigateway.create_api_method_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    responseParameters=method_response.get('params'),
                    responseModels=method_response.get('models'),
                    **self._common_aws_args)
                if not mr.get('created'):
                    ret = _log_error_and_abort(ret, mr)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
                mir = __salt__['boto_apigateway.create_api_integration_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    selectionPattern=method_response.get('pattern'),
                    responseParameters=method_response.get('integration_params'),
                    responseTemplates=method_response.get('response_templates'),
                    **self._common_aws_args)
                if not mir.get('created'):
                    ret = _log_error_and_abort(ret, mir)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
        else:
            raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
        return ret
    def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to deploy resources defined in the swagger file.

        ret
            a dictionary for returning status to Saltstack

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        for path, pathData in self.paths:
            # creates the resource (and implicitly its parents) for this path
            resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                        path=path,
                                                                        **self._common_aws_args)
            if not resource.get('created'):
                ret = _log_error_and_abort(ret, resource)
                return ret
            ret = _log_changes(ret, 'deploy_resources', resource)
            for method, method_data in six.iteritems(pathData):
                # skip non-operation keys (e.g. 'parameters' or x- extensions)
                if method in _Swagger.SWAGGER_OPERATION_NAMES:
                    ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                              lambda_integration_role, lambda_region, authorization_type)
        return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._get_desired_deployment_id
|
python
|
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
|
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1020-L1031
| null |
class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''
    # Fields permitted at the top level of a Swagger 2.0 Object
    # (x- prefixed vendor extensions are additionally allowed, see VENDOR_EXT_PATTERN)
    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
    # VENDOR SPECIFIC FIELD PATTERNS
    VENDOR_EXT_PATTERN = re.compile('^x-')
    # JSON_SCHEMA_REF
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
    # AWS integration templates for normal and options methods
    # (velocity mapping template that packages headers, query/path params,
    # apigw context, body and stage variables into the lambda event)
    REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
                                            '{\n'
                                            '"header_params" : {\n'
                                            '#set ($map = $input.params().header)\n'
                                            '#foreach( $param in $map.entrySet() )\n'
                                            '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                                            '#end\n'
                                            '},\n'
                                            '"query_params" : {\n'
                                            '#set ($map = $input.params().querystring)\n'
                                            '#foreach( $param in $map.entrySet() )\n'
                                            '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                                            '#end\n'
                                            '},\n'
                                            '"path_params" : {\n'
                                            '#set ($map = $input.params().path)\n'
                                            '#foreach( $param in $map.entrySet() )\n'
                                            '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                                            '#end\n'
                                            '},\n'
                                            '"apigw_context" : {\n'
                                            '"apiId": "$context.apiId",\n'
                                            '"httpMethod": "$context.httpMethod",\n'
                                            '"requestId": "$context.requestId",\n'
                                            '"resourceId": "$context.resourceId",\n'
                                            '"resourcePath": "$context.resourcePath",\n'
                                            '"stage": "$context.stage",\n'
                                            '"identity": {\n'
                                            ' "user":"$context.identity.user",\n'
                                            ' "userArn":"$context.identity.userArn",\n'
                                            ' "userAgent":"$context.identity.userAgent",\n'
                                            ' "sourceIp":"$context.identity.sourceIp",\n'
                                            ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
                                            ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
                                            ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
                                            ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
                                            ' "caller":"$context.identity.caller",\n'
                                            ' "apiKey":"$context.identity.apiKey",\n'
                                            ' "accountId":"$context.identity.accountId"\n'
                                            '}\n'
                                            '},\n'
                                            '"body_params" : $input.json(\'$\'),\n'
                                            '"stage_variables": {\n'
                                            '#foreach($variable in $stageVariables.keySet())\n'
                                            '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
                                            '#if($foreach.hasNext), #end\n'
                                            '#end\n'
                                            '}\n'
                                            '}'}
    # OPTIONS methods are mocked with a static 200 response (CORS support)
    REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
    # AWS integration response template mapping to convert stackTrace part or the error
    # to a uniform format containing strings only. Swagger does not seem to allow defining
    # an array of non-uniform types, to it is not possible to create error model to match
    # exactly what comes out of lambda functions in case of error.
    RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
                                             '{\n'
                                             ' "errorMessage" : "$inputRoot.errorMessage",\n'
                                             ' "errorType" : "$inputRoot.errorType",\n'
                                             ' "stackTrace" : [\n'
                                             '#foreach($stackTrace in $inputRoot.stackTrace)\n'
                                             ' [\n'
                                             '#foreach($elem in $stackTrace)\n'
                                             ' "$elem"\n'
                                             '#if($foreach.hasNext),#end\n'
                                             '#end\n'
                                             ' ]\n'
                                             '#if($foreach.hasNext),#end\n'
                                             '#end\n'
                                             ' ]\n'
                                             '}'}
    RESPONSE_OPTION_TEMPLATE = {}
    # This string should not be modified, every API created by this state will carry the description
    # below.
    AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
                                                "context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
    def __init__(self, api_name, stage_name, lambda_funcname_format,
                 swagger_file_path, error_response_template, response_template, common_aws_args):
        '''
        Load and validate the swagger file, then resolve the existing AWS rest
        api id (if any) for api_name.

        Raises IOError when swagger_file_path does not point at a file, and
        ValueError (via the _validate_* helpers) on invalid content.
        '''
        self._api_name = api_name
        self._stage_name = stage_name
        self._lambda_funcname_format = lambda_funcname_format
        self._common_aws_args = common_aws_args
        # populated later by _resolve_api_id / verify_api
        self._restApiId = ''
        self._deploymentId = ''
        self._error_response_template = error_response_template
        self._response_template = response_template
        if swagger_file_path is not None:
            if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
                self._swagger_file = swagger_file_path
                # hash covers the swagger file AND both templates so any change
                # to either triggers a redeployment
                self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                       error_response_template,
                                                       response_template)
                with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                    self._cfg = salt.utils.yaml.safe_load(sf)
                self._swagger_version = ''
            else:
                raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
            self._validate_swagger_file()
        self._validate_lambda_funcname_format()
        self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
    def _validate_error_response_model(self, paths, mods):
        '''
        Helper function to help validate the convention established in the swagger file on how
        to handle response code mapping/integration

        paths
            iterator over (path, path item object) pairs from the swagger file

        mods
            dictionary of model definitions from the swagger file

        Raises ValueError when any 4xx/5xx response fails to reference a
        defined object model that carries an errorMessage property.
        '''
        for path, ops in paths:
            for opname, opobj in six.iteritems(ops):
                # skip non-operation keys (e.g. 'parameters', vendor extensions)
                if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                    continue
                if 'responses' not in opobj:
                    raise ValueError('missing mandatory responses field in path item object')
                for rescode, resobj in six.iteritems(opobj.get('responses')):
                    if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                        continue
                    # only check for response code from 400-599
                    if 'schema' not in resobj:
                        raise ValueError('missing schema field in path {0}, '
                                         'op {1}, response {2}'.format(path, opname, rescode))
                    schemaobj = resobj.get('schema')
                    if '$ref' not in schemaobj:
                        raise ValueError('missing $ref field under schema in '
                                         'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                    schemaobjref = schemaobj.get('$ref', '/')
                    modelname = schemaobjref.split('/')[-1]
                    if modelname not in mods:
                        raise ValueError('model schema {0} reference not found '
                                         'under /definitions'.format(schemaobjref))
                    model = mods.get(modelname)
                    if model.get('type') != 'object':
                        raise ValueError('model schema {0} must be type object'.format(modelname))
                    if 'properties' not in model:
                        raise ValueError('model schema {0} must have properties fields'.format(modelname))
                    modelprops = model.get('properties')
                    if 'errorMessage' not in modelprops:
                        raise ValueError('model schema {0} must have errorMessage as a property to '
                                         'match AWS convention. If pattern is not set, .+ will '
                                         'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check, but rather make sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
    @property
    def md5_filehash(self):
        '''
        returns md5 hash for the swagger file (combined with the error/response
        templates, computed once in __init__)
        '''
        return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
    @property
    def info_json(self):
        '''
        returns the swagger info object as a pretty printed json string.
        '''
        return _dict_to_json_pretty(self.info)
    @property
    def rest_api_name(self):
        '''
        returns the name of the api (the api_name passed to __init__, not the
        swagger info title)
        '''
        return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
yield (model, self._models().get(model))
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
    @property
    def basePath(self):
        '''
        returns the base path field as defined in the swagger file
        ('' when absent)
        '''
        basePath = self._cfg.get('basePath', '')
        return basePath
    @property
    def restApiId(self):
        '''
        returns the rest api id as returned by AWS on creation of the rest api
        ('' until resolved or created)
        '''
        return self._restApiId
    @restApiId.setter
    def restApiId(self, restApiId):
        '''
        allows the assignment of the rest api id on creation of the rest api
        '''
        self._restApiId = restApiId
    @property
    def deployment_label_json(self):
        '''
        this property returns the unique description in pretty printed json for
        a particular api deployment
        '''
        return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
'''
Helper function to find whether there are other stages still associated with a deployment
'''
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
return bool(stages)
def no_more_deployments_remain(self):
'''
Helper function to find whether there are deployments left with stages associated
'''
no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
if stages:
no_more_deployments = False
break
return no_more_deployments
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently assocaited with.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def overwrite_stage_variables(self, ret, stage_variables):
'''
overwrite the given stage_name's stage variables with the given stage_variables
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
    def _set_current_deployment(self, stage_desc_json, stage_variables):
        '''
        Helper method to associate the stage_name to the given deploymentId and make this current

        Creates the stage (pointed at self._deploymentId) when it does not
        exist yet; otherwise overwrites the stage variables and re-activates
        self._deploymentId on the stage.  Returns the result dict of the
        activate call, or {'set': False, 'error': ...} on failure.
        '''
        stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               **self._common_aws_args).get('stage')
        if not stage:
            stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                                 stageName=self._stage_name,
                                                                 deploymentId=self._deploymentId,
                                                                 description=stage_desc_json,
                                                                 variables=stage_variables,
                                                                 **self._common_aws_args)
            if not stage.get('stage'):
                return {'set': False, 'error': stage.get('error')}
        else:
            # overwrite the stage variables
            overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                                  stageName=self._stage_name,
                                                                                  variables=stage_variables,
                                                                                  **self._common_aws_args)
            if not overwrite.get('stage'):
                return {'set': False, 'error': overwrite.get('error')}
        # point the (new or existing) stage at the desired deployment
        return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                                   stageName=self._stage_name,
                                                                   deploymentId=self._deploymentId,
                                                                   **self._common_aws_args)
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
    def delete_stage(self, ret):
        '''
        Method to delete the given stage_name.  If the current deployment tied to the given
        stage_name has no other stages associated with it, the deployment will be removed
        as well

        ret
            a dictionary for returning status to Saltstack
        '''
        deploymentId = self._get_current_deployment_id()
        if deploymentId:
            result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                                  stageName=self._stage_name,
                                                                  **self._common_aws_args)
            if not result.get('deleted'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
            else:
                # check if it is safe to delete the deployment as well.
                if not self._one_or_more_stages_remain(deploymentId):
                    result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                               deploymentId=deploymentId,
                                                                               **self._common_aws_args)
                    if not result.get('deleted'):
                        ret['abort'] = True
                        ret['result'] = False
                        ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
                else:
                    ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
        else:
            # no matching stage_name/deployment found
            ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
        return ret
    def verify_api(self, ret):
        '''
        this method helps determine if the given stage_name is already on a deployment
        label matching the input api_name, swagger_file.

        If yes, returns abort with comment indicating already at desired state.
        If not and there is previous deployment labels in AWS matching the given input api_name and
        swagger file, indicate to the caller that we only need to reassociate stage_name to the
        previously existing deployment label.

        Sets ret['current'] when already at the desired state and
        ret['publish'] when only a stage re-association is needed.
        '''
        if self.restApiId:
            deployed_label_json = self._get_current_deployment_label()
            if deployed_label_json == self.deployment_label_json:
                ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                                  'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
                ret['current'] = True
                return ret
            else:
                # a deployment for this exact swagger file may already exist;
                # remember its id so publish_api can just re-point the stage
                self._deploymentId = self._get_desired_deployment_id()
                if self._deploymentId:
                    ret['publish'] = True
        return ret
    def publish_api(self, ret, stage_variables):
        '''
        this method tie the given stage_name to a deployment matching the given swagger_file

        ret
            a dictionary for returning status to Saltstack

        stage_variables
            dictionary of stage variables to apply to the stage
        '''
        stage_desc = dict()
        stage_desc['current_deployment_label'] = self.deployment_label
        stage_desc_json = _dict_to_json_pretty(stage_desc)
        if self._deploymentId:
            # just do a reassociate of stage_name to an already existing deployment
            res = self._set_current_deployment(stage_desc_json, stage_variables)
            if not res.get('set'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret,
                                   'publish_api (reassociate deployment, set stage_variables)',
                                   res.get('response'))
        else:
            # no deployment existed for the given swagger_file for this Swagger object
            res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    stageDescription=stage_desc_json,
                                                                    description=self.deployment_label_json,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
            if not res.get('created'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
        return ret
    def _cleanup_api(self):
        '''
        Helper method to clean up resources and models if we detected a change in the swagger file
        for a stage

        Returns {'deleted': True} on success, or the failing delete result.
        '''
        resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                       **self._common_aws_args)
        if resources.get('resources'):
            # skip the first entry (the root resource) and delete the rest in
            # reverse order -- assumes the describe result is ordered by path
            # so children are deleted before their parents; TODO confirm
            res = resources.get('resources')[1:]
            res.reverse()
            for resource in res:
                delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                          path=resource.get('path'),
                                                                          **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres
        models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
        if models.get('models'):
            for model in models.get('models'):
                delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                      modelName=model.get('name'),
                                                                      **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres
        return {'deleted': True}
    def deploy_api(self, ret):
        '''
        this method create the top level rest api in AWS apigateway

        When the rest api already exists (restApiId resolved), it is cleaned
        up (resources/models removed) and reused rather than recreated.
        '''
        if self.restApiId:
            res = self._cleanup_api()
            if not res.get('deleted'):
                ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
                ret['abort'] = True
                ret['result'] = False
                return ret
            return ret
        response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                          description=_Swagger.AWS_API_DESCRIPTION,
                                                          **self._common_aws_args)
        if not response.get('created'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in response:
                ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
            return ret
        # remember the new api id for all subsequent module calls
        self.restApiId = response.get('restapi', {}).get('id')
        return _log_changes(ret, 'deploy_api', response.get('restapi'))
    def delete_api(self, ret):
        '''
        Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

        ret
            a dictionary for returning status to Saltstack

        Honors test mode: reports the pending deletion without performing it.
        '''
        exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
        if exists_response.get('exists'):
            if __opts__['test']:
                ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
                ret['result'] = None
                ret['abort'] = True
                return ret
            delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                                         **self._common_aws_args)
            if not delete_api_response.get('deleted'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in delete_api_response:
                    ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
                return ret
            ret = _log_changes(ret, 'delete_api', delete_api_response)
        else:
            ret['comment'] = ('api already absent for swagger file: '
                              '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
        return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
    def _build_dependent_model_list(self, obj_schema):
        '''
        Helper function to build the list of models the given object schema is referencing.

        Walks array 'items', '$ref' targets and object 'properties'
        recursively; returns a de-duplicated list of referenced model names.

        NOTE(review): this defaults obj_schema['type'] to 'object' in place,
        mutating the parsed swagger data as a side effect -- confirm nothing
        depends on the original dict before changing this.
        '''
        dep_models_list = []
        if obj_schema:
            obj_schema['type'] = obj_schema.get('type', 'object')
            if obj_schema['type'] == 'array':
                dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
            else:
                ref = obj_schema.get('$ref')
                if ref:
                    # recurse into the referenced model first, then record it
                    ref_obj_model = ref.split("/")[-1]
                    ref_obj_schema = self._models().get(ref_obj_model)
                    dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                    dep_models_list.extend([ref_obj_model])
                else:
                    # need to walk each property object
                    properties = obj_schema.get('properties')
                    if properties:
                        for _, prop_obj_schema in six.iteritems(properties):
                            dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
        return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
    def deploy_models(self, ret):
        '''
        Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

        ret
            a dictionary for returning status to Saltstack
        '''
        # models() yields (name, schema) in dependency order so referenced
        # models already exist in AWS before the models that use them
        for model, schema in self.models():
            # add in a few attributes into the model schema that AWS expects
            # _schema = schema.copy()
            _schema = self._update_schema_to_aws_notation(schema)
            _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                            'title': '{0} Schema'.format(model)})

            # check to see if model already exists, aws has 2 default models [Empty, Error]
            # which may need update with data from swagger file
            model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                                 modelName=model,
                                                                                 **self._common_aws_args)

            if model_exists_response.get('exists'):
                # existing model: push the (possibly changed) schema
                update_model_schema_response = (
                    __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                        modelName=model,
                                                                        schema=_dict_to_json_pretty(_schema),
                                                                        **self._common_aws_args))
                if not update_model_schema_response.get('updated'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in update_model_schema_response:
                        ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              update_model_schema_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
            else:
                create_model_response = (
                    __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                                 modelDescription=model,
                                                                 schema=_dict_to_json_pretty(_schema),
                                                                 contentType='application/json',
                                                                 **self._common_aws_args))

                if not create_model_response.get('created'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in create_model_response:
                        ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              create_model_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', create_model_response)

        return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
    def _lambda_uri(self, lambda_name, lambda_region):
        '''
        Helper Method to construct the lambda uri for use in method integration

        lambda_name
            name of the lambda function to resolve to an ARN

        lambda_region
            preferred region for the lambda function lookup

        Raises ValueError when the function cannot be found.
        '''
        profile = self._common_aws_args.get('profile')
        region = self._common_aws_args.get('region')

        lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
        apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

        lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

        # NOTE(review): the fallback call below is byte-identical to the first
        # describe_function call (same arguments) even though the comment says it
        # retries in the apigateway's region -- presumably a region override was
        # intended for one of the two calls; TODO confirm against boto_lambda
        if lambda_region != apigw_region:
            if not lambda_desc.get('function'):
                # try look up in the same region as the apigateway as well if previous lookup failed
                lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

        if not lambda_desc.get('function'):
            raise ValueError('Could not find lambda function {0} in '
                             'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

        lambda_arn = lambda_desc.get('function').get('FunctionArn')
        lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                      '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
        return lambda_uri
    def _parse_method_data(self, method_name, method_data):
        '''
        Helper function to construct the method request params, models, request_templates and
        integration_type values needed to configure method request integration/mappings.

        method_name
            lower-cased http method name (e.g. 'get', 'options')

        method_data
            the swagger operation object for this method
        '''
        method_params = {}
        method_models = {}
        if 'parameters' in method_data:
            for param in method_data['parameters']:
                p = _Swagger.SwaggerParameter(param)
                if p.name:
                    # mark the mapped request parameter as required
                    method_params[p.name] = True
                if p.schema:
                    method_models['application/json'] = p.schema

        # 'options' methods are mocked (no lambda behind them) to support CORS
        request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
        integration_type = "MOCK" if method_name == 'options' else "AWS"

        return {'params': method_params,
                'models': method_models,
                'request_templates': request_templates,
                'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
    def _get_pattern_for_schema(self, schema_name, httpStatus):
        '''
        returns the pattern specified in a response schema

        Falls back to '.+' for 4xx/5xx statuses (selection must match a
        non-empty error message) and '.*' otherwise when the model defines
        no pattern of its own.
        '''
        defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
        model = self._models().get(schema_name)
        patterns = self._find_patterns(model)
        # only the first pattern found in the model is used for selection
        return patterns[0] if patterns else defaultPattern
    def _get_response_template(self, method_name, http_status):
        '''
        Return the integration response template mapping for the given method
        name and http status code.

        Uses the user-supplied response/error templates when provided,
        otherwise falls back to the class defaults; 'options' methods and
        non-error statuses never use the error template.
        '''
        if method_name == 'options' or not self._is_http_error_rescode(http_status):
            response_templates = {'application/json': self._response_template} \
                                 if self._response_template else self.RESPONSE_OPTION_TEMPLATE
        else:
            response_templates = {'application/json': self._error_response_template} \
                                 if self._error_response_template else self.RESPONSE_TEMPLATE
        return response_templates
    def _parse_method_response(self, method_name, method_response, httpStatus):
        '''
        Helper function to construct the method response params, models, and integration_params
        values needed to configure method response integration/mappings.

        method_name
            lower-cased http method name

        method_response
            a _Swagger.SwaggerMethodResponse wrapper around the swagger response object

        httpStatus
            status code string this response is registered under
        '''
        method_response_models = {}
        method_response_pattern = '.*'
        if method_response.schema:
            method_response_models['application/json'] = method_response.schema
            # selection pattern comes from the response model (or a default)
            method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

        method_response_params = {}
        method_integration_response_params = {}
        for header in method_response.headers:
            response_header = 'method.response.header.{0}'.format(header)
            method_response_params[response_header] = False
            header_data = method_response.headers.get(header)
            # map each declared header to its quoted default value, or '*'
            method_integration_response_params[response_header] = (
                "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

        response_templates = self._get_response_template(method_name, httpStatus)

        return {'params': method_response_params,
                'models': method_response_models,
                'integration_params': method_integration_response_params,
                'pattern': method_response_pattern,
                'response_templates': response_templates}
    def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                       lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to create a method for the given resource path, along with its associated
        request and response integrations.

        ret
            a dictionary for returning status to Saltstack

        resource_path
            the full resource path where the named method_name will be associated with.

        method_name
            a string that is one of the following values: 'delete', 'get', 'head', 'options',
            'patch', 'post', 'put'

        method_data
            the value dictionary for this method in the swagger definition file.

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        method = self._parse_method_data(method_name.lower(), method_data)

        # for options method to enable CORS, api_key_required will be set to False always.
        # authorization_type will be set to 'NONE' always.
        if method_name.lower() == 'options':
            api_key_required = False
            authorization_type = 'NONE'

        m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                          resourcePath=resource_path,
                                                          httpMethod=method_name.upper(),
                                                          authorizationType=authorization_type,
                                                          apiKeyRequired=api_key_required,
                                                          requestParameters=method.get('params'),
                                                          requestModels=method.get('models'),
                                                          **self._common_aws_args)
        if not m.get('created'):
            ret = _log_error_and_abort(ret, m)
            return ret

        ret = _log_changes(ret, '_deploy_method.create_api_method', m)

        # 'options' methods are MOCK integrations and need no lambda uri
        lambda_uri = ""
        if method_name.lower() != 'options':
            lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                          lambda_region=lambda_region)

        # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
        # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
        integration = (
            __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                               resourcePath=resource_path,
                                                               httpMethod=method_name.upper(),
                                                               integrationType=method.get('integration_type'),
                                                               integrationHttpMethod='POST',
                                                               uri=lambda_uri,
                                                               credentials=lambda_integration_role,
                                                               requestTemplates=method.get('request_templates'),
                                                               **self._common_aws_args))
        if not integration.get('created'):
            ret = _log_error_and_abort(ret, integration)
            return ret
        ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

        if 'responses' in method_data:
            # configure a method response + integration response for each
            # declared status code
            for response, response_data in six.iteritems(method_data['responses']):
                httpStatus = str(response)  # future lint: disable=blacklisted-function
                method_response = self._parse_method_response(method_name.lower(),
                                                              _Swagger.SwaggerMethodResponse(response_data), httpStatus)

                mr = __salt__['boto_apigateway.create_api_method_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    responseParameters=method_response.get('params'),
                    responseModels=method_response.get('models'),
                    **self._common_aws_args)
                if not mr.get('created'):
                    ret = _log_error_and_abort(ret, mr)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

                mir = __salt__['boto_apigateway.create_api_integration_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    selectionPattern=method_response.get('pattern'),
                    responseParameters=method_response.get('integration_params'),
                    responseTemplates=method_response.get('response_templates'),
                    **self._common_aws_args)
                if not mir.get('created'):
                    ret = _log_error_and_abort(ret, mir)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
        else:
            raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

        return ret
    def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to deploy resources defined in the swagger file.

        ret
            a dictionary for returning status to Saltstack

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        for path, pathData in self.paths:
            # creates the full resource hierarchy for the path
            resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                        path=path,
                                                                        **self._common_aws_args)
            if not resource.get('created'):
                ret = _log_error_and_abort(ret, resource)
                return ret
            ret = _log_changes(ret, 'deploy_resources', resource)
            for method, method_data in six.iteritems(pathData):
                # skip vendor extensions and other non-operation keys
                if method in _Swagger.SWAGGER_OPERATION_NAMES:
                    ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                              lambda_integration_role, lambda_region, authorization_type)
        return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.overwrite_stage_variables
|
python
|
    def overwrite_stage_variables(self, ret, stage_variables):
        '''
        overwrite the given stage_name's stage variables with the given stage_variables

        ret
            a dictionary for returning status to Saltstack

        stage_variables
            dictionary of variables to set on the stage, replacing any existing ones
        '''
        res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                        stageName=self._stage_name,
                                                                        variables=stage_variables,
                                                                        **self._common_aws_args)

        if not res.get('overwrite'):
            # execution module reported failure: abort the state run
            ret['result'] = False
            ret['abort'] = True
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'overwrite_stage_variables',
                               res.get('stage'))
        return ret
|
overwrite the given stage_name's stage variables with the given stage_variables
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1033-L1050
|
[
"def _log_changes(ret, changekey, changevalue):\n '''\n For logging create/update/delete operations to AWS ApiGateway\n '''\n cl = ret['changes'].get('new', [])\n cl.append({changekey: _object_reducer(changevalue)})\n ret['changes']['new'] = cl\n return ret\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check, but rather make sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
yield (model, self._models().get(model))
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
@property
def basePath(self):
'''
returns the base path field as defined in the swagger file
'''
basePath = self._cfg.get('basePath', '')
return basePath
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
'''
Helper function to find whether there are other stages still associated with a deployment
'''
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
return bool(stages)
def no_more_deployments_remain(self):
'''
Helper function to find whether there are deployments left with stages associated
'''
no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
if stages:
no_more_deployments = False
break
return no_more_deployments
    def _get_current_deployment_id(self):
        '''
        Helper method to find the deployment id that the stage name is currently associated with.

        Returns '' when the stage does not exist.
        '''
        deploymentId = ''
        stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               **self._common_aws_args).get('stage')
        if stage:
            deploymentId = stage.get('deploymentId')
        return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
def _set_current_deployment(self, stage_desc_json, stage_variables):
'''
Helper method to associate the stage_name to the given deploymentId and make this current
'''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if not stage:
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
description=stage_desc_json,
variables=stage_variables,
**self._common_aws_args)
if not stage.get('stage'):
return {'set': False, 'error': stage.get('error')}
else:
# overwrite the stage variables
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not overwrite.get('stage'):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
**self._common_aws_args)
    def _resolve_api_id(self):
        '''
        returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
        as the api description

        Sets self.restApiId as a side effect when exactly one match is found;
        raises ValueError on multiple matches; leaves restApiId unset when
        there is no match.
        '''
        apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                         **self._common_aws_args).get('restapi')
        if apis:
            if len(apis) == 1:
                self.restApiId = apis[0].get('id')
            else:
                # NOTE(review): the error text prints self.info_json although the
                # lookup matched on AWS_API_DESCRIPTION -- presumably intended
                # for readability; confirm if the two ever diverge
                raise ValueError('Multiple APIs matching given name {0} and '
                                 'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
def verify_api(self, ret):
'''
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
If yes, returns abort with comment indicating already at desired state.
If not and there is previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.
'''
if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if deployed_label_json == self.deployment_label_json:
ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
ret['current'] = True
return ret
else:
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
def publish_api(self, ret, stage_variables):
    '''
    Tie the stage_name to a deployment matching the given swagger_file.

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dictionary of stage variables to set on the stage

    If verify_api found an existing matching deployment (self._deploymentId),
    the stage is reassociated to it; otherwise a new deployment is created.
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Remove all resources and models of the current rest api, used when a
    change in the swagger file is detected for a stage.

    Returns the failing delete result on first error, otherwise
    {'deleted': True}.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first entry (presumably the root '/' resource, which cannot
        # be deleted — TODO confirm describe_api_resources ordering) and
        # delete in reverse so children go before their parents
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    Create the top level rest api in AWS apigateway.  If the rest api already
    exists (self.restApiId is set), its resources and models are cleaned up
    instead and no new api is created.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in the user-facing message: 'restAreId' -> 'restApiId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')
    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Delete the Rest Api named by the swagger file's Info Object's title value,
    provided it carries the marker description provisioned by this state.

    ret
        a dictionary for returning status to Saltstack

    Honors __opts__['test'] (dry run: aborts with result None).
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
    return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Return the de-duplicated list of model names the given object schema
    references (transitively) via '$ref'.

    NOTE(review): mutates obj_schema in place by defaulting its 'type' field
    to 'object' — presumably intentional normalization; confirm callers do
    not rely on the schema being untouched.
    '''
    dep_models_list = []

    if obj_schema:
        obj_schema['type'] = obj_schema.get('type', 'object')

        if obj_schema['type'] == 'array':
            # array items may themselves reference models
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # resolve the referenced model and recurse into its schema
                # so dependencies appear before the model itself
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Build a map of model name -> list of model names it references, for all
    models defined in the swagger file.
    '''
    ret = {}
    for model, schema in six.iteritems(self._models()):
        dep_list = self._build_dependent_model_list(schema)
        ret[model] = dep_list
    return ret
def _get_model_without_dependencies(self, models_dict):
    '''
    Pop and return the next model that has no outstanding dependencies, and
    remove it from the dependency lists of the remaining models.

    models_dict
        map of model name -> list of model names it still depends on;
        modified in place.

    Returns None when models_dict is empty; raises ValueError when every
    remaining model still has dependencies (incomplete definitions).
    '''
    next_model = None
    if not models_dict:
        return next_model

    for model, dependencies in six.iteritems(models_dict):
        if dependencies == []:
            next_model = model
            break

    if next_model is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))

    # remove the model from other dependencies before returning
    models_dict.pop(next_model)
    for model, dep_list in six.iteritems(models_dict):
        if next_model in dep_list:
            dep_list.remove(next_model)

    return next_model
def deploy_models(self, ret):
    '''
    Deploy the swagger file's definition objects and associated schemas to
    AWS Apigateway as Models, in dependency order.

    ret
        a dictionary for returning status to Saltstack
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # (rewrites $refs to AWS model URLs and sets $schema/title)
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Construct the lambda invocation uri for use in the method integration.

    lambda_name
        name of the lambda function to resolve to an ARN
    lambda_region
        preferred region for the lambda function; when it differs from the
        apigateway's region and the function is not found there, the lookup
        falls back to the apigateway's region.

    Raises ValueError if the function cannot be found in either region.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    # look up the function in the requested lambda region first.
    # BUG FIX: the original issued the same describe_function call twice with
    # identical arguments, so the "fallback" never actually searched a
    # different region even though the error message claims both were tried.
    lambda_args = dict(self._common_aws_args)
    lambda_args['region'] = lambda_region
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **lambda_args)

    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # fall back to the same region as the apigateway
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Construct the method request params, models, request_templates and
    integration_type values needed to configure method request
    integration/mappings.

    method_name
        lower-cased swagger operation name ('get', 'options', ...)
    method_data
        the value dictionary for this method in the swagger definition file
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                method_params[p.name] = True
            if p.schema:
                method_models['application/json'] = p.schema

    # 'options' methods are mocked (CORS preflight); everything else goes to lambda
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"

    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Recursively collect all values stored under 'pattern' keys in a
    (possibly nested) dictionary.  Non-dict containers are not descended
    into.
    '''
    found = []
    if isinstance(o, dict):
        for key, value in o.items():
            if isinstance(value, dict):
                found.extend(self._find_patterns(value))
            elif key == 'pattern':
                found.append(value)
    return found
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    Return the first 'pattern' found in the named response model schema, or a
    default selection pattern ('.+' for 4xx/5xx statuses, '.*' otherwise).
    '''
    defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    model = self._models().get(schema_name)
    patterns = self._find_patterns(model)
    return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
    '''
    Select the integration response templates for a method/status pair:
    options methods and non-error statuses use the user-supplied
    response_template (or the class default); error statuses use the
    error_response_template (or the class error default).
    '''
    if method_name == 'options' or not self._is_http_error_rescode(http_status):
        response_templates = {'application/json': self._response_template} \
            if self._response_template else self.RESPONSE_OPTION_TEMPLATE
    else:
        response_templates = {'application/json': self._error_response_template} \
            if self._error_response_template else self.RESPONSE_TEMPLATE
    return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Construct the method response params, models, integration_params,
    selection pattern and response templates needed to configure method
    response integration/mappings.

    method_response
        a _Swagger.SwaggerMethodResponse wrapping the swagger response object
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # a 'default' entry in the header definition becomes the mapped
        # static value; otherwise '*' is used
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack
    resource_path
        the full resource path where the named method_name will be associated with.
    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'
    method_data
        the value dictionary for this method in the swagger definition file.
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'

    Raises ValueError if the swagger method has no 'responses' object.
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Deploy resources defined in the swagger file, creating each resource path
    and all of its recognized operations.

    ret
        a dictionary for returning status to Saltstack
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # keys that are not http operations (e.g. vendor extensions) are skipped
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._set_current_deployment
|
python
|
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Associate the stage_name with self._deploymentId and make it current.

    Creates the stage if it does not exist yet; otherwise overwrites its
    stage variables and activates the deployment on it.

    Returns the execution-module result dict; on failure returns
    {'set': False, 'error': ...}.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        # NOTE(review): success is detected via the 'stage' key of the
        # overwrite result — confirm that matches overwrite_api_stage_variables'
        # return shape.
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
|
Helper method to associate the stage_name to the given deploymentId and make this current
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1052-L1080
| null |
class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''
    # fields allowed in a Swagger (OpenAPI) 2.0 top-level object
    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)

    # VENDOR SPECIFIC FIELD PATTERNS (swagger vendor extensions, e.g. 'x-amazon-...')
    VENDOR_EXT_PATTERN = re.compile('^x-')

    # JSON_SCHEMA_REF used as the '$schema' value on every deployed model
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
    class SwaggerParameter(object):
        '''
        Helper wrapper around a single Swagger Parameter Object.
        '''
        # parameter locations supported by this state
        LOCATIONS = ('body', 'query', 'header', 'path')

        def __init__(self, paramdict):
            # raw swagger parameter object (a dict)
            self._paramdict = paramdict

        @property
        def location(self):
            '''
            returns location in the swagger parameter object; raises
            ValueError for unsupported locations
            '''
            _location = self._paramdict.get('in')
            if _location in _Swagger.SwaggerParameter.LOCATIONS:
                return _location
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

        @property
        def name(self):
            '''
            returns the AWS method-request mapping name for this parameter
            (None for body parameters); raises ValueError when the parameter
            has no name
            '''
            _name = self._paramdict.get('name')
            if _name:
                if self.location == 'header':
                    return 'method.request.header.{0}'.format(_name)
                elif self.location == 'query':
                    return 'method.request.querystring.{0}'.format(_name)
                elif self.location == 'path':
                    return 'method.request.path.{0}'.format(_name)
                # body parameters are mapped through a schema, not a name
                return None
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

        @property
        def schema(self):
            '''
            returns the name of the schema given the reference in the swagger
            parameter object (body parameters only; None otherwise)
            '''
            if self.location == 'body':
                _schema = self._paramdict.get('schema')
                if _schema:
                    if '$ref' in _schema:
                        schema_name = _schema.get('$ref').split('/')[-1]
                        return schema_name
                    raise ValueError(('Body parameter must have a JSON reference '
                                      'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
                raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
            return None
    class SwaggerMethodResponse(object):
        '''
        Helper class for Swagger Method Response Object
        '''
        def __init__(self, r):
            # raw swagger response object (a dict)
            self._r = r

        @property
        def schema(self):
            '''
            returns the name of the schema given the reference in the swagger
            method response object (None when no schema is declared); raises
            ValueError when the schema is not a JSON reference
            '''
            _schema = self._r.get('schema')
            if _schema:
                if '$ref' in _schema:
                    return _schema.get('$ref').split('/')[-1]
                raise ValueError(('Method response must have a JSON reference '
                                  'to the schema definition: {0}'.format(_schema)))
            return None

        @property
        def headers(self):
            '''
            returns the headers dictionary in the method response object
            (empty dict when none are declared)
            '''
            _headers = self._r.get('headers', {})
            return _headers
    def __init__(self, api_name, stage_name, lambda_funcname_format,
                 swagger_file_path, error_response_template, response_template, common_aws_args):
        '''
        Load and validate the swagger file and resolve the rest api id.

        NOTE(review): when swagger_file_path is None, self._swagger_file,
        self._md5_filehash and self._cfg are never assigned, and
        _validate_swagger_file will fail on self._cfg — confirm callers never
        pass None, or this will raise AttributeError.
        '''
        self._api_name = api_name
        self._stage_name = stage_name
        self._lambda_funcname_format = lambda_funcname_format
        self._common_aws_args = common_aws_args
        self._restApiId = ''
        self._deploymentId = ''
        self._error_response_template = error_response_template
        self._response_template = response_template

        if swagger_file_path is not None:
            if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
                self._swagger_file = swagger_file_path
                # hash covers the swagger file plus both templates so any
                # template change also triggers a redeploy
                self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                       error_response_template,
                                                       response_template)
                with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                    self._cfg = salt.utils.yaml.safe_load(sf)
                self._swagger_version = ''
            else:
                raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

            self._validate_swagger_file()

        self._validate_lambda_funcname_format()

        self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
    def _validate_error_response_model(self, paths, mods):
        '''
        Validate the convention established in the swagger file on how
        4xx/5xx response codes are mapped: every error response must reference
        a model under /definitions that is an object with an 'errorMessage'
        property.  Raises ValueError on any violation.
        '''
        for path, ops in paths:
            for opname, opobj in six.iteritems(ops):
                if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                    continue

                if 'responses' not in opobj:
                    raise ValueError('missing mandatory responses field in path item object')
                for rescode, resobj in six.iteritems(opobj.get('responses')):
                    # only check for response code from 400-599
                    if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                        continue
                    if 'schema' not in resobj:
                        raise ValueError('missing schema field in path {0}, '
                                         'op {1}, response {2}'.format(path, opname, rescode))

                    schemaobj = resobj.get('schema')
                    if '$ref' not in schemaobj:
                        raise ValueError('missing $ref field under schema in '
                                         'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                    schemaobjref = schemaobj.get('$ref', '/')
                    modelname = schemaobjref.split('/')[-1]

                    if modelname not in mods:
                        raise ValueError('model schema {0} reference not found '
                                         'under /definitions'.format(schemaobjref))
                    model = mods.get(modelname)

                    if model.get('type') != 'object':
                        raise ValueError('model schema {0} must be type object'.format(modelname))
                    if 'properties' not in model:
                        raise ValueError('model schema {0} must have properties fields'.format(modelname))

                    modelprops = model.get('properties')
                    if 'errorMessage' not in modelprops:
                        raise ValueError('model schema {0} must have errorMessage as a property to '
                                         'match AWS convention. If pattern is not set, .+ will '
                                         'be used'.format(modelname))
    def _validate_lambda_funcname_format(self):
        '''
        Checks if the lambda function name format contains only known elements
        :return: True on success, ValueError raised on error
        '''
        try:
            if self._lambda_funcname_format:
                known_kwargs = dict(stage='',
                                    api='',
                                    resource='',
                                    method='')
                self._lambda_funcname_format.format(**known_kwargs)
            return True
        except Exception:
            # deliberately broad: any str.format failure (unknown key, bad
            # format spec, ...) is surfaced to the user as a ValueError
            raise ValueError('Invalid lambda_funcname_format {0}.  Please review '
                             'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check, but rather make sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
    @property
    def md5_filehash(self):
        '''
        returns md5 hash for the swagger file (plus response templates, as
        computed in __init__)
        '''
        return self._md5_filehash

    @property
    def info(self):
        '''
        returns the swagger info object as a dictionary; raises ValueError
        when it is missing or empty
        '''
        info = self._cfg.get('info')
        if not info:
            raise ValueError('Info Object has no values')
        return info

    @property
    def info_json(self):
        '''
        returns the swagger info object as a pretty printed json string.
        '''
        return _dict_to_json_pretty(self.info)

    @property
    def rest_api_name(self):
        '''
        returns the name of the api
        '''
        return self._api_name

    @property
    def rest_api_version(self):
        '''
        returns the version field in the swagger info object; raises
        ValueError when absent
        '''
        version = self.info.get('version')
        if not version:
            raise ValueError('Missing version value in Info Object')

        return version
    def _models(self):
        '''
        returns the definitions dictionary (model name -> schema) specified in
        the swagger file; raises ValueError when no definitions exist
        '''
        models = self._cfg.get('definitions')
        if not models:
            raise ValueError('Definitions Object has no values, You need to define them in your swagger file')

        return models
    def models(self):
        '''
        generator to return the tuple of model and its schema to create on
        aws, yielded in dependency order (a model is only yielded after every
        model it references).
        '''
        model_dict = self._build_all_dependencies()
        while True:
            model = self._get_model_without_dependencies(model_dict)
            if not model:
                break
            yield (model, self._models().get(model))
    @property
    def paths(self):
        '''
        returns an iterator of (path, path item object) pairs for the relative
        resource paths specified in the swagger file; raises ValueError when
        paths are missing or a path does not start with '/'
        '''
        paths = self._cfg.get('paths')
        if not paths:
            raise ValueError('Paths Object has no values, You need to define them in your swagger file')
        for path in paths:
            if not path.startswith('/'):
                raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
        return six.iteritems(paths)
    @property
    def basePath(self):
        '''
        returns the base path field as defined in the swagger file (empty
        string when unspecified)
        '''
        basePath = self._cfg.get('basePath', '')
        return basePath

    @property
    def restApiId(self):
        '''
        returns the rest api id as returned by AWS on creation of the rest api
        (empty string until deploy_api or _resolve_api_id sets it)
        '''
        return self._restApiId

    @restApiId.setter
    def restApiId(self, restApiId):
        '''
        allows the assignment of the rest api id on creation of the rest api
        '''
        self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    The unique deployment label rendered as pretty-printed JSON.
    '''
    label = self.deployment_label
    return _dict_to_json_pretty(label)
@property
def deployment_label(self):
    '''
    Dictionary uniquely describing this deployment (stored as the
    stage/deployment description on AWS): the swagger info object, the
    api name, the swagger file's basename and its md5 hash.
    '''
    return {
        'swagger_info_object': self.info,
        'api_name': self.rest_api_name,
        'swagger_file': os.path.basename(self._swagger_file),
        'swagger_file_md5sum': self.md5_filehash,
    }
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Return True when the given deployment still has any stage attached.
    '''
    res = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                          deploymentId=deploymentId,
                                                          **self._common_aws_args)
    return bool(res.get('stages'))
def no_more_deployments_remain(self):
    '''
    Return True when no remaining deployment of this Rest API has any
    stage still associated with it.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if not deployments:
        return True
    for deployment in deployments:
        # a deployment with at least one stage still counts as "in use"
        if self._one_or_more_stages_remain(deployment.get('id')):
            return False
    return True
def _get_current_deployment_id(self):
    '''
    Return the deployment id currently associated with this object's
    stage name, or '' when the stage does not exist.
    '''
    res = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                         stageName=self._stage_name,
                                                         **self._common_aws_args)
    stage = res.get('stage')
    return stage.get('deploymentId') if stage else ''
def _get_current_deployment_label(self):
    '''
    Return the description (deployment label) of the deployment the
    stage currently points at, or None when it cannot be found.
    '''
    deployment_id = self._get_current_deployment_id()
    res = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                              deploymentId=deployment_id,
                                                              **self._common_aws_args)
    deployment = res.get('deployment')
    return deployment.get('description') if deployment else None
def _get_desired_deployment_id(self):
    '''
    Return the id of an existing deployment whose description matches
    this object's deployment label, or '' when none matches.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    for deployment in deployments or []:
        if deployment.get('description') == self.deployment_label_json:
            return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    Replace the stage's variables with stage_variables, recording the
    outcome (success change log or abort with error) in ret.
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
    if res.get('overwrite'):
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    else:
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    return ret
def _resolve_api_id(self):
    '''
    Look up and store the id of the Rest API matching rest_api_name and
    the fixed _Swagger.AWS_API_DESCRIPTION.

    Raises ValueError when more than one api matches.
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if not apis:
        return
    if len(apis) > 1:
        raise ValueError('Multiple APIs matching given name {0} and '
                         'description {1}'.format(self.rest_api_name, self.info_json))
    self.restApiId = apis[0].get('id')
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack; on failure the
        'abort'/'result'/'comment' keys are set, on success 'comment'.
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        # delete the stage first; only afterwards is it safe to consider
        # removing the deployment it pointed at
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                # NOTE(review): this message is only emitted when other stages
                # still reference the deployment — confirm that is intended
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.

    When neither case applies, ret is returned unchanged, meaning a full
    (re)deployment is required.
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            # stage already points at a deployment with the desired label
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # a matching deployment may exist even though the stage does not
            # point at it yet; remember its id so publish_api can reassociate
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dictionary of stage variables to set on the stage
    '''
    # the stage description records which deployment label the stage is on
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)
    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage

    Returns a dict with 'deleted': True on success, or the failing
    delete result on the first error.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first (root '/') resource and delete the rest
        # deepest-first so children go before their parents
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    return {'deleted': True}
def deploy_api(self, ret):
    '''
    Create the top level rest api in AWS apigateway, or — when the api
    already exists — wipe its resources and models so they can be
    re-deployed from the (changed) swagger definition.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in user-facing message: was 'restAreId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret
    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)
    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret
    # remember the id AWS assigned so subsequent deploy steps can use it
    self.restApiId = response.get('restapi', {}).get('id')
    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        # honor test=True: report what would happen without deleting
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret
        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret
        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        # nothing to do: no api matches the name/description pair
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
    return ret
def _aws_model_ref_from_swagger_ref(self, r):
    '''
    Convert a swagger '$ref' value (e.g. '#/definitions/Foo') into the
    AWS apigateway model URL for this Rest API.
    '''
    model_name = r.rsplit('/', 1)[-1]
    return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
    '''
    Recursively rewrite a model schema, replacing every '$ref' value
    with the corresponding AWS apigateway model URL.
    '''
    converted = {}
    for key, value in schema.items():
        if key == '$ref':
            value = self._aws_model_ref_from_swagger_ref(value)
        elif isinstance(value, dict):
            value = self._update_schema_to_aws_notation(value)
        converted[key] = value
    return converted
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Returns a de-duplicated list of referenced model names.
    '''
    dep_models_list = []
    if obj_schema:
        # NOTE(review): this writes a default 'type' back into the caller's
        # schema dict (in-place mutation) — confirm that is intended
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            # arrays depend on whatever their item schema references
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # a $ref pulls in the referenced model plus its own dependencies
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model name to the list of model names its schema
    references.
    '''
    return {model: self._build_dependent_model_list(schema)
            for model, schema in six.iteritems(self._models())}
def _get_model_without_dependencies(self, models_dict):
    '''
    Helper function to find the next model that should be created.

    models_dict maps model name -> list of unresolved dependencies; the
    chosen model is removed from the dict and from every other model's
    dependency list (in-place mutation).  Returns None when the dict is
    empty; raises ValueError when no model is dependency-free (i.e. the
    definitions are incomplete or cyclic).
    '''
    next_model = None
    if not models_dict:
        return next_model
    for model, dependencies in six.iteritems(models_dict):
        if dependencies == []:
            next_model = model
            break
    if next_model is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))
    # remove the model from other dependencies before returning
    models_dict.pop(next_model)
    for model, dep_list in six.iteritems(models_dict):
        if next_model in dep_list:
            dep_list.remove(next_model)
    return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    # models() yields in dependency order, so referenced models exist
    # on AWS before anything that points at them
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})
        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need upate with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)
        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))
            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', create_model_response)
    return ret
def _lambda_name(self, resourcePath, httpMethod):
    '''
    Derive the lambda function name for a resource path / http method
    pair from the configured funcname format (see the doc string of
    boto_apigateway.api_present for the naming rule).
    '''
    raw_name = self._lambda_funcname_format.format(stage=self._stage_name,
                                                   api=self.rest_api_name,
                                                   resource=resourcePath,
                                                   method=httpMethod).strip()
    # drop path-parameter braces, map whitespace and '/' to '_',
    # lower-case, then collapse runs of underscores
    no_braces = re.sub(r'{|}', '', raw_name)
    flattened = re.sub(r'\s+|/', '_', no_braces).lower()
    return re.sub(r'_+', '_', flattened)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Construct the AWS integration uri used to wire an api method to its
    backing lambda function.

    lambda_name
        name of the lambda function to resolve to an ARN
    lambda_region
        preferred region of the lambda function; when the function is
        not found there, the api gateway's own region is tried as a
        fallback.

    Raises ValueError when the lambda function cannot be found in
    either region.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')
    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
    # look the function up in the lambda region first; the region from
    # _common_aws_args must be overridden or the lookup happens in the
    # apigateway region instead
    lambda_desc = __salt__['boto_lambda.describe_function'](
        lambda_name, **dict(self._common_aws_args, region=lambda_region))
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            # (previously this retry reused identical arguments and could
            # never find anything new)
            lambda_desc = __salt__['boto_lambda.describe_function'](
                lambda_name, **dict(self._common_aws_args, region=apigw_region))
    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    # the uri region must be the apigateway's region, not the lambda's
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models, request_templates and
    integration_type values needed to configure method request integration/mappings.

    method_name
        lower-cased http operation name ('get', 'options', ...)
    method_data
        the swagger operation object for this method
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                # non-body params become required request parameters
                method_params[p.name] = True
            if p.schema:
                # body params map their referenced model as request model
                method_models['application/json'] = p.schema
    # 'options' methods are mocked (for CORS); everything else is a
    # lambda-backed AWS integration
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"
    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Recursively collect every value stored under a 'pattern' key in the
    given (possibly nested) dictionary.
    '''
    found = []
    if not isinstance(o, dict):
        return found
    for key, value in six.iteritems(o):
        if isinstance(value, dict):
            found.extend(self._find_patterns(value))
        elif key == 'pattern':
            found.append(value)
    return found
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    Return the first 'pattern' found in the named response model, or a
    default selection pattern ('.+' for 4xx/5xx codes, '.*' otherwise).
    '''
    patterns = self._find_patterns(self._models().get(schema_name))
    if patterns:
        return patterns[0]
    return '.+' if self._is_http_error_rescode(httpStatus) else '.*'
def _get_response_template(self, method_name, http_status):
    '''
    Pick the integration response template: the user-supplied template
    when one was configured, otherwise the class default.  4xx/5xx
    codes on non-options methods use the error variants.
    '''
    if method_name != 'options' and self._is_http_error_rescode(http_status):
        custom = self._error_response_template
        default = self.RESPONSE_TEMPLATE
    else:
        custom = self._response_template
        default = self.RESPONSE_OPTION_TEMPLATE
    return {'application/json': custom} if custom else default
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_name
        lower-cased http operation name
    method_response
        a _Swagger.SwaggerMethodResponse wrapping the swagger response object
    httpStatus
        the response status code as a string
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        # the selection pattern comes from the model's 'pattern' field
        # (or a default based on whether the code is an error code)
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map each declared header to its default value, or '*' when none given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
    response_templates = self._get_response_template(method_name, httpStatus)
    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack
    resource_path
        the full resource path where the named method_name will be associated with.
    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'
    method_data
        the value dictionary for this method in the swagger definition file.
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)
    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'
    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_method', m)
    lambda_uri = ""
    if method_name.lower() != 'options':
        # options methods are MOCK integrations and need no lambda backend
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)
    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)
            # wire up the method response first, then the matching
            # integration response (selection pattern + templates)
            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        # create the resource (and implicitly its parents) for this path
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # skip vendor extensions / non-operation keys in the path item
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._resolve_api_id
|
python
|
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
|
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1082-L1095
| null |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
    def _validate_error_response_model(self, paths, mods):
        '''
        Helper function to help validate the convention established in the swagger file on how
        to handle response code mapping/integration: every 4xx/5xx response must
        reference an object model that declares an ``errorMessage`` property.
        '''
        for path, ops in paths:
            for opname, opobj in six.iteritems(ops):
                # path items may carry non-operation keys (parameters, vendor
                # extensions); only inspect real HTTP operations
                if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                    continue

                if 'responses' not in opobj:
                    raise ValueError('missing mandatory responses field in path item object')
                for rescode, resobj in six.iteritems(opobj.get('responses')):
                    # only check for response code from 400-599
                    if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                        continue

                    # error responses must reference a schema by $ref so they
                    # can be mapped to an AWS model
                    if 'schema' not in resobj:
                        raise ValueError('missing schema field in path {0}, '
                                         'op {1}, response {2}'.format(path, opname, rescode))

                    schemaobj = resobj.get('schema')
                    if '$ref' not in schemaobj:
                        raise ValueError('missing $ref field under schema in '
                                         'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                    schemaobjref = schemaobj.get('$ref', '/')
                    modelname = schemaobjref.split('/')[-1]

                    if modelname not in mods:
                        raise ValueError('model schema {0} reference not found '
                                         'under /definitions'.format(schemaobjref))
                    model = mods.get(modelname)

                    if model.get('type') != 'object':
                        raise ValueError('model schema {0} must be type object'.format(modelname))
                    if 'properties' not in model:
                        raise ValueError('model schema {0} must have properties fields'.format(modelname))

                    modelprops = model.get('properties')
                    if 'errorMessage' not in modelprops:
                        raise ValueError('model schema {0} must have errorMessage as a property to '
                                         'match AWS convention. If pattern is not set, .+ will '
                                         'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check, but rather make sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
yield (model, self._models().get(model))
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
@property
def basePath(self):
'''
returns the base path field as defined in the swagger file
'''
basePath = self._cfg.get('basePath', '')
return basePath
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
    def _one_or_more_stages_remain(self, deploymentId):
        '''
        Return True when at least one stage is still associated with the given
        deployment; used to decide whether the deployment itself can be deleted.
        '''
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deploymentId,
                                                                 **self._common_aws_args).get('stages')
        return bool(stages)
    def no_more_deployments_remain(self):
        '''
        Return True when no deployment on this rest api has any stage still
        associated with it (i.e. the rest api can be considered unused).
        '''
        no_more_deployments = True
        deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                           **self._common_aws_args).get('deployments')
        if deployments:
            for deployment in deployments:
                deploymentId = deployment.get('id')
                stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                         deploymentId=deploymentId,
                                                                         **self._common_aws_args).get('stages')
                # one deployment with live stages is enough to answer False
                if stages:
                    no_more_deployments = False
                    break

        return no_more_deployments
    def _get_current_deployment_id(self):
        '''
        Helper method to find the deployment id that the stage name is currently associated with.
        Returns an empty string when the stage does not exist.
        '''
        deploymentId = ''
        stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               **self._common_aws_args).get('stage')
        if stage:
            deploymentId = stage.get('deploymentId')
        return deploymentId
    def _get_current_deployment_label(self):
        '''
        Helper method to find the deployment label (description) that the
        stage_name is currently associated with, or None when there is no
        matching deployment.
        '''
        deploymentId = self._get_current_deployment_id()
        deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                         deploymentId=deploymentId,
                                                                         **self._common_aws_args).get('deployment')
        if deployment:
            return deployment.get('description')
        return None
    def _get_desired_deployment_id(self):
        '''
        Helper method to return the deployment id matching the desired deployment label for
        this Swagger object based on the given api_name, swagger_file.
        Returns an empty string when no deployment carries the desired label.
        '''
        deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                           **self._common_aws_args).get('deployments')
        if deployments:
            for deployment in deployments:
                # descriptions hold the pretty-printed deployment label; exact
                # string match identifies "the same swagger file + templates"
                if deployment.get('description') == self.deployment_label_json:
                    return deployment.get('id')
        return ''
    def overwrite_stage_variables(self, ret, stage_variables):
        '''
        Overwrite the stage variables of this object's stage_name with the
        given stage_variables, recording the outcome in ``ret``.  Sets
        ret['abort'] on failure so callers stop processing.
        '''
        res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                        stageName=self._stage_name,
                                                                        variables=stage_variables,
                                                                        **self._common_aws_args)

        if not res.get('overwrite'):
            ret['result'] = False
            ret['abort'] = True
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'overwrite_stage_variables',
                               res.get('stage'))
        return ret
    def _set_current_deployment(self, stage_desc_json, stage_variables):
        '''
        Helper method to associate the stage_name to the given deploymentId and make this current.
        Creates the stage when it does not exist yet; otherwise refreshes its
        stage variables before activating the deployment.
        '''
        stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               **self._common_aws_args).get('stage')
        if not stage:
            # stage does not exist: create it pointing at the new deployment
            stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                                 stageName=self._stage_name,
                                                                 deploymentId=self._deploymentId,
                                                                 description=stage_desc_json,
                                                                 variables=stage_variables,
                                                                 **self._common_aws_args)
            if not stage.get('stage'):
                return {'set': False, 'error': stage.get('error')}
        else:
            # overwrite the stage variables
            overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                                  stageName=self._stage_name,
                                                                                  variables=stage_variables,
                                                                                  **self._common_aws_args)
            if not overwrite.get('stage'):
                return {'set': False, 'error': overwrite.get('error')}

        return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                                   stageName=self._stage_name,
                                                                   deploymentId=self._deploymentId,
                                                                   **self._common_aws_args)
    def delete_stage(self, ret):
        '''
        Method to delete the given stage_name.  If the current deployment tied to the given
        stage_name has no other stages associated with it, the deployment will be removed
        as well.

        ret
            a dictionary for returning status to Saltstack
        '''
        deploymentId = self._get_current_deployment_id()
        if deploymentId:
            result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                                  stageName=self._stage_name,
                                                                  **self._common_aws_args)
            if not result.get('deleted'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
            else:
                # check if it is safe to delete the deployment as well.
                if not self._one_or_more_stages_remain(deploymentId):
                    result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                               deploymentId=deploymentId,
                                                                               **self._common_aws_args)
                    if not result.get('deleted'):
                        ret['abort'] = True
                        ret['result'] = False
                        ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
                else:
                    ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
        else:
            # no matching stage_name/deployment found
            ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

        return ret
    def verify_api(self, ret):
        '''
        this method helps determine if the given stage_name is already on a deployment
        label matching the input api_name, swagger_file.

        If yes, returns abort with comment indicating already at desired state.
        If not and there is previous deployment labels in AWS matching the given input api_name and
        swagger file, indicate to the caller that we only need to reassociate stage_name to the
        previously existing deployment label (sets ret['publish'] = True and
        remembers the deployment id on self._deploymentId).
        '''
        if self.restApiId:
            deployed_label_json = self._get_current_deployment_label()
            if deployed_label_json == self.deployment_label_json:
                ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                                  'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
                ret['current'] = True
                return ret
            else:
                self._deploymentId = self._get_desired_deployment_id()
                if self._deploymentId:
                    # just a reassociation of stage to an existing deployment
                    ret['publish'] = True
        return ret
    def publish_api(self, ret, stage_variables):
        '''
        this method tie the given stage_name to a deployment matching the given swagger_file.
        When self._deploymentId is already set (by verify_api), only the stage
        association is updated; otherwise a brand new deployment is created.
        '''
        stage_desc = dict()
        stage_desc['current_deployment_label'] = self.deployment_label
        stage_desc_json = _dict_to_json_pretty(stage_desc)

        if self._deploymentId:
            # just do a reassociate of stage_name to an already existing deployment
            res = self._set_current_deployment(stage_desc_json, stage_variables)
            if not res.get('set'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret,
                                   'publish_api (reassociate deployment, set stage_variables)',
                                   res.get('response'))
        else:
            # no deployment existed for the given swagger_file for this Swagger object
            res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    stageDescription=stage_desc_json,
                                                                    description=self.deployment_label_json,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
            if not res.get('created'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
        return ret
    def _cleanup_api(self):
        '''
        Helper method to clean up resources and models if we detected a change in the swagger file
        for a stage.  Returns {'deleted': True} on success, or the failing
        delete result otherwise.
        '''
        resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                       **self._common_aws_args)
        if resources.get('resources'):
            # skip the root '/' resource (index 0) and delete children first
            # (deepest paths first) to satisfy AWS parent/child constraints
            res = resources.get('resources')[1:]
            res.reverse()
            for resource in res:
                delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                          path=resource.get('path'),
                                                                          **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
        if models.get('models'):
            for model in models.get('models'):
                delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                      modelName=model.get('name'),
                                                                      **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        return {'deleted': True}
def deploy_api(self, ret):
'''
this method create the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
    def delete_api(self, ret):
        '''
        Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

        ret
            a dictionary for returning status to Saltstack
        '''
        exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
        if exists_response.get('exists'):
            # honor test=True: report the pending deletion without acting
            if __opts__['test']:
                ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
                ret['result'] = None
                ret['abort'] = True
                return ret

            delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                                         **self._common_aws_args)
            if not delete_api_response.get('deleted'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in delete_api_response:
                    ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
                return ret

            ret = _log_changes(ret, 'delete_api', delete_api_response)
        else:
            ret['comment'] = ('api already absent for swagger file: '
                              '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

        return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
    def _build_dependent_model_list(self, obj_schema):
        '''
        Helper function to build the list of models the given object schema is referencing,
        recursing through array items, $ref targets and object properties.
        Returns a de-duplicated list of model names (order unspecified).
        '''
        dep_models_list = []

        if obj_schema:
            # swagger allows omitting 'type'; treat such schemas as objects
            obj_schema['type'] = obj_schema.get('type', 'object')

            if obj_schema['type'] == 'array':
                dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
            else:
                ref = obj_schema.get('$ref')
                if ref:
                    # referenced model: include its own dependencies first,
                    # then the model itself
                    ref_obj_model = ref.split("/")[-1]
                    ref_obj_schema = self._models().get(ref_obj_model)
                    dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                    dep_models_list.extend([ref_obj_model])
                else:
                    # need to walk each property object
                    properties = obj_schema.get('properties')
                    if properties:
                        for _, prop_obj_schema in six.iteritems(properties):
                            dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
        return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
    def deploy_models(self, ret):
        '''
        Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

        ret
            a dictionary for returning status to Saltstack
        '''

        for model, schema in self.models():
            # add in a few attributes into the model schema that AWS expects
            # (e.g. a draft-4 $schema header and a title)
            _schema = self._update_schema_to_aws_notation(schema)
            _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                            'title': '{0} Schema'.format(model)})

            # check to see if model already exists, aws has 2 default models [Empty, Error]
            # which may need update with data from swagger file
            model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                                 modelName=model,
                                                                                 **self._common_aws_args)

            if model_exists_response.get('exists'):
                update_model_schema_response = (
                    __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                        modelName=model,
                                                                        schema=_dict_to_json_pretty(_schema),
                                                                        **self._common_aws_args))
                if not update_model_schema_response.get('updated'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in update_model_schema_response:
                        ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              update_model_schema_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
            else:
                create_model_response = (
                    __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                                 modelDescription=model,
                                                                 schema=_dict_to_json_pretty(_schema),
                                                                 contentType='application/json',
                                                                 **self._common_aws_args))

                if not create_model_response.get('created'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in create_model_response:
                        ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              create_model_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', create_model_response)

        return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
    def _lambda_uri(self, lambda_name, lambda_region):
        '''
        Helper Method to construct the lambda uri for use in method integration.
        Raises ValueError when the lambda function cannot be found.
        '''
        profile = self._common_aws_args.get('profile')
        region = self._common_aws_args.get('region')

        lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
        apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

        lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

        if lambda_region != apigw_region:
            if not lambda_desc.get('function'):
                # NOTE(review): this retry issues an identical describe_function
                # call with the same arguments — it does not actually switch the
                # lookup to the apigateway region as the comment below implies;
                # confirm intended behavior.
                # try look up in the same region as the apigateway as well if previous lookup failed
                lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

        if not lambda_desc.get('function'):
            raise ValueError('Could not find lambda function {0} in '
                             'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

        lambda_arn = lambda_desc.get('function').get('FunctionArn')
        lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                      '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
        return lambda_uri
    def _parse_method_data(self, method_name, method_data):
        '''
        Helper function to construct the method request params, models, request_templates and
        integration_type values needed to configure method request integration/mappings.
        '''
        method_params = {}
        method_models = {}
        if 'parameters' in method_data:
            for param in method_data['parameters']:
                p = _Swagger.SwaggerParameter(param)
                if p.name:
                    method_params[p.name] = True
                if p.schema:
                    method_models['application/json'] = p.schema

        # 'options' methods are mocked (no lambda behind them) so CORS
        # preflight requests can be answered directly by API Gateway
        request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
        integration_type = "MOCK" if method_name == 'options' else "AWS"

        return {'params': method_params,
                'models': method_models,
                'request_templates': request_templates,
                'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
    def _parse_method_response(self, method_name, method_response, httpStatus):
        '''
        Helper function to construct the method response params, models, and integration_params
        values needed to configure method response integration/mappings.
        '''
        method_response_models = {}
        method_response_pattern = '.*'
        if method_response.schema:
            method_response_models['application/json'] = method_response.schema
            # the schema's 'pattern' (if any) becomes the AWS selectionPattern
            method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

        method_response_params = {}
        method_integration_response_params = {}
        for header in method_response.headers:
            response_header = 'method.response.header.{0}'.format(header)
            method_response_params[response_header] = False
            header_data = method_response.headers.get(header)
            # map each declared header to its default value, or '*' when none
            method_integration_response_params[response_header] = (
                "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

        response_templates = self._get_response_template(method_name, httpStatus)

        return {'params': method_response_params,
                'models': method_response_models,
                'integration_params': method_integration_response_params,
                'pattern': method_response_pattern,
                'response_templates': response_templates}
    def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                       lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to create a method for the given resource path, along with its associated
        request and response integrations.

        ret
            a dictionary for returning status to Saltstack

        resource_path
            the full resource path where the named method_name will be associated with.

        method_name
            a string that is one of the following values: 'delete', 'get', 'head', 'options',
            'patch', 'post', 'put'

        method_data
            the value dictionary for this method in the swagger definition file.

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        method = self._parse_method_data(method_name.lower(), method_data)

        # for options method to enable CORS, api_key_required will be set to False always.
        # authorization_type will be set to 'NONE' always.
        if method_name.lower() == 'options':
            api_key_required = False
            authorization_type = 'NONE'

        m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                          resourcePath=resource_path,
                                                          httpMethod=method_name.upper(),
                                                          authorizationType=authorization_type,
                                                          apiKeyRequired=api_key_required,
                                                          requestParameters=method.get('params'),
                                                          requestModels=method.get('models'),
                                                          **self._common_aws_args)
        if not m.get('created'):
            ret = _log_error_and_abort(ret, m)
            return ret

        ret = _log_changes(ret, '_deploy_method.create_api_method', m)

        lambda_uri = ""
        if method_name.lower() != 'options':
            lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                          lambda_region=lambda_region)

        # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
        # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
        integration = (
            __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                               resourcePath=resource_path,
                                                               httpMethod=method_name.upper(),
                                                               integrationType=method.get('integration_type'),
                                                               integrationHttpMethod='POST',
                                                               uri=lambda_uri,
                                                               credentials=lambda_integration_role,
                                                               requestTemplates=method.get('request_templates'),
                                                               **self._common_aws_args))
        if not integration.get('created'):
            ret = _log_error_and_abort(ret, integration)
            return ret
        ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

        if 'responses' in method_data:
            for response, response_data in six.iteritems(method_data['responses']):
                httpStatus = str(response)  # future lint: disable=blacklisted-function
                method_response = self._parse_method_response(method_name.lower(),
                                                              _Swagger.SwaggerMethodResponse(response_data), httpStatus)

                mr = __salt__['boto_apigateway.create_api_method_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    responseParameters=method_response.get('params'),
                    responseModels=method_response.get('models'),
                    **self._common_aws_args)
                if not mr.get('created'):
                    ret = _log_error_and_abort(ret, mr)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

                mir = __salt__['boto_apigateway.create_api_integration_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    selectionPattern=method_response.get('pattern'),
                    responseParameters=method_response.get('integration_params'),
                    responseTemplates=method_response.get('response_templates'),
                    **self._common_aws_args)
                if not mir.get('created'):
                    ret = _log_error_and_abort(ret, mir)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
        else:
            raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

        return ret
    def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to deploy resources defined in the swagger file.

        ret
            a dictionary for returning status to Saltstack

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        for path, pathData in self.paths:
            resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                        path=path,
                                                                        **self._common_aws_args)
            if not resource.get('created'):
                ret = _log_error_and_abort(ret, resource)
                return ret
            ret = _log_changes(ret, 'deploy_resources', resource)
            for method, method_data in six.iteritems(pathData):
                # skip non-operation keys on the path item (parameters,
                # vendor extensions, etc.)
                if method in _Swagger.SWAGGER_OPERATION_NAMES:
                    ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                              lambda_integration_role, lambda_region, authorization_type)
        return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.delete_stage
|
python
|
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
|
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1097-L1128
|
[
"def _one_or_more_stages_remain(self, deploymentId):\n '''\n Helper function to find whether there are other stages still associated with a deployment\n '''\n stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,\n deploymentId=deploymentId,\n **self._common_aws_args).get('stages')\n return bool(stages)\n",
"def _get_current_deployment_id(self):\n '''\n Helper method to find the deployment id that the stage name is currently assocaited with.\n '''\n deploymentId = ''\n stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,\n stageName=self._stage_name,\n **self._common_aws_args).get('stage')\n if stage:\n deploymentId = stage.get('deploymentId')\n return deploymentId\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    Wrapper around a single Swagger Parameter Object, exposing the pieces
    AWS API Gateway integration needs: location, mapped request-parameter
    name, and (for body parameters) the referenced model schema name.
    '''
    # parameter locations supported by this state
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # raw parameter object as parsed from the swagger file
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object
        '''
        _location = self._paramdict.get('in')
        if _location not in _Swagger.SwaggerParameter.LOCATIONS:
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
        return _location

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object, mapped to the
        AWS ``method.request.*`` notation (None for body parameters)
        '''
        _name = self._paramdict.get('name')
        if not _name:
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
        _prefix_by_location = {'header': 'method.request.header.{0}',
                               'query': 'method.request.querystring.{0}',
                               'path': 'method.request.path.{0}'}
        fmt = _prefix_by_location.get(self.location)
        return fmt.format(_name) if fmt else None

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object
        (None for non-body parameters)
        '''
        if self.location != 'body':
            return None
        _schema = self._paramdict.get('schema')
        if not _schema:
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        if '$ref' not in _schema:
            raise ValueError(('Body parameter must have a JSON reference '
                              'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
        return _schema.get('$ref').split('/')[-1]
class SwaggerMethodResponse(object):
    '''
    Wrapper around a Swagger Method Response Object, exposing the referenced
    model schema name and any response headers defined for the response.
    '''
    def __init__(self, r):
        # raw method response object from the swagger file
        self._r = r

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger method response object
        (None when the response defines no schema)
        '''
        _schema = self._r.get('schema')
        if not _schema:
            return None
        if '$ref' not in _schema:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(_schema)))
        return _schema.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        returns the headers dictionary in the method response object
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load the swagger file, validate it, and resolve the AWS rest api id
    (if one already exists) for the given api_name.

    Raises IOError for a bad file path and ValueError for swagger/format
    validation failures.
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # hash covers the swagger file plus both templates, so changing
            # any of them yields a new deployment label
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

    # NOTE(review): these run even when swagger_file_path is None, in which
    # case self._cfg was never set and validation will raise AttributeError --
    # confirm callers always pass a path.
    self._validate_swagger_file()

    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
    '''
    Return True when ``code`` is an HTTP error status code string
    (4xx or 5xx), optionally surrounded by whitespace.
    '''
    # anchored: optional whitespace, a 4xx/5xx code, optional whitespace
    return re.match(r'^\s*[45]\d\d\s*$', code) is not None
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration.

    paths
        iterator of (path, path-item-object) pairs from the swagger file

    mods
        the swagger definitions (models) dictionary

    Raises ValueError when an error response (4xx/5xx) does not reference a
    model of type object with an errorMessage property.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # skip vendor extensions and anything that is not an HTTP operation
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue

                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                # errorMessage is what the AWS integration response selection
                # pattern matches against (see RESPONSE_TEMPLATE above)
                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
    '''
    Checks if the lambda function name format contains only known
    substitutable keys (stage, api, resource, method).

    :return: True on success, ValueError raised on error
    '''
    try:
        if self._lambda_funcname_format:
            # probe the format string with every known key; an unknown
            # placeholder raises KeyError/IndexError here
            self._lambda_funcname_format.format(stage='', api='', resource='', method='')
        return True
    except Exception:
        raise ValueError('Invalid lambda_funcname_format {0}. Please review '
                         'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # NOTE(review): this logs the bound method's type, not the models --
    # looks like leftover debugging; confirm before removing
    log.info(type(self._models))
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file combined with the error/response
    templates (computed once in __init__)
    '''
    return self._md5_filehash
@property
def info(self):
    '''
    returns the swagger info object as a dictionary
    '''
    info_obj = self._cfg.get('info')
    if not info_obj:
        raise ValueError('Info Object has no values')
    return info_obj
@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
    '''
    returns the name of the api (as passed to __init__, not the swagger title)
    '''
    return self._api_name
@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object
    '''
    api_version = self.info.get('version')
    if not api_version:
        raise ValueError('Missing version value in Info Object')
    return api_version
def _models(self):
    '''
    returns the definitions (models) dictionary from the swagger file
    '''
    definitions = self._cfg.get('definitions')
    if not definitions:
        raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
    return definitions
def models(self):
    '''
    Generator yielding (model_name, schema) tuples in dependency order, so
    each model is created on AWS only after the models it references.
    '''
    remaining = self._build_all_dependencies()
    model = self._get_model_without_dependencies(remaining)
    while model:
        yield (model, self._models().get(model))
        model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
    '''
    returns an iterator of (relative path, path item object) pairs
    specified in the swagger file
    '''
    path_objects = self._cfg.get('paths')
    if not path_objects:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    # every relative path must be rooted at '/'
    malformed = [p for p in path_objects if not p.startswith('/')]
    if malformed:
        raise ValueError('Path object {0} should start with /. Please fix it'.format(malformed[0]))
    return six.iteritems(path_objects)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file
    (empty string when absent)
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath
@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    (empty string until resolved/created)
    '''
    return self._restApiId

@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    this property returns the deployment label dictionary (mainly used by
    stage description); it uniquely identifies an api/swagger-file pairing
    via the swagger info object, api name, file name and file md5 hash.
    '''
    return {'swagger_info_object': self.info,
            'api_name': self.rest_api_name,
            'swagger_file': os.path.basename(self._swagger_file),
            'swagger_file_md5sum': self.md5_filehash}
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated with a deployment
    '''
    # describe_api_stages returns only the stages tied to this deploymentId
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    return bool(stages)
def no_more_deployments_remain(self):
    '''
    Return True when no deployment of this rest api still has a stage
    attached to it (i.e. every remaining deployment is orphaned).
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    for deployment in deployments or []:
        attached_stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                          deploymentId=deployment.get('id'),
                                                                          **self._common_aws_args).get('stages')
        if attached_stages:
            # at least one deployment is still live behind a stage
            return False
    return True
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently associated with.
    Returns an empty string when the stage does not exist.
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId
def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label that the stage_name is currently associated with.
    Returns None when no deployment is found.
    '''
    deploymentId = self._get_current_deployment_id()
    # NOTE(review): when the stage is missing, deploymentId is '' and this
    # describe call is still made -- presumably returns no deployment; confirm.
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                    deploymentId=deploymentId,
                                                                    **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None
def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id whose description matches the
    desired deployment label (api_name + swagger file), or '' when none does.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    matching = [d.get('id') for d in deployments or []
                if d.get('description') == self.deployment_label_json]
    return matching[0] if matching else ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given stage_variables

    ret
        a dictionary for returning status to Saltstack (mutated in place)

    stage_variables
        dictionary of variables to set on the stage
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)

    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current.
    Creates the stage if it does not exist yet; otherwise overwrites its
    stage variables before activating the deployment.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        # stage does not exist yet: create it directly against the deployment
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    # point the (possibly pre-existing) stage at the desired deployment
    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Resolves self.restApiId to the Api Id that matches the given api_name and the
    hardcoded _Swagger.AWS_API_DESCRIPTION as the api description.
    Leaves restApiId unset when no api matches; raises ValueError on ambiguity.
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.
    '''

    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            # stage already points at a deployment with the desired label
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # a matching deployment may exist under another stage; if so the
            # caller only needs to republish (reassociate), not redeploy
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack

    stage_variables
        dictionary of variables to set on the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage.

    Returns {'deleted': True} on success, or the failing delete response.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first entry (presumably the root '/' resource -- TODO
        # confirm ordering guarantee) and delete deepest paths first
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    this method create the top level rest api in AWS apigateway

    When the rest api already exists (restApiId resolved in __init__), its
    resources and models are cleaned up instead of creating a new api.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed message typo: was 'restAreId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        # honor salt's test=True mode: report what would happen, change nothing
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

    return ret
def _aws_model_ref_from_swagger_ref(self, r):
    '''
    Convert a swagger '#/definitions/<name>' reference into the AWS
    apigateway model URL for this rest api.
    '''
    return ('https://apigateway.amazonaws.com/restapis/{0}/models/{1}'
            .format(self.restApiId, r.split('/')[-1]))
def _update_schema_to_aws_notation(self, schema):
    '''
    Recursively rewrite '$ref' values in a model schema so they point at
    AWS apigateway model URLs instead of swagger-local references.
    Returns a new dict; the input schema is not modified.
    '''
    def _convert(key, value):
        if key == '$ref':
            value = self._aws_model_ref_from_swagger_ref(value)
        if isinstance(value, dict):
            value = self._update_schema_to_aws_notation(value)
        return value

    return {k: _convert(k, v) for k, v in schema.items()}
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.
    Returns a de-duplicated list of model names.
    '''
    dep_models_list = []

    if obj_schema:
        # NOTE: in-place mutation -- defaults the 'type' field to 'object'
        # inside the parsed swagger dict
        obj_schema['type'] = obj_schema.get('type', 'object')

        if obj_schema['type'] == 'array':
            # array items may themselves reference models
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # direct reference: include the referenced model and its own dependencies
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model name defined in the swagger file to the list of model
    names its schema references.
    '''
    return {model: self._build_dependent_model_list(schema)
            for model, schema in six.iteritems(self._models())}
def _get_model_without_dependencies(self, models_dict):
    '''
    Pop and return the next model whose dependency list is empty, removing
    it from every remaining model's dependency list. Returns None when
    models_dict is empty; raises ValueError when no model is resolvable.
    '''
    if not models_dict:
        return None

    ready = None
    for model_name, deps in six.iteritems(models_dict):
        if deps == []:
            ready = model_name
            break

    if ready is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))

    # remove the model from other dependencies before returning
    models_dict.pop(ready)
    for dep_list in models_dict.values():
        if ready in dep_list:
            dep_list.remove(ready)

    return ready
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    # self.models() yields models in dependency order
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
    '''
    Build the lambda function name for a resource path / http method pair
    using the configured lambda_funcname_format (see the rule documented in
    boto_apigateway.api_present).
    '''
    name = self._lambda_funcname_format.format(stage=self._stage_name,
                                               api=self.rest_api_name,
                                               resource=resourcePath,
                                               method=httpMethod).strip()
    name = re.sub(r'{|}', '', name)              # drop swagger path-parameter braces
    name = re.sub(r'\s+|/', '_', name).lower()   # whitespace and '/' become '_'
    return re.sub(r'_+', '_', name)              # collapse repeated underscores
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration.
    Raises ValueError when the lambda function cannot be found.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            # NOTE(review): this retry passes the exact same arguments as the
            # first lookup (no region override) -- confirm intent
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    # standard AWS_PROXY-style invocation ARN for apigateway -> lambda
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Build the request parameters, request models, request templates and
    integration type needed to configure a method's request integration.
    OPTIONS methods get a MOCK integration (CORS support); everything else
    integrates with lambda via AWS.
    '''
    request_params = {}
    request_models = {}
    for param in method_data.get('parameters', []):
        wrapped = _Swagger.SwaggerParameter(param)
        if wrapped.name:
            request_params[wrapped.name] = True
        if wrapped.schema:
            request_models['application/json'] = wrapped.schema

    is_options = method_name == 'options'
    return {'params': request_params,
            'models': request_models,
            'request_templates': _Swagger.REQUEST_OPTION_TEMPLATE if is_options else _Swagger.REQUEST_TEMPLATE,
            'integration_type': 'MOCK' if is_options else 'AWS'}
def _find_patterns(self, o):
    '''
    Recursively collect every value stored under a 'pattern' key anywhere
    in the given (possibly nested) dictionary.
    '''
    found = []
    if isinstance(o, dict):
        for key, value in six.iteritems(o):
            if isinstance(value, dict):
                found.extend(self._find_patterns(value))
            elif key == 'pattern':
                found.append(value)
    return found
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    returns the first pattern found in the named response schema; falls
    back to '.+' for error status codes and '.*' otherwise
    '''
    fallback = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    found = self._find_patterns(self._models().get(schema_name))
    return found[0] if found else fallback
def _get_response_template(self, method_name, http_status):
    '''
    Pick the integration response template mapping for a method/status pair:
    success paths and OPTIONS use the plain response template, 4xx/5xx use
    the error template. User-supplied templates take precedence over the
    class defaults.
    '''
    if method_name == 'options' or not self._is_http_error_rescode(http_status):
        custom, fallback = self._response_template, self.RESPONSE_OPTION_TEMPLATE
    else:
        custom, fallback = self._error_response_template, self.RESPONSE_TEMPLATE
    return {'application/json': custom} if custom else fallback
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_response
        a _Swagger.SwaggerMethodResponse wrapper for the response object
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        # selection pattern comes from the model's 'pattern' field when set
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # static header value: the swagger 'default', or '*' when not given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack
    resource_path
        the full resource path where the named method_name will be associated with.
    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'
    method_data
        the value dictionary for this method in the swagger definition file.
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)
    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'
    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_method', m)
    # OPTIONS methods use a MOCK-style integration and therefore get no lambda uri
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)
    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
    if 'responses' in method_data:
        # one method response + one integration response per declared status code
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)
            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        # create the (possibly nested) resource for this swagger path first
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        # then attach every recognized http operation found under the path
        for method, method_data in six.iteritems(pathData):
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.verify_api
|
python
|
def verify_api(self, ret):
'''
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
If yes, returns abort with comment indicating already at desired state.
If not, and there are previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.
'''
if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if deployed_label_json == self.deployment_label_json:
ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
ret['current'] = True
return ret
else:
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
|
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
If yes, returns abort with comment indicating already at desired state.
If not, and there are previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1130-L1152
|
[
"def _get_current_deployment_label(self):\n '''\n Helper method to find the deployment label that the stage_name is currently associated with.\n '''\n deploymentId = self._get_current_deployment_id()\n deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,\n deploymentId=deploymentId,\n **self._common_aws_args).get('deployment')\n if deployment:\n return deployment.get('description')\n return None\n",
"def _get_desired_deployment_id(self):\n '''\n Helper method to return the deployment id matching the desired deployment label for\n this Swagger object based on the given api_name, swagger_file\n '''\n deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,\n **self._common_aws_args).get('deployments')\n if deployments:\n for deployment in deployments:\n if deployment.get('description') == self.deployment_label_json:\n return deployment.get('id')\n return ''\n"
] |
class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''

    # Top-level fields permitted in a Swagger 2.0 Object (plus x- vendor extensions).
    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)

    # VENDOR SPECIFIC FIELD PATTERNS
    VENDOR_EXT_PATTERN = re.compile('^x-')

    # JSON_SCHEMA_REF
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'

    # AWS integration templates for normal and options methods.
    # The velocity template below forwards headers, query/path params, request body,
    # API Gateway context, and stage variables to the integrated lambda as JSON.
    REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
                                            '{\n'
                                            '"header_params" : {\n'
                                            '#set ($map = $input.params().header)\n'
                                            '#foreach( $param in $map.entrySet() )\n'
                                            '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                                            '#end\n'
                                            '},\n'
                                            '"query_params" : {\n'
                                            '#set ($map = $input.params().querystring)\n'
                                            '#foreach( $param in $map.entrySet() )\n'
                                            '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                                            '#end\n'
                                            '},\n'
                                            '"path_params" : {\n'
                                            '#set ($map = $input.params().path)\n'
                                            '#foreach( $param in $map.entrySet() )\n'
                                            '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                                            '#end\n'
                                            '},\n'
                                            '"apigw_context" : {\n'
                                            '"apiId": "$context.apiId",\n'
                                            '"httpMethod": "$context.httpMethod",\n'
                                            '"requestId": "$context.requestId",\n'
                                            '"resourceId": "$context.resourceId",\n'
                                            '"resourcePath": "$context.resourcePath",\n'
                                            '"stage": "$context.stage",\n'
                                            '"identity": {\n'
                                            '  "user":"$context.identity.user",\n'
                                            '  "userArn":"$context.identity.userArn",\n'
                                            '  "userAgent":"$context.identity.userAgent",\n'
                                            '  "sourceIp":"$context.identity.sourceIp",\n'
                                            '  "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
                                            '  "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
                                            '  "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
                                            '  "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
                                            '  "caller":"$context.identity.caller",\n'
                                            '  "apiKey":"$context.identity.apiKey",\n'
                                            '  "accountId":"$context.identity.accountId"\n'
                                            '}\n'
                                            '},\n'
                                            '"body_params" : $input.json(\'$\'),\n'
                                            '"stage_variables": {\n'
                                            '#foreach($variable in $stageVariables.keySet())\n'
                                            '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
                                            '#if($foreach.hasNext), #end\n'
                                            '#end\n'
                                            '}\n'
                                            '}'}
    # OPTIONS (CORS preflight) requests short-circuit with a mocked 200.
    REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}

    # AWS integration response template mapping to convert stackTrace part or the error
    # to a uniform format containing strings only.  Swagger does not seem to allow defining
    # an array of non-uniform types, to it is not possible to create error model to match
    # exactly what comes out of lambda functions in case of error.
    RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
                                             '{\n'
                                             '  "errorMessage" : "$inputRoot.errorMessage",\n'
                                             '  "errorType" : "$inputRoot.errorType",\n'
                                             '  "stackTrace" : [\n'
                                             '#foreach($stackTrace in $inputRoot.stackTrace)\n'
                                             '    [\n'
                                             '#foreach($elem in $stackTrace)\n'
                                             '      "$elem"\n'
                                             '#if($foreach.hasNext),#end\n'
                                             '#end\n'
                                             '    ]\n'
                                             '#if($foreach.hasNext),#end\n'
                                             '#end\n'
                                             '  ]\n'
                                             '}'}
    RESPONSE_OPTION_TEMPLATE = {}

    # This string should not be modified, every API created by this state will carry the description
    # below.
    AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
                                                "context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object
    '''
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        Validated 'in' field of the parameter object; ValueError on unknown locations.
        '''
        where = self._paramdict.get('in')
        if where not in _Swagger.SwaggerParameter.LOCATIONS:
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(where))
        return where

    @property
    def name(self):
        '''
        AWS-mapped request-parameter name for header/query/path parameters,
        None for body parameters; ValueError when the swagger object has no name.
        '''
        param_name = self._paramdict.get('name')
        if not param_name:
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
        templates = {'header': 'method.request.header.{0}',
                     'query': 'method.request.querystring.{0}',
                     'path': 'method.request.path.{0}'}
        template = templates.get(self.location)
        return template.format(param_name) if template else None

    @property
    def schema(self):
        '''
        Model name referenced by a body parameter's schema; None for non-body
        parameters.  AWS requires body schemas to be $ref references.
        '''
        if self.location != 'body':
            return None
        schema_obj = self._paramdict.get('schema')
        if not schema_obj:
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        if '$ref' not in schema_obj:
            raise ValueError(('Body parameter must have a JSON reference '
                              'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
        return schema_obj.get('$ref').split('/')[-1]
class SwaggerMethodResponse(object):
    '''
    Helper class for Swagger Method Response Object
    '''
    def __init__(self, r):
        self._r = r

    @property
    def schema(self):
        '''
        Model name referenced by this response's schema, or None when the
        response declares no schema; ValueError when the schema is not a $ref.
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        Headers dictionary of the method response object ({} when absent).
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load and validate the swagger file, then resolve any pre-existing AWS rest api id.

    Raises IOError when the swagger file path is invalid and ValueError when the
    file fails validation (see _validate_swagger_file / _validate_lambda_funcname_format).
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # hash covers the swagger file AND both templates, so changing either
            # template triggers a redeploy
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

        self._validate_swagger_file()

    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
    '''
    Helper function to determine if the passed code is in the 400~599 range of http error
    codes
    '''
    # leading/trailing whitespace around the status string is tolerated
    return re.match(r'^\s*[45]\d\d\s*$', code) is not None
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration.

    paths
        iterator of (path, path_item) pairs from the swagger file
    mods
        the 'definitions' mapping of the swagger file

    Raises ValueError when any 4xx/5xx response lacks a $ref schema, or when the
    referenced model is not an object with an errorMessage property (the AWS
    error-matching convention).
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue

                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
    '''
    Checks if the lambda function name format contains only known elements
    :return: True on success, ValueError raised on error

    NOTE(review): when self._lambda_funcname_format is falsy this falls through
    and implicitly returns None (not True) — callers appear to rely only on the
    ValueError side effect; confirm before changing.
    '''
    try:
        if self._lambda_funcname_format:
            known_kwargs = dict(stage='',
                                api='',
                                resource='',
                                method='')
            self._lambda_funcname_format.format(**known_kwargs)
        return True
    except Exception:
        # any format() failure (unknown key, bad spec) is surfaced uniformly
        raise ValueError('Invalid lambda_funcname_format {0}.  Please review '
                         'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    Raises ValueError on any invalid/missing field or unsupported swagger version.
    '''

    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # validate that every 4xx/5xx response maps to a conforming error model
    # (removed a stray debug statement that logged type(self._models) — the
    # type of the bound method itself, which carried no useful information)
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file (combined with the error/response
    templates — see __init__), used to detect deployment changes
    '''
    return self._md5_filehash
@property
def info(self):
    '''
    Return the swagger Info Object as a dictionary; ValueError when absent or empty.
    '''
    info_obj = self._cfg.get('info')
    if info_obj:
        return info_obj
    raise ValueError('Info Object has no values')
@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
    '''
    returns the name of the api (as passed to __init__, not read from the swagger file)
    '''
    return self._api_name
@property
def rest_api_version(self):
    '''
    Version string from the swagger Info Object; ValueError when missing.
    '''
    api_version = self.info.get('version')
    if api_version:
        return api_version
    raise ValueError('Missing version value in Info Object')
def _models(self):
    '''
    Return the 'definitions' mapping from the swagger file; ValueError when empty.
    '''
    definitions = self._cfg.get('definitions')
    if definitions:
        return definitions
    raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
def models(self):
    '''
    Generator yielding (model_name, schema) tuples in an order that guarantees
    every model is created on AWS only after all models it references.
    '''
    remaining = self._build_all_dependencies()
    next_model = self._get_model_without_dependencies(remaining)
    while next_model:
        yield (next_model, self._models().get(next_model))
        next_model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
    '''
    Iterator over (path, path_item) pairs defined in the swagger file;
    ValueError when the Paths Object is empty or any path lacks a leading '/'.
    '''
    path_objects = self._cfg.get('paths')
    if not path_objects:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    for relpath in path_objects:
        if not relpath.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(relpath))
    return six.iteritems(path_objects)
@property
def basePath(self):
    '''
    basePath field as defined in the swagger file ('' when absent).
    '''
    return self._cfg.get('basePath', '')
@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    ('' until deploy_api or _resolve_api_id sets it)
    '''
    return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    Deployment label dictionary (mainly used by stage description): identifies
    the api name, swagger file name and its md5 hash, plus the swagger info object.
    '''
    return {'swagger_info_object': self.info,
            'api_name': self.rest_api_name,
            'swagger_file': os.path.basename(self._swagger_file),
            'swagger_file_md5sum': self.md5_filehash}
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated with a deployment
    '''
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    # non-empty list of stages -> True
    return bool(stages)
def no_more_deployments_remain(self):
    '''
    Helper function to find whether there are deployments left with stages associated.
    Returns True when no deployment of this rest api still has any stage attached.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if not deployments:
        return True
    for deployment in deployments:
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deployment.get('id'),
                                                                 **self._common_aws_args).get('stages')
        if stages:
            # at least one stage still references this deployment
            return False
    return True
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently associated with.
    Returns '' when the stage does not exist.
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId
def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label that the stage_name is currently associated with.
    Returns None when no deployment is found for the stage.
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        # the label was stored as the deployment's description on creation
        return deployment.get('description')
    return None
def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id matching the desired deployment label for
    this Swagger object based on the given api_name, swagger_file.
    Returns '' when no matching deployment exists.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            # match on the pretty-printed JSON label stored in the description
            if deployment.get('description') == self.deployment_label_json:
                return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given stage_variables

    ret
        a dictionary for returning status to Saltstack; on failure 'result' is
        set to False and 'abort' to True.
    stage_variables
        dict of variables to install on the stage (replacing existing ones)
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)

    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current.

    Creates the stage if it does not exist yet; otherwise overwrites its stage
    variables and re-points it at self._deploymentId.  Returns the execution
    module's result dict, or {'set': False, 'error': ...} on failure.
    '''

    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
    as the api description.  Sets self.restApiId as a side effect; raises
    ValueError when more than one api matches.
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name.  If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well.

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dict of variables to install on the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage.

    Deletes child resources deepest-first (skipping the root resource), then
    all models.  Returns {'deleted': True} on success or the first failing
    execution-module result.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first (root '/') resource, then delete in reverse order so
        # children are removed before their parents
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    Method to create the top level rest api in AWS apigateway.

    If an api with the matching name/description already exists (restApiId was
    resolved in __init__), its resources and models are cleaned up instead of
    creating a new api.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in the user-facing message: 'restAreId' -> 'restApiId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        if __opts__['test']:
            # test=True: report intent only, do not touch AWS
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

    return ret
def _aws_model_ref_from_swagger_ref(self, r):
    '''
    Convert a swagger $ref value into the AWS API Gateway model URL for this rest api.
    '''
    return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(
        self.restApiId, r.split('/')[-1])
def _update_schema_to_aws_notation(self, schema):
    '''
    Recursively rewrite every $ref value in the schema to the AWS model URL
    notation, returning a new dictionary (the input is not modified).
    '''
    def _convert(key, value):
        # translate $ref values first, then descend into nested objects
        if key == '$ref':
            value = self._aws_model_ref_from_swagger_ref(value)
        if isinstance(value, dict):
            value = self._update_schema_to_aws_notation(value)
        return value

    return dict((key, _convert(key, value)) for key, value in schema.items())
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Returns a de-duplicated list of model names reachable from obj_schema via
    $ref (through arrays and properties).

    NOTE(review): this writes a default 'type' key back into obj_schema, which
    is a sub-dict of the loaded swagger config — later consumers (e.g.
    deploy_models) therefore see the defaulted type; confirm before removing
    this side effect.
    '''
    dep_models_list = []

    if obj_schema:
        obj_schema['type'] = obj_schema.get('type', 'object')

    if obj_schema['type'] == 'array':
        # arrays depend on whatever their items reference
        dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
    else:
        ref = obj_schema.get('$ref')
        if ref:
            ref_obj_model = ref.split("/")[-1]
            ref_obj_schema = self._models().get(ref_obj_model)
            # the referenced model's own dependencies must come first
            dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
            dep_models_list.extend([ref_obj_model])
        else:
            # need to walk each property object
            properties = obj_schema.get('properties')
            if properties:
                for _, prop_obj_schema in six.iteritems(properties):
                    dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model name to the list of model names its schema references.
    '''
    return dict((model, self._build_dependent_model_list(schema))
                for model, schema in six.iteritems(self._models()))
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack

    Returns ``ret``; on any failed create/update the function sets
    result=False and abort=True on ``ret`` and returns early.
    '''
    # models() yields in dependency order, so referenced models exist on
    # AWS before any model that links to them
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration

    lambda_name
        name of the lambda function to resolve to an ARN

    lambda_region
        preferred region for the lambda function lookup

    Raises ValueError when the function cannot be found.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            # NOTE(review): this retry passes arguments identical to the first
            # call (the region comes from self._common_aws_args both times), so
            # it cannot actually target a different region -- confirm whether a
            # region override was intended here.
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    # uri format follows the documented API Gateway lambda invocation path
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models, request_templates and
    integration_type values needed to configure method request integration/mappings.

    method_name
        lower-cased http method name (e.g. 'get', 'options')

    method_data
        the swagger operation object for this method
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                # header/query/path parameters are added to the AWS
                # requestParameters map (keyed by their mapped name)
                method_params[p.name] = True
            if p.schema:
                # body parameters contribute the request model
                method_models['application/json'] = p.schema

    # 'options' methods are mocked (CORS support); all other methods use
    # the AWS (lambda) integration with the full request template
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"

    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
'''
Helper function to construct the method response params, models, and integration_params
values needed to configure method response integration/mappings.
'''
method_response_models = {}
method_response_pattern = '.*'
if method_response.schema:
method_response_models['application/json'] = method_response.schema
method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
method_response_params = {}
method_integration_response_params = {}
for header in method_response.headers:
response_header = 'method.response.header.{0}'.format(header)
method_response_params[response_header] = False
header_data = method_response.headers.get(header)
method_integration_response_params[response_header] = (
"'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
response_templates = self._get_response_template(method_name, httpStatus)
return {'params': method_response_params,
'models': method_response_models,
'integration_params': method_integration_response_params,
'pattern': method_response_pattern,
'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'

    Raises ValueError when the swagger operation defines no responses.
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # 'options' methods are MOCK integrations and need no lambda uri
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            # response codes may be parsed as ints from yaml; AWS takes strings
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret

            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret

            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # skip non-operation keys in the path item object
            # (e.g. 'parameters' or vendor extensions)
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.publish_api
|
python
|
def publish_api(self, ret, stage_variables):
    '''
    This method ties the given stage_name to a deployment matching the given swagger_file.

    ret
        a dictionary for returning status to Saltstack

    stage_variables
        dictionary of stage variables to set on the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
|
This method ties the given stage_name to a deployment matching the given swagger_file.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1154-L1187
|
[
"def _dict_to_json_pretty(d, sort_keys=True):\n '''\n helper function to generate pretty printed json output\n '''\n return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys)\n",
"def _log_changes(ret, changekey, changevalue):\n '''\n For logging create/update/delete operations to AWS ApiGateway\n '''\n cl = ret['changes'].get('new', [])\n cl.append({changekey: _object_reducer(changevalue)})\n ret['changes']['new'] = cl\n return ret\n",
"def _set_current_deployment(self, stage_desc_json, stage_variables):\n '''\n Helper method to associate the stage_name to the given deploymentId and make this current\n '''\n stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,\n stageName=self._stage_name,\n **self._common_aws_args).get('stage')\n if not stage:\n stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,\n stageName=self._stage_name,\n deploymentId=self._deploymentId,\n description=stage_desc_json,\n variables=stage_variables,\n **self._common_aws_args)\n if not stage.get('stage'):\n return {'set': False, 'error': stage.get('error')}\n else:\n # overwrite the stage variables\n overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,\n stageName=self._stage_name,\n variables=stage_variables,\n **self._common_aws_args)\n if not overwrite.get('stage'):\n return {'set': False, 'error': overwrite.get('error')}\n\n return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,\n stageName=self._stage_name,\n deploymentId=self._deploymentId,\n **self._common_aws_args)\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object
    '''
    # parameter locations supported by this state
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # paramdict: one entry of the swagger operation's 'parameters' list
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object

        Raises ValueError for any location outside LOCATIONS.
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns the AWS method-request mapping name for this parameter
        (e.g. method.request.header.<name>); None for body parameters.

        Raises ValueError when the parameter has no name.
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object
        (body parameters only; None for every other location)
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Thin wrapper over a Swagger Method Response Object giving convenient
    access to its referenced schema name and headers.
    '''
    def __init__(self, r):
        self._r = r

    @property
    def schema(self):
        '''
        Name of the referenced model, or None when no schema is declared.
        Raises ValueError when a schema is present without a JSON $ref.
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        Headers dict declared on the method response ({} when absent).
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load and validate the swagger file and resolve any existing AWS rest api id.

    api_name
        name of the rest api on AWS ApiGateway

    stage_name
        name of the deployment stage this object operates on

    lambda_funcname_format
        format string used to derive lambda function names (see _lambda_name)

    swagger_file_path
        path to the swagger definition file; IOError is raised when the
        path does not exist or is not a regular file

    error_response_template / response_template
        optional user-supplied integration response templates

    common_aws_args
        keyword args (e.g. region/profile) passed to every boto call
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # the hash covers the swagger file plus both templates
            # (see _gen_md5_filehash)
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

        self._validate_swagger_file()

    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration

    paths
        iterator of (path, path item object) pairs from the swagger file

    mods
        the swagger 'definitions' dict (model name -> schema)

    Raises ValueError when any 4xx/5xx response violates the convention:
    it must reference (via $ref) a model of type 'object' whose properties
    include 'errorMessage'.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue

                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.  Raises ValueError on any violation.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # fix: removed leftover debug statement `log.info(type(self._models))`,
    # which logged the type of the bound method object and served no purpose
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file (computed over the file plus the
    response templates, see _gen_md5_filehash)
    '''
    return self._md5_filehash

@property
def info(self):
    '''
    returns the swagger info object as a dictionary

    Raises ValueError when the info object is missing or empty.
    '''
    info = self._cfg.get('info')
    if not info:
        raise ValueError('Info Object has no values')
    return info

@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)

@property
def rest_api_name(self):
    '''
    returns the name of the api (as passed to the constructor, not taken
    from the swagger file)
    '''
    return self._api_name

@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object

    Raises ValueError when the version field is absent or empty.
    '''
    version = self.info.get('version')
    if not version:
        raise ValueError('Missing version value in Info Object')

    return version
def _models(self):
    '''
    returns the 'definitions' dict (model name -> schema) from the swagger file

    Raises ValueError when no definitions are present.
    '''
    models = self._cfg.get('definitions')
    if not models:
        raise ValueError('Definitions Object has no values, You need to define them in your swagger file')

    return models

def models(self):
    '''
    generator to return the tuple of model and its schema to create on aws.
    Models are yielded in dependency order: a model is yielded only after
    every model it references has been yielded.
    '''
    model_dict = self._build_all_dependencies()
    while True:
        model = self._get_model_without_dependencies(model_dict)
        if not model:
            break
        yield (model, self._models().get(model))

@property
def paths(self):
    '''
    returns an iterator for the relative resource paths specified in the swagger file

    Raises ValueError when paths are missing or a path does not start with '/'.
    '''
    paths = self._cfg.get('paths')
    if not paths:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    for path in paths:
        if not path.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
    return six.iteritems(paths)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file ('' when absent)
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath

@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    ('' until one has been resolved or assigned)
    '''
    return self._restApiId

@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId

@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    return _dict_to_json_pretty(self.deployment_label)

@property
def deployment_label(self):
    '''
    this property returns the deployment label dictionary (mainly used by
    stage description); the md5 checksum makes the label unique per
    swagger file content
    '''
    label = dict()

    label['swagger_info_object'] = self.info
    label['api_name'] = self.rest_api_name
    label['swagger_file'] = os.path.basename(self._swagger_file)
    label['swagger_file_md5sum'] = self.md5_filehash

    return label
# methods to interact with boto_apigateway execution modules

def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated with a deployment
    '''
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    return bool(stages)

def no_more_deployments_remain(self):
    '''
    Helper function to find whether there are deployments left with stages associated

    Returns True when every remaining deployment has no stages attached
    (or when there are no deployments at all).
    '''
    no_more_deployments = True
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            deploymentId = deployment.get('id')
            stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('stages')
            if stages:
                no_more_deployments = False
                break

    return no_more_deployments
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently associated with.
    Returns '' when the stage does not exist.
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId

def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label that the stage_name is currently associated with.
    Returns None when no matching deployment is found.
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None

def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id matching the desired deployment label for
    this Swagger object based on the given api_name, swagger_file.
    Returns '' when no deployment carries the desired label.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            if deployment.get('description') == self.deployment_label_json:
                return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given stage_variables

    ret
        a dictionary for returning status to Saltstack

    stage_variables
        dictionary of stage variables to set
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
    # NOTE(review): success is detected via the 'overwrite' key here, while
    # _set_current_deployment checks 'stage' on the result of the same call --
    # confirm which key the execution module actually returns.
    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current

    Creates the stage (bound to self._deploymentId) when it does not yet
    exist; otherwise overwrites its stage variables.  On failure returns
    {'set': False, 'error': ...}; on success returns the result of
    activate_api_deployment.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Look up the AWS rest api whose name matches this object's api name and
    whose description is the hardcoded _Swagger.AWS_API_DESCRIPTION, and
    store its id on ``self.restApiId``.

    Raises ValueError when more than one api matches the name/description.
    '''
    matches = __salt__['boto_apigateway.describe_apis'](
        name=self.rest_api_name,
        description=_Swagger.AWS_API_DESCRIPTION,
        **self._common_aws_args).get('restapi')
    if not matches:
        # no existing api; restApiId stays unset
        return
    if len(matches) > 1:
        raise ValueError('Multiple APIs matching given name {0} and '
                         'description {1}'.format(self.rest_api_name, self.info_json))
    self.restApiId = matches[0].get('id')
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well

ret
    a dictionary for returning status to Saltstack
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
# NOTE(review): in the original layout this branch appears to pair with the
# "stages remain" check rather than the deployment delete — confirm against upstream
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
def verify_api(self, ret):
    '''
    Determine whether the configured stage is already bound to the deployment
    label derived from this api name and swagger file.

    When the stage already carries the desired label, sets ``ret['current']``
    and explains there is nothing to do.  When a deployment with the desired
    label exists but the stage is not on it, stashes the deployment id and
    sets ``ret['publish']`` so the caller re-associates the stage.
    '''
    if not self.restApiId:
        return ret
    deployed_label = self._get_current_deployment_label()
    if deployed_label == self.deployment_label_json:
        ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                          'deployment label:\n{1}'.format(self._stage_name, deployed_label))
        ret['current'] = True
        return ret
    # stage is not on the desired deployment; see if that deployment exists
    self._deploymentId = self._get_desired_deployment_id()
    if self._deploymentId:
        ret['publish'] = True
    return ret
def _cleanup_api(self):
    '''
    Remove every non-root resource and every model from the rest api so a
    changed swagger file can be redeployed from a clean slate.

    Returns the failing execution-module result on the first error, otherwise
    ``{'deleted': True}``.
    '''
    described = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                  **self._common_aws_args)
    # skip the first (root '/') resource and delete the rest deepest-first
    for resource in reversed((described.get('resources') or [])[1:]):
        outcome = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                   path=resource.get('path'),
                                                                   **self._common_aws_args)
        if not outcome.get('deleted'):
            return outcome
    described = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    for model in described.get('models') or []:
        outcome = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                               modelName=model.get('name'),
                                                               **self._common_aws_args)
        if not outcome.get('deleted'):
            return outcome
    return {'deleted': True}
def deploy_api(self, ret):
    '''
    Create the top level rest api in AWS apigateway, or — when it already
    exists — clean out its resources and models so the swagger file can be
    redeployed onto it.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed message typo: previously read 'restAreId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
        return ret
    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)
    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret
    self.restApiId = response.get('restapi', {}).get('id')
    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Delete the rest api whose name is this object's api name and whose
    description is the hardcoded _Swagger.AWS_API_DESCRIPTION, honoring
    Salt's test mode.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if not exists_response.get('exists'):
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
        return ret
    if __opts__['test']:
        # test mode: report what would happen without touching AWS
        ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
        ret['result'] = None
        ret['abort'] = True
        return ret
    delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
    if not delete_api_response.get('deleted'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in delete_api_response:
            ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
        return ret
    return _log_changes(ret, 'delete_api', delete_api_response)
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
'''
Helper function to build the list of models the given object schema is referencing.

Recurses through array items, $ref targets and object properties; returns a
de-duplicated list of referenced model names.
'''
dep_models_list = []
if obj_schema:
# NOTE(review): this writes a default 'type' back into the caller's schema
# dict — confirm the in-place mutation is intended
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
# a direct reference: depend on the referenced model and its own dependencies
ref_obj_model = ref.split("/")[-1]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
# need to walk each property object
properties = obj_schema.get('properties')
if properties:
for _, prop_obj_schema in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Build a mapping from each model name defined under /definitions to the
    list of model names its schema references.
    '''
    return {model_name: self._build_dependent_model_list(model_schema)
            for model_name, model_schema in six.iteritems(self._models())}
def _get_model_without_dependencies(self, models_dict):
    '''
    Pop and return the name of a model whose dependency list is empty,
    removing that model from every other model's dependency list.

    Returns None when ``models_dict`` is empty; raises ValueError when every
    remaining model still has unresolved dependencies (incomplete
    definitions in the swagger file).
    '''
    if not models_dict:
        return None
    ready = None
    for model_name, deps in six.iteritems(models_dict):
        if deps == []:
            ready = model_name
            break
    if ready is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))
    # drop the resolved model from the remaining dependency lists
    models_dict.pop(ready)
    for dep_list in six.itervalues(models_dict):
        if ready in dep_list:
            dep_list.remove(ready)
    return ready
def deploy_models(self, ret):
'''
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

ret
    a dictionary for returning status to Saltstack
'''
# models() yields each model after all of its dependencies (topological order)
for model, schema in self.models():
# add in a few attributes into the model schema that AWS expects
# _schema = schema.copy()
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
'title': '{0} Schema'.format(model)})
# check to see if model already exists, aws has 2 default models [Empty, Error]
# which may need update with data from swagger file
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
modelName=model,
**self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = (
__salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
modelName=model,
schema=_dict_to_json_pretty(_schema),
**self._common_aws_args))
if not update_model_schema_response.get('updated'):
ret['result'] = False
ret['abort'] = True
if 'error' in update_model_schema_response:
ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
update_model_schema_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = (
__salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
modelDescription=model,
schema=_dict_to_json_pretty(_schema),
contentType='application/json',
**self._common_aws_args))
if not create_model_response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in create_model_response:
ret['comment'] = ('Failed to create model {0}, schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
create_model_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Construct the integration uri for the lambda function backing a method.

    The function is first looked up in ``lambda_region``; when it is not
    found there and the api gateway lives in a different region, the lookup
    is retried in the api gateway's region.  Raises ValueError when the
    function cannot be found in either region.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')
    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    # look in the lambda function's own region first
    lambda_args = dict(self._common_aws_args, region=lambda_region)
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **lambda_args)
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # fall back to the api gateway's region.  (The previous code
            # repeated the identical call here, so the retry could never
            # find anything the first lookup had not.)
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Build the request parameters, request models, request templates and
    integration type needed to configure a method's request integration.
    ``method_name`` is expected in lower case.
    '''
    params = {}
    models = {}
    for param_def in method_data.get('parameters', []):
        parameter = _Swagger.SwaggerParameter(param_def)
        if parameter.name:
            params[parameter.name] = True
        if parameter.schema:
            models['application/json'] = parameter.schema
    if method_name == 'options':
        # OPTIONS is mocked to support CORS and uses the static template
        templates = _Swagger.REQUEST_OPTION_TEMPLATE
        integration_type = "MOCK"
    else:
        templates = _Swagger.REQUEST_TEMPLATE
        integration_type = "AWS"
    return {'params': params,
            'models': models,
            'request_templates': templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Recursively collect every value stored under a 'pattern' key in the
    (possibly nested) dictionary ``o``.  Non-dict inputs yield no patterns.
    '''
    found = []
    if not isinstance(o, dict):
        return found
    for key, value in six.iteritems(o):
        if isinstance(value, dict):
            found.extend(self._find_patterns(value))
        elif key == 'pattern':
            found.append(value)
    return found
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
'''
Helper function to construct the method response params, models, and integration_params
values needed to configure method response integration/mappings.
'''
method_response_models = {}
method_response_pattern = '.*'
if method_response.schema:
method_response_models['application/json'] = method_response.schema
method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
method_response_params = {}
method_integration_response_params = {}
for header in method_response.headers:
response_header = 'method.response.header.{0}'.format(header)
method_response_params[response_header] = False
header_data = method_response.headers.get(header)
method_integration_response_params[response_header] = (
"'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
response_templates = self._get_response_template(method_name, httpStatus)
return {'params': method_response_params,
'models': method_response_models,
'integration_params': method_integration_response_params,
'pattern': method_response_pattern,
'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
lambda_integration_role, lambda_region, authorization_type):
'''
Method to create a method for the given resource path, along with its associated
request and response integrations.

ret
    a dictionary for returning status to Saltstack
resource_path
    the full resource path where the named method_name will be associated with.
method_name
    a string that is one of the following values: 'delete', 'get', 'head', 'options',
    'patch', 'post', 'put'
method_data
    the value dictionary for this method in the swagger definition file.
api_key_required
    True or False, whether api key is required to access this method.
lambda_integration_role
    name of the IAM role or IAM role arn that Api Gateway will assume when executing
    the associated lambda function
lambda_region
    the region for the lambda function that Api Gateway will integrate to.
authorization_type
    'NONE' or 'AWS_IAM'
'''
method = self._parse_method_data(method_name.lower(), method_data)
# for options method to enable CORS, api_key_required will be set to False always.
# authorization_type will be set to 'NONE' always.
if method_name.lower() == 'options':
api_key_required = False
authorization_type = 'NONE'
# step 1: create the method on the resource
m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
authorizationType=authorization_type,
apiKeyRequired=api_key_required,
requestParameters=method.get('params'),
requestModels=method.get('models'),
**self._common_aws_args)
if not m.get('created'):
ret = _log_error_and_abort(ret, m)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method', m)
# step 2: wire the method to its integration (lambda, or MOCK for options)
lambda_uri = ""
if method_name.lower() != 'options':
lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
lambda_region=lambda_region)
# NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
# about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
integration = (
__salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
integrationType=method.get('integration_type'),
integrationHttpMethod='POST',
uri=lambda_uri,
credentials=lambda_integration_role,
requestTemplates=method.get('request_templates'),
**self._common_aws_args))
if not integration.get('created'):
ret = _log_error_and_abort(ret, integration)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
# step 3: create a method response and an integration response for every
# status code declared in the swagger file (responses are mandatory)
if 'responses' in method_data:
for response, response_data in six.iteritems(method_data['responses']):
httpStatus = str(response) # future lint: disable=blacklisted-function
method_response = self._parse_method_response(method_name.lower(),
_Swagger.SwaggerMethodResponse(response_data), httpStatus)
mr = __salt__['boto_apigateway.create_api_method_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
responseParameters=method_response.get('params'),
responseModels=method_response.get('models'),
**self._common_aws_args)
if not mr.get('created'):
ret = _log_error_and_abort(ret, mr)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
mir = __salt__['boto_apigateway.create_api_integration_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
selectionPattern=method_response.get('pattern'),
responseParameters=method_response.get('integration_params'),
responseTemplates=method_response.get('response_templates'),
**self._common_aws_args)
if not mir.get('created'):
ret = _log_error_and_abort(ret, mir)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
else:
raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Create every resource path from the swagger file on the rest api and
    deploy each of its swagger operations as an api method.

    ret
        a dictionary for returning status to Saltstack
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, path_item in self.paths:
        created = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                   path=path,
                                                                   **self._common_aws_args)
        if not created.get('created'):
            return _log_error_and_abort(ret, created)
        ret = _log_changes(ret, 'deploy_resources', created)
        for op_name, op_data in six.iteritems(path_item):
            # only deploy swagger operations; skip vendor extensions etc.
            if op_name not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue
            ret = self._deploy_method(ret, path, op_name, op_data, api_key_required,
                                      lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._cleanup_api
|
python
|
def _cleanup_api(self):
'''
Helper method to clean up resources and models if we detected a change in the swagger file
for a stage
'''
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
path=resource.get('path'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
modelName=model.get('name'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
return {'deleted': True}
|
Helper method to clean up resources and models if we detected a change in the swagger file
for a stage
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1189-L1215
| null |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check, but rather make sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
    """
    Yield (model_name, schema) tuples in dependency-safe creation order:
    a model is only yielded once everything it references has been yielded.
    """
    remaining = self._build_all_dependencies()
    # _get_model_without_dependencies pops one dependency-free model per
    # call and returns a falsy value once the map is exhausted.
    model = self._get_model_without_dependencies(remaining)
    while model:
        yield (model, self._models().get(model))
        model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
    """
    Return an items iterator over the swagger 'paths' object.

    Each item is a (path, path_item) pair.  Raises ValueError when the
    paths object is missing/empty, or when any path does not begin with '/'.
    """
    paths = self._cfg.get('paths')
    if not paths:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    for path in paths:
        if not path.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
    # six.iteritems() was a Python-2 compatibility shim; on Python 3 an
    # iterator over dict items is the direct equivalent.
    return iter(paths.items())
@property
def basePath(self):
    """The swagger 'basePath' value; empty string when the file does not define one."""
    return self._cfg.get('basePath', '')
@property
def restApiId(self):
    """AWS-assigned id of the REST API ('' until the api has been created/resolved)."""
    return self._restApiId
@restApiId.setter
def restApiId(self, value):
    """Record the AWS-assigned REST API id after creation/resolution."""
    self._restApiId = value
@property
def deployment_label_json(self):
    """The deployment label serialized as pretty-printed JSON (used to match deployments)."""
    label = self.deployment_label
    return _dict_to_json_pretty(label)
@property
def deployment_label(self):
    """
    Build the dict that uniquely labels a deployment: the swagger info
    object, the api name, the swagger file basename and its md5 checksum.
    """
    return {
        'swagger_info_object': self.info,
        'api_name': self.rest_api_name,
        'swagger_file': os.path.basename(self._swagger_file),
        'swagger_file_md5sum': self.md5_filehash,
    }
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    """Return True when the given deployment still has at least one stage attached to it."""
    res = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                          deploymentId=deploymentId,
                                                          **self._common_aws_args)
    return bool(res.get('stages'))
def no_more_deployments_remain(self):
    """
    Return True when no deployment of this rest api has any stage attached
    (i.e. every remaining deployment is safe to remove).
    """
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    for deployment in deployments or []:
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deployment.get('id'),
                                                                 **self._common_aws_args).get('stages')
        # a single deployment with stages is enough to answer False
        if stages:
            return False
    return True
def _get_current_deployment_id(self):
    """Return the deploymentId the configured stage currently points at, or '' when the stage is absent."""
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        return ''
    return stage.get('deploymentId')
def _get_current_deployment_label(self):
    """Return the description (label json) of the deployment the stage currently uses, else None."""
    deployment = __salt__['boto_apigateway.describe_api_deployment'](
        restApiId=self.restApiId,
        deploymentId=self._get_current_deployment_id(),
        **self._common_aws_args).get('deployment')
    return deployment.get('description') if deployment else None
def _get_desired_deployment_id(self):
    """
    Return the id of the existing deployment whose description matches this
    Swagger object's deployment label, or '' when none matches.
    """
    deployments = __salt__['boto_apigateway.describe_api_deployments'](
        restApiId=self.restApiId, **self._common_aws_args).get('deployments')
    wanted = self.deployment_label_json
    for deployment in deployments or []:
        if deployment.get('description') == wanted:
            return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    """
    Replace the configured stage's variables with stage_variables, logging
    the change into ret; on failure mark ret aborted with the error message.
    """
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
    if res.get('overwrite'):
        return _log_changes(ret, 'overwrite_stage_variables', res.get('stage'))
    ret['result'] = False
    ret['abort'] = True
    ret['comment'] = res.get('error')
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Associate the configured stage_name with self._deploymentId and make it current.

    If the stage does not exist yet it is created (with description and
    variables); if it already exists, only its variables are overwritten.
    In both cases the deployment is then activated for the stage.

    stage_desc_json
        pretty-printed JSON string used as the stage description on creation.
    stage_variables
        dict of stage variables to set on the stage.

    Returns the activate_api_deployment result dict on success, or
    ``{'set': False, 'error': ...}`` when creation/overwrite failed.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        # stage missing: create it bound to the desired deployment
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}
    # point the stage at the desired deployment (no-op if newly created with it)
    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    """
    Look up the AWS rest api matching our api name and the canonical
    provisioning description and remember its id in self.restApiId.
    Raises ValueError when more than one api matches.
    """
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if not apis:
        return
    if len(apis) != 1:
        raise ValueError('Multiple APIs matching given name {0} and '
                         'description {1}'.format(self.rest_api_name, self.info_json))
    self.restApiId = apis[0].get('id')
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                # NOTE(review): the success comment is only set on this branch
                # (other stages still reference the deployment); when the
                # deployment is deleted too, no success comment is written —
                # looks unintentional, confirm before changing.
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.

    Flags set on ret for the caller:
      - ret['current'] = True: stage already on the desired deployment (nothing to do)
      - ret['publish'] = True: a matching deployment exists; reassociate only
      - neither flag: a full deploy is needed (also the case when no
        restApiId has been resolved yet).
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # remember the matching deployment id for publish_api to reuse
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    When verify_api found a matching deployment (self._deploymentId set),
    the stage is simply reassociated with it; otherwise a brand new
    deployment is created for the stage.

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dict of stage variables to apply to the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def deploy_api(self, ret):
    '''
    this method create the top level rest api in AWS apigateway

    When a rest api id has already been resolved, only its stale resources
    and models are cleaned up (the swagger file changed) and ret is
    returned without creating a new api — the existing api is reused.
    Otherwise a new rest api is created and its id stored in self.restApiId.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fix: the error message previously misspelled 'restApiId' as 'restAreId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        # existing api cleaned; no new api is created on this path
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        # salt test mode: report what would happen, change nothing
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        # nothing to do: no api provisioned by this state matches
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
    return ret
def _aws_model_ref_from_swagger_ref(self, r):
    """Translate a swagger '#/definitions/<name>' ref into the AWS apigw model URL."""
    return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(
        self.restApiId, r.split('/')[-1])
def _update_schema_to_aws_notation(self, schema):
    """Return a copy of schema with every '$ref' value rewritten to its AWS model URL."""
    converted = {}
    for key, value in schema.items():
        if key == '$ref':
            value = self._aws_model_ref_from_swagger_ref(value)
        if isinstance(value, dict):
            # recurse into nested objects (properties, items, ...)
            value = self._update_schema_to_aws_notation(value)
        converted[key] = value
    return converted
def _build_dependent_model_list(self, obj_schema):
    '''
    Return the de-duplicated list of model names the given object schema
    references, directly or transitively, via '$ref'.

    Note: mutates obj_schema by defaulting a missing 'type' to 'object'
    (matching the original behavior).
    '''
    dep_models_list = []

    if obj_schema:
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            # array: dependencies come from the item schema
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # follow the reference into its definition, then record it
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.append(ref_obj_model)
            else:
                # need to walk each property object
                # (six.iteritems replaced by .values(): Python 2 support is gone)
                for prop_obj_schema in (obj_schema.get('properties') or {}).values():
                    dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model name in the swagger definitions to the list of model
    names it depends on (its '$ref' closure).
    '''
    # six.iteritems replaced by dict.items(): Python 2 support is gone.
    return {model: self._build_dependent_model_list(schema)
            for model, schema in self._models().items()}
def _get_model_without_dependencies(self, models_dict):
    '''
    Pop and return a model name with no outstanding dependencies, removing
    it from every other model's dependency list; return None when
    models_dict is empty.

    Raises ValueError when no dependency-free model exists (the map
    references undefined models, or contains a cycle).
    '''
    if not models_dict:
        return None

    next_model = None
    # six.iteritems replaced by dict.items(): Python 2 support is gone.
    for model, dependencies in models_dict.items():
        if dependencies == []:
            next_model = model
            break

    if next_model is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))

    # remove the model from other depednencies before returning
    models_dict.pop(next_model)
    for dep_list in models_dict.values():
        if next_model in dep_list:
            dep_list.remove(next_model)

    return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    Models are created in dependency order (self.models()); existing
    models (AWS pre-creates 'Empty' and 'Error') are updated in place.

    ret
        a dictionary for returning status to Saltstack
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
    """
    Derive the lambda function name for a resource/method from the
    configured funcname format: braces are dropped, whitespace and '/'
    become '_', the result is lowercased and repeated '_' collapsed.
    """
    name = self._lambda_funcname_format.format(stage=self._stage_name,
                                               api=self.rest_api_name,
                                               resource=resourcePath,
                                               method=httpMethod).strip()
    name = re.sub(r'{|}', '', name)
    name = re.sub(r'\s+|/', '_', name).lower()
    return re.sub(r'_+', '_', name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration

    Resolves the effective lambda and apigateway regions, looks up the
    lambda function's ARN, and returns the apigateway invocation URI.
    Raises ValueError when the function cannot be found.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    # NOTE(review): this retry is byte-identical to the first lookup — no
    # region override is passed, so it cannot succeed where the first call
    # failed; presumably the first call was meant to target lambda_region.
    # TODO confirm against boto_lambda.describe_function's region kwarg.
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    """
    Collect the request parameters, body models, request templates and
    integration type for one swagger operation.

    'options' methods get the MOCK integration with the CORS request
    template; every other method integrates with AWS (lambda).
    """
    method_params = {}
    method_models = {}
    for param in method_data.get('parameters', []):
        p = _Swagger.SwaggerParameter(param)
        if p.name:
            method_params[p.name] = True
        if p.schema:
            method_models['application/json'] = p.schema

    if method_name == 'options':
        request_templates = _Swagger.REQUEST_OPTION_TEMPLATE
        integration_type = "MOCK"
    else:
        request_templates = _Swagger.REQUEST_TEMPLATE
        integration_type = "AWS"

    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Collect the values of all 'pattern' keys found anywhere in a nested
    dict (non-dict inputs yield an empty list).
    '''
    result = []
    if isinstance(o, dict):
        # six.iteritems replaced by dict.items(): Python 2 support is gone.
        for k, v in o.items():
            if isinstance(v, dict):
                result.extend(self._find_patterns(v))
            elif k == 'pattern':
                result.append(v)
    return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
    """
    Return the first 'pattern' found in the named response model, or the
    default selection pattern ('.+' for HTTP error codes, '.*' otherwise).
    """
    default_pattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    patterns = self._find_patterns(self._models().get(schema_name))
    return patterns[0] if patterns else default_pattern
def _get_response_template(self, method_name, http_status):
    """
    Pick the integration response template set: the error template for
    lambda error status codes (never for 'options'), otherwise the
    success/options template — user-supplied templates take precedence
    over the class defaults.
    """
    if method_name != 'options' and self._is_http_error_rescode(http_status):
        if self._error_response_template:
            return {'application/json': self._error_response_template}
        return self.RESPONSE_TEMPLATE
    if self._response_template:
        return {'application/json': self._response_template}
    return self.RESPONSE_OPTION_TEMPLATE
def _parse_method_response(self, method_name, method_response, httpStatus):
    """
    Build the response-side configuration for one status code: response
    params/models, integration response header mappings, the lambda
    error-selection pattern, and the response templates.
    """
    models = {}
    pattern = '.*'
    if method_response.schema:
        models['application/json'] = method_response.schema
        pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    response_params = {}
    integration_params = {}
    for header, header_data in method_response.headers.items():
        response_header = 'method.response.header.{0}'.format(header)
        response_params[response_header] = False
        # map a declared default value onto the header, otherwise wildcard
        if 'default' in header_data:
            integration_params[response_header] = "'{0}'".format(header_data.get('default'))
        else:
            integration_params[response_header] = "'*'"

    return {'params': response_params,
            'models': models,
            'integration_params': integration_params,
            'pattern': pattern,
            'response_templates': self._get_response_template(method_name, httpStatus)}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'

    Raises ValueError when the swagger operation declares no responses.
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # MOCK integrations ('options') use an empty uri
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        # create one method response + integration response per status code
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret

            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret

            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    For every path in the swagger file the resource hierarchy is created,
    then each recognized HTTP operation under it is deployed via
    _deploy_method.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        # non-operation keys (e.g. vendor extensions, 'parameters') are skipped
        for method, method_data in six.iteritems(pathData):
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)

    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.deploy_api
|
python
|
def deploy_api(self, ret):
'''
this method create the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
|
this method create the top level rest api in AWS apigateway
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1217-L1243
|
[
"def _log_changes(ret, changekey, changevalue):\n '''\n For logging create/update/delete operations to AWS ApiGateway\n '''\n cl = ret['changes'].get('new', [])\n cl.append({changekey: _object_reducer(changevalue)})\n ret['changes']['new'] = cl\n return ret\n",
"def _cleanup_api(self):\n '''\n Helper method to clean up resources and models if we detected a change in the swagger file\n for a stage\n '''\n resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,\n **self._common_aws_args)\n if resources.get('resources'):\n res = resources.get('resources')[1:]\n res.reverse()\n for resource in res:\n delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,\n path=resource.get('path'),\n **self._common_aws_args)\n if not delres.get('deleted'):\n return delres\n\n models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)\n if models.get('models'):\n for model in models.get('models'):\n delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,\n modelName=model.get('name'),\n **self._common_aws_args)\n if not delres.get('deleted'):\n return delres\n\n return {'deleted': True}\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    Wrapper around one swagger Parameter Object, exposing the pieces AWS
    apigateway needs: validated location, the AWS method-request name, and
    the body schema's model name.
    '''
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        The validated 'in' value of the parameter object; ValueError for
        unsupported locations.
        '''
        where = self._paramdict.get('in')
        if where not in _Swagger.SwaggerParameter.LOCATIONS:
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(where))
        return where

    @property
    def name(self):
        '''
        The parameter name mapped to AWS method-request notation
        ('method.request.<location>.<name>'); None for body parameters.
        '''
        param_name = self._paramdict.get('name')
        if not param_name:
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
        templates = {'header': 'method.request.header.{0}',
                     'query': 'method.request.querystring.{0}',
                     'path': 'method.request.path.{0}'}
        template = templates.get(self.location)
        return template.format(param_name) if template else None

    @property
    def schema(self):
        '''
        The referenced model name for body parameters (from '$ref');
        None for non-body parameters.
        '''
        if self.location != 'body':
            return None
        body_schema = self._paramdict.get('schema')
        if not body_schema:
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        if '$ref' not in body_schema:
            raise ValueError(('Body parameter must have a JSON reference '
                              'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
        return body_schema.get('$ref').split('/')[-1]
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
    def __init__(self, api_name, stage_name, lambda_funcname_format,
                 swagger_file_path, error_response_template, response_template, common_aws_args):
        '''
        Load and validate the swagger file and resolve any existing AWS rest
        api id matching api_name.

        api_name
            name of the rest api on AWS ApiGateway

        stage_name
            name of the deployment stage this object operates on

        lambda_funcname_format
            format string used to derive lambda function names
            (see _validate_lambda_funcname_format for the allowed keys)

        swagger_file_path
            path to the swagger definition file (YAML or JSON), or None

        error_response_template, response_template
            optional user supplied integration response templates that
            override the class level defaults

        common_aws_args
            dict of common boto keyword args (region/key/keyid/profile)

        :raises IOError: if swagger_file_path does not point to a regular file
        '''
        self._api_name = api_name
        self._stage_name = stage_name
        self._lambda_funcname_format = lambda_funcname_format
        self._common_aws_args = common_aws_args
        # filled in later when the rest api / deployment is resolved or created
        self._restApiId = ''
        self._deploymentId = ''
        self._error_response_template = error_response_template
        self._response_template = response_template

        if swagger_file_path is not None:
            if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
                self._swagger_file = swagger_file_path
                # hash covers the swagger file plus both templates so that any
                # change to either forces a redeployment
                self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                       error_response_template,
                                                       response_template)
                with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                    self._cfg = salt.utils.yaml.safe_load(sf)
                self._swagger_version = ''
            else:
                raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

            self._validate_swagger_file()

        self._validate_lambda_funcname_format()

        self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
    def _validate_error_response_model(self, paths, mods):
        '''
        Helper function to help validate the convention established in the swagger file on how
        to handle response code mapping/integration.

        paths
            iterator of (path, path item object) pairs from the swagger file

        mods
            dict of model definitions from the swagger file's /definitions

        :raises ValueError: if any 4xx/5xx response lacks a $ref schema, or the
            referenced model is missing, not an object, or has no errorMessage
            property
        '''
        for path, ops in paths:
            for opname, opobj in six.iteritems(ops):
                if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                    # skip vendor extensions and non-operation keys
                    continue

                if 'responses' not in opobj:
                    raise ValueError('missing mandatory responses field in path item object')
                for rescode, resobj in six.iteritems(opobj.get('responses')):
                    if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                        continue

                    # only check for response code from 400-599
                    if 'schema' not in resobj:
                        raise ValueError('missing schema field in path {0}, '
                                         'op {1}, response {2}'.format(path, opname, rescode))

                    schemaobj = resobj.get('schema')
                    if '$ref' not in schemaobj:
                        raise ValueError('missing $ref field under schema in '
                                         'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                    schemaobjref = schemaobj.get('$ref', '/')
                    modelname = schemaobjref.split('/')[-1]

                    if modelname not in mods:
                        raise ValueError('model schema {0} reference not found '
                                         'under /definitions'.format(schemaobjref))
                    model = mods.get(modelname)

                    if model.get('type') != 'object':
                        raise ValueError('model schema {0} must be type object'.format(modelname))
                    if 'properties' not in model:
                        raise ValueError('model schema {0} must have properties fields'.format(modelname))

                    modelprops = model.get('properties')
                    if 'errorMessage' not in modelprops:
                        raise ValueError('model schema {0} must have errorMessage as a property to '
                                         'match AWS convention. If pattern is not set, .+ will '
                                         'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
    def _validate_swagger_file(self):
        '''
        High level check/validation of the input swagger file based on
        https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

        This is not a full schema compliance check, but rather make sure that the input file (YAML or
        JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
        and info.

        :raises ValueError: on unknown top-level fields, missing required
            fields, unsupported swagger version, or invalid error models
        '''
        # check for any invalid fields for Swagger Object V2
        for field in self._cfg:
            if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                    not _Swagger.VENDOR_EXT_PATTERN.match(field)):
                raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

        # check for Required Swagger fields by Saltstack boto apigateway state
        for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
            if field not in self._cfg:
                raise ValueError('Missing Swagger Object Field: {0}'.format(field))

        # check for Swagger Version
        self._swagger_version = self._cfg.get('swagger')
        if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
            raise ValueError('Unsupported Swagger version: {0},'
                             'Supported versions are {1}'.format(self._swagger_version,
                                                                 _Swagger.SWAGGER_VERSIONS_SUPPORTED))

        # NOTE(review): looks like leftover debug output - this logs the type
        # of the bound _models method itself, not the models.  Consider removing.
        log.info(type(self._models))
        self._validate_error_response_model(self.paths, self._models())
    @property
    def md5_filehash(self):
        '''
        returns md5 hash for the swagger file (computed over the file plus the
        response templates in __init__)
        '''
        return self._md5_filehash

    @property
    def info(self):
        '''
        returns the swagger info object as a dictionary

        :raises ValueError: if the info object is missing or empty
        '''
        info = self._cfg.get('info')
        if not info:
            raise ValueError('Info Object has no values')
        return info

    @property
    def info_json(self):
        '''
        returns the swagger info object as a pretty printed json string.
        '''
        return _dict_to_json_pretty(self.info)

    @property
    def rest_api_name(self):
        '''
        returns the name of the api
        '''
        return self._api_name

    @property
    def rest_api_version(self):
        '''
        returns the version field in the swagger info object

        :raises ValueError: if the version field is missing or empty
        '''
        version = self.info.get('version')
        if not version:
            raise ValueError('Missing version value in Info Object')

        return version

    def _models(self):
        '''
        returns the dict of model definitions specified in the swagger file
        (the /definitions object)

        :raises ValueError: if no definitions are present
        '''
        models = self._cfg.get('definitions')
        if not models:
            raise ValueError('Definitions Object has no values, You need to define them in your swagger file')

        return models

    def models(self):
        '''
        generator to return the tuple of model and its schema to create on aws.
        Models are yielded in dependency order so referenced models are created
        before the models that reference them.
        '''
        model_dict = self._build_all_dependencies()
        while True:
            model = self._get_model_without_dependencies(model_dict)
            if not model:
                break
            yield (model, self._models().get(model))
    @property
    def paths(self):
        '''
        returns an iterator for the (relative resource path, path item object)
        pairs specified in the swagger file

        :raises ValueError: if no paths are defined or a path does not start
            with '/'
        '''
        paths = self._cfg.get('paths')
        if not paths:
            raise ValueError('Paths Object has no values, You need to define them in your swagger file')
        for path in paths:
            if not path.startswith('/'):
                raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
        return six.iteritems(paths)

    @property
    def basePath(self):
        '''
        returns the base path field as defined in the swagger file ('' if absent)
        '''
        basePath = self._cfg.get('basePath', '')
        return basePath

    @property
    def restApiId(self):
        '''
        returns the rest api id as returned by AWS on creation of the rest api
        '''
        return self._restApiId

    @restApiId.setter
    def restApiId(self, restApiId):
        '''
        allows the assignment of the rest api id on creation of the rest api
        '''
        self._restApiId = restApiId

    @property
    def deployment_label_json(self):
        '''
        this property returns the unique description in pretty printed json for
        a particular api deployment
        '''
        return _dict_to_json_pretty(self.deployment_label)

    @property
    def deployment_label(self):
        '''
        this property returns the deployment label dictionary (mainly used by
        stage description)
        '''
        label = dict()

        label['swagger_info_object'] = self.info
        label['api_name'] = self.rest_api_name
        label['swagger_file'] = os.path.basename(self._swagger_file)
        label['swagger_file_md5sum'] = self.md5_filehash

        return label
    # methods to interact with boto_apigateway execution modules

    def _one_or_more_stages_remain(self, deploymentId):
        '''
        Helper function to find whether there are other stages still associated with a deployment
        '''
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deploymentId,
                                                                 **self._common_aws_args).get('stages')
        return bool(stages)

    def no_more_deployments_remain(self):
        '''
        Helper function to find whether there are deployments left with stages associated
        '''
        no_more_deployments = True
        deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                           **self._common_aws_args).get('deployments')
        if deployments:
            for deployment in deployments:
                deploymentId = deployment.get('id')
                stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                         deploymentId=deploymentId,
                                                                         **self._common_aws_args).get('stages')
                # one deployment with any stage is enough to answer False
                if stages:
                    no_more_deployments = False
                    break

        return no_more_deployments

    def _get_current_deployment_id(self):
        '''
        Helper method to find the deployment id that the stage name is currently associated with.
        Returns '' when the stage does not exist.
        '''
        deploymentId = ''
        stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               **self._common_aws_args).get('stage')
        if stage:
            deploymentId = stage.get('deploymentId')

        return deploymentId

    def _get_current_deployment_label(self):
        '''
        Helper method to find the deployment label that the stage_name is currently associated with.
        Returns None when no matching deployment exists.
        '''
        deploymentId = self._get_current_deployment_id()
        deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                         deploymentId=deploymentId,
                                                                         **self._common_aws_args).get('deployment')
        if deployment:
            return deployment.get('description')

        return None

    def _get_desired_deployment_id(self):
        '''
        Helper method to return the deployment id matching the desired deployment label for
        this Swagger object based on the given api_name, swagger_file.
        Returns '' when no deployment carries the desired label.
        '''
        deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                           **self._common_aws_args).get('deployments')
        if deployments:
            for deployment in deployments:
                if deployment.get('description') == self.deployment_label_json:
                    return deployment.get('id')

        return ''
    def overwrite_stage_variables(self, ret, stage_variables):
        '''
        overwrite the given stage_name's stage variables with the given stage_variables

        ret
            a dictionary for returning status to Saltstack
        '''
        res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                        stageName=self._stage_name,
                                                                        variables=stage_variables,
                                                                        **self._common_aws_args)
        if not res.get('overwrite'):
            ret['result'] = False
            ret['abort'] = True
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'overwrite_stage_variables',
                               res.get('stage'))
        return ret

    def _set_current_deployment(self, stage_desc_json, stage_variables):
        '''
        Helper method to associate the stage_name to the given deploymentId and make this current.
        Creates the stage if it does not yet exist, otherwise overwrites its
        stage variables, then activates the deployment on the stage.
        '''
        stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               **self._common_aws_args).get('stage')
        if not stage:
            stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                                 stageName=self._stage_name,
                                                                 deploymentId=self._deploymentId,
                                                                 description=stage_desc_json,
                                                                 variables=stage_variables,
                                                                 **self._common_aws_args)
            if not stage.get('stage'):
                return {'set': False, 'error': stage.get('error')}
        else:
            # overwrite the stage variables
            overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                                  stageName=self._stage_name,
                                                                                  variables=stage_variables,
                                                                                  **self._common_aws_args)
            if not overwrite.get('stage'):
                return {'set': False, 'error': overwrite.get('error')}

        return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                                   stageName=self._stage_name,
                                                                   deploymentId=self._deploymentId,
                                                                   **self._common_aws_args)
    def _resolve_api_id(self):
        '''
        returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
        as the api description

        :raises ValueError: if more than one matching api exists
        '''
        apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                         **self._common_aws_args).get('restapi')
        if apis:
            if len(apis) == 1:
                self.restApiId = apis[0].get('id')
            else:
                raise ValueError('Multiple APIs matching given name {0} and '
                                 'description {1}'.format(self.rest_api_name, self.info_json))

    def delete_stage(self, ret):
        '''
        Method to delete the given stage_name. If the current deployment tied to the given
        stage_name has no other stages associated with it, the deployment will be removed
        as well

        ret
            a dictionary for returning status to Saltstack
        '''
        deploymentId = self._get_current_deployment_id()
        if deploymentId:
            result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                                  stageName=self._stage_name,
                                                                  **self._common_aws_args)
            if not result.get('deleted'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
            else:
                # check if it is safe to delete the deployment as well.
                if not self._one_or_more_stages_remain(deploymentId):
                    result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                               deploymentId=deploymentId,
                                                                               **self._common_aws_args)
                    if not result.get('deleted'):
                        ret['abort'] = True
                        ret['result'] = False
                        ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
                else:
                    ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
        else:
            # no matching stage_name/deployment found
            ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

        return ret

    def verify_api(self, ret):
        '''
        this method helps determine if the given stage_name is already on a deployment
        label matching the input api_name, swagger_file.

        If yes, returns abort with comment indicating already at desired state.
        If not and there is previous deployment labels in AWS matching the given input api_name and
        swagger file, indicate to the caller that we only need to reassociate stage_name to the
        previously existing deployment label.

        ret
            a dictionary for returning status to Saltstack
        '''
        if self.restApiId:
            deployed_label_json = self._get_current_deployment_label()
            if deployed_label_json == self.deployment_label_json:
                ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                                  'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
                ret['current'] = True
                return ret
            else:
                self._deploymentId = self._get_desired_deployment_id()
                if self._deploymentId:
                    # a matching deployment already exists; publish_api only
                    # needs to reassociate the stage to it
                    ret['publish'] = True
        return ret
    def publish_api(self, ret, stage_variables):
        '''
        this method tie the given stage_name to a deployment matching the given swagger_file

        ret
            a dictionary for returning status to Saltstack

        stage_variables
            dictionary of stage variables to set on the stage
        '''
        stage_desc = dict()
        stage_desc['current_deployment_label'] = self.deployment_label
        stage_desc_json = _dict_to_json_pretty(stage_desc)

        if self._deploymentId:
            # just do a reassociate of stage_name to an already existing deployment
            res = self._set_current_deployment(stage_desc_json, stage_variables)
            if not res.get('set'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret,
                                   'publish_api (reassociate deployment, set stage_variables)',
                                   res.get('response'))
        else:
            # no deployment existed for the given swagger_file for this Swagger object
            res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    stageDescription=stage_desc_json,
                                                                    description=self.deployment_label_json,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
            if not res.get('created'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
        return ret

    def _cleanup_api(self):
        '''
        Helper method to clean up resources and models if we detected a change in the swagger file
        for a stage
        '''
        resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                       **self._common_aws_args)
        if resources.get('resources'):
            # skip the root resource and delete deepest paths first
            res = resources.get('resources')[1:]
            res.reverse()
            for resource in res:
                delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                          path=resource.get('path'),
                                                                          **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
        if models.get('models'):
            for model in models.get('models'):
                delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                      modelName=model.get('name'),
                                                                      **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        return {'deleted': True}
    def delete_api(self, ret):
        '''
        Method to delete a Rest Api named defined in the swagger file's Info Object's title value.
        Honors test mode (__opts__['test']) by only reporting what would happen.

        ret
            a dictionary for returning status to Saltstack
        '''
        exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
        if exists_response.get('exists'):
            if __opts__['test']:
                ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
                ret['result'] = None
                ret['abort'] = True
                return ret

            delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                                         **self._common_aws_args)
            if not delete_api_response.get('deleted'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in delete_api_response:
                    ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
                return ret

            ret = _log_changes(ret, 'delete_api', delete_api_response)
        else:
            ret['comment'] = ('api already absent for swagger file: '
                              '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

        return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
    def _build_dependent_model_list(self, obj_schema):
        '''
        Helper function to build the list of models the given object schema is referencing.
        Walks arrays, '$ref's and object properties recursively; the returned
        list is deduplicated.
        '''
        dep_models_list = []

        if obj_schema:
            # default the schema type to 'object' when unspecified
            obj_schema['type'] = obj_schema.get('type', 'object')

            if obj_schema['type'] == 'array':
                dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
            else:
                ref = obj_schema.get('$ref')
                if ref:
                    ref_obj_model = ref.split("/")[-1]
                    ref_obj_schema = self._models().get(ref_obj_model)
                    dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                    dep_models_list.extend([ref_obj_model])
                else:
                    # need to walk each property object
                    properties = obj_schema.get('properties')
                    if properties:
                        for _, prop_obj_schema in six.iteritems(properties):
                            dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
        return list(set(dep_models_list))

    def _build_all_dependencies(self):
        '''
        Helper function to build a map of model to their list of model reference dependencies
        '''
        ret = {}
        for model, schema in six.iteritems(self._models()):
            dep_list = self._build_dependent_model_list(schema)
            ret[model] = dep_list
        return ret

    def _get_model_without_dependencies(self, models_dict):
        '''
        Helper function to find the next model that should be created (one with
        an empty dependency list).  The chosen model is popped from models_dict
        and removed from every other model's dependency list.

        :raises ValueError: when no dependency-free model remains (circular or
            undefined references)
        '''
        next_model = None
        if not models_dict:
            return next_model

        for model, dependencies in six.iteritems(models_dict):
            if dependencies == []:
                next_model = model
                break

        if next_model is None:
            raise ValueError('incomplete model definitions, models in dependency '
                             'list not defined: {0}'.format(models_dict))

        # remove the model from other dependencies before returning
        models_dict.pop(next_model)
        for model, dep_list in six.iteritems(models_dict):
            if next_model in dep_list:
                dep_list.remove(next_model)

        return next_model
    def deploy_models(self, ret):
        '''
        Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

        ret
            a dictionary for returning status to Saltstack
        '''
        for model, schema in self.models():
            # add in a few attributes into the model schema that AWS expects
            # _schema = schema.copy()
            _schema = self._update_schema_to_aws_notation(schema)
            _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                            'title': '{0} Schema'.format(model)})

            # check to see if model already exists, aws has 2 default models [Empty, Error]
            # which may need update with data from swagger file
            model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                                 modelName=model,
                                                                                 **self._common_aws_args)

            if model_exists_response.get('exists'):
                update_model_schema_response = (
                    __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                        modelName=model,
                                                                        schema=_dict_to_json_pretty(_schema),
                                                                        **self._common_aws_args))
                if not update_model_schema_response.get('updated'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in update_model_schema_response:
                        ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              update_model_schema_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
            else:
                create_model_response = (
                    __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                                 modelDescription=model,
                                                                 schema=_dict_to_json_pretty(_schema),
                                                                 contentType='application/json',
                                                                 **self._common_aws_args))

                if not create_model_response.get('created'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in create_model_response:
                        ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              create_model_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', create_model_response)

        return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
    def _lambda_uri(self, lambda_name, lambda_region):
        '''
        Helper Method to construct the lambda uri for use in method integration.
        Looks the function up and raises if it cannot be found.

        :raises ValueError: if the lambda function cannot be found
        '''
        profile = self._common_aws_args.get('profile')
        region = self._common_aws_args.get('region')

        lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
        apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

        lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

        # NOTE(review): the fallback lookup below is issued with the same
        # _common_aws_args as the first, so it queries the same region -
        # presumably a region override was intended here.  TODO confirm.
        if lambda_region != apigw_region:
            if not lambda_desc.get('function'):
                # try look up in the same region as the apigateway as well if previous lookup failed
                lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

        if not lambda_desc.get('function'):
            raise ValueError('Could not find lambda function {0} in '
                             'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

        lambda_arn = lambda_desc.get('function').get('FunctionArn')
        lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                      '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
        return lambda_uri

    def _parse_method_data(self, method_name, method_data):
        '''
        Helper function to construct the method request params, models, request_templates and
        integration_type values needed to configure method request integration/mappings.
        '''
        method_params = {}
        method_models = {}
        if 'parameters' in method_data:
            for param in method_data['parameters']:
                p = _Swagger.SwaggerParameter(param)
                if p.name:
                    method_params[p.name] = True
                if p.schema:
                    method_models['application/json'] = p.schema

        # OPTIONS (CORS preflight) methods get a MOCK integration; everything
        # else integrates with AWS lambda
        request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
        integration_type = "MOCK" if method_name == 'options' else "AWS"

        return {'params': method_params,
                'models': method_models,
                'request_templates': request_templates,
                'integration_type': integration_type}

    def _find_patterns(self, o):
        '''
        Recursively collect the values of all 'pattern' keys found anywhere in
        the nested dict ``o``.
        '''
        result = []
        if isinstance(o, dict):
            for k, v in six.iteritems(o):
                if isinstance(v, dict):
                    result.extend(self._find_patterns(v))
                else:
                    if k == 'pattern':
                        result.append(v)
        return result

    def _get_pattern_for_schema(self, schema_name, httpStatus):
        '''
        returns the pattern specified in a response schema, falling back to
        '.+' for 4xx/5xx status codes and '.*' otherwise
        '''
        defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
        model = self._models().get(schema_name)
        patterns = self._find_patterns(model)
        return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
    def _parse_method_response(self, method_name, method_response, httpStatus):
        '''
        Helper function to construct the method response params, models, and integration_params
        values needed to configure method response integration/mappings.

        method_name
            lower-cased http method name

        method_response
            a _Swagger.SwaggerMethodResponse instance for this status code

        httpStatus
            the response status code as a string
        '''
        method_response_models = {}
        method_response_pattern = '.*'
        if method_response.schema:
            method_response_models['application/json'] = method_response.schema
            method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

        method_response_params = {}
        method_integration_response_params = {}
        for header in method_response.headers:
            response_header = 'method.response.header.{0}'.format(header)
            method_response_params[response_header] = False
            header_data = method_response.headers.get(header)
            # map to the header's declared default if present, else wildcard
            method_integration_response_params[response_header] = (
                "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

        response_templates = self._get_response_template(method_name, httpStatus)

        return {'params': method_response_params,
                'models': method_response_models,
                'integration_params': method_integration_response_params,
                'pattern': method_response_pattern,
                'response_templates': response_templates}
    def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                       lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to create a method for the given resource path, along with its associated
        request and response integrations.

        ret
            a dictionary for returning status to Saltstack

        resource_path
            the full resource path where the named method_name will be associated with.

        method_name
            a string that is one of the following values: 'delete', 'get', 'head', 'options',
            'patch', 'post', 'put'

        method_data
            the value dictionary for this method in the swagger definition file.

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'

        :raises ValueError: if the method declares no responses
        '''
        method = self._parse_method_data(method_name.lower(), method_data)

        # for options method to enable CORS, api_key_required will be set to False always.
        # authorization_type will be set to 'NONE' always.
        if method_name.lower() == 'options':
            api_key_required = False
            authorization_type = 'NONE'

        m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                          resourcePath=resource_path,
                                                          httpMethod=method_name.upper(),
                                                          authorizationType=authorization_type,
                                                          apiKeyRequired=api_key_required,
                                                          requestParameters=method.get('params'),
                                                          requestModels=method.get('models'),
                                                          **self._common_aws_args)
        if not m.get('created'):
            ret = _log_error_and_abort(ret, m)
            return ret

        ret = _log_changes(ret, '_deploy_method.create_api_method', m)

        lambda_uri = ""
        if method_name.lower() != 'options':
            lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                          lambda_region=lambda_region)

        # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
        # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
        integration = (
            __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                               resourcePath=resource_path,
                                                               httpMethod=method_name.upper(),
                                                               integrationType=method.get('integration_type'),
                                                               integrationHttpMethod='POST',
                                                               uri=lambda_uri,
                                                               credentials=lambda_integration_role,
                                                               requestTemplates=method.get('request_templates'),
                                                               **self._common_aws_args))
        if not integration.get('created'):
            ret = _log_error_and_abort(ret, integration)
            return ret
        ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

        if 'responses' in method_data:
            for response, response_data in six.iteritems(method_data['responses']):
                httpStatus = str(response)  # future lint: disable=blacklisted-function
                method_response = self._parse_method_response(method_name.lower(),
                                                              _Swagger.SwaggerMethodResponse(response_data), httpStatus)

                mr = __salt__['boto_apigateway.create_api_method_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    responseParameters=method_response.get('params'),
                    responseModels=method_response.get('models'),
                    **self._common_aws_args)
                if not mr.get('created'):
                    ret = _log_error_and_abort(ret, mr)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

                mir = __salt__['boto_apigateway.create_api_integration_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    selectionPattern=method_response.get('pattern'),
                    responseParameters=method_response.get('integration_params'),
                    responseTemplates=method_response.get('response_templates'),
                    **self._common_aws_args)
                if not mir.get('created'):
                    ret = _log_error_and_abort(ret, mir)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
        else:
            raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

        return ret
    def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to deploy resources defined in the swagger file.

        ret
            a dictionary for returning status to Saltstack

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        for path, pathData in self.paths:
            resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                        path=path,
                                                                        **self._common_aws_args)

            if not resource.get('created'):
                ret = _log_error_and_abort(ret, resource)
                return ret
            ret = _log_changes(ret, 'deploy_resources', resource)
            for method, method_data in six.iteritems(pathData):
                # only real http operations are deployed; vendor extensions
                # and other keys under the path item object are skipped
                if method in _Swagger.SWAGGER_OPERATION_NAMES:
                    ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                              lambda_integration_role, lambda_region, authorization_type)
        return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.delete_api
|
python
|
    def delete_api(self, ret):
        '''
        Method to delete a Rest Api whose name is defined in the swagger file's Info Object's title value.

        ret
            a dictionary for returning status to Saltstack
        '''
        # only apis provisioned by this state (matching name AND the fixed
        # AWS_API_DESCRIPTION marker) are considered for deletion
        exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
        if exists_response.get('exists'):
            if __opts__['test']:
                # test mode: report what would happen and stop further processing
                ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
                ret['result'] = None
                ret['abort'] = True
                return ret
            delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                                         **self._common_aws_args)
            if not delete_api_response.get('deleted'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in delete_api_response:
                    ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
                return ret
            ret = _log_changes(ret, 'delete_api', delete_api_response)
        else:
            # nothing to do: desired state (absent) already holds
            ret['comment'] = ('api already absent for swagger file: '
                              '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
        return ret
|
Method to delete a Rest Api whose name is defined in the swagger file's Info Object's title value.
ret
a dictionary for returning status to Saltstack
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1245-L1278
|
[
"def _log_changes(ret, changekey, changevalue):\n '''\n For logging create/update/delete operations to AWS ApiGateway\n '''\n cl = ret['changes'].get('new', [])\n cl.append({changekey: _object_reducer(changevalue)})\n ret['changes']['new'] = cl\n return ret\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check, but rather make sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
yield (model, self._models().get(model))
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
@property
def basePath(self):
'''
returns the base path field as defined in the swagger file
'''
basePath = self._cfg.get('basePath', '')
return basePath
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
'''
Helper function to find whether there are other stages still associated with a deployment
'''
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
return bool(stages)
def no_more_deployments_remain(self):
'''
Helper function to find whether there are deployments left with stages associated
'''
no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
if stages:
no_more_deployments = False
break
return no_more_deployments
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently assocaited with.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
def overwrite_stage_variables(self, ret, stage_variables):
'''
overwrite the given stage_name's stage variables with the given stage_variables
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
'''
Helper method to associate the stage_name to the given deploymentId and make this current
'''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if not stage:
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
description=stage_desc_json,
variables=stage_variables,
**self._common_aws_args)
if not stage.get('stage'):
return {'set': False, 'error': stage.get('error')}
else:
# overwrite the stage variables
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not overwrite.get('stage'):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
**self._common_aws_args)
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
def verify_api(self, ret):
'''
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
If yes, returns abort with comment indicating already at desired state.
If not and there is previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.
'''
if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if deployed_label_json == self.deployment_label_json:
ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
ret['current'] = True
return ret
else:
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
def publish_api(self, ret, stage_variables):
'''
this method tie the given stage_name to a deployment matching the given swagger_file
'''
stage_desc = dict()
stage_desc['current_deployment_label'] = self.deployment_label
stage_desc_json = _dict_to_json_pretty(stage_desc)
if self._deploymentId:
# just do a reassociate of stage_name to an already existing deployment
res = self._set_current_deployment(stage_desc_json, stage_variables)
if not res.get('set'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'publish_api (reassociate deployment, set stage_variables)',
res.get('response'))
else:
# no deployment existed for the given swagger_file for this Swagger object
res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
stageDescription=stage_desc_json,
description=self.deployment_label_json,
variables=stage_variables,
**self._common_aws_args)
if not res.get('created'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
return ret
def _cleanup_api(self):
'''
Helper method to clean up resources and models if we detected a change in the swagger file
for a stage
'''
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
path=resource.get('path'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
modelName=model.get('name'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
return {'deleted': True}
def deploy_api(self, ret):
'''
this method create the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
    def _aws_model_ref_from_swagger_ref(self, r):
        '''
        Translate a swagger '$ref' value (e.g. '#/definitions/Name') into the
        AWS apigateway model url for this rest api, so models created on AWS
        can reference each other.
        '''
        # the model name is the last path segment of the swagger reference
        model_name = r.split('/')[-1]
        return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
    def _update_schema_to_aws_notation(self, schema):
        '''
        Return a copy of ``schema`` where every '$ref' value is rewritten from
        the swagger-local form ('#/definitions/Name') to the AWS apigateway
        model url for this rest api.  Nested dicts are converted recursively.
        '''
        result = {}
        for k, v in schema.items():
            if k == '$ref':
                v = self._aws_model_ref_from_swagger_ref(v)
            if isinstance(v, dict):
                v = self._update_schema_to_aws_notation(v)
            # NOTE(review): list values (e.g. 'allOf' entries) are copied through
            # untouched, so '$ref's nested inside lists are not rewritten --
            # confirm this is acceptable for the supported swagger subset.
            result[k] = v
        return result
def _build_dependent_model_list(self, obj_schema):
'''
Helper function to build the list of models the given object schema is referencing.
'''
dep_models_list = []
if obj_schema:
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
ref_obj_model = ref.split("/")[-1]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
# need to walk each property object
properties = obj_schema.get('properties')
if properties:
for _, prop_obj_schema in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
'''
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack
'''
for model, schema in self.models():
# add in a few attributes into the model schema that AWS expects
# _schema = schema.copy()
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
'title': '{0} Schema'.format(model)})
# check to see if model already exists, aws has 2 default models [Empty, Error]
# which may need upate with data from swagger file
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
modelName=model,
**self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = (
__salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
modelName=model,
schema=_dict_to_json_pretty(_schema),
**self._common_aws_args))
if not update_model_schema_response.get('updated'):
ret['result'] = False
ret['abort'] = True
if 'error' in update_model_schema_response:
ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
update_model_schema_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = (
__salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
modelDescription=model,
schema=_dict_to_json_pretty(_schema),
contentType='application/json',
**self._common_aws_args))
if not create_model_response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in create_model_response:
ret['comment'] = ('Failed to create model {0}, schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
create_model_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
'''
Helper Method to construct the lambda uri for use in method integration
'''
profile = self._common_aws_args.get('profile')
region = self._common_aws_args.get('region')
lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if lambda_region != apigw_region:
if not lambda_desc.get('function'):
# try look up in the same region as the apigateway as well if previous lookup failed
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if not lambda_desc.get('function'):
raise ValueError('Could not find lambda function {0} in '
'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
lambda_arn = lambda_desc.get('function').get('FunctionArn')
lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
'/functions/{1}/invocations'.format(apigw_region, lambda_arn))
return lambda_uri
def _parse_method_data(self, method_name, method_data):
'''
Helper function to construct the method request params, models, request_templates and
integration_type values needed to configure method request integration/mappings.
'''
method_params = {}
method_models = {}
if 'parameters' in method_data:
for param in method_data['parameters']:
p = _Swagger.SwaggerParameter(param)
if p.name:
method_params[p.name] = True
if p.schema:
method_models['application/json'] = p.schema
request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
integration_type = "MOCK" if method_name == 'options' else "AWS"
return {'params': method_params,
'models': method_models,
'request_templates': request_templates,
'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
    def _get_response_template(self, method_name, http_status):
        '''
        Choose the integration response template for a method/status pair:
        the user-supplied error template (or the built-in RESPONSE_TEMPLATE)
        for 4xx/5xx statuses on non-options methods, and the user-supplied
        normal template (or the empty options template) otherwise.
        '''
        if method_name == 'options' or not self._is_http_error_rescode(http_status):
            response_templates = {'application/json': self._response_template} \
                if self._response_template else self.RESPONSE_OPTION_TEMPLATE
        else:
            response_templates = {'application/json': self._error_response_template} \
                if self._error_response_template else self.RESPONSE_TEMPLATE
        return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Build the pieces needed to configure a method response and its
    integration response: response params/models, header mappings,
    the selection pattern and the response templates.
    '''
    response_models = {}
    response_pattern = '.*'
    if method_response.schema:
        response_models['application/json'] = method_response.schema
        response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    response_params = {}
    integration_params = {}
    for header_name, header_data in method_response.headers.items():
        response_header = 'method.response.header.{0}'.format(header_name)
        response_params[response_header] = False
        # map a declared default onto the integration; '*' otherwise
        integration_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    return {'params': response_params,
            'models': response_models,
            'integration_params': integration_params,
            'pattern': response_pattern,
            'response_templates': self._get_response_template(method_name, httpStatus)}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)
    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    # create the method request on the resource
    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # options methods are MOCK integrations and need no lambda backend
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    # wire up every response code declared in the swagger file: first the
    # method response, then the matching integration response mapping
    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        # creates the resource (and its parents) for the swagger path
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # skip non-operation keys (e.g. 'parameters', vendor extensions)
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._aws_model_ref_from_swagger_ref
|
python
|
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
|
Helper function to reference models created on aws apigw
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1280-L1285
| null |
class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''

    # Fields a Swagger Object (v2) may legally contain; 'x-' vendor
    # extensions are matched separately via VENDOR_EXT_PATTERN.
    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)

    # VENDOR SPECIFIC FIELD PATTERNS
    VENDOR_EXT_PATTERN = re.compile('^x-')

    # JSON_SCHEMA_REF
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'

    # AWS integration templates for normal and options methods
    # (velocity template passing headers, query/path params, the apigw
    # context, the body and stage variables through to the lambda backend)
    REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
                                            '{\n'
                                            '"header_params" : {\n'
                                            '#set ($map = $input.params().header)\n'
                                            '#foreach( $param in $map.entrySet() )\n'
                                            '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                                            '#end\n'
                                            '},\n'
                                            '"query_params" : {\n'
                                            '#set ($map = $input.params().querystring)\n'
                                            '#foreach( $param in $map.entrySet() )\n'
                                            '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                                            '#end\n'
                                            '},\n'
                                            '"path_params" : {\n'
                                            '#set ($map = $input.params().path)\n'
                                            '#foreach( $param in $map.entrySet() )\n'
                                            '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                                            '#end\n'
                                            '},\n'
                                            '"apigw_context" : {\n'
                                            '"apiId": "$context.apiId",\n'
                                            '"httpMethod": "$context.httpMethod",\n'
                                            '"requestId": "$context.requestId",\n'
                                            '"resourceId": "$context.resourceId",\n'
                                            '"resourcePath": "$context.resourcePath",\n'
                                            '"stage": "$context.stage",\n'
                                            '"identity": {\n'
                                            ' "user":"$context.identity.user",\n'
                                            ' "userArn":"$context.identity.userArn",\n'
                                            ' "userAgent":"$context.identity.userAgent",\n'
                                            ' "sourceIp":"$context.identity.sourceIp",\n'
                                            ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
                                            ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
                                            ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
                                            ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
                                            ' "caller":"$context.identity.caller",\n'
                                            ' "apiKey":"$context.identity.apiKey",\n'
                                            ' "accountId":"$context.identity.accountId"\n'
                                            '}\n'
                                            '},\n'
                                            '"body_params" : $input.json(\'$\'),\n'
                                            '"stage_variables": {\n'
                                            '#foreach($variable in $stageVariables.keySet())\n'
                                            '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
                                            '#if($foreach.hasNext), #end\n'
                                            '#end\n'
                                            '}\n'
                                            '}'}
    REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}

    # AWS integration response template mapping to convert stackTrace part or the error
    # to a uniform format containing strings only. Swagger does not seem to allow defining
    # an array of non-uniform types, to it is not possible to create error model to match
    # exactly what comes out of lambda functions in case of error.
    RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
                                             '{\n'
                                             ' "errorMessage" : "$inputRoot.errorMessage",\n'
                                             ' "errorType" : "$inputRoot.errorType",\n'
                                             ' "stackTrace" : [\n'
                                             '#foreach($stackTrace in $inputRoot.stackTrace)\n'
                                             ' [\n'
                                             '#foreach($elem in $stackTrace)\n'
                                             ' "$elem"\n'
                                             '#if($foreach.hasNext),#end\n'
                                             '#end\n'
                                             ' ]\n'
                                             '#if($foreach.hasNext),#end\n'
                                             '#end\n'
                                             ' ]\n'
                                             '}'}
    RESPONSE_OPTION_TEMPLATE = {}

    # This string should not be modified, every API created by this state will carry the description
    # below.
    AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
                                                "context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    Wrapper around a single Swagger Parameter Object exposing the pieces
    AWS API Gateway needs: the parameter location, its mapped request
    name, and the referenced body schema (if any).
    '''
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        Validated 'in' field of the parameter object.
        '''
        where = self._paramdict.get('in')
        if where not in _Swagger.SwaggerParameter.LOCATIONS:
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(where))
        return where

    @property
    def name(self):
        '''
        AWS method-request mapping name for this parameter; None for body
        parameters (those are described by the schema instead).
        '''
        param_name = self._paramdict.get('name')
        if not param_name:
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
        where = self.location
        if where == 'header':
            return 'method.request.header.{0}'.format(param_name)
        if where == 'query':
            return 'method.request.querystring.{0}'.format(param_name)
        if where == 'path':
            return 'method.request.path.{0}'.format(param_name)
        return None

    @property
    def schema(self):
        '''
        Name of the referenced model for a body parameter; None for every
        other location.
        '''
        if self.location != 'body':
            return None
        schema_obj = self._paramdict.get('schema')
        if not schema_obj:
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        if '$ref' not in schema_obj:
            raise ValueError(('Body parameter must have a JSON reference '
                              'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
        return schema_obj.get('$ref').split('/')[-1]
class SwaggerMethodResponse(object):
    '''
    Thin wrapper over a Swagger Method Response Object exposing the
    referenced schema name and the declared headers.
    '''

    def __init__(self, r):
        self._r = r

    @property
    def schema(self):
        '''
        Name of the referenced response model; None when the response has
        no schema at all.
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        Headers dict declared on the response (empty dict when absent).
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load and validate the swagger definition and resolve any existing
    AWS rest api id matching api_name.

    api_name
        name of the rest api on AWS

    stage_name
        name of the deployment stage to operate on

    lambda_funcname_format
        format string used to derive lambda function names; may only use
        the keys stage/api/resource/method

    swagger_file_path
        path to the swagger definition file (YAML or JSON); may be None
        (e.g. for the absent/delete path)

    error_response_template / response_template
        optional velocity templates overriding the class-level defaults

    common_aws_args
        region/key/keyid/profile arguments passed through to every boto call
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    # filled in later by _resolve_api_id / deploy_api and publish_api
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # hash covers the swagger file and both templates, so a change to
            # any of them produces a new deployment label
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

        self._validate_swagger_file()

    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration

    paths
        iterator over (path, path item object) pairs from the swagger file

    mods
        the swagger 'definitions' dict (model name -> schema)
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # skip non-operation keys (e.g. 'parameters', vendor extensions)
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue

                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                # the referenced model must exist and be an object ...
                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                # ... and carry an errorMessage property per the AWS lambda
                # error convention
                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # removed leftover debug statement log.info(type(self._models)) — it only
    # ever logged the bound-method type, never useful information.
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    md5 hash computed over the swagger file and the response templates.
    '''
    return self._md5_filehash
@property
def info(self):
    '''
    The swagger 'info' object as a dictionary; raises ValueError when it
    is missing or empty.
    '''
    info_obj = self._cfg.get('info')
    if not info_obj:
        raise ValueError('Info Object has no values')
    return info_obj
@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    # delegates to the module-level pretty printer so deployment labels and
    # error messages share one canonical json form
    return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
    '''
    Name of the rest api on AWS (as supplied to the constructor).
    '''
    return self._api_name
@property
def rest_api_version(self):
    '''
    The 'version' field of the swagger info object; raises ValueError
    when absent.
    '''
    api_version = self.info.get('version')
    if not api_version:
        raise ValueError('Missing version value in Info Object')
    return api_version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
    '''
    Generator yielding (model_name, schema) tuples in dependency order,
    so each model is produced only after every model it references.
    '''
    remaining = self._build_all_dependencies()
    next_model = self._get_model_without_dependencies(remaining)
    while next_model:
        yield (next_model, self._models().get(next_model))
        next_model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
    '''
    Iterator over the (relative path, path item object) pairs defined in
    the swagger file; every path must begin with '/'.
    '''
    path_objects = self._cfg.get('paths')
    if not path_objects:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    for relative_path in path_objects:
        if not relative_path.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(relative_path))
    return six.iteritems(path_objects)
@property
def basePath(self):
    '''
    The swagger 'basePath' value ('' when not present).
    '''
    return self._cfg.get('basePath', '')
@property
def restApiId(self):
    '''
    The AWS rest api id ('' until resolved or created).
    '''
    return self._restApiId

@restApiId.setter
def restApiId(self, value):
    '''
    Record the rest api id returned by AWS on creation/lookup.
    '''
    self._restApiId = value
@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    # the pretty-printed form is what gets stored as the AWS deployment
    # description and later compared against to detect changes
    return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    Dictionary uniquely identifying a deployment: the swagger info object,
    the api name, the swagger file basename and its md5 hash.
    '''
    return {
        'swagger_info_object': self.info,
        'api_name': self.rest_api_name,
        'swagger_file': os.path.basename(self._swagger_file),
        'swagger_file_md5sum': self.md5_filehash,
    }
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    True when at least one stage still references *deploymentId*.
    '''
    remaining_stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                       deploymentId=deploymentId,
                                                                       **self._common_aws_args).get('stages')
    return bool(remaining_stages)
def no_more_deployments_remain(self):
    '''
    True when no deployment of this rest api has any stage attached.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    for deployment in deployments or []:
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deployment.get('id'),
                                                                 **self._common_aws_args).get('stages')
        if stages:
            return False
    return True
def _get_current_deployment_id(self):
    '''
    Deployment id the configured stage currently points at ('' when the
    stage does not exist).
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        return ''
    return stage.get('deploymentId')
def _get_current_deployment_label(self):
    '''
    Description (deployment label) of the deployment our stage currently
    points at, or None when it cannot be found.
    '''
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=self._get_current_deployment_id(),
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None
def _get_desired_deployment_id(self):
    '''
    Id of an existing deployment whose description matches our deployment
    label (api_name + swagger file), or '' when none matches.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    desired_label = self.deployment_label_json
    for deployment in deployments or []:
        if deployment.get('description') == desired_label:
            return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    Replace the stage variables on our stage with *stage_variables*,
    recording the outcome in *ret*.
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
    if res.get('overwrite'):
        return _log_changes(ret,
                            'overwrite_stage_variables',
                            res.get('stage'))

    ret['result'] = False
    ret['abort'] = True
    ret['comment'] = res.get('error')
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current

    Creates the stage if it does not yet exist (with the given description
    and variables); otherwise overwrites its stage variables.  Returns a
    dict with 'set': False plus 'error' on failure, or the result of
    activate_api_deployment on success.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        # stage does not exist yet: create it pointed at our deployment
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    # point the (new or existing) stage at the desired deployment
    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Find an existing rest api matching our api name and the hardcoded
    _Swagger.AWS_API_DESCRIPTION and record its id; raise when the match
    is ambiguous.
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if not apis:
        return
    if len(apis) != 1:
        raise ValueError('Multiple APIs matching given name {0} and '
                         'description {1}'.format(self.rest_api_name, self.info_json))
    self.restApiId = apis[0].get('id')
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name.  If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        # delete the stage first, then possibly the orphaned deployment
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            # stage already points at a deployment matching this swagger file
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # a matching deployment may already exist under another stage;
            # in that case publish only needs to re-associate the stage
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    Either re-associates the stage with an already existing deployment
    (when verify_api found one) or creates a brand new deployment.
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage

    Returns {'deleted': True} on success, or the first failing delete
    result otherwise.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the root resource ('/') and delete children deepest-first
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    this method create the top level rest api in AWS apigateway

    If a rest api already exists for this swagger file, its resources and
    models are cleaned up (so they can be re-deployed) instead of creating
    a new rest api.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # bug fix: error message previously read 'restAreId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        # honor salt's test=True mode: report what would happen, change nothing
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

    return ret
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Walks array 'items', '$ref' references (recursing into the referenced
    model so its own dependencies are collected first) and object
    'properties'.  Returns a de-duplicated list of model names.

    NOTE(review): this defaults obj_schema['type'] to 'object' in place,
    i.e. it mutates the caller's schema dict — presumably intentional so
    later passes see an explicit type; confirm before changing.
    '''
    dep_models_list = []
    if obj_schema:
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model name to the list of model names its schema references.
    '''
    return dict((model, self._build_dependent_model_list(schema))
                for model, schema in six.iteritems(self._models()))
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    # self.models() yields models in dependency order, so referenced
    # models always exist on AWS before their dependents are created.
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            # existing model: push the (possibly changed) schema to AWS
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            # brand new model: create it with the schema from the swagger file
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration

    lambda_name
        name of the lambda function to resolve to a FunctionArn

    lambda_region
        preferred region of the lambda function; raises ValueError when the
        function cannot be found
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    # NOTE(review): the retry below is byte-identical to the first
    # describe_function call -- no region override is passed, so the
    # "look up in the apigateway region as well" fallback appears to
    # re-issue the same lookup.  Confirm whether a region argument should
    # be threaded through here.
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')

    # the uri shape AWS expects for lambda proxy invocations
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models, request_templates and
    integration_type values needed to configure method request integration/mappings.

    method_name
        lower-cased http method name (e.g. 'get', 'options')

    method_data
        the swagger operation object for this method
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                # mark the mapped request parameter (header/query/path) required
                method_params[p.name] = True
            if p.schema:
                # body parameters carry a model schema reference
                method_models['application/json'] = p.schema

    # options methods use the static MOCK integration (CORS preflight);
    # every other method maps the full request through to lambda ('AWS').
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"

    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_name
        lower-cased http method name

    method_response
        a _Swagger.SwaggerMethodResponse wrapping one response entry

    httpStatus
        the http status code (string) this response entry is keyed by
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        # selection pattern comes from the referenced model (or a default)
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # use the declared default header value when given, otherwise '*'
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # options methods use the MOCK integration and thus no lambda uri
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        # create one method response + integration response per declared
        # status code; abort on the first failure
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # skip non-operation keys on the path item object
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)

    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._update_schema_to_aws_notation
|
python
|
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
|
Helper function to map model schema to aws notation
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1287-L1298
|
[
"def _aws_model_ref_from_swagger_ref(self, r):\n '''\n Helper function to reference models created on aws apigw\n '''\n model_name = r.split('/')[-1]\n return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)\n",
"def _update_schema_to_aws_notation(self, schema):\n '''\n Helper function to map model schema to aws notation\n '''\n result = {}\n for k, v in schema.items():\n if k == '$ref':\n v = self._aws_model_ref_from_swagger_ref(v)\n if isinstance(v, dict):\n v = self._update_schema_to_aws_notation(v)\n result[k] = v\n return result\n"
] |
class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''

    # Fields allowed on a Swagger (v2) top level object.
    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)

    # VENDOR SPECIFIC FIELD PATTERNS (extension fields such as 'x-...')
    VENDOR_EXT_PATTERN = re.compile('^x-')

    # JSON_SCHEMA_REF used as the '$schema' value on generated models
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object
    '''
    # parameter locations supported by this state
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # the raw swagger parameter object
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object; raises ValueError
        for unsupported locations
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object, mapped to
        the AWS notation method.request.<location>.<name>; body parameters
        return None; raises ValueError when the name field is missing
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object;
        only body parameters carry a schema, all other locations return None
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Thin wrapper over a single swagger Method Response Object exposing the
    pieces boto_apigateway needs: the referenced schema name and the
    declared response headers.
    '''
    def __init__(self, r):
        # the raw method-response dictionary from the swagger file
        self._r = r

    @property
    def schema(self):
        '''
        Name of the model referenced by this response's schema, or None
        when no schema is declared.  Raises ValueError when a schema is
        present but is not a '$ref' to a definition.
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        The headers dictionary declared on this response ({} by default).
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    api_name
        name of the rest api on AWS

    stage_name
        name of the deployment stage this object operates on

    lambda_funcname_format
        format string used by _lambda_name to derive lambda function names

    swagger_file_path
        path to the swagger definition file (YAML or JSON); IOError is
        raised when the path is not an existing file

    error_response_template / response_template
        optional user overrides for the integration response templates

    common_aws_args
        region/credential kwargs passed through to every boto call
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # the hash covers the file content plus both template
            # overrides, so template changes also change the deployment label
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

        self._validate_swagger_file()

    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration

    paths
        iterator of (path, path-item-object) pairs from the swagger file

    mods
        dictionary of model definitions from /definitions

    Raises ValueError when a 4xx/5xx response does not reference a valid
    error model via $ref.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # skip vendor extensions and other non-operation keys
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue

                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    Raises ValueError on the first structural problem found.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # (removed leftover debug statement `log.info(type(self._models))`
    # which only ever logged the type of the bound method object)
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file (computed in __init__ over the
    file content and both response template overrides)
    '''
    return self._md5_filehash

@property
def info(self):
    '''
    returns the swagger info object as a dictionary; raises ValueError
    when the swagger file has no info object
    '''
    info = self._cfg.get('info')
    if not info:
        raise ValueError('Info Object has no values')
    return info

@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)

@property
def rest_api_name(self):
    '''
    returns the name of the api
    '''
    return self._api_name

@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object; raises
    ValueError when it is missing
    '''
    version = self.info.get('version')
    if not version:
        raise ValueError('Missing version value in Info Object')

    return version
def _models(self):
    '''
    returns the dictionary of model definitions specified in the swagger
    file; raises ValueError when no definitions are present
    '''
    models = self._cfg.get('definitions')
    if not models:
        raise ValueError('Definitions Object has no values, You need to define them in your swagger file')

    return models

def models(self):
    '''
    generator to return the tuple of model and its schema to create on aws.
    Models are yielded in dependency order: a model only appears after
    every model it references has already been yielded.
    '''
    model_dict = self._build_all_dependencies()
    while True:
        model = self._get_model_without_dependencies(model_dict)
        if not model:
            break
        yield (model, self._models().get(model))

@property
def paths(self):
    '''
    returns an iterator for the relative resource paths specified in the swagger file;
    raises ValueError when no paths exist or a path does not start with '/'
    '''
    paths = self._cfg.get('paths')
    if not paths:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    for path in paths:
        if not path.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
    return six.iteritems(paths)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file ('' when absent)
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath

@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    '''
    return self._restApiId

@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId

@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    return _dict_to_json_pretty(self.deployment_label)

@property
def deployment_label(self):
    '''
    this property returns the deployment label dictionary (mainly used by
    stage description); it identifies a deployment by swagger info object,
    api name, swagger file name, and the swagger file's md5 hash
    '''
    label = dict()

    label['swagger_info_object'] = self.info
    label['api_name'] = self.rest_api_name
    label['swagger_file'] = os.path.basename(self._swagger_file)
    label['swagger_file_md5sum'] = self.md5_filehash

    return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated with a deployment
    '''
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    return bool(stages)

def no_more_deployments_remain(self):
    '''
    Helper function to find whether there are deployments left with stages associated

    Returns True only when every existing deployment of this rest api
    has no stages attached to it.
    '''
    no_more_deployments = True
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            deploymentId = deployment.get('id')
            stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('stages')
            # a deployment with at least one stage attached still counts
            if stages:
                no_more_deployments = False
                break

    return no_more_deployments
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently associated with.
    Returns '' when the stage does not exist.
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId

def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label that the stage_name is currently associated with.
    Returns None when the deployment cannot be found.
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None

def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id matching the desired deployment label for
    this Swagger object based on the given api_name, swagger_file.
    Returns '' when no existing deployment carries the desired label.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            if deployment.get('description') == self.deployment_label_json:
                return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given stage_variables

    ret
        a dictionary for returning status to Saltstack
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)

    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret

def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current.
    Creates the stage when it does not exist yet; otherwise overwrites
    its stage variables before activating the deployment.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
    as the api description; stores it on self.restApiId when exactly one
    match exists, raises ValueError on multiple matches
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name.  If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                # other stages still use this deployment: only the stage
                # itself was removed
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

    return ret
    def verify_api(self, ret):
        '''
        this method helps determine if the given stage_name is already on a deployment
        label matching the input api_name, swagger_file.

        If yes, returns abort with comment indicating already at desired state.
        If not and there is previous deployment labels in AWS matching the given input api_name and
        swagger file, indicate to the caller that we only need to reassociate stage_name to the
        previously existing deployment label.

        ret
            a dictionary for returning status to Saltstack
        '''
        if self.restApiId:
            deployed_label_json = self._get_current_deployment_label()
            if deployed_label_json == self.deployment_label_json:
                ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                                  'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
                ret['current'] = True
                return ret
            else:
                # a matching deployment may already exist; if so, remember its
                # id so publish_api only needs to reassociate the stage
                self._deploymentId = self._get_desired_deployment_id()
                if self._deploymentId:
                    ret['publish'] = True
        return ret
    def publish_api(self, ret, stage_variables):
        '''
        this method tie the given stage_name to a deployment matching the given swagger_file

        ret
            a dictionary for returning status to Saltstack

        stage_variables
            stage variables (dict) to set on the stage when deploying
        '''
        stage_desc = dict()
        stage_desc['current_deployment_label'] = self.deployment_label
        stage_desc_json = _dict_to_json_pretty(stage_desc)

        if self._deploymentId:
            # just do a reassociate of stage_name to an already existing deployment
            res = self._set_current_deployment(stage_desc_json, stage_variables)
            if not res.get('set'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret,
                                   'publish_api (reassociate deployment, set stage_variables)',
                                   res.get('response'))
        else:
            # no deployment existed for the given swagger_file for this Swagger object
            res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    stageDescription=stage_desc_json,
                                                                    description=self.deployment_label_json,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
            if not res.get('created'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
        return ret
    def _cleanup_api(self):
        '''
        Helper method to clean up resources and models if we detected a change in the swagger file
        for a stage

        Returns a dict with 'deleted': True on full success, or the first
        failing delete result otherwise.
        '''
        resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                       **self._common_aws_args)
        if resources.get('resources'):
            # skip the first entry (presumably the root '/' resource, which
            # cannot be deleted -- TODO confirm) and delete children before
            # parents by walking the list in reverse
            res = resources.get('resources')[1:]
            res.reverse()
            for resource in res:
                delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                          path=resource.get('path'),
                                                                          **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
        if models.get('models'):
            for model in models.get('models'):
                delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                      modelName=model.get('name'),
                                                                      **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        return {'deleted': True}
def deploy_api(self, ret):
'''
this method create the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
    def delete_api(self, ret):
        '''
        Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

        ret
            a dictionary for returning status to Saltstack
        '''
        exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
        if exists_response.get('exists'):
            # honor salt's test mode: report intent only, do not delete
            if __opts__['test']:
                ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
                ret['result'] = None
                ret['abort'] = True
                return ret

            delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                                         **self._common_aws_args)
            if not delete_api_response.get('deleted'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in delete_api_response:
                    ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
                return ret

            ret = _log_changes(ret, 'delete_api', delete_api_response)
        else:
            ret['comment'] = ('api already absent for swagger file: '
                              '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

        return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
    def _build_dependent_model_list(self, obj_schema):
        '''
        Helper function to build the list of models the given object schema is referencing.

        obj_schema
            a JSON schema fragment (dict) from the swagger 'definitions' section

        Returns a de-duplicated list of model names referenced directly or
        transitively (through $ref, array items, or object properties).
        '''
        dep_models_list = []

        if obj_schema:
            # NOTE(review): this writes a default 'type' of 'object' back into
            # the caller's schema dict -- confirm downstream consumers rely on
            # this mutation before changing it.
            obj_schema['type'] = obj_schema.get('type', 'object')
            if obj_schema['type'] == 'array':
                # array schemas reference models via their 'items' schema
                dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
            else:
                ref = obj_schema.get('$ref')
                if ref:
                    # '$ref' is of the form '#/definitions/<model>'; recurse
                    # into the referenced schema, then record the model itself
                    ref_obj_model = ref.split("/")[-1]
                    ref_obj_schema = self._models().get(ref_obj_model)
                    dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                    dep_models_list.extend([ref_obj_model])
                else:
                    # need to walk each property object
                    properties = obj_schema.get('properties')
                    if properties:
                        for _, prop_obj_schema in six.iteritems(properties):
                            dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
        return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
    def _get_model_without_dependencies(self, models_dict):
        '''
        Helper function to find the next model that should be created

        models_dict
            mapping of model name to its list of outstanding dependencies;
            mutated in place -- the returned model is popped from the dict and
            removed from every remaining model's dependency list.

        Returns None when models_dict is empty. Raises ValueError when no
        model has an empty dependency list (undefined or circular references).
        '''
        next_model = None
        if not models_dict:
            return next_model

        for model, dependencies in six.iteritems(models_dict):
            if dependencies == []:
                next_model = model
                break

        if next_model is None:
            raise ValueError('incomplete model definitions, models in dependency '
                             'list not defined: {0}'.format(models_dict))

        # remove the model from other dependencies before returning
        models_dict.pop(next_model)
        for model, dep_list in six.iteritems(models_dict):
            if next_model in dep_list:
                dep_list.remove(next_model)

        return next_model
    def deploy_models(self, ret):
        '''
        Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

        ret
            a dictionary for returning status to Saltstack
        '''
        # models() yields models in dependency order (dependencies first)
        for model, schema in self.models():
            # add in a few attributes into the model schema that AWS expects
            # _schema = schema.copy()
            _schema = self._update_schema_to_aws_notation(schema)
            _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                            'title': '{0} Schema'.format(model)})

            # check to see if model already exists, aws has 2 default models [Empty, Error]
            # which may need update with data from swagger file
            model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                                 modelName=model,
                                                                                 **self._common_aws_args)

            if model_exists_response.get('exists'):
                update_model_schema_response = (
                    __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                        modelName=model,
                                                                        schema=_dict_to_json_pretty(_schema),
                                                                        **self._common_aws_args))
                if not update_model_schema_response.get('updated'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in update_model_schema_response:
                        ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              update_model_schema_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
            else:
                create_model_response = (
                    __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                                 modelDescription=model,
                                                                 schema=_dict_to_json_pretty(_schema),
                                                                 contentType='application/json',
                                                                 **self._common_aws_args))

                if not create_model_response.get('created'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in create_model_response:
                        ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              create_model_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', create_model_response)

        return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
    def _lambda_uri(self, lambda_name, lambda_region):
        '''
        Helper Method to construct the lambda uri for use in method integration

        lambda_name
            name of the lambda function to integrate with

        lambda_region
            preferred region for the lambda function lookup; resolved through
            boto3.get_region before use
        '''
        profile = self._common_aws_args.get('profile')
        region = self._common_aws_args.get('region')

        lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
        apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

        lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

        if lambda_region != apigw_region:
            if not lambda_desc.get('function'):
                # try look up in the same region as the apigateway as well if previous lookup failed
                # NOTE(review): this retry passes exactly the same arguments as
                # the first lookup, so it cannot query a different region --
                # verify whether the first call should have targeted
                # lambda_region explicitly.
                lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

        if not lambda_desc.get('function'):
            raise ValueError('Could not find lambda function {0} in '
                             'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

        lambda_arn = lambda_desc.get('function').get('FunctionArn')
        lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                      '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
        return lambda_uri
def _parse_method_data(self, method_name, method_data):
'''
Helper function to construct the method request params, models, request_templates and
integration_type values needed to configure method request integration/mappings.
'''
method_params = {}
method_models = {}
if 'parameters' in method_data:
for param in method_data['parameters']:
p = _Swagger.SwaggerParameter(param)
if p.name:
method_params[p.name] = True
if p.schema:
method_models['application/json'] = p.schema
request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
integration_type = "MOCK" if method_name == 'options' else "AWS"
return {'params': method_params,
'models': method_models,
'request_templates': request_templates,
'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
    def _parse_method_response(self, method_name, method_response, httpStatus):
        '''
        Helper function to construct the method response params, models, and integration_params
        values needed to configure method response integration/mappings.

        method_name
            lowercased swagger operation name (e.g. 'get', 'options')

        method_response
            a _Swagger.SwaggerMethodResponse wrapping the swagger response object

        httpStatus
            the response status code as a string
        '''
        method_response_models = {}
        method_response_pattern = '.*'
        if method_response.schema:
            method_response_models['application/json'] = method_response.schema
            method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

        method_response_params = {}
        method_integration_response_params = {}
        for header in method_response.headers:
            response_header = 'method.response.header.{0}'.format(header)
            method_response_params[response_header] = False
            header_data = method_response.headers.get(header)
            # headers map to their declared default, quoted for AWS, or '*'
            method_integration_response_params[response_header] = (
                "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

        response_templates = self._get_response_template(method_name, httpStatus)

        return {'params': method_response_params,
                'models': method_response_models,
                'integration_params': method_integration_response_params,
                'pattern': method_response_pattern,
                'response_templates': response_templates}
    def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                       lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to create a method for the given resource path, along with its associated
        request and response integrations.

        ret
            a dictionary for returning status to Saltstack

        resource_path
            the full resource path where the named method_name will be associated with.

        method_name
            a string that is one of the following values: 'delete', 'get', 'head', 'options',
            'patch', 'post', 'put'

        method_data
            the value dictionary for this method in the swagger definition file.

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        method = self._parse_method_data(method_name.lower(), method_data)

        # for options method to enable CORS, api_key_required will be set to False always.
        # authorization_type will be set to 'NONE' always.
        if method_name.lower() == 'options':
            api_key_required = False
            authorization_type = 'NONE'

        m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                          resourcePath=resource_path,
                                                          httpMethod=method_name.upper(),
                                                          authorizationType=authorization_type,
                                                          apiKeyRequired=api_key_required,
                                                          requestParameters=method.get('params'),
                                                          requestModels=method.get('models'),
                                                          **self._common_aws_args)
        if not m.get('created'):
            ret = _log_error_and_abort(ret, m)
            return ret

        ret = _log_changes(ret, '_deploy_method.create_api_method', m)

        # OPTIONS methods use a MOCK integration, so no lambda uri is needed
        lambda_uri = ""
        if method_name.lower() != 'options':
            lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                          lambda_region=lambda_region)

        # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
        # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
        integration = (
            __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                               resourcePath=resource_path,
                                                               httpMethod=method_name.upper(),
                                                               integrationType=method.get('integration_type'),
                                                               integrationHttpMethod='POST',
                                                               uri=lambda_uri,
                                                               credentials=lambda_integration_role,
                                                               requestTemplates=method.get('request_templates'),
                                                               **self._common_aws_args))
        if not integration.get('created'):
            ret = _log_error_and_abort(ret, integration)
            return ret
        ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

        if 'responses' in method_data:
            for response, response_data in six.iteritems(method_data['responses']):
                httpStatus = str(response)  # future lint: disable=blacklisted-function
                method_response = self._parse_method_response(method_name.lower(),
                                                              _Swagger.SwaggerMethodResponse(response_data), httpStatus)

                mr = __salt__['boto_apigateway.create_api_method_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    responseParameters=method_response.get('params'),
                    responseModels=method_response.get('models'),
                    **self._common_aws_args)
                if not mr.get('created'):
                    ret = _log_error_and_abort(ret, mr)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

                mir = __salt__['boto_apigateway.create_api_integration_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    selectionPattern=method_response.get('pattern'),
                    responseParameters=method_response.get('integration_params'),
                    responseTemplates=method_response.get('response_templates'),
                    **self._common_aws_args)
                if not mir.get('created'):
                    ret = _log_error_and_abort(ret, mir)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
        else:
            raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

        return ret
    def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to deploy resources defined in the swagger file.

        ret
            a dictionary for returning status to Saltstack

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        for path, pathData in self.paths:
            # ensure the resource (and implicitly its parent segments) exists
            resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                        path=path,
                                                                        **self._common_aws_args)
            if not resource.get('created'):
                ret = _log_error_and_abort(ret, resource)
                return ret
            ret = _log_changes(ret, 'deploy_resources', resource)
            for method, method_data in six.iteritems(pathData):
                # non-operation keys (e.g. shared 'parameters', vendor
                # extensions) are skipped here
                if method in _Swagger.SWAGGER_OPERATION_NAMES:
                    ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                              lambda_integration_role, lambda_region, authorization_type)
        return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._build_dependent_model_list
|
python
|
def _build_dependent_model_list(self, obj_schema):
'''
Helper function to build the list of models the given object schema is referencing.
'''
dep_models_list = []
if obj_schema:
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
ref_obj_model = ref.split("/")[-1]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
# need to walk each property object
properties = obj_schema.get('properties')
if properties:
for _, prop_obj_schema in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
|
Helper function to build the list of models the given object schema is referencing.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1300-L1323
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def _models(self):\n '''\n returns an iterator for the models specified in the swagger file\n '''\n models = self._cfg.get('definitions')\n if not models:\n raise ValueError('Definitions Object has no values, You need to define them in your swagger file')\n\n return models\n",
"def _build_dependent_model_list(self, obj_schema):\n '''\n Helper function to build the list of models the given object schema is referencing.\n '''\n dep_models_list = []\n\n if obj_schema:\n obj_schema['type'] = obj_schema.get('type', 'object')\n if obj_schema['type'] == 'array':\n dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))\n else:\n ref = obj_schema.get('$ref')\n if ref:\n ref_obj_model = ref.split(\"/\")[-1]\n ref_obj_schema = self._models().get(ref_obj_model)\n dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))\n dep_models_list.extend([ref_obj_model])\n else:\n # need to walk each property object\n properties = obj_schema.get('properties')\n if properties:\n for _, prop_obj_schema in six.iteritems(properties):\n dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))\n return list(set(dep_models_list))\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check, but rather make sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
    '''
    The 'version' field of the swagger info object.

    Raises ValueError when the info object carries no version.
    '''
    api_version = self.info.get('version')
    if not api_version:
        raise ValueError('Missing version value in Info Object')
    return api_version
def _models(self):
    '''
    Return the swagger 'definitions' mapping (model name -> schema).

    Raises ValueError when no definitions are present.
    '''
    definitions = self._cfg.get('definitions')
    if not definitions:
        raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
    return definitions
def models(self):
    '''
    Generator yielding (model_name, schema) tuples in dependency order, so
    each model can be created on AWS before any model that references it.
    '''
    remaining = self._build_all_dependencies()
    model = self._get_model_without_dependencies(remaining)
    while model:
        yield (model, self._models().get(model))
        model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
    '''
    Iterator over (relative_path, path_object) pairs from the swagger file.

    Raises ValueError when the 'paths' section is missing/empty or when any
    path does not begin with '/'.
    '''
    path_items = self._cfg.get('paths')
    if not path_items:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    for relpath in path_items:
        if not relpath.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(relpath))
    return six.iteritems(path_items)
@property
def basePath(self):
    '''
    The swagger 'basePath' value, or an empty string when absent.
    '''
    return self._cfg.get('basePath', '')
@property
def restApiId(self):
    '''
    The AWS-assigned id of the rest api (set after creation/resolution,
    empty string before that).
    '''
    return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    # simple passthrough; the value comes from AWS create_api/describe_apis
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    The deployment label rendered as a pretty-printed JSON string; used as
    the unique description of an api deployment.
    '''
    label = self.deployment_label
    return _dict_to_json_pretty(label)
@property
def deployment_label(self):
    '''
    Dictionary uniquely identifying a deployment: the swagger info object,
    the api name, the swagger file basename and its md5 checksum (mainly
    used by the stage description).
    '''
    return {
        'swagger_info_object': self.info,
        'api_name': self.rest_api_name,
        'swagger_file': os.path.basename(self._swagger_file),
        'swagger_file_md5sum': self.md5_filehash,
    }
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Return True when the given deployment still has at least one stage
    associated with it.
    '''
    response = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                               deploymentId=deploymentId,
                                                               **self._common_aws_args)
    return bool(response.get('stages'))
def no_more_deployments_remain(self):
    '''
    Return True when no deployment of this rest api has any stage left
    attached to it.

    Reuses _one_or_more_stages_remain for the per-deployment check instead
    of duplicating the describe_api_stages call inline; a single deployment
    with at least one stage makes the answer False.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            if self._one_or_more_stages_remain(deployment.get('id')):
                return False
    return True
def _get_current_deployment_id(self):
    '''
    Return the deployment id currently associated with the configured stage
    name, or '' when the stage does not exist.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        return ''
    return stage.get('deploymentId')
def _get_current_deployment_label(self):
    '''
    Return the description (deployment label json) of the deployment the
    stage is currently on, or None when it cannot be found.
    '''
    current_id = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=current_id,
                                                                     **self._common_aws_args).get('deployment')
    return deployment.get('description') if deployment else None
def _get_desired_deployment_id(self):
    '''
    Return the id of the deployment whose description matches this object's
    deployment label (api_name + swagger file), or '' when none matches.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    for deployment in deployments or []:
        if deployment.get('description') == self.deployment_label_json:
            return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    Replace all stage variables on the configured stage with ``stage_variables``.

    ret
        state-return dictionary; on failure 'result' is set to False and
        'abort' to True so callers stop processing.
    stage_variables
        dict of variables the stage should end up with.
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
    if not res.get('overwrite'):
        # execution module reported failure; propagate the error and abort
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Associate the configured stage name with self._deploymentId, creating
    the stage when it does not exist yet.

    stage_desc_json
        pretty-printed json string used as the stage description on creation.
    stage_variables
        dict of stage variables to set/overwrite on the stage.

    Returns the activate_api_deployment result dict on success, or
    {'set': False, 'error': ...} on failure.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        # stage does not exist yet: create it directly on the deployment
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}
    # finally point the stage at the desired deployment
    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Find and store (in self.restApiId) the id of the rest api whose name
    matches self.rest_api_name and whose description is the hardcoded
    _Swagger.AWS_API_DESCRIPTION.

    Raises ValueError when more than one api matches; leaves restApiId
    untouched when none match.
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                # other stages still reference the deployment, so only the
                # stage itself was removed
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.

    ret
        a dictionary for returning status to Saltstack; this method sets
        ret['current'] when nothing needs to change, or ret['publish'] when
        only a stage reassociation is required.
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # stage is on a different deployment; remember the matching
            # deployment id (if any) so publish_api can just reassociate
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                # just needs to reassociate stage_name to the found deployment
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dict of stage variables to apply to the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage

    Returns {'deleted': True} on success, otherwise the first failing
    execution-module result.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first entry (presumably the root '/' resource, which
        # cannot be deleted -- TODO confirm ordering of describe output) and
        # delete the remainder in reverse so children go before parents
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    Create the top level rest api in AWS apigateway, or, when the api already
    exists (restApiId is set), wipe its resources and models so the new
    swagger definition can be re-applied onto the same rest api id.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in the error message: 'restAreId' -> 'restApiId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        if __opts__['test']:
            # salt test mode: report intent without touching AWS
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
    return ret
def _aws_model_ref_from_swagger_ref(self, r):
    '''
    Translate a swagger '$ref' value (e.g. '#/definitions/Foo') into the AWS
    apigateway model URL for this rest api.
    '''
    name = r.split('/')[-1]
    return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_all_dependencies(self):
    '''
    Build and return a mapping of model name -> list of model names the
    model's schema references.
    '''
    return {model: self._build_dependent_model_list(schema)
            for model, schema in six.iteritems(self._models())}
def _get_model_without_dependencies(self, models_dict):
    '''
    Pop and return the next model with an empty dependency list from
    ``models_dict``, removing it from all remaining dependency lists.

    Returns None when the dict is empty; raises ValueError when every
    remaining model still has unresolved dependencies.
    '''
    if not models_dict:
        return None

    ready = None
    for candidate, deps in six.iteritems(models_dict):
        if deps == []:
            ready = candidate
            break

    if ready is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))

    # remove the chosen model from the other dependency lists before returning
    models_dict.pop(ready)
    for _, deps in six.iteritems(models_dict):
        if ready in deps:
            deps.remove(ready)

    return ready
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    Models are processed in dependency order (see self.models()), so a
    referenced model is always created before its referrers.

    ret
        a dictionary for returning status to Saltstack
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            # model already present: replace its schema in place
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
    '''
    Construct the lambda function name for a resource/method pair using the
    configured lambda_funcname_format (see boto_apigateway.api_present
    docstring for the naming rule): braces are stripped, whitespace and '/'
    become '_', the result is lower-cased and runs of '_' are collapsed.
    '''
    name = self._lambda_funcname_format.format(stage=self._stage_name,
                                               api=self.rest_api_name,
                                               resource=resourcePath,
                                               method=httpMethod).strip()
    name = re.sub(r'{|}', '', name)
    name = re.sub(r'\s+|/', '_', name).lower()
    return re.sub(r'_+', '_', name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration

    lambda_name
        name of the lambda function to look up.
    lambda_region
        region the lambda function is expected to live in.

    Raises ValueError when the function cannot be found.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            # NOTE(review): this retry is issued with the exact same arguments
            # as the first lookup (the region comes from self._common_aws_args
            # in both calls), so it cannot actually search a different region
            # as the comment above suggests -- verify intent.
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models, request_templates and
    integration_type values needed to configure method request integration/mappings.

    method_name
        lower-cased http method name (e.g. 'get', 'options')
    method_data
        the method's value dictionary from the swagger 'paths' section
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                method_params[p.name] = True
            if p.schema:
                method_models['application/json'] = p.schema

    # options methods are mocked (no lambda behind them) to support CORS preflight
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"

    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Collect every non-dict value stored under a 'pattern' key anywhere in a
    nested dictionary and return them as a list.
    '''
    found = []
    if isinstance(o, dict):
        for key, value in six.iteritems(o):
            if isinstance(value, dict):
                found += self._find_patterns(value)
            elif key == 'pattern':
                found.append(value)
    return found
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    Return the first 'pattern' declared in the named response schema, or a
    default selection pattern ('.+' for http error codes, '.*' otherwise).
    '''
    fallback = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    schema = self._models().get(schema_name)
    found = self._find_patterns(schema)
    return found[0] if found else fallback
def _get_response_template(self, method_name, http_status):
    '''
    Pick the integration response template mapping for a method/status pair:
    the user-supplied template when one was configured, otherwise the class
    default (error variant for http error codes on non-options methods).
    '''
    if method_name == 'options' or not self._is_http_error_rescode(http_status):
        if self._response_template:
            return {'application/json': self._response_template}
        return self.RESPONSE_OPTION_TEMPLATE
    if self._error_response_template:
        return {'application/json': self._error_response_template}
    return self.RESPONSE_TEMPLATE
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_name
        lower-cased http method name
    method_response
        a _Swagger.SwaggerMethodResponse instance
    httpStatus
        response status code as a string
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map each declared response header to its 'default' value, or '*'
        # when no default is given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    # 1) create the method on the resource
    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # 2) wire the request integration; options methods are MOCKed and get
    # no lambda uri
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    # 3) create one method response + integration response per declared
    # swagger response code
    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        # create the resource (and any missing parent segments) for this path
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # only deploy keys that are actual http operations (skips
            # vendor extensions and shared 'parameters' entries)
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._build_all_dependencies
|
python
|
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
|
Helper function to build a map of model to their list of model reference dependencies
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1325-L1333
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def _models(self):\n '''\n returns an iterator for the models specified in the swagger file\n '''\n models = self._cfg.get('definitions')\n if not models:\n raise ValueError('Definitions Object has no values, You need to define them in your swagger file')\n\n return models\n",
"def _build_dependent_model_list(self, obj_schema):\n '''\n Helper function to build the list of models the given object schema is referencing.\n '''\n dep_models_list = []\n\n if obj_schema:\n obj_schema['type'] = obj_schema.get('type', 'object')\n if obj_schema['type'] == 'array':\n dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))\n else:\n ref = obj_schema.get('$ref')\n if ref:\n ref_obj_model = ref.split(\"/\")[-1]\n ref_obj_schema = self._models().get(ref_obj_model)\n dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))\n dep_models_list.extend([ref_obj_model])\n else:\n # need to walk each property object\n properties = obj_schema.get('properties')\n if properties:\n for _, prop_obj_schema in six.iteritems(properties):\n dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))\n return list(set(dep_models_list))\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object

    Wraps a single entry of a method's 'parameters' list and exposes the
    parameter's location, AWS mapping name and referenced schema.
    '''
    # parameter locations supported by this state
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object

        Raises ValueError for locations outside LOCATIONS.
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object

        Header/query/path parameters are returned in the AWS method-request
        mapping form ('method.request.<location>.<name>'); body parameters
        yield None.  Raises ValueError when no name is present.
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object

        Only body parameters carry a schema, and it must be a '$ref' JSON
        reference (AWS restriction); non-body parameters return None.
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Helper class wrapping a single Swagger Method Response Object.
    '''

    def __init__(self, r):
        # r is the response object dict from the swagger file.
        self._r = r

    @property
    def schema(self):
        '''
        Name of the model referenced by this response's schema, or None when
        no schema is declared.

        :raises ValueError: when a schema is present without a JSON $ref.
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        # '#/definitions/Name' -> 'Name'
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        Headers dictionary declared on the response object; empty dict when
        the response declares no headers.
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load and validate the given swagger file and resolve the matching AWS
    rest api id (if one already exists).

    api_name / stage_name
        names used for the AWS rest api and stage
    lambda_funcname_format
        format string used to derive lambda function names; validated below
    swagger_file_path
        path to a YAML/JSON swagger 2.0 file
    error_response_template / response_template
        optional integration response templates overriding the class defaults
    common_aws_args
        region/key/keyid/profile kwargs passed to every boto call

    :raises IOError: if swagger_file_path does not point at an existing file
    :raises ValueError: if the swagger file or funcname format is invalid
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # hash covers the file AND both templates so any change triggers redeploy
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

        self._validate_swagger_file()

    # NOTE(review): when swagger_file_path is None, _swagger_file/_cfg are never
    # set, so most other methods would raise AttributeError — presumably callers
    # always pass a path; confirm before relying on the None branch.
    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Validate the convention this state requires for error (4xx/5xx)
    responses: every error response must reference (via $ref) a model of
    type 'object' whose properties include 'errorMessage', matching the
    shape AWS Lambda produces on failure.

    paths
        iterator of (path, path-item-object) pairs from the swagger file
    mods
        the swagger 'definitions' dict

    :raises ValueError: on the first violation found
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # skip vendor extensions / non-operation keys on the path item
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue

                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the
    input file (YAML or JSON) can be read into a dictionary, and we check for
    the content of the Swagger Object for version and info.

    :raises ValueError: on any unknown/missing top-level field, unsupported
        swagger version, or error-response-model convention violation
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # (removed a leftover debug statement that logged type(self._models),
    # i.e. the unhelpful constant "<class 'method'>")
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns the md5 hash computed over the swagger file plus the response
    templates (set once in __init__); used to detect content changes.
    '''
    return self._md5_filehash
@property
def info(self):
    '''
    returns the swagger info object as a dictionary

    :raises ValueError: if the swagger file has no/empty 'info' object
    '''
    info = self._cfg.get('info')
    if not info:
        raise ValueError('Info Object has no values')
    return info
@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
    '''
    returns the name of the api (the api_name passed to __init__, not a value
    from the swagger file).
    '''
    return self._api_name
@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object

    :raises ValueError: if the info object carries no 'version' value
    '''
    version = self.info.get('version')
    if not version:
        raise ValueError('Missing version value in Info Object')
    return version
def _models(self):
    '''
    returns the 'definitions' dict from the swagger file (model name ->
    schema). Despite the historical wording, this returns the dict itself,
    not an iterator.

    :raises ValueError: if 'definitions' is missing or empty
    '''
    models = self._cfg.get('definitions')
    if not models:
        raise ValueError('Definitions Object has no values, You need to define them in your swagger file')

    return models
def models(self):
    '''
    Generator yielding ``(model_name, schema)`` tuples in dependency order,
    so that any model referenced by another model's schema is yielded first
    and therefore created on AWS first.
    '''
    model_dict = self._build_all_dependencies()
    # Looked up lazily (and only once) instead of on every iteration; kept
    # lazy so an empty dependency map never touches 'definitions' at all,
    # matching the previous behavior.
    definitions = None
    while True:
        model = self._get_model_without_dependencies(model_dict)
        if not model:
            break
        if definitions is None:
            definitions = self._models()
        yield (model, definitions.get(model))
@property
def paths(self):
    '''
    returns an iterator for the relative resource paths specified in the swagger file

    :raises ValueError: if 'paths' is missing/empty or any path does not
        start with '/'
    '''
    paths = self._cfg.get('paths')
    if not paths:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')

    for path in paths:
        if not path.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
    return six.iteritems(paths)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file (empty string
    when absent).
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath
@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    (empty string until resolved/created).
    '''
    return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    Deployment label dictionary identifying this swagger file / api
    combination; stored as the AWS deployment description so an unchanged
    swagger file can be detected and reused.
    '''
    return {
        'swagger_info_object': self.info,
        'api_name': self.rest_api_name,
        'swagger_file': os.path.basename(self._swagger_file),
        'swagger_file_md5sum': self.md5_filehash,
    }
# methods to interact with boto_apigateway execution modules

def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated
    with a deployment; returns True if at least one stage remains.
    '''
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    return bool(stages)
def no_more_deployments_remain(self):
    '''
    Return True when no deployment on this rest api still has a stage
    associated with it (i.e. it is safe to delete the api).
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            # consistency: reuse the single-deployment stage check instead of
            # duplicating the describe_api_stages call inline
            if self._one_or_more_stages_remain(deployment.get('id')):
                return False
    return True
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently
    associated with; returns an empty string when the stage does not exist.
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId
def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label (description) that the
    stage_name is currently associated with; None when no deployment is
    found.
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None
def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id matching the desired deployment
    label for this Swagger object based on the given api_name, swagger_file;
    empty string when no deployment carries that label.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            # descriptions hold the pretty-printed deployment label json
            if deployment.get('description') == self.deployment_label_json:
                return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given
    stage_variables, recording success/failure into ret.

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dict of stage variables to set (replacing existing ones)
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
    # NOTE(review): this checks the 'overwrite' result key while
    # _set_current_deployment checks 'stage' for the same execution module
    # call — confirm against boto_apigateway.overwrite_api_stage_variables
    # which key it actually returns.
    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and
    make this current: creates the stage if needed (with the given variables),
    otherwise overwrites its variables, then activates self._deploymentId on
    the stage.

    :return: the activate_api_deployment result dict, or
        {'set': False, 'error': ...} on stage creation/update failure
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Sets self.restApiId to the Api Id that matches the given api_name and the
    hardcoded _Swagger.AWS_API_DESCRIPTION as the api description. Leaves it
    unset when no api matches.

    :raises ValueError: when more than one api matches
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to
    the given stage_name has no other stages associated with it, the
    deployment will be removed as well.

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a
    deployment label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given
    input api_name and swagger file, indicate to the caller (via
    ret['publish']) that we only need to reassociate stage_name to the
    previously existing deployment label.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # a deployment matching this swagger file may already exist;
            # remember its id so publish_api can just re-point the stage
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given
    swagger_file: either reassociating the stage to an existing deployment
    (when verify_api found one) or creating a brand new deployment.

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dict of stage variables to apply to the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in
    the swagger file for a stage.

    :return: {'deleted': True} on success, or the failing delete result dict
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first entry (presumably the root '/' resource, which
        # cannot be deleted — TODO confirm ordering guarantee) and delete
        # deepest paths first by reversing
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    this method create the top level rest api in AWS apigateway; when an api
    matching our name/description already exists its resources and models are
    wiped first so the swagger file can be re-deployed cleanly.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in the user-facing message ('restAreId' -> 'restApiId')
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info
    Object's title value. Honors test mode (__opts__['test']).

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

    return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is
    referencing (recursively, through arrays, $refs and properties). Returns
    a de-duplicated list of model names.

    NOTE(review): this defaults obj_schema['type'] to 'object' IN PLACE,
    mutating the loaded swagger config — presumably harmless since later
    consumers tolerate the added key, but confirm before reusing.
    '''
    dep_models_list = []

    if obj_schema:
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            # array schemas reference models through their 'items' schema
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _get_model_without_dependencies(self, models_dict):
    '''
    Pop and return the name of the next model whose dependency list is empty,
    removing it from every other model's dependency list as well. Returns
    None when ``models_dict`` is empty.

    :raises ValueError: when no model is dependency-free (circular or
        undefined dependencies)
    '''
    if not models_dict:
        return None

    ready = None
    for candidate, deps in models_dict.items():
        if deps == []:
            ready = candidate
            break

    if ready is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))

    # drop the chosen model and strip it from the remaining dependency lists
    del models_dict[ready]
    for deps in models_dict.values():
        if ready in deps:
            deps.remove(ready)

    return ready
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema
    to AWS Apigateway as Models, in dependency order (see models()).

    ret
        a dictionary for returning status to Saltstack
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration.

    :raises ValueError: when the lambda function cannot be found
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            # NOTE(review): this retry passes exactly the same arguments as the
            # first call (no region override), so it cannot actually query a
            # different region — confirm intent before relying on it.
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    # integration URI always targets the apigateway's region
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models,
    request_templates and integration_type values needed to configure method
    request integration/mappings.

    method_name
        lowercase swagger operation name (e.g. 'get', 'options')
    method_data
        the operation object from the swagger file
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                method_params[p.name] = True
            if p.schema:
                method_models['application/json'] = p.schema

    # OPTIONS methods get the MOCK/CORS template, all others the lambda one
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"

    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Recursively collect every value stored under a 'pattern' key in the
    nested dict ``o``; non-dict input yields an empty list.
    '''
    patterns = []
    if isinstance(o, dict):
        for key, value in o.items():
            if isinstance(value, dict):
                patterns.extend(self._find_patterns(value))
            elif key == 'pattern':
                patterns.append(value)
    return patterns
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    returns the pattern specified in a response schema; when the model
    declares no pattern, fall back to '.+' for error codes (4xx/5xx) and
    '.*' otherwise.
    '''
    defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    model = self._models().get(schema_name)
    patterns = self._find_patterns(model)
    # only the first declared pattern is used as the selection pattern
    return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and
    integration_params values needed to configure method response
    integration/mappings.

    method_name
        lowercase operation name
    method_response
        a _Swagger.SwaggerMethodResponse wrapper
    httpStatus
        the response status code as a string
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map the header to its declared default, or '*' when none is given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its
    associated request and response integrations. The AWS calls are made in
    order (method -> integration -> per-response method response ->
    integration response); any failure aborts and is recorded in ret.

    ret
        a dictionary for returning status to Saltstack
    resource_path
        the full resource path where the named method_name will be associated with.
    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'
    method_data
        the value dictionary for this method in the swagger definition file.
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'

    :raises ValueError: when the swagger operation declares no responses
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    lambda_uri = ""
    if method_name.lower() != 'options':
        # OPTIONS is a MOCK integration and needs no lambda backend
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret

            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret

            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file: creates every
    resource path on the rest api and then deploys each of its swagger
    operations via _deploy_method.

    ret
        a dictionary for returning status to Saltstack
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # ignore vendor extensions and other non-operation keys
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._get_model_without_dependencies
|
python
|
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
|
Helper function to find the next model that should be created
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1335-L1358
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    Wrapper around a single Swagger Parameter Object exposing the pieces
    AWS Api Gateway cares about: location, the AWS-mapped request
    parameter name, and the referenced body schema.
    '''
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # Raw parameter object as parsed from the swagger file.
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        Validated "in" field of the parameter object.
        '''
        _location = self._paramdict.get('in')
        if _location not in _Swagger.SwaggerParameter.LOCATIONS:
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
        return _location

    @property
    def name(self):
        '''
        AWS-style request parameter name for header/query/path parameters,
        or None for body parameters.
        '''
        _name = self._paramdict.get('name')
        if not _name:
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
        # Map the swagger location onto the AWS method.request.* namespace.
        templates = {'header': 'method.request.header.{0}',
                     'query': 'method.request.querystring.{0}',
                     'path': 'method.request.path.{0}'}
        template = templates.get(self.location)
        return template.format(_name) if template else None

    @property
    def schema(self):
        '''
        Model name referenced by a body parameter's schema; None for
        non-body parameters.
        '''
        if self.location != 'body':
            return None
        _schema = self._paramdict.get('schema')
        if not _schema:
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        if '$ref' not in _schema:
            raise ValueError(('Body parameter must have a JSON reference '
                              'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
        # '#/definitions/Name' -> 'Name'
        return _schema.get('$ref').split('/')[-1]
class SwaggerMethodResponse(object):
    '''
    Wrapper around a Swagger Method Response Object exposing the
    referenced schema model and the response headers.
    '''
    def __init__(self, r):
        # Raw method response object as parsed from the swagger file.
        self._r = r

    @property
    def schema(self):
        '''
        Model name referenced by this response's schema, or None when the
        response declares no schema.
        '''
        _schema = self._r.get('schema')
        if not _schema:
            return None
        if '$ref' not in _schema:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(_schema)))
        # '#/definitions/Name' -> 'Name'
        return _schema.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        Headers dictionary of the response object (empty dict when absent).
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
    '''
    Return True when ``code`` (a string, possibly whitespace-padded) is an
    HTTP error status code in the 400-599 range.
    '''
    return re.match(r'^\s*[45]\d\d\s*$', code) is not None
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration.

    Every 4xx/5xx response of every operation must carry a ``$ref`` schema
    pointing at a model under ``/definitions`` that is an ``object`` whose
    ``properties`` include ``errorMessage``, so the AWS integration-response
    selection pattern can match lambda error payloads.

    paths
        iterable of (path, path item object) tuples from the swagger file
    mods
        dict of model name -> model schema from the ``/definitions`` section

    Raises ValueError on the first violation found.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # Skip vendor extensions and other non-operation keys under the path item.
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue
            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue
                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))
                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                # '#/definitions/Name' -> 'Name'
                modelname = schemaobjref.split('/')[-1]
                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)
                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))
                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
    '''
    Check that the configured lambda function name format string only uses
    the known substitutable keys (stage, api, resource, method).

    :return: True on success, ValueError raised on error
    '''
    try:
        if self._lambda_funcname_format:
            known_kwargs = dict(stage='',
                                api='',
                                resource='',
                                method='')
            self._lambda_funcname_format.format(**known_kwargs)
        return True
    # str.format raises KeyError/IndexError for unknown substitutions,
    # ValueError for malformed format specs, and AttributeError for
    # attribute-style fields; catching only those (instead of the
    # original bare ``except Exception``) avoids masking unrelated bugs.
    except (KeyError, IndexError, ValueError, AttributeError):
        raise ValueError('Invalid lambda_funcname_format {0}. Please review '
                         'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))
    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))
    # Removed leftover debug statement ``log.info(type(self._models))`` --
    # it logged the type of the bound method object, a meaningless entry.
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
    '''
    Return the ``definitions`` mapping (model name -> schema) from the
    swagger file; raise ValueError when it is missing or empty.
    '''
    definitions = self._cfg.get('definitions')
    if definitions:
        return definitions
    raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
def models(self):
    '''
    Generator yielding ``(model_name, schema)`` tuples in dependency order,
    so each model can be created on AWS before anything that references it.
    '''
    remaining = self._build_all_dependencies()
    while True:
        ready = self._get_model_without_dependencies(remaining)
        if not ready:
            break
        yield (ready, self._models().get(ready))
@property
def paths(self):
    '''
    Iterator of ``(path, path item object)`` tuples from the swagger file.
    Raises ValueError when no paths are defined or a path does not start
    with '/'.
    '''
    paths = self._cfg.get('paths')
    if not paths:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    malformed = [p for p in paths if not p.startswith('/')]
    if malformed:
        raise ValueError('Path object {0} should start with /. Please fix it'.format(malformed[0]))
    return six.iteritems(paths)
@property
def basePath(self):
    '''
    Base path of the API as declared in the swagger file ('' when absent).
    '''
    return self._cfg.get('basePath', '')
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
'''
Helper function to find whether there are other stages still associated with a deployment
'''
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
return bool(stages)
def no_more_deployments_remain(self):
    '''
    Return True when no deployment on this rest api still has a stage
    associated with it (i.e. all deployments are safe to remove).
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    for deployment in deployments or []:
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deployment.get('id'),
                                                                 **self._common_aws_args).get('stages')
        # A deployment with any stage still attached blocks cleanup.
        if stages:
            return False
    return True
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently assocaited with.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
def overwrite_stage_variables(self, ret, stage_variables):
'''
overwrite the given stage_name's stage variables with the given stage_variables
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
'''
Helper method to associate the stage_name to the given deploymentId and make this current
'''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if not stage:
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
description=stage_desc_json,
variables=stage_variables,
**self._common_aws_args)
if not stage.get('stage'):
return {'set': False, 'error': stage.get('error')}
else:
# overwrite the stage variables
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not overwrite.get('stage'):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
**self._common_aws_args)
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
def verify_api(self, ret):
'''
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
If yes, returns abort with comment indicating already at desired state.
If not and there is previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.
'''
if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if deployed_label_json == self.deployment_label_json:
ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
ret['current'] = True
return ret
else:
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
def publish_api(self, ret, stage_variables):
'''
this method tie the given stage_name to a deployment matching the given swagger_file
'''
stage_desc = dict()
stage_desc['current_deployment_label'] = self.deployment_label
stage_desc_json = _dict_to_json_pretty(stage_desc)
if self._deploymentId:
# just do a reassociate of stage_name to an already existing deployment
res = self._set_current_deployment(stage_desc_json, stage_variables)
if not res.get('set'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'publish_api (reassociate deployment, set stage_variables)',
res.get('response'))
else:
# no deployment existed for the given swagger_file for this Swagger object
res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
stageDescription=stage_desc_json,
description=self.deployment_label_json,
variables=stage_variables,
**self._common_aws_args)
if not res.get('created'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
return ret
def _cleanup_api(self):
'''
Helper method to clean up resources and models if we detected a change in the swagger file
for a stage
'''
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
path=resource.get('path'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
modelName=model.get('name'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
return {'deleted': True}
def deploy_api(self, ret):
'''
this method create the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
'''
Method to delete a Rest Api named defined in the swagger file's Info Object's title value.
ret
a dictionary for returning status to Saltstack
'''
exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if exists_response.get('exists'):
if __opts__['test']:
ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
ret['result'] = None
ret['abort'] = True
return ret
delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not delete_api_response.get('deleted'):
ret['result'] = False
ret['abort'] = True
if 'error' in delete_api_response:
ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
return ret
ret = _log_changes(ret, 'delete_api', delete_api_response)
else:
ret['comment'] = ('api already absent for swagger file: '
'{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Recursively follows ``items`` (arrays), ``$ref`` references and
    ``properties`` sub-schemas, returning the de-duplicated list of
    referenced model names (order is not guaranteed due to set()).
    '''
    dep_models_list = []
    if obj_schema:
        # NOTE(review): this writes the defaulted 'type' back into the
        # caller's schema dict (in-place mutation) -- presumably harmless
        # here, but confirm no caller relies on the schema staying pristine.
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # '#/definitions/Name' -> 'Name'; recurse into the referenced
                # model's own schema before recording the reference itself.
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
def deploy_models(self, ret):
'''
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack
'''
for model, schema in self.models():
# add in a few attributes into the model schema that AWS expects
# _schema = schema.copy()
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
'title': '{0} Schema'.format(model)})
# check to see if model already exists, aws has 2 default models [Empty, Error]
# which may need upate with data from swagger file
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
modelName=model,
**self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = (
__salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
modelName=model,
schema=_dict_to_json_pretty(_schema),
**self._common_aws_args))
if not update_model_schema_response.get('updated'):
ret['result'] = False
ret['abort'] = True
if 'error' in update_model_schema_response:
ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
update_model_schema_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = (
__salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
modelDescription=model,
schema=_dict_to_json_pretty(_schema),
contentType='application/json',
**self._common_aws_args))
if not create_model_response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in create_model_response:
ret['comment'] = ('Failed to create model {0}, schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
create_model_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
'''
Helper Method to construct the lambda uri for use in method integration
'''
profile = self._common_aws_args.get('profile')
region = self._common_aws_args.get('region')
lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if lambda_region != apigw_region:
if not lambda_desc.get('function'):
# try look up in the same region as the apigateway as well if previous lookup failed
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if not lambda_desc.get('function'):
raise ValueError('Could not find lambda function {0} in '
'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
lambda_arn = lambda_desc.get('function').get('FunctionArn')
lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
'/functions/{1}/invocations'.format(apigw_region, lambda_arn))
return lambda_uri
def _parse_method_data(self, method_name, method_data):
'''
Helper function to construct the method request params, models, request_templates and
integration_type values needed to configure method request integration/mappings.
'''
method_params = {}
method_models = {}
if 'parameters' in method_data:
for param in method_data['parameters']:
p = _Swagger.SwaggerParameter(param)
if p.name:
method_params[p.name] = True
if p.schema:
method_models['application/json'] = p.schema
request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
integration_type = "MOCK" if method_name == 'options' else "AWS"
return {'params': method_params,
'models': method_models,
'request_templates': request_templates,
'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Recursively collect the values of every 'pattern' key found in the
    nested dictionary ``o``; only dict values are descended into.
    '''
    found = []
    if isinstance(o, dict):
        for key, value in six.iteritems(o):
            if isinstance(value, dict):
                found.extend(self._find_patterns(value))
            elif key == 'pattern':
                found.append(value)
    return found
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
    def _parse_method_response(self, method_name, method_response, httpStatus):
        '''
        Helper function to construct the method response params, models, and integration_params
        values needed to configure method response integration/mappings.

        method_name
            lower-cased http operation name

        method_response
            a _Swagger.SwaggerMethodResponse wrapping the swagger response object

        httpStatus
            the http status code string this response entry applies to
        '''
        method_response_models = {}
        method_response_pattern = '.*'
        if method_response.schema:
            method_response_models['application/json'] = method_response.schema
            # selection pattern comes from the schema's 'pattern' field when present
            method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
        method_response_params = {}
        method_integration_response_params = {}
        for header in method_response.headers:
            response_header = 'method.response.header.{0}'.format(header)
            method_response_params[response_header] = False
            header_data = method_response.headers.get(header)
            # map each declared response header to its 'default' value when
            # given, otherwise to the literal '*'
            method_integration_response_params[response_header] = (
                "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
        response_templates = self._get_response_template(method_name, httpStatus)
        return {'params': method_response_params,
                'models': method_response_models,
                'integration_params': method_integration_response_params,
                'pattern': method_response_pattern,
                'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
lambda_integration_role, lambda_region, authorization_type):
'''
Method to create a method for the given resource path, along with its associated
request and response integrations.
ret
a dictionary for returning status to Saltstack
resource_path
the full resource path where the named method_name will be associated with.
method_name
a string that is one of the following values: 'delete', 'get', 'head', 'options',
'patch', 'post', 'put'
method_data
the value dictionary for this method in the swagger definition file.
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
'''
method = self._parse_method_data(method_name.lower(), method_data)
# for options method to enable CORS, api_key_required will be set to False always.
# authorization_type will be set to 'NONE' always.
if method_name.lower() == 'options':
api_key_required = False
authorization_type = 'NONE'
m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
authorizationType=authorization_type,
apiKeyRequired=api_key_required,
requestParameters=method.get('params'),
requestModels=method.get('models'),
**self._common_aws_args)
if not m.get('created'):
ret = _log_error_and_abort(ret, m)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method', m)
lambda_uri = ""
if method_name.lower() != 'options':
lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
lambda_region=lambda_region)
# NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
# about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
integration = (
__salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
integrationType=method.get('integration_type'),
integrationHttpMethod='POST',
uri=lambda_uri,
credentials=lambda_integration_role,
requestTemplates=method.get('request_templates'),
**self._common_aws_args))
if not integration.get('created'):
ret = _log_error_and_abort(ret, integration)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
if 'responses' in method_data:
for response, response_data in six.iteritems(method_data['responses']):
httpStatus = str(response) # future lint: disable=blacklisted-function
method_response = self._parse_method_response(method_name.lower(),
_Swagger.SwaggerMethodResponse(response_data), httpStatus)
mr = __salt__['boto_apigateway.create_api_method_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
responseParameters=method_response.get('params'),
responseModels=method_response.get('models'),
**self._common_aws_args)
if not mr.get('created'):
ret = _log_error_and_abort(ret, mr)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
mir = __salt__['boto_apigateway.create_api_integration_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
selectionPattern=method_response.get('pattern'),
responseParameters=method_response.get('integration_params'),
responseTemplates=method_response.get('response_templates'),
**self._common_aws_args)
if not mir.get('created'):
ret = _log_error_and_abort(ret, mir)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
else:
raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
'''
Method to deploy resources defined in the swagger file.
ret
a dictionary for returning status to Saltstack
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
'''
for path, pathData in self.paths:
resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
path=path,
**self._common_aws_args)
if not resource.get('created'):
ret = _log_error_and_abort(ret, resource)
return ret
ret = _log_changes(ret, 'deploy_resources', resource)
for method, method_data in six.iteritems(pathData):
if method in _Swagger.SWAGGER_OPERATION_NAMES:
ret = self._deploy_method(ret, path, method, method_data, api_key_required,
lambda_integration_role, lambda_region, authorization_type)
return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.deploy_models
|
python
|
def deploy_models(self, ret):
'''
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack
'''
for model, schema in self.models():
# add in a few attributes into the model schema that AWS expects
# _schema = schema.copy()
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
'title': '{0} Schema'.format(model)})
# check to see if model already exists, aws has 2 default models [Empty, Error]
# which may need upate with data from swagger file
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
modelName=model,
**self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = (
__salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
modelName=model,
schema=_dict_to_json_pretty(_schema),
**self._common_aws_args))
if not update_model_schema_response.get('updated'):
ret['result'] = False
ret['abort'] = True
if 'error' in update_model_schema_response:
ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
update_model_schema_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = (
__salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
modelDescription=model,
schema=_dict_to_json_pretty(_schema),
contentType='application/json',
**self._common_aws_args))
if not create_model_response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in create_model_response:
ret['comment'] = ('Failed to create model {0}, schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
create_model_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret
|
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1360-L1416
|
[
"def _dict_to_json_pretty(d, sort_keys=True):\n '''\n helper function to generate pretty printed json output\n '''\n return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys)\n",
"def _log_changes(ret, changekey, changevalue):\n '''\n For logging create/update/delete operations to AWS ApiGateway\n '''\n cl = ret['changes'].get('new', [])\n cl.append({changekey: _object_reducer(changevalue)})\n ret['changes']['new'] = cl\n return ret\n",
"def models(self):\n '''\n generator to return the tuple of model and its schema to create on aws.\n '''\n model_dict = self._build_all_dependencies()\n while True:\n model = self._get_model_without_dependencies(model_dict)\n if not model:\n break\n yield (model, self._models().get(model))\n",
"def _update_schema_to_aws_notation(self, schema):\n '''\n Helper function to map model schema to aws notation\n '''\n result = {}\n for k, v in schema.items():\n if k == '$ref':\n v = self._aws_model_ref_from_swagger_ref(v)\n if isinstance(v, dict):\n v = self._update_schema_to_aws_notation(v)\n result[k] = v\n return result\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
    def __init__(self, api_name, stage_name, lambda_funcname_format,
                 swagger_file_path, error_response_template, response_template, common_aws_args):
        '''
        api_name
            name to use for the rest api on AWS API Gateway

        stage_name
            the deployment stage this object operates on

        lambda_funcname_format
            format string used to derive lambda function names; may reference
            {stage}, {api}, {resource} and {method}

        swagger_file_path
            path to the swagger definition file (YAML or JSON); validated here

        error_response_template
            optional integration response template for 4xx/5xx statuses

        response_template
            optional integration response template for non-error statuses

        common_aws_args
            keyword args passed through to every boto_apigateway module call
        '''
        self._api_name = api_name
        self._stage_name = stage_name
        self._lambda_funcname_format = lambda_funcname_format
        self._common_aws_args = common_aws_args
        self._restApiId = ''
        self._deploymentId = ''
        self._error_response_template = error_response_template
        self._response_template = response_template
        if swagger_file_path is not None:
            if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
                self._swagger_file = swagger_file_path
                # hash includes the templates so a template change also counts
                # as a change to the deployment
                self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                       error_response_template,
                                                       response_template)
                with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                    self._cfg = salt.utils.yaml.safe_load(sf)
                self._swagger_version = ''
            else:
                raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
            self._validate_swagger_file()
        self._validate_lambda_funcname_format()
        self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
    def _validate_lambda_funcname_format(self):
        '''
        Checks if the lambda function name format contains only known elements

        :return: True on success, ValueError raised on error
        '''
        try:
            if self._lambda_funcname_format:
                # substituting dummy values for every supported key makes
                # str.format raise if the format string references anything else
                known_kwargs = dict(stage='',
                                    api='',
                                    resource='',
                                    method='')
                self._lambda_funcname_format.format(**known_kwargs)
            return True
        except Exception:
            # deliberately broad: any str.format failure (unknown key, bad
            # format spec, positional field, ...) means the user supplied an
            # invalid format string, reported uniformly as ValueError
            raise ValueError('Invalid lambda_funcname_format {0}. Please review '
                             'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check, but rather make sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
yield (model, self._models().get(model))
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
@property
def basePath(self):
'''
returns the base path field as defined in the swagger file
'''
basePath = self._cfg.get('basePath', '')
return basePath
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
'''
Helper function to find whether there are other stages still associated with a deployment
'''
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
return bool(stages)
def no_more_deployments_remain(self):
'''
Helper function to find whether there are deployments left with stages associated
'''
no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
if stages:
no_more_deployments = False
break
return no_more_deployments
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently assocaited with.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
def overwrite_stage_variables(self, ret, stage_variables):
'''
overwrite the given stage_name's stage variables with the given stage_variables
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
    def _set_current_deployment(self, stage_desc_json, stage_variables):
        '''
        Helper method to associate the stage_name to the given deploymentId and make this current

        stage_desc_json
            pretty printed json description stored on a newly created stage

        stage_variables
            stage variables to set (or overwrite, for an existing stage)
        '''
        stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               **self._common_aws_args).get('stage')
        if not stage:
            # stage does not exist yet: create it directly against the target deployment
            stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                                 stageName=self._stage_name,
                                                                 deploymentId=self._deploymentId,
                                                                 description=stage_desc_json,
                                                                 variables=stage_variables,
                                                                 **self._common_aws_args)
            if not stage.get('stage'):
                return {'set': False, 'error': stage.get('error')}
        else:
            # overwrite the stage variables
            # NOTE(review): success is detected via the 'stage' key here while
            # overwrite_stage_variables checks 'overwrite' -- confirm against the
            # boto_apigateway execution module's return shape
            overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                                  stageName=self._stage_name,
                                                                                  variables=stage_variables,
                                                                                  **self._common_aws_args)
            if not overwrite.get('stage'):
                return {'set': False, 'error': overwrite.get('error')}
        return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                                   stageName=self._stage_name,
                                                                   deploymentId=self._deploymentId,
                                                                   **self._common_aws_args)
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
    def verify_api(self, ret):
        '''
        this method helps determine if the given stage_name is already on a deployment
        label matching the input api_name, swagger_file.

        If yes, returns abort with comment indicating already at desired state.
        If not and there is previous deployment labels in AWS matching the given input api_name and
        swagger file, indicate to the caller that we only need to reassociate stage_name to the
        previously existing deployment label.
        '''
        if self.restApiId:
            deployed_label_json = self._get_current_deployment_label()
            if deployed_label_json == self.deployment_label_json:
                # stage already points at a deployment with the desired label
                ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                                  'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
                ret['current'] = True
                return ret
            else:
                # a deployment with the desired label may already exist; if so,
                # the caller only needs to re-point the stage at it ('publish')
                self._deploymentId = self._get_desired_deployment_id()
                if self._deploymentId:
                    # just a reassociate of stage_name to an already existing deployment
                    ret['publish'] = True
        return ret
def publish_api(self, ret, stage_variables):
'''
this method tie the given stage_name to a deployment matching the given swagger_file
'''
stage_desc = dict()
stage_desc['current_deployment_label'] = self.deployment_label
stage_desc_json = _dict_to_json_pretty(stage_desc)
if self._deploymentId:
# just do a reassociate of stage_name to an already existing deployment
res = self._set_current_deployment(stage_desc_json, stage_variables)
if not res.get('set'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'publish_api (reassociate deployment, set stage_variables)',
res.get('response'))
else:
# no deployment existed for the given swagger_file for this Swagger object
res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
stageDescription=stage_desc_json,
description=self.deployment_label_json,
variables=stage_variables,
**self._common_aws_args)
if not res.get('created'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
return ret
def _cleanup_api(self):
'''
Helper method to clean up resources and models if we detected a change in the swagger file
for a stage
'''
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
path=resource.get('path'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
modelName=model.get('name'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
return {'deleted': True}
def deploy_api(self, ret):
'''
this method create the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
'''
Method to delete a Rest Api named defined in the swagger file's Info Object's title value.
ret
a dictionary for returning status to Saltstack
'''
exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if exists_response.get('exists'):
# honour salt's test mode: report what would happen, change nothing
if __opts__['test']:
ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
ret['result'] = None
ret['abort'] = True
return ret
delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not delete_api_response.get('deleted'):
ret['result'] = False
ret['abort'] = True
if 'error' in delete_api_response:
ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
return ret
ret = _log_changes(ret, 'delete_api', delete_api_response)
else:
# nothing to do -- already absent is success
ret['comment'] = ('api already absent for swagger file: '
'{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
'''
Helper function to build the list of models the given object schema is referencing.

Returns a de-duplicated list of model names found via '$ref' in the
schema, its array items, and all of its properties (recursively).
'''
dep_models_list = []
if obj_schema:
# NOTE: this normalizes (and mutates) the input schema in place,
# defaulting a missing 'type' to 'object'
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
# referenced model: include its own dependencies first, then itself
ref_obj_model = ref.split("/")[-1]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
# need to walk each property object
properties = obj_schema.get('properties')
if properties:
for _, prop_obj_schema in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
'''
Helper Method to construct the lambda uri for use in method integration

Raises ValueError when the named lambda function cannot be found.
'''
profile = self._common_aws_args.get('profile')
region = self._common_aws_args.get('region')
lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if lambda_region != apigw_region:
if not lambda_desc.get('function'):
# try look up in the same region as the apigateway as well if previous lookup failed
# NOTE(review): this retry passes exactly the same arguments as the first
# lookup; presumably _common_aws_args already targets the apigw region -- confirm
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if not lambda_desc.get('function'):
raise ValueError('Could not find lambda function {0} in '
'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
lambda_arn = lambda_desc.get('function').get('FunctionArn')
# uri format required by API Gateway for AWS lambda proxy invocations
lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
'/functions/{1}/invocations'.format(apigw_region, lambda_arn))
return lambda_uri
def _parse_method_data(self, method_name, method_data):
'''
Helper function to construct the method request params, models, request_templates and
integration_type values needed to configure method request integration/mappings.

Returns a dict with keys 'params', 'models', 'request_templates' and
'integration_type'.
'''
method_params = {}
method_models = {}
if 'parameters' in method_data:
for param in method_data['parameters']:
p = _Swagger.SwaggerParameter(param)
if p.name:
# mark the AWS request parameter (header/query/path) as required
method_params[p.name] = True
if p.schema:
# body parameter -> json request model
method_models['application/json'] = p.schema
# OPTIONS methods are mocked (CORS preflight); everything else integrates with AWS lambda
request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
integration_type = "MOCK" if method_name == 'options' else "AWS"
return {'params': method_params,
'models': method_models,
'request_templates': request_templates,
'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
'''
Helper function to construct the method response params, models, and integration_params
values needed to configure method response integration/mappings.

method_response is a _Swagger.SwaggerMethodResponse wrapper; returns a
dict with keys 'params', 'models', 'integration_params', 'pattern' and
'response_templates'.
'''
method_response_models = {}
method_response_pattern = '.*'
if method_response.schema:
method_response_models['application/json'] = method_response.schema
# the selection pattern comes from the model's 'pattern' field if present
method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
method_response_params = {}
method_integration_response_params = {}
for header in method_response.headers:
response_header = 'method.response.header.{0}'.format(header)
method_response_params[response_header] = False
header_data = method_response.headers.get(header)
# map each declared header to its default value, or '*' when none given
method_integration_response_params[response_header] = (
"'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
response_templates = self._get_response_template(method_name, httpStatus)
return {'params': method_response_params,
'models': method_response_models,
'integration_params': method_integration_response_params,
'pattern': method_response_pattern,
'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
lambda_integration_role, lambda_region, authorization_type):
'''
Method to create a method for the given resource path, along with its associated
request and response integrations.
ret
a dictionary for returning status to Saltstack
resource_path
the full resource path where the named method_name will be associated with.
method_name
a string that is one of the following values: 'delete', 'get', 'head', 'options',
'patch', 'post', 'put'
method_data
the value dictionary for this method in the swagger definition file.
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
'''
method = self._parse_method_data(method_name.lower(), method_data)
# for options method to enable CORS, api_key_required will be set to False always.
# authorization_type will be set to 'NONE' always.
if method_name.lower() == 'options':
api_key_required = False
authorization_type = 'NONE'
m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
authorizationType=authorization_type,
apiKeyRequired=api_key_required,
requestParameters=method.get('params'),
requestModels=method.get('models'),
**self._common_aws_args)
if not m.get('created'):
ret = _log_error_and_abort(ret, m)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method', m)
# options methods are MOCK integrations and need no lambda uri
lambda_uri = ""
if method_name.lower() != 'options':
lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
lambda_region=lambda_region)
# NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
# about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
integration = (
__salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
integrationType=method.get('integration_type'),
integrationHttpMethod='POST',
uri=lambda_uri,
credentials=lambda_integration_role,
requestTemplates=method.get('request_templates'),
**self._common_aws_args))
if not integration.get('created'):
ret = _log_error_and_abort(ret, integration)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
if 'responses' in method_data:
# create a method response + integration response per declared status code
for response, response_data in six.iteritems(method_data['responses']):
httpStatus = str(response) # future lint: disable=blacklisted-function
method_response = self._parse_method_response(method_name.lower(),
_Swagger.SwaggerMethodResponse(response_data), httpStatus)
mr = __salt__['boto_apigateway.create_api_method_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
responseParameters=method_response.get('params'),
responseModels=method_response.get('models'),
**self._common_aws_args)
if not mr.get('created'):
ret = _log_error_and_abort(ret, mr)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
mir = __salt__['boto_apigateway.create_api_integration_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
selectionPattern=method_response.get('pattern'),
responseParameters=method_response.get('integration_params'),
responseTemplates=method_response.get('response_templates'),
**self._common_aws_args)
if not mir.get('created'):
ret = _log_error_and_abort(ret, mir)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
else:
raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
'''
Method to deploy resources defined in the swagger file.
ret
a dictionary for returning status to Saltstack
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
'''
for path, pathData in self.paths:
# create_api_resources creates the whole resource hierarchy for 'path'
resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
path=path,
**self._common_aws_args)
if not resource.get('created'):
ret = _log_error_and_abort(ret, resource)
return ret
ret = _log_changes(ret, 'deploy_resources', resource)
for method, method_data in six.iteritems(pathData):
# ignore non-operation keys (e.g. 'parameters') on the path item object
if method in _Swagger.SWAGGER_OPERATION_NAMES:
ret = self._deploy_method(ret, path, method, method_data, api_key_required,
lambda_integration_role, lambda_region, authorization_type)
return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._lambda_name
|
python
|
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
|
Helper method that constructs the lambda function name according to the rule specified in the
docstring of the boto_apigateway.api_present function
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1418-L1430
| null |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
# fields permitted at the top level of a Swagger 2.0 document
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS (e.g. x-amazon-...)
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
# (Velocity Template Language mapping templates passed to API Gateway)
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
# parameter locations supported by this state ('formData' is not)
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object

Raises ValueError for any location outside LOCATIONS.
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object

The name is mapped to the AWS method request parameter notation
(method.request.<location>.<name>); body parameters return None.
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object

Only body parameters carry a schema; it must be a $ref to a model in
/definitions.  Returns None for non-body parameters.
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
    '''
    Thin wrapper around a Swagger Method Response Object.
    '''
    def __init__(self, r):
        self._r = r

    @property
    def schema(self):
        '''
        Name of the model referenced by this response's schema, or None when
        no schema is declared.  A schema without a $ref is rejected since
        only model references can be mapped onto AWS response models.
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        Headers dictionary of the response object ({} when absent).
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
# identity of the rest api / stage this instance manages
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
# region/key/profile kwargs forwarded to every boto_apigateway call
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
# optional user-supplied VTL templates overriding the class defaults
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
# hash covers the swagger file AND both templates so any change forces redeploy
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
# validate inputs up front and resolve an existing rest api id if any
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration

Raises ValueError on the first violation: every operation must declare
responses, and every 4xx/5xx response must $ref an object model under
/definitions with an errorMessage property.
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
# skip non-operation keys on the path item object
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    Raises ValueError on the first violation found.

    Fixes: removed a stray debug statement (log.info(type(self._models)))
    that logged the type of a bound method on every validation.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # validate the error-response model convention for every path/operation
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file

(computed over the swagger file plus both response templates in __init__)
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary

Raises ValueError when the info object is missing or empty.
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object

Raises ValueError when the version is missing or empty.
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file

(actually the raw /definitions dict; raises ValueError when empty)
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.

Models are yielded in dependency order (dependencies first) so each can
be created on AWS before anything that references it.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
yield (model, self._models().get(model))
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file

Raises ValueError when no paths are defined or when a path does not
begin with '/'.
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
# every resource path must be absolute (rooted at '/') for API Gateway
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
@property
def basePath(self):
    '''
    Base path declared in the swagger file ('' when absent).
    '''
    return self._cfg.get('basePath', '')
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    Dictionary uniquely identifying a deployment (used as the stage
    description payload): swagger info object, api name, swagger file
    basename and its md5 checksum.
    '''
    return {
        'swagger_info_object': self.info,
        'api_name': self.rest_api_name,
        'swagger_file': os.path.basename(self._swagger_file),
        'swagger_file_md5sum': self.md5_filehash,
    }
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
'''
Helper function to find whether there are other stages still associated with a deployment

Returns True when at least one stage still points at deploymentId.
'''
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
return bool(stages)
def no_more_deployments_remain(self):
'''
Helper function to find whether there are deployments left with stages associated

Returns True when no remaining deployment has any stage attached.
'''
no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
if stages:
# found a deployment still in use -- stop searching
no_more_deployments = False
break
return no_more_deployments
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently associated with.

Returns '' when the stage does not exist.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.

Returns the deployment's description (the json deployment label) or
None when the stage has no deployment.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file

Returns '' when no deployment carries the desired label.
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
# descriptions carry the json deployment label; exact match wins
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
def overwrite_stage_variables(self, ret, stage_variables):
'''
overwrite the given stage_name's stage variables with the given stage_variables

ret
a dictionary for returning status to Saltstack
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
'''
Helper method to associate the stage_name to the given deploymentId and make this current

Creates the stage when it does not yet exist; otherwise overwrites its
variables and re-points it at self._deploymentId.  Returns the result of
the final execution-module call, or {'set': False, 'error': ...}.
'''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if not stage:
# stage does not exist yet: creating it also binds the deployment
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
description=stage_desc_json,
variables=stage_variables,
**self._common_aws_args)
if not stage.get('stage'):
return {'set': False, 'error': stage.get('error')}
else:
# overwrite the stage variables
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not overwrite.get('stage'):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
**self._common_aws_args)
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description

Leaves self.restApiId untouched when no api matches; raises ValueError
when the match is ambiguous.
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        # delete the stage first; the deployment is garbage-collected below
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                # other stages still reference the deployment; keep it
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.
    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.

    ret
        a dictionary for returning status to Saltstack; this method may set
        ret['current'] (already at desired state) or ret['publish'] (only a
        stage re-association is needed).
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            # stage already points at a deployment built from this swagger file
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                             'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # a deployment for this swagger file may exist under another
            # stage; if so, only a (re)publish is needed
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dictionary of stage variables to set on the published stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)
    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage

    Returns {'deleted': True} on full success; otherwise the failing delete
    result dict is returned as-is (no 'deleted' key / error details).
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the root resource ('/', first entry) and delete children
        # deepest-first so parents are removed after their descendants
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    return {'deleted': True}
def deploy_api(self, ret):
    '''
    this method create the top level rest api in AWS apigateway

    ret
        a dictionary for returning status to Saltstack

    If the rest api already exists (self.restApiId is set), its resources
    and models are wiped via _cleanup_api so they can be re-deployed, and
    the existing api object is reused instead of creating a new one.
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # message typo fixed: 'restAreId' -> 'restApiId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret
    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)
    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret
    self.restApiId = response.get('restapi', {}).get('id')
    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        # honor test=True: report what would happen without deleting anything
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret
        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret
        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
    return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Returns a de-duplicated list of model names reachable from *obj_schema*
    via '$ref' -- directly, through array 'items', or through 'properties'.
    '''
    dep_models_list = []
    if obj_schema:
        # NOTE: mutates the schema in place by defaulting 'type' to 'object'
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            # arrays: dependencies come from the item schema
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # follow the reference and include its transitive deps too
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    # models() yields in dependency order so referenced models exist first
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})
        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)
        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))
            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', create_model_response)
    return ret
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration

    lambda_name
        name of the lambda function the method integrates with
    lambda_region
        preferred region for the lambda function lookup

    Returns the 'arn:aws:apigateway:...:lambda:path/2015-03-31/functions/
    <arn>/invocations' uri; raises ValueError when the function cannot be
    found.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')
    # resolve the effective regions for the lambda and apigateway services
    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            # NOTE(review): this retry is byte-identical to the first lookup
            # (same kwargs), so it cannot actually search a different region
            # as intended -- confirm whether a region override was meant here.
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models, request_templates and
    integration_type values needed to configure method request integration/mappings.

    method_name
        lower-cased http method name (e.g. 'get', 'options')
    method_data
        the swagger Operation Object dictionary for this method
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                # header/query/path parameter: mark as required in the request
                method_params[p.name] = True
            if p.schema:
                # body parameter: record the referenced model for json payloads
                method_models['application/json'] = p.schema
    # 'options' methods are mocked (CORS preflight); everything else targets lambda
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"
    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    returns the pattern specified in a response schema

    Falls back to '.+' for http error status codes and '.*' otherwise when
    the model defines no 'pattern' field; only the first pattern found in
    the model is used.
    '''
    defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    model = self._models().get(schema_name)
    patterns = self._find_patterns(model)
    return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
    '''
    Pick the integration response template mapping for the given
    method/status: user-supplied templates (self._response_template /
    self._error_response_template) take precedence over the class defaults.
    '''
    if method_name == 'options' or not self._is_http_error_rescode(http_status):
        response_templates = {'application/json': self._response_template} \
            if self._response_template else self.RESPONSE_OPTION_TEMPLATE
    else:
        response_templates = {'application/json': self._error_response_template} \
            if self._error_response_template else self.RESPONSE_TEMPLATE
    return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_response
        a _Swagger.SwaggerMethodResponse wrapping the swagger Response Object
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map each declared header to its default value, or '*' when none given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
    response_templates = self._get_response_template(method_name, httpStatus)
    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack
    resource_path
        the full resource path where the named method_name will be associated with.
    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'
    method_data
        the value dictionary for this method in the swagger definition file.
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)
    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'
    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_method', m)
    # options (MOCK) integrations need no lambda uri
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)
    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
    if 'responses' in method_data:
        # wire every declared response code to a method response and a
        # matching integration response
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)
            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack
    api_key_required
        True or False, whether api key is required to access this method.
    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function
    lambda_region
        the region for the lambda function that Api Gateway will integrate to.
    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # only deploy recognized http operations; skip vendor extensions
            # and other non-operation keys in the path item object
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._lambda_uri
|
python
|
def _lambda_uri(self, lambda_name, lambda_region):
'''
Helper Method to construct the lambda uri for use in method integration
'''
profile = self._common_aws_args.get('profile')
region = self._common_aws_args.get('region')
lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if lambda_region != apigw_region:
if not lambda_desc.get('function'):
# try look up in the same region as the apigateway as well if previous lookup failed
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if not lambda_desc.get('function'):
raise ValueError('Could not find lambda function {0} in '
'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
lambda_arn = lambda_desc.get('function').get('FunctionArn')
lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
'/functions/{1}/invocations'.format(apigw_region, lambda_arn))
return lambda_uri
|
Helper Method to construct the lambda uri for use in method integration
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1432-L1456
| null |
class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.
    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''
    # Fields allowed at the top level of a Swagger 2.0 document.
    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES (http methods recognized in path item objects)
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    # only swagger 2.0 documents are accepted by _validate_swagger_file
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
    # VENDOR SPECIFIC FIELD PATTERNS ('x-' extension fields are passed through)
    VENDOR_EXT_PATTERN = re.compile('^x-')
    # JSON_SCHEMA_REF injected into every model schema uploaded to AWS
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object
    '''
    # parameter locations ('in' values) supported by this state
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # paramdict is the raw swagger Parameter Object dictionary
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object

        Raises ValueError for any 'in' value outside LOCATIONS.
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object

        Header/query/path parameters are mapped to their AWS
        'method.request.*' form; body parameters return None.
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object

        Only body parameters carry a schema; other locations return None.
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Helper class for Swagger Method Response Object
    '''
    def __init__(self, r):
        # r is the raw swagger Response Object dictionary
        self._r = r

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger method response object

        Returns None when the response declares no schema at all.
        '''
        _schema = self._r.get('schema')
        if _schema:
            if '$ref' in _schema:
                return _schema.get('$ref').split('/')[-1]
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(_schema)))
        return None

    @property
    def headers(self):
        '''
        returns the headers dictionary in the method response object
        '''
        _headers = self._r.get('headers', {})
        return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    api_name
        name of the rest api on AWS
    stage_name
        the stage this object operates on
    lambda_funcname_format
        format string used to derive lambda function names from
        stage/api/resource/method (validated below)
    swagger_file_path
        path to the swagger definition file (YAML or JSON)
    error_response_template / response_template
        optional user supplied integration response templates
    common_aws_args
        region/key/keyid/profile passed through to every boto call
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template
    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # hash covers the file AND both templates so a template change
            # also triggers a redeployment
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
    # NOTE(review): when swagger_file_path is None, self._cfg is never set and
    # _validate_swagger_file below will fail -- presumably callers always
    # supply a valid path; confirm before relying on a None path here.
    self._validate_swagger_file()
    self._validate_lambda_funcname_format()
    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration

    paths
        iterator of (path, path item object) pairs from the swagger file
    mods
        the 'definitions' mapping of model name -> schema

    Every 4xx/5xx response must reference (via $ref) an object model that
    has an 'errorMessage' property, matching the AWS lambda error shape.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue
            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue
                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))
                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]
                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)
                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))
                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    Raises ValueError on any invalid/missing field or unsupported version.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))
    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))
    # removed stray debug statement `log.info(type(self._models))`, which
    # only logged the type of a bound method and carried no information
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file (computed over the file plus the
    user supplied response templates; see __init__)
    '''
    return self._md5_filehash

@property
def info(self):
    '''
    returns the swagger info object as a dictionary
    '''
    info = self._cfg.get('info')
    if not info:
        raise ValueError('Info Object has no values')
    return info

@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)

@property
def rest_api_name(self):
    '''
    returns the name of the api
    '''
    return self._api_name

@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object
    '''
    version = self.info.get('version')
    if not version:
        raise ValueError('Missing version value in Info Object')
    return version
def _models(self):
    '''
    The swagger 'definitions' dictionary (model name -> schema).

    Raises:
        ValueError: when the swagger file defines no models.
    '''
    definitions = self._cfg.get('definitions')
    if definitions:
        return definitions
    raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
def models(self):
    '''
    Generator yielding ``(model_name, schema)`` tuples in dependency
    order, so each model is produced only after every model it references.
    '''
    remaining = self._build_all_dependencies()
    next_model = self._get_model_without_dependencies(remaining)
    while next_model:
        yield (next_model, self._models().get(next_model))
        next_model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
    '''
    Iterator over the ``(path, path_item)`` pairs of the swagger 'paths'
    object.

    Raises:
        ValueError: when no paths are defined, or a path does not begin
            with '/'.
    '''
    path_objs = self._cfg.get('paths')
    if not path_objs:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    # every relative path must be rooted at '/'; complain about the first
    # offender in iteration order
    invalid = [p for p in path_objs if not p.startswith('/')]
    if invalid:
        raise ValueError('Path object {0} should start with /. Please fix it'.format(invalid[0]))
    return six.iteritems(path_objs)
@property
def basePath(self):
    '''
    The swagger ``basePath`` value, or '' when the file defines none.
    '''
    return self._cfg.get('basePath', '')
@property
def restApiId(self):
    '''
    AWS-assigned id of the Rest API ('' until created or resolved).
    '''
    return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
    '''
    Record the AWS-assigned Rest API id after creation or lookup.
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    The deployment label rendered as pretty-printed JSON; this exact
    string is stored as the AWS deployment description.
    '''
    label = self.deployment_label
    return _dict_to_json_pretty(label)
@property
def deployment_label(self):
    '''
    Dictionary uniquely identifying this deployment: the swagger info
    object, the api name, and the swagger file name plus its md5, so a
    changed swagger file yields a different label.
    '''
    return {'swagger_info_object': self.info,
            'api_name': self.rest_api_name,
            'swagger_file': os.path.basename(self._swagger_file),
            'swagger_file_md5sum': self.md5_filehash}
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Return True when the given deployment still has at least one stage
    associated with it.
    '''
    remaining = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                deploymentId=deploymentId,
                                                                **self._common_aws_args).get('stages')
    return bool(remaining)
def no_more_deployments_remain(self):
    '''
    Return True when no deployment of this Rest API has any stage still
    associated with it (i.e. the api is safe to tear down fully).

    Reuses the single-deployment check ``_one_or_more_stages_remain``
    instead of duplicating the describe_api_stages call inline, and
    short-circuits on the first deployment that still has stages.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            if self._one_or_more_stages_remain(deployment.get('id')):
                return False
    return True
def _get_current_deployment_id(self):
    '''
    Deployment id currently bound to this object's stage name, or ''
    when the stage does not exist.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    return stage.get('deploymentId') if stage else ''
def _get_current_deployment_label(self):
    '''
    Description (deployment label JSON) of the deployment the stage name
    is currently associated with, or None when it cannot be resolved.
    '''
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=self._get_current_deployment_id(),
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None
def _get_desired_deployment_id(self):
    '''
    Id of the existing deployment whose description equals this object's
    deployment label JSON, or '' when no such deployment exists.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if not deployments:
        return ''
    matches = [d.get('id') for d in deployments
               if d.get('description') == self.deployment_label_json]
    return matches[0] if matches else ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    Replace this stage's variables with ``stage_variables``, recording
    the outcome in ``ret``: changes on success, abort/result/comment on
    failure.  Returns the updated ``ret``.
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
    if res.get('overwrite'):
        return _log_changes(ret,
                            'overwrite_stage_variables',
                            res.get('stage'))
    ret['result'] = False
    ret['abort'] = True
    ret['comment'] = res.get('error')
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current

    Creates the stage (bound to self._deploymentId) when it does not exist
    yet; otherwise overwrites its stage variables.  In both cases the
    deployment is then (re)activated on the stage.  Returns the
    activate_api_deployment result dict on success, or
    ``{'set': False, 'error': ...}`` on failure.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        # stage does not exist yet: create it directly on the target deployment
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        # NOTE(review): success is detected via the 'stage' key here, but via
        # the 'overwrite' key in overwrite_stage_variables() -- confirm both
        # keys are always present in the execution module's success response
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}
    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Look up and store the id of the existing Rest API matching this
    object's name and the state-managed api description.  Leaves
    restApiId untouched when nothing matches.

    Raises:
        ValueError: when more than one api matches.
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if not apis:
        return
    if len(apis) != 1:
        raise ValueError('Multiple APIs matching given name {0} and '
                         'description {1}'.format(self.rest_api_name, self.info_json))
    self.restApiId = apis[0].get('id')
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                # other stages still use the deployment; only the stage is gone
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            # the stage is already on a deployment built from this exact
            # swagger file -- nothing to do
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # remember a matching pre-existing deployment (if any) so
            # publish_api can simply reassociate the stage to it
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        stage variables to apply to the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage

    Returns the failing execution-module result on the first delete error,
    otherwise ``{'deleted': True}``.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first entry (presumably the root '/' resource, which
        # cannot be deleted -- TODO confirm) and delete deepest-first
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    this method create the top level rest api in AWS apigateway

    When an api with this name/description already exists (restApiId was
    resolved), its resources and models are cleaned out instead so the new
    swagger content can be re-deployed onto it.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in the failure message: 'restAreId' -> 'restApiId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        # honor Salt's test=True mode: report the pending change, do nothing
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

    return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Recurses through array items, '$ref' targets (including the referenced
    model's own schema) and object properties; returns a de-duplicated
    list of model names.
    '''
    dep_models_list = []

    if obj_schema:
        # NOTE(review): this writes a default 'type' back into the caller's
        # schema dict (in-place mutation of the input) -- confirm intended
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # follow the reference and record both its transitive
                # dependencies and the referenced model itself
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model name defined in the swagger file to the list of model
    names its schema references.
    '''
    return dict((model, self._build_dependent_model_list(schema))
                for model, schema in six.iteritems(self._models()))
def _get_model_without_dependencies(self, models_dict):
    '''
    Pop and return the next model whose dependency list is empty, and
    remove it from every other model's dependency list.  Returns None
    when ``models_dict`` is empty.

    Raises:
        ValueError: when every remaining model still has dependencies,
            i.e. the definitions are incomplete (or cyclic).
    '''
    if not models_dict:
        return None

    next_model = None
    for model, dependencies in six.iteritems(models_dict):
        if dependencies == []:
            next_model = model
            break

    if next_model is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))

    # remove the resolved model from the remaining dependency lists
    models_dict.pop(next_model)
    for dep_list in six.itervalues(models_dict):
        if next_model in dep_list:
            dep_list.remove(next_model)

    return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    # models() yields in dependency order, so referenced models exist first
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _parse_method_data(self, method_name, method_data):
    '''
    Build the request-side configuration (request parameters, request
    models, request templates and integration type) for one swagger
    operation.  'options' methods get the CORS mock integration; every
    other method gets the lambda ('AWS') integration.
    '''
    request_params = {}
    request_models = {}
    for param in method_data.get('parameters', []):
        parsed = _Swagger.SwaggerParameter(param)
        if parsed.name:
            request_params[parsed.name] = True
        if parsed.schema:
            request_models['application/json'] = parsed.schema

    if method_name == 'options':
        templates, integration = _Swagger.REQUEST_OPTION_TEMPLATE, "MOCK"
    else:
        templates, integration = _Swagger.REQUEST_TEMPLATE, "AWS"

    return {'params': request_params,
            'models': request_models,
            'request_templates': templates,
            'integration_type': integration}
def _find_patterns(self, o):
    '''
    Recursively collect every value stored under a 'pattern' key anywhere
    inside the (possibly nested) dictionary ``o``.
    '''
    found = []
    if not isinstance(o, dict):
        return found
    for key, value in six.iteritems(o):
        if isinstance(value, dict):
            found += self._find_patterns(value)
        elif key == 'pattern':
            found.append(value)
    return found
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    Selection pattern for the named response schema: the first 'pattern'
    found in its model definition, otherwise '.+' for http error status
    codes and '.*' for everything else.
    '''
    patterns = self._find_patterns(self._models().get(schema_name))
    if patterns:
        return patterns[0]
    return '.+' if self._is_http_error_rescode(httpStatus) else '.*'
def _get_response_template(self, method_name, http_status):
    '''
    Integration response templates for a method/status pair: the
    user-supplied success or error template when configured, otherwise
    the class defaults ('options' methods always use the success side).
    '''
    is_error = method_name != 'options' and self._is_http_error_rescode(http_status)
    if is_error:
        if self._error_response_template:
            return {'application/json': self._error_response_template}
        return self.RESPONSE_TEMPLATE
    if self._response_template:
        return {'application/json': self._response_template}
    return self.RESPONSE_OPTION_TEMPLATE
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_response is a _Swagger.SwaggerMethodResponse wrapping one
    status entry from the swagger 'responses' object.
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        # selection pattern comes from the schema's own 'pattern', or a
        # default based on whether httpStatus is an error code
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map each declared header to its default value, falling back to '*'
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # 'options' uses the MOCK integration, so no lambda uri is needed
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        # wire up a method response and an integration response for every
        # declared status code
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # only actual swagger operations (get/put/post/...) are
            # deployed; any other keys under the path are skipped
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._parse_method_data
|
python
|
def _parse_method_data(self, method_name, method_data):
'''
Helper function to construct the method request params, models, request_templates and
integration_type values needed to configure method request integration/mappings.
'''
method_params = {}
method_models = {}
if 'parameters' in method_data:
for param in method_data['parameters']:
p = _Swagger.SwaggerParameter(param)
if p.name:
method_params[p.name] = True
if p.schema:
method_models['application/json'] = p.schema
request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
integration_type = "MOCK" if method_name == 'options' else "AWS"
return {'params': method_params,
'models': method_models,
'request_templates': request_templates,
'integration_type': integration_type}
|
Helper function to construct the method request params, models, request_templates and
integration_type values needed to configure method request integration/mappings.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1458-L1479
| null |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object

    Wraps one entry of an operation's 'parameters' list and exposes the
    pieces AWS apigateway needs (location, mapped request-parameter name,
    and the referenced schema for body parameters).
    '''
    # parameter locations this state supports ('formData' is not supported)
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # raw swagger Parameter Object dictionary
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object

        Raises ValueError for any location outside LOCATIONS.
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object

        The name is mapped to the AWS 'method.request.<location>.<name>'
        form; body parameters have no mapped name and yield None.
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            # body parameters carry a schema instead of a request parameter name
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object

        Only meaningful for body parameters; None for all other locations.
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    # '#/definitions/<Model>' -> '<Model>'
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Helper class for Swagger Method Response Object

    Wraps one status-code entry of an operation's 'responses' object and
    exposes its referenced schema name and headers.
    '''

    def __init__(self, r):
        # raw swagger Response Object dictionary for one status code
        self._r = r

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger method response object

        Returns None when the response declares no schema.
        '''
        _schema = self._r.get('schema')
        if _schema:
            if '$ref' in _schema:
                # '#/definitions/<Model>' -> '<Model>'
                return _schema.get('$ref').split('/')[-1]
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(_schema)))
        return None

    @property
    def headers(self):
        '''
        returns the headers dictionary in the method response object

        An empty dict when the response declares no headers.
        '''
        _headers = self._r.get('headers', {})
        return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load the swagger file, compute its md5 hash, validate its content and
    resolve the AWS rest api id when an api with the same name/description
    already exists.

    api_name
        name to use for the AWS rest api (exposed via rest_api_name)

    stage_name
        AWS deployment stage this object operates on

    lambda_funcname_format
        format string used by _lambda_name; may only reference the keys
        {stage}, {api}, {resource} and {method}

    swagger_file_path
        path to the swagger (YAML or JSON) file; IOError is raised when it
        does not point to an existing regular file

    error_response_template
        integration response template applied to 4xx/5xx responses

    response_template
        integration response template applied to normal responses

    common_aws_args
        dict of common boto arguments passed to every execution module call
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # hash covers the swagger file plus both response templates, so a
            # change in any of them is detected as a content change
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            # placeholder; the real value is filled in by _validate_swagger_file
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

    self._validate_swagger_file()

    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration.

    paths
        iterator over the (path, path item object) pairs from the swagger file

    mods
        dictionary of the model schemas defined under /definitions

    Raises ValueError when any 4xx/5xx response lacks a schema $ref, or when
    the referenced model is not an object with an errorMessage property.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # skip vendor extensions and other non-operation keys
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue
                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    Fix: removed a leftover debug statement (``log.info(type(self._models))``)
    that only ever logged the type of the bound method object.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # enforce the error-response/model convention before any AWS call is made
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns the md5 hash computed in __init__ over the swagger file and the
    error/normal response templates; used to detect content changes.
    '''
    return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    Raises ValueError (via the info property) when no info object exists.
    '''
    return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
    '''
    returns the name of the api as supplied to __init__ (the api_name
    argument, not the swagger info title).
    '''
    return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
    '''
    generator yielding (model_name, schema) tuples in an order that
    guarantees every model is emitted before any model that references it.
    '''
    remaining = self._build_all_dependencies()
    model = self._get_model_without_dependencies(remaining)
    while model:
        yield (model, self._models().get(model))
        model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file, or an
    empty string when the file defines none.
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath
@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    (set by deploy_api or _resolve_api_id; empty string until then).
    '''
    return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment (used to match existing deployments).
    '''
    return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated with a deployment.

    deploymentId
        id of the deployment to check for remaining stages
    '''
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    return bool(stages)
def no_more_deployments_remain(self):
    '''
    Helper function to find whether there are deployments left with stages associated.
    Returns True when every deployment of this rest api is stage-less (or no
    deployments exist at all).
    '''
    no_more_deployments = True
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            deploymentId = deployment.get('id')
            stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('stages')
            # any stage anywhere means deployments are still in use
            if stages:
                no_more_deployments = False
                break

    return no_more_deployments
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently associated with.
    Returns an empty string when the stage does not exist.
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId
def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label (description) that the
    stage_name is currently associated with; None when not found.
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None
def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id matching the desired deployment label for
    this Swagger object based on the given api_name, swagger_file.
    Returns an empty string when no deployment carries this object's
    deployment_label_json as its description.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            if deployment.get('description') == self.deployment_label_json:
                return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given stage_variables

    ret
        a dictionary for returning status to Saltstack

    stage_variables
        dictionary of stage variables to set on the stage
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)

    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current.
    Creates the stage when it does not exist yet, otherwise overwrites its
    stage variables; returns the execution module's result dict
    ({'set': False, 'error': ...} on failure).
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Sets self.restApiId to the Api Id that matches the given api_name and the
    hardcoded _Swagger.AWS_API_DESCRIPTION as the api description.
    Leaves restApiId untouched when no api matches; raises ValueError on
    multiple matches.
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            # NOTE(review): the message prints self.info_json although the lookup
            # matched on _Swagger.AWS_API_DESCRIPTION -- confirm which was intended
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well.

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.

    ret
        a dictionary for returning status to Saltstack; 'current' is set when
        already at desired state, 'publish' when only a reassociation is needed
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                # just reassociate the stage to this already-existing deployment
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack

    stage_variables
        AWS stage variables to set on the stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage.  Returns {'deleted': True} on success, or the failing
    execution-module result otherwise.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the first resource entry and delete the rest in reverse order
        # (presumably the first is the root '/', which cannot be deleted -- confirm)
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    this method create the top level rest api in AWS apigateway.

    When the api already exists (restApiId is set), its resources and models
    are wiped via _cleanup_api instead, so they can be re-deployed from the
    current swagger file.

    ret
        a dictionary for returning status to Saltstack

    Fix: corrected the error-message typo 'restAreId' -> 'restApiId'.
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack

    Honors test mode (__opts__['test']): reports what would be deleted
    without performing the deletion.
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

    return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
'''
Helper function to build the list of models the given object schema is referencing.
'''
dep_models_list = []
if obj_schema:
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
ref_obj_model = ref.split("/")[-1]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
# need to walk each property object
properties = obj_schema.get('properties')
if properties:
for _, prop_obj_schema in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model defined in the swagger file to the list of model names
    its schema references.
    '''
    return {model_name: self._build_dependent_model_list(model_schema)
            for model_name, model_schema in self._models().items()}
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration.

    lambda_name
        name of the lambda function to integrate with

    lambda_region
        preferred region of the lambda function; falls back to the
        apigateway's region when the function is not found there

    Raises ValueError when the function cannot be found in either region.

    Fix: the original looked the function up with the common (apigateway)
    region both times, so ``lambda_region`` was ignored and the documented
    cross-region fallback re-ran the identical query.  The first lookup now
    targets lambda_region and the fallback targets the apigateway region.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    # look up the function in the lambda's own region first
    lambda_aws_args = dict(self._common_aws_args, region=lambda_region)
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **lambda_aws_args)

    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            apigw_aws_args = dict(self._common_aws_args, region=apigw_region)
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **apigw_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')

    # the integration uri must always be in the apigateway's region
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    Return the first selection pattern declared in the named response
    schema, or a default pattern ('.+' for 4xx/5xx statuses, '.*'
    otherwise) when the schema declares none.
    '''
    default_pattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    patterns = self._find_patterns(self._models().get(schema_name))
    if patterns:
        return patterns[0]
    return default_pattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_name
        lower-cased http method name

    method_response
        a _Swagger.SwaggerMethodResponse object for one response entry

    httpStatus
        the http status code string this response entry is keyed by
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map each header to its declared default, or to '*' when none is given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    # _parse_method_data is defined elsewhere in this class; judging from the
    # usage below it returns a dict with 'params', 'models',
    # 'integration_type' and 'request_templates' keys -- confirm
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # OPTIONS methods are mocked (no lambda), so no uri is resolved for them
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            # create the method response first, then its integration mapping
            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # only deploy recognized http operations; skip vendor extensions
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._get_pattern_for_schema
|
python
|
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
|
returns the pattern specified in a response schema
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1492-L1499
| null |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    Thin wrapper over a Swagger Parameter Object (a dict) that maps the
    swagger fields onto the values AWS API Gateway expects.
    '''
    # parameter locations this state knows how to map
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # raw parameter object from the swagger file
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        Validated 'in' field of the parameter object.
        '''
        loc = self._paramdict.get('in')
        if loc not in _Swagger.SwaggerParameter.LOCATIONS:
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(loc))
        return loc

    @property
    def name(self):
        '''
        AWS-style request parameter name for header/query/path parameters,
        or None for locations (such as body) that carry no mapped name.
        '''
        param_name = self._paramdict.get('name')
        if not param_name:
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
        templates = {'header': 'method.request.header.{0}',
                     'query': 'method.request.querystring.{0}',
                     'path': 'method.request.path.{0}'}
        template = templates.get(self.location)
        return template.format(param_name) if template else None

    @property
    def schema(self):
        '''
        Referenced model name for body parameters; None for other locations.
        '''
        if self.location != 'body':
            return None
        schema_obj = self._paramdict.get('schema')
        if not schema_obj:
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        if '$ref' not in schema_obj:
            raise ValueError(('Body parameter must have a JSON reference '
                              'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
        return schema_obj.get('$ref').split('/')[-1]
class SwaggerMethodResponse(object):
    '''
    Thin wrapper over a Swagger Method Response Object (a dict).
    '''
    def __init__(self, r):
        # raw method response object from the swagger file
        self._r = r

    @property
    def schema(self):
        '''
        Name of the referenced model schema, or None when no schema is set.
        '''
        schema_obj = self._r.get('schema')
        if not schema_obj:
            return None
        if '$ref' not in schema_obj:
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(schema_obj)))
        return schema_obj.get('$ref').split('/')[-1]

    @property
    def headers(self):
        '''
        Headers dictionary of the response object (empty dict when absent).
        '''
        return self._r.get('headers', {})
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
    '''
    Return True when *code* (a string, surrounding whitespace allowed) is an
    HTTP error status code in the 400-599 range.
    '''
    return re.match(r'^\s*[45]\d\d\s*$', code) is not None
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration.

    For every 4xx/5xx response of every operation, the referenced schema must
    exist under /definitions, be of type object, and carry an errorMessage
    property (to match the AWS lambda error shape).  Raises ValueError on the
    first violation found.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # skip vendor extensions and non-operation keys
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue
            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue
                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))
                schemaobj = resobj.get('schema')
                # AWS only accepts schemas given by reference
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                # model name is the last path segment of the $ref
                modelname = schemaobjref.split('/')[-1]
                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)
                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))
                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
    '''
    Check that the lambda function name format string only references the
    known substitutable keys: stage, api, resource, method.

    :return: True on success; raises ValueError when the format string
             contains an unknown named field, a positional field, or a
             malformed format spec.
    '''
    if not self._lambda_funcname_format:
        # nothing configured; nothing to validate
        return True
    known_kwargs = dict(stage='',
                        api='',
                        resource='',
                        method='')
    try:
        self._lambda_funcname_format.format(**known_kwargs)
    # Narrowed from a bare `except Exception`: these are the errors
    # str.format raises -- KeyError for an unknown named placeholder,
    # IndexError for a positional placeholder such as '{0}', ValueError
    # for malformed specs, AttributeError for '{stage.x}'-style access.
    except (KeyError, IndexError, ValueError, AttributeError):
        raise ValueError('Invalid lambda_funcname_format {0}.  Please review '
                         'documentation for known substitutable keys'.format(self._lambda_funcname_format))
    return True
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather makes sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.  Raises ValueError on any violation.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # (removed a leftover debug statement `log.info(type(self._models))` --
    # it always logged the type of the bound method and carried no information)
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
    '''
    Return the swagger Info Object as a dictionary; raise ValueError when
    it is missing or empty.
    '''
    info_obj = self._cfg.get('info')
    if info_obj:
        return info_obj
    raise ValueError('Info Object has no values')
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
    '''
    Return the version field of the swagger Info Object; raise ValueError
    when it is missing or empty.
    '''
    ver = self.info.get('version')
    if ver:
        return ver
    raise ValueError('Missing version value in Info Object')
def _models(self):
    '''
    Return the 'definitions' mapping (model name -> schema) from the
    swagger file; raise ValueError when it is missing or empty.
    '''
    definitions = self._cfg.get('definitions')
    if definitions:
        return definitions
    raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
def models(self):
    '''
    Generator yielding (model_name, schema) tuples in dependency order, so
    that every model is yielded only after the models it references.
    '''
    remaining = self._build_all_dependencies()
    model = self._get_model_without_dependencies(remaining)
    while model:
        yield (model, self._models().get(model))
        model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
    '''
    Return an iterator of (path, path item) pairs from the swagger file.

    Raises ValueError when the Paths Object is missing/empty or when a
    relative path does not begin with '/'.
    '''
    path_objects = self._cfg.get('paths')
    if not path_objects:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    malformed = [p for p in path_objects if not p.startswith('/')]
    if malformed:
        raise ValueError('Path object {0} should start with /. Please fix it'.format(malformed[0]))
    return six.iteritems(path_objects)
@property
def basePath(self):
'''
returns the base path field as defined in the swagger file
'''
basePath = self._cfg.get('basePath', '')
return basePath
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Return True when at least one stage is still associated with the given
    deployment.
    '''
    response = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                               deploymentId=deploymentId,
                                                               **self._common_aws_args)
    return bool(response.get('stages'))
def no_more_deployments_remain(self):
    '''
    Return True when no deployment of this rest api still has a stage
    associated with it.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    for deployment in deployments or []:
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deployment.get('id'),
                                                                 **self._common_aws_args).get('stages')
        if stages:
            # found a deployment that is still in use
            return False
    return True
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently assocaited with.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
    '''
    Return the id of the deployment whose description matches this object's
    deployment label (derived from api_name and the swagger file), or the
    empty string when no deployment matches.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    for deployment in deployments or []:
        if deployment.get('description') == self.deployment_label_json:
            return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
'''
overwrite the given stage_name's stage variables with the given stage_variables
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
'''
Helper method to associate the stage_name to the given deploymentId and make this current
'''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if not stage:
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
description=stage_desc_json,
variables=stage_variables,
**self._common_aws_args)
if not stage.get('stage'):
return {'set': False, 'error': stage.get('error')}
else:
# overwrite the stage variables
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not overwrite.get('stage'):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
**self._common_aws_args)
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
def verify_api(self, ret):
'''
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
If yes, returns abort with comment indicating already at desired state.
If not and there is previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.
'''
if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if deployed_label_json == self.deployment_label_json:
ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
ret['current'] = True
return ret
else:
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
def publish_api(self, ret, stage_variables):
'''
this method tie the given stage_name to a deployment matching the given swagger_file
'''
stage_desc = dict()
stage_desc['current_deployment_label'] = self.deployment_label
stage_desc_json = _dict_to_json_pretty(stage_desc)
if self._deploymentId:
# just do a reassociate of stage_name to an already existing deployment
res = self._set_current_deployment(stage_desc_json, stage_variables)
if not res.get('set'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'publish_api (reassociate deployment, set stage_variables)',
res.get('response'))
else:
# no deployment existed for the given swagger_file for this Swagger object
res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
stageDescription=stage_desc_json,
description=self.deployment_label_json,
variables=stage_variables,
**self._common_aws_args)
if not res.get('created'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
return ret
def _cleanup_api(self):
'''
Helper method to clean up resources and models if we detected a change in the swagger file
for a stage
'''
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
path=resource.get('path'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
modelName=model.get('name'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
return {'deleted': True}
def deploy_api(self, ret):
'''
this method create the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
'''
Method to delete a Rest Api named defined in the swagger file's Info Object's title value.
ret
a dictionary for returning status to Saltstack
'''
exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if exists_response.get('exists'):
if __opts__['test']:
ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
ret['result'] = None
ret['abort'] = True
return ret
delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not delete_api_response.get('deleted'):
ret['result'] = False
ret['abort'] = True
if 'error' in delete_api_response:
ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
return ret
ret = _log_changes(ret, 'delete_api', delete_api_response)
else:
ret['comment'] = ('api already absent for swagger file: '
'{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
return ret
def _aws_model_ref_from_swagger_ref(self, r):
    '''
    Translate a swagger '$ref' value into the AWS API Gateway model URL for
    this rest api (the model name is the last path segment of the $ref).
    '''
    model_name = r.rsplit('/', 1)[-1]
    return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
    '''
    Return a copy of *schema* in which every '$ref' value has been rewritten
    into an AWS API Gateway model URL; nested dicts are converted recursively.
    '''
    converted = {}
    for key, value in schema.items():
        if key == '$ref':
            value = self._aws_model_ref_from_swagger_ref(value)
        if isinstance(value, dict):
            # recurse into nested schema objects
            value = self._update_schema_to_aws_notation(value)
        converted[key] = value
    return converted
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing.

    Returns a deduplicated list of model names reachable from *obj_schema*
    through $ref, array items, and nested properties.
    '''
    dep_models_list = []
    if obj_schema:
        # NOTE(review): this writes a default 'type' back into the caller's
        # schema dict (the parsed swagger config).  The mutation is later
        # visible to deploy_models, so removing it could change the schemas
        # uploaded to AWS -- flagged rather than changed.
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            # arrays depend on whatever their item schema depends on
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                # model name is the last path segment of the $ref; recurse
                # into the referenced model before recording the reference
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    # deduplicate (order is not preserved; callers only test membership/emptiness)
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Map every model name in the swagger definitions to the list of model
    names its schema references.
    '''
    return {model: self._build_dependent_model_list(schema)
            for model, schema in six.iteritems(self._models())}
def _get_model_without_dependencies(self, models_dict):
    '''
    Helper function to find the next model that should be created.

    Pops a model with an empty dependency list out of *models_dict* (which is
    mutated in place), removes it from the dependency lists of the remaining
    models, and returns its name.  Returns None when *models_dict* is empty;
    raises ValueError when every remaining model still has unmet dependencies
    (i.e. the definitions reference models that do not exist or form a cycle).
    '''
    next_model = None
    if not models_dict:
        return next_model
    # pick any model whose dependencies have all been resolved already
    for model, dependencies in six.iteritems(models_dict):
        if dependencies == []:
            next_model = model
            break
    if next_model is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))
    # remove the model from other dependencies before returning
    models_dict.pop(next_model)
    for model, dep_list in six.iteritems(models_dict):
        if next_model in dep_list:
            dep_list.remove(next_model)
    return next_model
def deploy_models(self, ret):
'''
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack
'''
for model, schema in self.models():
# add in a few attributes into the model schema that AWS expects
# _schema = schema.copy()
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
'title': '{0} Schema'.format(model)})
# check to see if model already exists, aws has 2 default models [Empty, Error]
# which may need upate with data from swagger file
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
modelName=model,
**self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = (
__salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
modelName=model,
schema=_dict_to_json_pretty(_schema),
**self._common_aws_args))
if not update_model_schema_response.get('updated'):
ret['result'] = False
ret['abort'] = True
if 'error' in update_model_schema_response:
ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
update_model_schema_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = (
__salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
modelDescription=model,
schema=_dict_to_json_pretty(_schema),
contentType='application/json',
**self._common_aws_args))
if not create_model_response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in create_model_response:
ret['comment'] = ('Failed to create model {0}, schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
create_model_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret
def _lambda_name(self, resourcePath, httpMethod):
    '''
    Build the lambda function name for a resource path/method pair using the
    rule documented in boto_apigateway.api_present: substitute stage, api,
    resource and method into the configured format string, strip braces,
    replace whitespace and '/' with underscores, lowercase, and collapse
    repeated underscores.
    '''
    raw_name = self._lambda_funcname_format.format(stage=self._stage_name,
                                                   api=self.rest_api_name,
                                                   resource=resourcePath,
                                                   method=httpMethod).strip()
    without_braces = re.sub(r'{|}', '', raw_name)
    underscored = re.sub(r'\s+|/', '_', without_braces).lower()
    return re.sub(r'_+', '_', underscored)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration.

    Resolves the lambda function's ARN and embeds it in the API Gateway
    integration URI for the apigateway region.  Raises ValueError when the
    function cannot be found.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')
    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            # NOTE(review): this call passes exactly the same arguments as the
            # first lookup above, so it cannot actually query a different
            # region -- confirm whether a region override was intended here.
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    # integration URI always uses the apigateway's region
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Build the request parameters, request models, request templates and
    integration type needed to configure a method's request
    integration/mappings.

    method_name
        lower-cased operation name (e.g. 'get', 'options')

    method_data
        the operation's value dictionary from the swagger file
    '''
    request_params = {}
    request_models = {}
    if 'parameters' in method_data:
        for param_def in method_data['parameters']:
            wrapped = _Swagger.SwaggerParameter(param_def)
            if wrapped.name:
                request_params[wrapped.name] = True
            if wrapped.schema:
                request_models['application/json'] = wrapped.schema
    # the OPTIONS method is a MOCK integration used for CORS preflight
    is_options = method_name == 'options'
    return {'params': request_params,
            'models': request_models,
            'request_templates': _Swagger.REQUEST_OPTION_TEMPLATE if is_options else _Swagger.REQUEST_TEMPLATE,
            'integration_type': "MOCK" if is_options else "AWS"}
def _find_patterns(self, o):
    '''
    Recursively collect the values of all 'pattern' keys in a nested dict.
    '''
    found = []
    if not isinstance(o, dict):
        return found
    for key, value in six.iteritems(o):
        if isinstance(value, dict):
            found.extend(self._find_patterns(value))
        elif key == 'pattern':
            found.append(value)
    return found
def _get_response_template(self, method_name, http_status):
    '''
    Pick the integration response template mapping for a method/status pair:
    the user-supplied template when one was configured, otherwise the
    built-in default; error statuses (4xx/5xx) on non-OPTIONS methods use
    the error variants.
    '''
    success_path = method_name == 'options' or not self._is_http_error_rescode(http_status)
    if success_path:
        if self._response_template:
            return {'application/json': self._response_template}
        return self.RESPONSE_OPTION_TEMPLATE
    if self._error_response_template:
        return {'application/json': self._error_response_template}
    return self.RESPONSE_TEMPLATE
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_name
        lower-cased operation name

    method_response
        a _Swagger.SwaggerMethodResponse wrapper for the response object

    httpStatus
        the response's HTTP status code as a string
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        # selection pattern comes from the schema (or a default by status class)
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # map each declared header to its default value, or '*' when none given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)
    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    # resolve request params/models/templates for this swagger operation
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    # create the method itself on the rest api
    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # 'options' methods use a MOCK integration and therefore no lambda uri
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    if 'responses' in method_data:
        # wire up a method response plus an integration response for every
        # status code declared under the swagger operation's 'responses'
        for response, response_data in six.iteritems(method_data['responses']):
            # keys may be parsed as ints from YAML; AWS expects a string code
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        # the swagger file validation requires responses; enforce it here too
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, path_item in self.paths:
        # create the resource hierarchy for this path on the rest api
        created = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                   path=path,
                                                                   **self._common_aws_args)
        if not created.get('created'):
            return _log_error_and_abort(ret, created)
        ret = _log_changes(ret, 'deploy_resources', created)

        # deploy every recognized swagger operation found under this path
        for op_name, op_data in six.iteritems(path_item):
            if op_name not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue
            ret = self._deploy_method(ret, path, op_name, op_data,
                                      api_key_required, lambda_integration_role,
                                      lambda_region, authorization_type)

    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._parse_method_response
|
python
|
def _parse_method_response(self, method_name, method_response, httpStatus):
'''
Helper function to construct the method response params, models, and integration_params
values needed to configure method response integration/mappings.
'''
method_response_models = {}
method_response_pattern = '.*'
if method_response.schema:
method_response_models['application/json'] = method_response.schema
method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
method_response_params = {}
method_integration_response_params = {}
for header in method_response.headers:
response_header = 'method.response.header.{0}'.format(header)
method_response_params[response_header] = False
header_data = method_response.headers.get(header)
method_integration_response_params[response_header] = (
"'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
response_templates = self._get_response_template(method_name, httpStatus)
return {'params': method_response_params,
'models': method_response_models,
'integration_params': method_integration_response_params,
'pattern': method_response_pattern,
'response_templates': response_templates}
|
Helper function to construct the method response params, models, and integration_params
values needed to configure method response integration/mappings.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1510-L1536
| null |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
'''
This is a helper class for the Swagger Parameter Object
'''
LOCATIONS = ('body', 'query', 'header', 'path')
def __init__(self, paramdict):
self._paramdict = paramdict
@property
def location(self):
'''
returns location in the swagger parameter object
'''
_location = self._paramdict.get('in')
if _location in _Swagger.SwaggerParameter.LOCATIONS:
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
@property
def name(self):
'''
returns parameter name in the swagger parameter object
'''
_name = self._paramdict.get('name')
if _name:
if self.location == 'header':
return 'method.request.header.{0}'.format(_name)
elif self.location == 'query':
return 'method.request.querystring.{0}'.format(_name)
elif self.location == 'path':
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger parameter object
'''
if self.location == 'body':
_schema = self._paramdict.get('schema')
if _schema:
if '$ref' in _schema:
schema_name = _schema.get('$ref').split('/')[-1]
return schema_name
raise ValueError(('Body parameter must have a JSON reference '
'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
class SwaggerMethodResponse(object):
'''
Helper class for Swagger Method Response Object
'''
def __init__(self, r):
self._r = r
@property
def schema(self):
'''
returns the name of the schema given the reference in the swagger method response object
'''
_schema = self._r.get('schema')
if _schema:
if '$ref' in _schema:
return _schema.get('$ref').split('/')[-1]
raise ValueError(('Method response must have a JSON reference '
'to the schema definition: {0}'.format(_schema)))
return None
@property
def headers(self):
'''
returns the headers dictionary in the method response object
'''
_headers = self._r.get('headers', {})
return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
swagger_file_path, error_response_template, response_template, common_aws_args):
self._api_name = api_name
self._stage_name = stage_name
self._lambda_funcname_format = lambda_funcname_format
self._common_aws_args = common_aws_args
self._restApiId = ''
self._deploymentId = ''
self._error_response_template = error_response_template
self._response_template = response_template
if swagger_file_path is not None:
if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
self._swagger_file = swagger_file_path
self._md5_filehash = _gen_md5_filehash(self._swagger_file,
error_response_template,
response_template)
with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
self._cfg = salt.utils.yaml.safe_load(sf)
self._swagger_version = ''
else:
raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
self._validate_swagger_file()
self._validate_lambda_funcname_format()
self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
'''
Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration
'''
for path, ops in paths:
for opname, opobj in six.iteritems(ops):
if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
continue
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
if 'schema' not in resobj:
raise ValueError('missing schema field in path {0}, '
'op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if '$ref' not in schemaobj:
raise ValueError('missing $ref field under schema in '
'path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[-1]
if modelname not in mods:
raise ValueError('model schema {0} reference not found '
'under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if model.get('type') != 'object':
raise ValueError('model schema {0} must be type object'.format(modelname))
if 'properties' not in model:
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if 'errorMessage' not in modelprops:
raise ValueError('model schema {0} must have errorMessage as a property to '
'match AWS convention. If pattern is not set, .+ will '
'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
'''
Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error
'''
try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='',
api='',
resource='',
method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review '
'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
'''
High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check, but rather make sure that the input file (YAML or
JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
and info.
'''
# check for any invalid fields for Swagger Object V2
for field in self._cfg:
if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
not _Swagger.VENDOR_EXT_PATTERN.match(field)):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
# check for Required Swagger fields by Saltstack boto apigateway state
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if field not in self._cfg:
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
# check for Swagger Version
self._swagger_version = self._cfg.get('swagger')
if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
raise ValueError('Unsupported Swagger version: {0},'
'Supported versions are {1}'.format(self._swagger_version,
_Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
'''
returns md5 hash for the swagger file
'''
return self._md5_filehash
@property
def info(self):
'''
returns the swagger info object as a dictionary
'''
info = self._cfg.get('info')
if not info:
raise ValueError('Info Object has no values')
return info
@property
def info_json(self):
'''
returns the swagger info object as a pretty printed json string.
'''
return _dict_to_json_pretty(self.info)
@property
def rest_api_name(self):
'''
returns the name of the api
'''
return self._api_name
@property
def rest_api_version(self):
'''
returns the version field in the swagger info object
'''
version = self.info.get('version')
if not version:
raise ValueError('Missing version value in Info Object')
return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
yield (model, self._models().get(model))
@property
def paths(self):
'''
returns an iterator for the relative resource paths specified in the swagger file
'''
paths = self._cfg.get('paths')
if not paths:
raise ValueError('Paths Object has no values, You need to define them in your swagger file')
for path in paths:
if not path.startswith('/'):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
@property
def basePath(self):
'''
returns the base path field as defined in the swagger file
'''
basePath = self._cfg.get('basePath', '')
return basePath
@property
def restApiId(self):
'''
returns the rest api id as returned by AWS on creation of the rest api
'''
return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
'''
allows the assignment of the rest api id on creation of the rest api
'''
self._restApiId = restApiId
@property
def deployment_label_json(self):
'''
this property returns the unique description in pretty printed json for
a particular api deployment
'''
return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
'''
this property returns the deployment label dictionary (mainly used by
stage description)
'''
label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
'''
Helper function to find whether there are other stages still associated with a deployment
'''
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
return bool(stages)
def no_more_deployments_remain(self):
'''
Helper function to find whether there are deployments left with stages associated
'''
no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('stages')
if stages:
no_more_deployments = False
break
return no_more_deployments
def _get_current_deployment_id(self):
'''
Helper method to find the deployment id that the stage name is currently assocaited with.
'''
deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
def _get_desired_deployment_id(self):
'''
Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file
'''
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
**self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if deployment.get('description') == self.deployment_label_json:
return deployment.get('id')
return ''
def overwrite_stage_variables(self, ret, stage_variables):
'''
overwrite the given stage_name's stage variables with the given stage_variables
'''
res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not res.get('overwrite'):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'overwrite_stage_variables',
res.get('stage'))
return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
'''
Helper method to associate the stage_name to the given deploymentId and make this current
'''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args).get('stage')
if not stage:
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
description=stage_desc_json,
variables=stage_variables,
**self._common_aws_args)
if not stage.get('stage'):
return {'set': False, 'error': stage.get('error')}
else:
# overwrite the stage variables
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
stageName=self._stage_name,
variables=stage_variables,
**self._common_aws_args)
if not overwrite.get('stage'):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
deploymentId=self._deploymentId,
**self._common_aws_args)
def _resolve_api_id(self):
'''
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
'''
apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args).get('restapi')
if apis:
if len(apis) == 1:
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and '
'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
'''
Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well
'''
deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
stageName=self._stage_name,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
else:
# check if it is safe to delete the deployment as well.
if not self._one_or_more_stages_remain(deploymentId):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args)
if not result.get('deleted'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
# no matching stage_name/deployment found
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
def verify_api(self, ret):
'''
this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
If yes, returns abort with comment indicating already at desired state.
If not and there is previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.
'''
if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if deployed_label_json == self.deployment_label_json:
ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
ret['current'] = True
return ret
else:
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
def publish_api(self, ret, stage_variables):
'''
this method tie the given stage_name to a deployment matching the given swagger_file
'''
stage_desc = dict()
stage_desc['current_deployment_label'] = self.deployment_label
stage_desc_json = _dict_to_json_pretty(stage_desc)
if self._deploymentId:
# just do a reassociate of stage_name to an already existing deployment
res = self._set_current_deployment(stage_desc_json, stage_variables)
if not res.get('set'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret,
'publish_api (reassociate deployment, set stage_variables)',
res.get('response'))
else:
# no deployment existed for the given swagger_file for this Swagger object
res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
stageName=self._stage_name,
stageDescription=stage_desc_json,
description=self.deployment_label_json,
variables=stage_variables,
**self._common_aws_args)
if not res.get('created'):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
return ret
def _cleanup_api(self):
'''
Helper method to clean up resources and models if we detected a change in the swagger file
for a stage
'''
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
path=resource.get('path'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
modelName=model.get('name'),
**self._common_aws_args)
if not delres.get('deleted'):
return delres
return {'deleted': True}
def deploy_api(self, ret):
'''
this method create the top level rest api in AWS apigateway
'''
if self.restApiId:
res = self._cleanup_api()
if not res.get('deleted'):
ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in response:
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
'''
Method to delete a Rest Api named defined in the swagger file's Info Object's title value.
ret
a dictionary for returning status to Saltstack
'''
exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if exists_response.get('exists'):
if __opts__['test']:
ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
ret['result'] = None
ret['abort'] = True
return ret
delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
description=_Swagger.AWS_API_DESCRIPTION,
**self._common_aws_args)
if not delete_api_response.get('deleted'):
ret['result'] = False
ret['abort'] = True
if 'error' in delete_api_response:
ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
return ret
ret = _log_changes(ret, 'delete_api', delete_api_response)
else:
ret['comment'] = ('api already absent for swagger file: '
'{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
'''
Helper function to map model schema to aws notation
'''
result = {}
for k, v in schema.items():
if k == '$ref':
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
def _build_dependent_model_list(self, obj_schema):
'''
Helper function to build the list of models the given object schema is referencing.
'''
dep_models_list = []
if obj_schema:
obj_schema['type'] = obj_schema.get('type', 'object')
if obj_schema['type'] == 'array':
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
ref_obj_model = ref.split("/")[-1]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
# need to walk each property object
properties = obj_schema.get('properties')
if properties:
for _, prop_obj_schema in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
def _build_all_dependencies(self):
'''
Helper function to build a map of model to their list of model reference dependencies
'''
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    Models are created (or updated, if they already exist on the rest api)
    in dependency order as yielded by self.models().  On the first failed
    AWS call, ret is marked failed/aborted and returned immediately.

    ret
        a dictionary for returning status to Saltstack
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        # _schema = schema.copy()
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})
        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)
        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))
            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', create_model_response)
    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration

    Looks up the lambda function's ARN and returns the apigateway
    invocation path URI for it.  Raises ValueError when the function
    cannot be described.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')
    # Resolve the effective regions for the lambda function and for the
    # api gateway from the given/derived settings.
    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            # NOTE(review): this retry is an identical call to the one above
            # (no argument changes region), so it can only succeed if the
            # first call failed transiently -- confirm the original intent.
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    # Fixed 2015-03-31 lambda service API version in the invocation path.
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
'''
Helper function to construct the method request params, models, request_templates and
integration_type values needed to configure method request integration/mappings.
'''
method_params = {}
method_models = {}
if 'parameters' in method_data:
for param in method_data['parameters']:
p = _Swagger.SwaggerParameter(param)
if p.name:
method_params[p.name] = True
if p.schema:
method_models['application/json'] = p.schema
request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
integration_type = "MOCK" if method_name == 'options' else "AWS"
return {'params': method_params,
'models': method_models,
'request_templates': request_templates,
'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
'''
returns the pattern specified in a response schema
'''
defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    method = self._parse_method_data(method_name.lower(), method_data)
    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'
    # Step 1: create the method on the resource.
    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_method', m)
    # Step 2: wire the request integration (MOCK for options, lambda
    # otherwise; options needs no lambda uri).
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)
    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
    # Step 3: one method response + integration response per declared
    # http status code.
    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)
            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
    return ret
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, pathData in self.paths:
        # Create the full resource tree for this path on the rest api.
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            # Skip non-operation keys of the path item object
            # (e.g. vendor extensions).
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger._deploy_method
|
python
|
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
lambda_integration_role, lambda_region, authorization_type):
'''
Method to create a method for the given resource path, along with its associated
request and response integrations.
ret
a dictionary for returning status to Saltstack
resource_path
the full resource path where the named method_name will be associated with.
method_name
a string that is one of the following values: 'delete', 'get', 'head', 'options',
'patch', 'post', 'put'
method_data
the value dictionary for this method in the swagger definition file.
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
'''
method = self._parse_method_data(method_name.lower(), method_data)
# for options method to enable CORS, api_key_required will be set to False always.
# authorization_type will be set to 'NONE' always.
if method_name.lower() == 'options':
api_key_required = False
authorization_type = 'NONE'
m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
authorizationType=authorization_type,
apiKeyRequired=api_key_required,
requestParameters=method.get('params'),
requestModels=method.get('models'),
**self._common_aws_args)
if not m.get('created'):
ret = _log_error_and_abort(ret, m)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method', m)
lambda_uri = ""
if method_name.lower() != 'options':
lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
lambda_region=lambda_region)
# NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
# about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
integration = (
__salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
integrationType=method.get('integration_type'),
integrationHttpMethod='POST',
uri=lambda_uri,
credentials=lambda_integration_role,
requestTemplates=method.get('request_templates'),
**self._common_aws_args))
if not integration.get('created'):
ret = _log_error_and_abort(ret, integration)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
if 'responses' in method_data:
for response, response_data in six.iteritems(method_data['responses']):
httpStatus = str(response) # future lint: disable=blacklisted-function
method_response = self._parse_method_response(method_name.lower(),
_Swagger.SwaggerMethodResponse(response_data), httpStatus)
mr = __salt__['boto_apigateway.create_api_method_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
responseParameters=method_response.get('params'),
responseModels=method_response.get('models'),
**self._common_aws_args)
if not mr.get('created'):
ret = _log_error_and_abort(ret, mr)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
mir = __salt__['boto_apigateway.create_api_integration_response'](
restApiId=self.restApiId,
resourcePath=resource_path,
httpMethod=method_name.upper(),
statusCode=httpStatus,
selectionPattern=method_response.get('pattern'),
responseParameters=method_response.get('integration_params'),
responseTemplates=method_response.get('response_templates'),
**self._common_aws_args)
if not mir.get('created'):
ret = _log_error_and_abort(ret, mir)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
else:
raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
return ret
|
Method to create a method for the given resource path, along with its associated
request and response integrations.
ret
a dictionary for returning status to Saltstack
resource_path
the full resource path where the named method_name will be associated with.
method_name
a string that is one of the following values: 'delete', 'get', 'head', 'options',
'patch', 'post', 'put'
method_data
the value dictionary for this method in the swagger definition file.
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1538-L1650
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def _log_changes(ret, changekey, changevalue):\n '''\n For logging create/update/delete operations to AWS ApiGateway\n '''\n cl = ret['changes'].get('new', [])\n cl.append({changekey: _object_reducer(changevalue)})\n ret['changes']['new'] = cl\n return ret\n",
"def _log_error_and_abort(ret, obj):\n '''\n helper function to update errors in the return structure\n '''\n ret['result'] = False\n ret['abort'] = True\n if 'error' in obj:\n ret['comment'] = '{0}'.format(obj.get('error'))\n return ret\n",
"def _lambda_name(self, resourcePath, httpMethod):\n '''\n Helper method to construct lambda name based on the rule specified in doc string of\n boto_apigateway.api_present function\n '''\n lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,\n api=self.rest_api_name,\n resource=resourcePath,\n method=httpMethod)\n lambda_name = lambda_name.strip()\n lambda_name = re.sub(r'{|}', '', lambda_name)\n lambda_name = re.sub(r'\\s+|/', '_', lambda_name).lower()\n return re.sub(r'_+', '_', lambda_name)\n",
"def _lambda_uri(self, lambda_name, lambda_region):\n '''\n Helper Method to construct the lambda uri for use in method integration\n '''\n profile = self._common_aws_args.get('profile')\n region = self._common_aws_args.get('region')\n\n lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)\n apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)\n\n lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)\n\n if lambda_region != apigw_region:\n if not lambda_desc.get('function'):\n # try look up in the same region as the apigateway as well if previous lookup failed\n lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)\n\n if not lambda_desc.get('function'):\n raise ValueError('Could not find lambda function {0} in '\n 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))\n\n lambda_arn = lambda_desc.get('function').get('FunctionArn')\n lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'\n '/functions/{1}/invocations'.format(apigw_region, lambda_arn))\n return lambda_uri\n",
"def _parse_method_data(self, method_name, method_data):\n '''\n Helper function to construct the method request params, models, request_templates and\n integration_type values needed to configure method request integration/mappings.\n '''\n method_params = {}\n method_models = {}\n if 'parameters' in method_data:\n for param in method_data['parameters']:\n p = _Swagger.SwaggerParameter(param)\n if p.name:\n method_params[p.name] = True\n if p.schema:\n method_models['application/json'] = p.schema\n\n request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE\n integration_type = \"MOCK\" if method_name == 'options' else \"AWS\"\n\n return {'params': method_params,\n 'models': method_models,\n 'request_templates': request_templates,\n 'integration_type': integration_type}\n",
"def _parse_method_response(self, method_name, method_response, httpStatus):\n '''\n Helper function to construct the method response params, models, and integration_params\n values needed to configure method response integration/mappings.\n '''\n method_response_models = {}\n method_response_pattern = '.*'\n if method_response.schema:\n method_response_models['application/json'] = method_response.schema\n method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)\n\n method_response_params = {}\n method_integration_response_params = {}\n for header in method_response.headers:\n response_header = 'method.response.header.{0}'.format(header)\n method_response_params[response_header] = False\n header_data = method_response.headers.get(header)\n method_integration_response_params[response_header] = (\n \"'{0}'\".format(header_data.get('default')) if 'default' in header_data else \"'*'\")\n\n response_templates = self._get_response_template(method_name, httpStatus)\n\n return {'params': method_response_params,\n 'models': method_response_models,\n 'integration_params': method_integration_response_params,\n 'pattern': method_response_pattern,\n 'response_templates': response_templates}\n"
] |
class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.
    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''
    # All field names a top-level Swagger 2.0 object may legally carry.
    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES (http operations recognized in path items)
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    # Swagger spec versions this state knows how to process.
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
    # VENDOR SPECIFIC FIELD PATTERNS (swagger "x-..." extension fields)
    VENDOR_EXT_PATTERN = re.compile('^x-')
    # JSON_SCHEMA_REF injected into every model schema sent to AWS.
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    This is a helper class for the Swagger Parameter Object

    Wraps one entry of a swagger operation's "parameters" list and maps it
    onto the naming conventions AWS API Gateway expects.
    '''
    # Parameter locations ("in" values) this state supports.
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # paramdict: the raw swagger Parameter Object dictionary
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object

        Raises ValueError for unsupported locations (e.g. formData).
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object

        Header/query/path parameters are translated to the
        'method.request.<location>.<name>' form used by AWS; body
        parameters have no mapped name (None is returned).  Raises
        ValueError when the parameter has no name at all.
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger parameter object

        Only meaningful for body parameters; AWS requires the schema to be
        given as a '$ref' into /definitions, otherwise ValueError is raised.
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Helper class for Swagger Method Response Object

    Wraps the response object declared for one http status code of a
    swagger operation.
    '''

    def __init__(self, r):
        # r: the raw swagger Response Object dictionary
        self._r = r

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger method response object

        None when no schema is declared; raises ValueError when a schema
        is present but is not a '$ref' into /definitions.
        '''
        _schema = self._r.get('schema')
        if _schema:
            if '$ref' in _schema:
                return _schema.get('$ref').split('/')[-1]
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(_schema)))
        return None

    @property
    def headers(self):
        '''
        returns the headers dictionary in the method response object

        Empty dict when the response declares no headers.
        '''
        _headers = self._r.get('headers', {})
        return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load and validate a swagger file and prepare the helper state used by
    the deploy_* methods.

    api_name / stage_name
        rest api name and stage this object will deploy to
    lambda_funcname_format
        format string used by _lambda_name to derive lambda function names
    swagger_file_path
        path to the swagger (YAML or JSON) definition file
    error_response_template / response_template
        optional integration response mapping templates (see
        _get_response_template); both participate in the file hash
    common_aws_args
        region/key/keyid/profile kwargs passed to every boto call

    Raises IOError for an invalid file path; validation helpers may raise
    ValueError for malformed swagger content.
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template
    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # Hash covers the file content plus both templates so stage
            # labels change whenever any of them change.
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            # Actual version is filled in by _validate_swagger_file.
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))
        # Validation and api id resolution need the parsed swagger content.
        self._validate_swagger_file()
        self._validate_lambda_funcname_format()
        self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration

    paths
        iterator of (path, path_item_object) tuples (see the paths property)
    mods
        the swagger 'definitions' dictionary (model name -> schema)

    Every 4xx/5xx response of every operation must reference (via '$ref')
    a model of type 'object' whose properties include 'errorMessage',
    matching the AWS lambda error convention; ValueError is raised on the
    first violation found.
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # Skip non-operation keys of the path item object.
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue
            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue
                # only check for response code from 400-599
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))
                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]
                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)
                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))
                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
    '''
    Checks if the lambda function name format contains only known elements
    :return: True on success, ValueError raised on error
    '''
    try:
        if self._lambda_funcname_format:
            # Dry-run the format with every substitutable key; an unknown
            # placeholder raises (KeyError/IndexError) and is converted to
            # ValueError below.
            known_kwargs = dict(stage='',
                                api='',
                                resource='',
                                method='')
            self._lambda_funcname_format.format(**known_kwargs)
        return True
    except Exception:
        raise ValueError('Invalid lambda_funcname_format {0}. Please review '
                         'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check, but rather make sure that the input file (YAML or
    JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version
    and info.

    Raises ValueError on the first violation found.
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))
    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))
    # (removed leftover debug statement that logged type(self._models),
    # the type of a bound method -- it carried no useful information)
    # ensure every 4xx/5xx response schema follows the AWS error model
    # convention before anything is pushed to AWS
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file

    Computed in __init__ over the file content plus the error/response
    templates (via _gen_md5_filehash); used to label deployments for
    change detection.
    '''
    return self._md5_filehash

@property
def info(self):
    '''
    returns the swagger info object as a dictionary

    Raises ValueError when the swagger file carries no info object.
    '''
    info = self._cfg.get('info')
    if not info:
        raise ValueError('Info Object has no values')
    return info

@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)

@property
def rest_api_name(self):
    '''
    returns the name of the api
    '''
    return self._api_name

@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object

    Raises ValueError when the info object has no version value.
    '''
    version = self.info.get('version')
    if not version:
        raise ValueError('Missing version value in Info Object')
    return version
def _models(self):
'''
returns an iterator for the models specified in the swagger file
'''
models = self._cfg.get('definitions')
if not models:
raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
return models
def models(self):
    '''
    Generator yielding ``(model_name, schema)`` tuples in dependency order,
    so every model is emitted only after all models it references.
    '''
    remaining = self._build_all_dependencies()
    next_model = self._get_model_without_dependencies(remaining)
    while next_model:
        yield (next_model, self._models().get(next_model))
        next_model = self._get_model_without_dependencies(remaining)
@property
def paths(self):
    '''
    Return an iterator over ``(path, path_item)`` pairs from the swagger
    ``paths`` section.

    Raises
    ------
    ValueError
        when the section is missing/empty, or a path does not begin with '/'.
    '''
    path_objects = self._cfg.get('paths')
    if not path_objects:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    invalid = [p for p in path_objects if not p.startswith('/')]
    if invalid:
        raise ValueError('Path object {0} should start with /. Please fix it'.format(invalid[0]))
    return six.iteritems(path_objects)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file, or the empty
    string when the field is absent.
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath
@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    (None/empty until the api has been created or resolved).
    '''
    return self._restApiId
@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId
@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    # Serialized form of deployment_label; used as the AWS deployment description.
    return _dict_to_json_pretty(self.deployment_label)
@property
def deployment_label(self):
    '''
    Return the deployment label dictionary (mainly used by the stage
    description): the swagger info object, the api name, the swagger file
    basename and its md5 checksum.
    '''
    return {
        'swagger_info_object': self.info,
        'api_name': self.rest_api_name,
        'swagger_file': os.path.basename(self._swagger_file),
        'swagger_file_md5sum': self.md5_filehash,
    }
# methods to interact with boto_apigateway execution modules
def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated
    with a deployment.

    Returns True when at least one stage references ``deploymentId``.
    '''
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                            deploymentId=deploymentId,
                                                            **self._common_aws_args).get('stages')
    return bool(stages)
def no_more_deployments_remain(self):
    '''
    Helper function to find whether there are deployments left with stages
    associated.  Returns True when no deployment of this rest api still has
    any stage attached to it.
    '''
    no_more_deployments = True
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            deploymentId = deployment.get('id')
            # NOTE(review): same lookup as _one_or_more_stages_remain; could be reused.
            stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                    deploymentId=deploymentId,
                                                                    **self._common_aws_args).get('stages')
            if stages:
                no_more_deployments = False
                break
    return no_more_deployments
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently
    associated with.  Returns the empty string when the stage does not exist.
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                          stageName=self._stage_name,
                                                          **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId
def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label (the deployment's description
    string) that the stage_name is currently associated with, or None when no
    matching deployment exists.
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                    deploymentId=deploymentId,
                                                                    **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None
def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id matching the desired deployment
    label for this Swagger object based on the given api_name, swagger_file.
    Returns the empty string when no deployment description matches
    ``deployment_label_json``.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                      **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            if deployment.get('description') == self.deployment_label_json:
                return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    Overwrite the stage variables of this object's stage_name with the given
    ``stage_variables``.

    On failure, marks ``ret`` as aborted (result False) with the error;
    on success, records the change in ``ret``.  Returns the updated ``ret``.
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                   stageName=self._stage_name,
                                                                   variables=stage_variables,
                                                                   **self._common_aws_args)
    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret
def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and
    make this the current deployment.

    Creates the stage if it does not exist; otherwise overwrites its stage
    variables, then activates ``self._deploymentId`` on the stage.

    Returns the activate-deployment result dict on success, or
    ``{'set': False, 'error': ...}`` on failure.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                          stageName=self._stage_name,
                                                          **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                             stageName=self._stage_name,
                                                                             variables=stage_variables,
                                                                             **self._common_aws_args)
        # NOTE(review): success is checked via the 'stage' key here, while
        # overwrite_stage_variables() checks 'overwrite' — confirm the
        # execution module's return shape covers both.
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}
    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Sets ``self.restApiId`` to the Api Id that matches the given api_name and
    the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description.

    Raises ValueError when more than one matching API exists; leaves
    restApiId untouched when none is found.
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to
    the given stage_name has no other stages associated with it, the
    deployment will be removed as well.

    Returns the updated ``ret`` dict (result/abort/comment set on failure).
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
            else:
                ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state
    (``ret['current'] = True``).
    If not and there are previous deployment labels in AWS matching the given
    input api_name and swagger file, indicate to the caller
    (``ret['publish'] = True``) that we only need to reassociate stage_name to
    the previously existing deployment label.
    When ``self.restApiId`` is not set, ``ret`` is returned unchanged.
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                ret['publish'] = True
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method ties the given stage_name to a deployment matching the given
    swagger_file.  When ``self._deploymentId`` is already resolved (see
    verify_api) the stage is only reassociated; otherwise a new deployment is
    created.  Returns the updated ``ret``.
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)
    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in
    the swagger file for a stage.

    Returns ``{'deleted': True}`` on success, or the first failing delete
    result otherwise.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # Skip the first entry (presumably the root '/' resource — confirm
        # ordering from describe_api_resources) and delete deepest-last-first.
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres
    return {'deleted': True}
def deploy_api(self, ret):
    '''
    this method creates the top level rest api in AWS apigateway.

    If a rest api id is already resolved, the existing resources/models are
    cleaned up instead of creating a new api.  Returns the updated ``ret``.
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret
    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)
    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret
    self.restApiId = response.get('restapi', {}).get('id')
    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named as defined in the swagger file's Info
    Object's title value.

    ret
        a dictionary for returning status to Saltstack

    Honors test mode (``__opts__['test']``) by only reporting what would be
    deleted.  Returns the updated ``ret``.
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret
        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret
        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
    return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
    '''
    Helper function to map a model schema to aws notation: every ``$ref``
    value is rewritten to the corresponding apigateway model URL, recursing
    into nested dict values.

    NOTE(review): values that are lists (e.g. ``allOf`` entries) are not
    walked, so a ``$ref`` nested inside a list would be left untouched —
    confirm whether such schemas are expected here.
    '''
    result = {}
    for k, v in schema.items():
        if k == '$ref':
            v = self._aws_model_ref_from_swagger_ref(v)
        if isinstance(v, dict):
            v = self._update_schema_to_aws_notation(v)
        result[k] = v
    return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is
    referencing (transitively, via ``$ref``, array items, and properties).

    Returns a de-duplicated list (order is not preserved due to the set()).
    '''
    dep_models_list = []
    if obj_schema:
        # default missing 'type' to 'object' (swagger allows omitting it)
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Helper function to build a map of model name to its list of model
    reference dependencies, for every model in the swagger definitions.
    '''
    ret = {}
    for model, schema in six.iteritems(self._models()):
        dep_list = self._build_dependent_model_list(schema)
        ret[model] = dep_list
    return ret
def _get_model_without_dependencies(self, models_dict):
    '''
    Helper function to find the next model that should be created: one whose
    dependency list is empty.  The chosen model is removed from
    ``models_dict`` and from every other model's dependency list (mutates the
    argument).  Returns None when ``models_dict`` is empty.

    Raises ValueError when no model has an empty dependency list (i.e. the
    definitions reference models that are never defined, or are cyclic).
    '''
    next_model = None
    if not models_dict:
        return next_model
    for model, dependencies in six.iteritems(models_dict):
        if dependencies == []:
            next_model = model
            break
    if next_model is None:
        raise ValueError('incomplete model definitions, models in dependency '
                         'list not defined: {0}'.format(models_dict))
    # remove the model from other dependencies before returning
    models_dict.pop(next_model)
    for model, dep_list in six.iteritems(models_dict):
        if next_model in dep_list:
            dep_list.remove(next_model)
    return next_model
def deploy_models(self, ret):
    '''
    Method to deploy the swagger file's definition objects and associated
    schemas to AWS Apigateway as Models, in dependency order.

    ret
        a dictionary for returning status to Saltstack

    Existing models are updated in place; missing ones are created.  Returns
    the updated ``ret``.
    '''
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})
        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)
        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))
            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret
            ret = _log_changes(ret, 'deploy_models', create_model_response)
    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration.

    Raises ValueError when the lambda function cannot be found in either the
    lambda region or the apigateway region.
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')
    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            # NOTE(review): this fallback call is byte-identical to the first
            # (same kwargs, no region override), so it cannot actually target
            # a different region — confirm intent.
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models,
    request_templates and integration_type values needed to configure method
    request integration/mappings.

    ``options`` methods get the MOCK integration and the CORS option request
    template; all others use the AWS (lambda) integration.
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                method_params[p.name] = True
            if p.schema:
                method_models['application/json'] = p.schema
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"
    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
    '''
    Recursively collect all values keyed by 'pattern' inside a nested dict.

    NOTE(review): only dict values are walked — a 'pattern' nested inside a
    list would be missed; confirm whether schemas here can contain lists.
    '''
    result = []
    if isinstance(o, dict):
        for k, v in six.iteritems(o):
            if isinstance(v, dict):
                result.extend(self._find_patterns(v))
            else:
                if k == 'pattern':
                    result.append(v)
    return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    returns the pattern specified in a response schema (the first 'pattern'
    found), or a default: '.+' for HTTP error status codes, '.*' otherwise.
    '''
    defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    model = self._models().get(schema_name)
    patterns = self._find_patterns(model)
    return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
    '''
    Return the integration response templates for the given method and HTTP
    status: user-supplied templates when configured, otherwise the class
    defaults (error template for error status codes on non-options methods).
    '''
    if method_name == 'options' or not self._is_http_error_rescode(http_status):
        response_templates = {'application/json': self._response_template} \
            if self._response_template else self.RESPONSE_OPTION_TEMPLATE
    else:
        response_templates = {'application/json': self._error_response_template} \
            if self._error_response_template else self.RESPONSE_TEMPLATE
    return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and
    integration_params values needed to configure method response
    integration/mappings.

    Response headers are mapped to their 'default' value when declared,
    otherwise to the literal '*'.
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")
    response_templates = self._get_response_template(method_name, httpStatus)
    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to deploy resources defined in the swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'

    For each swagger path, the resource is created and then each recognized
    HTTP operation on it is deployed via _deploy_method.  Aborts on the first
    failure and returns the updated ``ret``.
    '''
    for path, pathData in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                    path=path,
                                                                    **self._common_aws_args)
        if not resource.get('created'):
            ret = _log_error_and_abort(ret, resource)
            return ret
        ret = _log_changes(ret, 'deploy_resources', resource)
        for method, method_data in six.iteritems(pathData):
            if method in _Swagger.SWAGGER_OPERATION_NAMES:
                ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                          lambda_integration_role, lambda_region, authorization_type)
    return ret
|
saltstack/salt
|
salt/states/boto_apigateway.py
|
_Swagger.deploy_resources
|
python
|
def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
'''
Method to deploy resources defined in the swagger file.
ret
a dictionary for returning status to Saltstack
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
'''
for path, pathData in self.paths:
resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
path=path,
**self._common_aws_args)
if not resource.get('created'):
ret = _log_error_and_abort(ret, resource)
return ret
ret = _log_changes(ret, 'deploy_resources', resource)
for method, method_data in six.iteritems(pathData):
if method in _Swagger.SWAGGER_OPERATION_NAMES:
ret = self._deploy_method(ret, path, method, method_data, api_key_required,
lambda_integration_role, lambda_region, authorization_type)
return ret
|
Method to deploy resources defined in the swagger file.
ret
a dictionary for returning status to Saltstack
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
'NONE' or 'AWS_IAM'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1652-L1685
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def _log_changes(ret, changekey, changevalue):\n '''\n For logging create/update/delete operations to AWS ApiGateway\n '''\n cl = ret['changes'].get('new', [])\n cl.append({changekey: _object_reducer(changevalue)})\n ret['changes']['new'] = cl\n return ret\n",
"def _log_error_and_abort(ret, obj):\n '''\n helper function to update errors in the return structure\n '''\n ret['result'] = False\n ret['abort'] = True\n if 'error' in obj:\n ret['comment'] = '{0}'.format(obj.get('error'))\n return ret\n",
"def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,\n lambda_integration_role, lambda_region, authorization_type):\n '''\n Method to create a method for the given resource path, along with its associated\n request and response integrations.\n\n ret\n a dictionary for returning status to Saltstack\n\n resource_path\n the full resource path where the named method_name will be associated with.\n\n method_name\n a string that is one of the following values: 'delete', 'get', 'head', 'options',\n 'patch', 'post', 'put'\n\n method_data\n the value dictionary for this method in the swagger definition file.\n\n api_key_required\n True or False, whether api key is required to access this method.\n\n lambda_integration_role\n name of the IAM role or IAM role arn that Api Gateway will assume when executing\n the associated lambda function\n\n lambda_region\n the region for the lambda function that Api Gateway will integrate to.\n\n authorization_type\n 'NONE' or 'AWS_IAM'\n\n '''\n method = self._parse_method_data(method_name.lower(), method_data)\n\n # for options method to enable CORS, api_key_required will be set to False always.\n # authorization_type will be set to 'NONE' always.\n if method_name.lower() == 'options':\n api_key_required = False\n authorization_type = 'NONE'\n\n m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,\n resourcePath=resource_path,\n httpMethod=method_name.upper(),\n authorizationType=authorization_type,\n apiKeyRequired=api_key_required,\n requestParameters=method.get('params'),\n requestModels=method.get('models'),\n **self._common_aws_args)\n if not m.get('created'):\n ret = _log_error_and_abort(ret, m)\n return ret\n\n ret = _log_changes(ret, '_deploy_method.create_api_method', m)\n\n lambda_uri = \"\"\n if method_name.lower() != 'options':\n lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),\n lambda_region=lambda_region)\n\n # NOTE: integration method 
is set to POST always, as otherwise AWS makes wrong assumptions\n # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context\n integration = (\n __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,\n resourcePath=resource_path,\n httpMethod=method_name.upper(),\n integrationType=method.get('integration_type'),\n integrationHttpMethod='POST',\n uri=lambda_uri,\n credentials=lambda_integration_role,\n requestTemplates=method.get('request_templates'),\n **self._common_aws_args))\n if not integration.get('created'):\n ret = _log_error_and_abort(ret, integration)\n return ret\n ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)\n\n if 'responses' in method_data:\n for response, response_data in six.iteritems(method_data['responses']):\n httpStatus = str(response) # future lint: disable=blacklisted-function\n method_response = self._parse_method_response(method_name.lower(),\n _Swagger.SwaggerMethodResponse(response_data), httpStatus)\n\n mr = __salt__['boto_apigateway.create_api_method_response'](\n restApiId=self.restApiId,\n resourcePath=resource_path,\n httpMethod=method_name.upper(),\n statusCode=httpStatus,\n responseParameters=method_response.get('params'),\n responseModels=method_response.get('models'),\n **self._common_aws_args)\n if not mr.get('created'):\n ret = _log_error_and_abort(ret, mr)\n return ret\n ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)\n\n mir = __salt__['boto_apigateway.create_api_integration_response'](\n restApiId=self.restApiId,\n resourcePath=resource_path,\n httpMethod=method_name.upper(),\n statusCode=httpStatus,\n selectionPattern=method_response.get('pattern'),\n responseParameters=method_response.get('integration_params'),\n responseTemplates=method_response.get('response_templates'),\n **self._common_aws_args)\n if not mir.get('created'):\n ret = _log_error_and_abort(ret, mir)\n return ret\n ret = 
_log_changes(ret, '_deploy_method.create_api_integration_response', mir)\n else:\n raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))\n\n return ret\n"
] |
class _Swagger(object):
'''
this is a helper class that holds the swagger definition file and the associated logic
related to how to interpret the file and apply it to AWS Api Gateway.
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
methods.
'''
SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
'security', 'tags', 'externalDocs')
# SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
# SWAGGER OPERATION NAMES
SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
SWAGGER_VERSIONS_SUPPORTED = ('2.0',)
# VENDOR SPECIFIC FIELD PATTERNS
VENDOR_EXT_PATTERN = re.compile('^x-')
# JSON_SCHEMA_REF
JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'
# AWS integration templates for normal and options methods
REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
'"header_params" : {\n'
'#set ($map = $input.params().header)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"query_params" : {\n'
'#set ($map = $input.params().querystring)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"path_params" : {\n'
'#set ($map = $input.params().path)\n'
'#foreach( $param in $map.entrySet() )\n'
'"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
'#end\n'
'},\n'
'"apigw_context" : {\n'
'"apiId": "$context.apiId",\n'
'"httpMethod": "$context.httpMethod",\n'
'"requestId": "$context.requestId",\n'
'"resourceId": "$context.resourceId",\n'
'"resourcePath": "$context.resourcePath",\n'
'"stage": "$context.stage",\n'
'"identity": {\n'
' "user":"$context.identity.user",\n'
' "userArn":"$context.identity.userArn",\n'
' "userAgent":"$context.identity.userAgent",\n'
' "sourceIp":"$context.identity.sourceIp",\n'
' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
' "caller":"$context.identity.caller",\n'
' "apiKey":"$context.identity.apiKey",\n'
' "accountId":"$context.identity.accountId"\n'
'}\n'
'},\n'
'"body_params" : $input.json(\'$\'),\n'
'"stage_variables": {\n'
'#foreach($variable in $stageVariables.keySet())\n'
'"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
'#if($foreach.hasNext), #end\n'
'#end\n'
'}\n'
'}'}
REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}
# AWS integration response template mapping to convert stackTrace part or the error
# to a uniform format containing strings only. Swagger does not seem to allow defining
# an array of non-uniform types, to it is not possible to create error model to match
# exactly what comes out of lambda functions in case of error.
RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n'
'{\n'
' "errorMessage" : "$inputRoot.errorMessage",\n'
' "errorType" : "$inputRoot.errorType",\n'
' "stackTrace" : [\n'
'#foreach($stackTrace in $inputRoot.stackTrace)\n'
' [\n'
'#foreach($elem in $stackTrace)\n'
' "$elem"\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'#if($foreach.hasNext),#end\n'
'#end\n'
' ]\n'
'}'}
RESPONSE_OPTION_TEMPLATE = {}
# This string should not be modified, every API created by this state will carry the description
# below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
"context": "See deployment or stage description"})
class SwaggerParameter(object):
    '''
    Helper wrapper around a Swagger Parameter Object, exposing the pieces
    (location, AWS-mapped name, body schema name) that this state needs
    when wiring up method request parameters.
    '''

    # parameter locations supported by this state ('formData' is not)
    LOCATIONS = ('body', 'query', 'header', 'path')

    def __init__(self, paramdict):
        # paramdict: the raw parameter object from the swagger file
        self._paramdict = paramdict

    @property
    def location(self):
        '''
        returns location in the swagger parameter object

        :raises ValueError: when the 'in' field is missing or unsupported
        '''
        _location = self._paramdict.get('in')
        if _location in _Swagger.SwaggerParameter.LOCATIONS:
            return _location
        raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

    @property
    def name(self):
        '''
        returns parameter name in the swagger parameter object, mapped to
        the AWS ``method.request.*`` dotted form; None for body parameters
        (those are described by their schema instead)

        :raises ValueError: when the parameter has no name
        '''
        _name = self._paramdict.get('name')
        if _name:
            if self.location == 'header':
                return 'method.request.header.{0}'.format(_name)
            elif self.location == 'query':
                return 'method.request.querystring.{0}'.format(_name)
            elif self.location == 'path':
                return 'method.request.path.{0}'.format(_name)
            # body parameters have no request-parameter mapping
            return None
        raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger
        parameter object; None for non-body parameters

        :raises ValueError: when a body parameter lacks a schema $ref
        '''
        if self.location == 'body':
            _schema = self._paramdict.get('schema')
            if _schema:
                if '$ref' in _schema:
                    # '#/definitions/Name' -> 'Name'
                    schema_name = _schema.get('$ref').split('/')[-1]
                    return schema_name
                raise ValueError(('Body parameter must have a JSON reference '
                                  'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
            raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
        return None
class SwaggerMethodResponse(object):
    '''
    Helper wrapper around a Swagger Method Response Object, exposing the
    response schema name and headers needed to build AWS method responses.
    '''

    def __init__(self, r):
        # r: the raw response object from the swagger file
        self._r = r

    @property
    def schema(self):
        '''
        returns the name of the schema given the reference in the swagger
        method response object; None when no schema is declared

        :raises ValueError: when a schema is present but carries no $ref
        '''
        _schema = self._r.get('schema')
        if _schema:
            if '$ref' in _schema:
                # '#/definitions/Name' -> 'Name'
                return _schema.get('$ref').split('/')[-1]
            raise ValueError(('Method response must have a JSON reference '
                              'to the schema definition: {0}'.format(_schema)))
        return None

    @property
    def headers(self):
        '''
        returns the headers dictionary in the method response object
        (empty dict when none are declared)
        '''
        _headers = self._r.get('headers', {})
        return _headers
def __init__(self, api_name, stage_name, lambda_funcname_format,
             swagger_file_path, error_response_template, response_template, common_aws_args):
    '''
    Load and validate the given swagger file and resolve any existing AWS
    rest api that matches api_name.

    :raises IOError: when swagger_file_path does not point to a file
    :raises ValueError: when the swagger content fails validation
    '''
    self._api_name = api_name
    self._stage_name = stage_name
    self._lambda_funcname_format = lambda_funcname_format
    self._common_aws_args = common_aws_args
    # populated later by _resolve_api_id/deploy_api and verify_api
    self._restApiId = ''
    self._deploymentId = ''
    self._error_response_template = error_response_template
    self._response_template = response_template

    if swagger_file_path is not None:
        if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
            self._swagger_file = swagger_file_path
            # hash covers the swagger file AND both templates, so a change to
            # any of them produces a new deployment label
            self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                   error_response_template,
                                                   response_template)
            with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                self._cfg = salt.utils.yaml.safe_load(sf)
            self._swagger_version = ''
        else:
            raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

        self._validate_swagger_file()

    self._validate_lambda_funcname_format()

    self._resolve_api_id()
def _is_http_error_rescode(self, code):
'''
Helper function to determine if the passed code is in the 400~599 range of http error
codes
'''
return bool(re.match(r'^\s*[45]\d\d\s*$', code))
def _validate_error_response_model(self, paths, mods):
    '''
    Helper function to help validate the convention established in the swagger file on how
    to handle response code mapping/integration.

    Every 4xx/5xx response declared for an operation must reference (via
    $ref) a model under /definitions that is an object whose properties
    include 'errorMessage', matching the AWS lambda error format.

    paths
        iterable of (path, path item object) pairs (see self.paths)
    mods
        dict of model name -> schema (see self._models())

    :raises ValueError: on the first violation found
    '''
    for path, ops in paths:
        for opname, opobj in six.iteritems(ops):
            # skip vendor extensions and other non-operation keys
            if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue

            if 'responses' not in opobj:
                raise ValueError('missing mandatory responses field in path item object')
            for rescode, resobj in six.iteritems(opobj.get('responses')):
                # only check for response code from 400-599
                if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                    continue
                if 'schema' not in resobj:
                    raise ValueError('missing schema field in path {0}, '
                                     'op {1}, response {2}'.format(path, opname, rescode))

                schemaobj = resobj.get('schema')
                if '$ref' not in schemaobj:
                    raise ValueError('missing $ref field under schema in '
                                     'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                schemaobjref = schemaobj.get('$ref', '/')
                modelname = schemaobjref.split('/')[-1]

                if modelname not in mods:
                    raise ValueError('model schema {0} reference not found '
                                     'under /definitions'.format(schemaobjref))
                model = mods.get(modelname)

                if model.get('type') != 'object':
                    raise ValueError('model schema {0} must be type object'.format(modelname))
                if 'properties' not in model:
                    raise ValueError('model schema {0} must have properties fields'.format(modelname))

                modelprops = model.get('properties')
                if 'errorMessage' not in modelprops:
                    raise ValueError('model schema {0} must have errorMessage as a property to '
                                     'match AWS convention. If pattern is not set, .+ will '
                                     'be used'.format(modelname))
def _validate_lambda_funcname_format(self):
    '''
    Checks if the lambda function name format contains only known elements
    (the 'stage', 'api', 'resource' and 'method' substitution fields).

    :return: True on success, ValueError raised on error
    '''
    try:
        if self._lambda_funcname_format:
            known_kwargs = dict(stage='',
                                api='',
                                resource='',
                                method='')
            # trial substitution: raises if the format string uses anything
            # other than the known fields (or has a malformed spec)
            self._lambda_funcname_format.format(**known_kwargs)
        return True
    except Exception:
        # deliberately broad: any formatting failure is converted into a
        # uniform ValueError for the caller
        raise ValueError('Invalid lambda_funcname_format {0}.  Please review '
                         'documentation for known substitutable keys'.format(self._lambda_funcname_format))
def _validate_swagger_file(self):
    '''
    High level check/validation of the input swagger file based on
    https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md

    This is not a full schema compliance check; it verifies that the parsed
    swagger dictionary has no unknown top-level fields, carries every field
    this state requires, declares a supported swagger version, and that its
    error response models follow the expected convention.

    :raises ValueError: on the first violation found
    '''
    # check for any invalid fields for Swagger Object V2
    for field in self._cfg:
        if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and
                not _Swagger.VENDOR_EXT_PATTERN.match(field)):
            raise ValueError('Invalid Swagger Object Field: {0}'.format(field))

    # check for Required Swagger fields by Saltstack boto apigateway state
    for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
        if field not in self._cfg:
            raise ValueError('Missing Swagger Object Field: {0}'.format(field))

    # check for Swagger Version
    self._swagger_version = self._cfg.get('swagger')
    if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED:
        raise ValueError('Unsupported Swagger version: {0},'
                         'Supported versions are {1}'.format(self._swagger_version,
                                                             _Swagger.SWAGGER_VERSIONS_SUPPORTED))

    # validate the 4xx/5xx response model convention
    # (a leftover debug statement that logged type(self._models) was removed here)
    self._validate_error_response_model(self.paths, self._models())
@property
def md5_filehash(self):
    '''
    returns md5 hash for the swagger file (plus response templates),
    computed once at construction time
    '''
    return self._md5_filehash

@property
def info(self):
    '''
    returns the swagger info object as a dictionary

    :raises ValueError: when the info object is missing or empty
    '''
    info = self._cfg.get('info')
    if not info:
        raise ValueError('Info Object has no values')
    return info

@property
def info_json(self):
    '''
    returns the swagger info object as a pretty printed json string.
    '''
    return _dict_to_json_pretty(self.info)

@property
def rest_api_name(self):
    '''
    returns the name of the api (as passed to the constructor)
    '''
    return self._api_name

@property
def rest_api_version(self):
    '''
    returns the version field in the swagger info object

    :raises ValueError: when info carries no version value
    '''
    version = self.info.get('version')
    if not version:
        raise ValueError('Missing version value in Info Object')
    return version
def _models(self):
    '''
    returns the definitions mapping (model name -> schema) specified in
    the swagger file

    :raises ValueError: when no definitions are present
    '''
    models = self._cfg.get('definitions')
    if not models:
        raise ValueError('Definitions Object has no values, You need to define them in your swagger file')
    return models
def models(self):
    '''
    Generator yielding (model name, schema) tuples in dependency order, so
    that every model is yielded only after the models it references — the
    order AWS requires for model creation.

    :raises ValueError: (from the helpers) when definitions are missing,
        incomplete or cyclic
    '''
    model_dict = self._build_all_dependencies()
    # fetch the definitions mapping once instead of re-reading the parsed
    # swagger config on every iteration
    all_models = self._models()
    while True:
        model = self._get_model_without_dependencies(model_dict)
        if not model:
            break
        yield (model, all_models.get(model))
@property
def paths(self):
    '''
    returns an iterator over (path, path item object) pairs for the
    relative resource paths specified in the swagger file

    :raises ValueError: when paths are missing, or a path does not start
        with '/'
    '''
    paths = self._cfg.get('paths')
    if not paths:
        raise ValueError('Paths Object has no values, You need to define them in your swagger file')
    # validate all paths up front before handing out the iterator
    for path in paths:
        if not path.startswith('/'):
            raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
    return six.iteritems(paths)
@property
def basePath(self):
    '''
    returns the base path field as defined in the swagger file
    (empty string when absent)
    '''
    basePath = self._cfg.get('basePath', '')
    return basePath

@property
def restApiId(self):
    '''
    returns the rest api id as returned by AWS on creation of the rest api
    (empty string until resolved or created)
    '''
    return self._restApiId

@restApiId.setter
def restApiId(self, restApiId):
    '''
    allows the assignment of the rest api id on creation of the rest api
    '''
    self._restApiId = restApiId

@property
def deployment_label_json(self):
    '''
    this property returns the unique description in pretty printed json for
    a particular api deployment
    '''
    return _dict_to_json_pretty(self.deployment_label)

@property
def deployment_label(self):
    '''
    this property returns the deployment label dictionary (mainly used by
    stage description); it includes the swagger file md5 so content changes
    force a new deployment
    '''
    label = dict()

    label['swagger_info_object'] = self.info
    label['api_name'] = self.rest_api_name
    label['swagger_file'] = os.path.basename(self._swagger_file)
    label['swagger_file_md5sum'] = self.md5_filehash

    return label
# methods to interact with boto_apigateway execution modules

def _one_or_more_stages_remain(self, deploymentId):
    '''
    Helper function to find whether there are other stages still associated with a deployment
    '''
    stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                             deploymentId=deploymentId,
                                                             **self._common_aws_args).get('stages')
    return bool(stages)

def no_more_deployments_remain(self):
    '''
    Helper function to find whether there are deployments left with stages associated
    (returns True when no deployment has a stage, or when none exist at all)
    '''
    no_more_deployments = True
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            deploymentId = deployment.get('id')
            stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('stages')
            # any deployment that still carries a stage settles the answer
            if stages:
                no_more_deployments = False
                break

    return no_more_deployments
def _get_current_deployment_id(self):
    '''
    Helper method to find the deployment id that the stage name is currently associated with.
    Returns the empty string when the stage does not exist.
    '''
    deploymentId = ''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if stage:
        deploymentId = stage.get('deploymentId')
    return deploymentId

def _get_current_deployment_label(self):
    '''
    Helper method to find the deployment label that the stage_name is currently associated with.
    Returns None when no such deployment exists.
    '''
    deploymentId = self._get_current_deployment_id()
    deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                     deploymentId=deploymentId,
                                                                     **self._common_aws_args).get('deployment')
    if deployment:
        return deployment.get('description')
    return None

def _get_desired_deployment_id(self):
    '''
    Helper method to return the deployment id matching the desired deployment label for
    this Swagger object based on the given api_name, swagger_file.  Returns the empty
    string when no deployment carries the desired label.
    '''
    deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                       **self._common_aws_args).get('deployments')
    if deployments:
        for deployment in deployments:
            # the deployment description stores our pretty-printed label json
            if deployment.get('description') == self.deployment_label_json:
                return deployment.get('id')
    return ''
def overwrite_stage_variables(self, ret, stage_variables):
    '''
    overwrite the given stage_name's stage variables with the given stage_variables

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dict of stage variables to set on the stage
    '''
    res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)

    if not res.get('overwrite'):
        ret['result'] = False
        ret['abort'] = True
        ret['comment'] = res.get('error')
    else:
        ret = _log_changes(ret,
                           'overwrite_stage_variables',
                           res.get('stage'))
    return ret

def _set_current_deployment(self, stage_desc_json, stage_variables):
    '''
    Helper method to associate the stage_name to the given deploymentId and make this current:
    creates the stage when it does not exist yet, otherwise overwrites its stage variables,
    then activates the deployment on the stage.
    '''
    stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                           stageName=self._stage_name,
                                                           **self._common_aws_args).get('stage')
    if not stage:
        stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                             stageName=self._stage_name,
                                                             deploymentId=self._deploymentId,
                                                             description=stage_desc_json,
                                                             variables=stage_variables,
                                                             **self._common_aws_args)
        if not stage.get('stage'):
            return {'set': False, 'error': stage.get('error')}
    else:
        # overwrite the stage variables
        overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                              stageName=self._stage_name,
                                                                              variables=stage_variables,
                                                                              **self._common_aws_args)
        if not overwrite.get('stage'):
            return {'set': False, 'error': overwrite.get('error')}

    # point the stage at the desired deployment
    return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               deploymentId=self._deploymentId,
                                                               **self._common_aws_args)
def _resolve_api_id(self):
    '''
    Sets self.restApiId to the Api Id that matches the given api_name and the hardcoded
    _Swagger.AWS_API_DESCRIPTION as the api description.  No-op when no api matches.

    :raises ValueError: when more than one api matches
    '''
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            self.restApiId = apis[0].get('id')
        else:
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
def delete_stage(self, ret):
    '''
    Method to delete the given stage_name. If the current deployment tied to the given
    stage_name has no other stages associated with it, the deployment will be removed
    as well

    ret
        a dictionary for returning status to Saltstack
    '''
    deploymentId = self._get_current_deployment_id()
    if deploymentId:
        result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                              stageName=self._stage_name,
                                                              **self._common_aws_args)
        if not result.get('deleted'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
        else:
            # check if it is safe to delete the deployment as well.
            if not self._one_or_more_stages_remain(deploymentId):
                result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                           deploymentId=deploymentId,
                                                                           **self._common_aws_args)
                if not result.get('deleted'):
                    ret['abort'] = True
                    ret['result'] = False
                    ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
                else:
                    ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
    else:
        # no matching stage_name/deployment found
        ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

    return ret
def verify_api(self, ret):
    '''
    this method helps determine if the given stage_name is already on a deployment
    label matching the input api_name, swagger_file.

    If yes, returns abort with comment indicating already at desired state.
    If not and there is previous deployment labels in AWS matching the given input api_name and
    swagger file, indicate to the caller that we only need to reassociate stage_name to the
    previously existing deployment label.
    '''
    if self.restApiId:
        deployed_label_json = self._get_current_deployment_label()
        if deployed_label_json == self.deployment_label_json:
            ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                              'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
            ret['current'] = True
            return ret
        else:
            # a deployment with the desired label may already exist; if so,
            # the stage only needs to be re-pointed at it ('publish')
            self._deploymentId = self._get_desired_deployment_id()
            if self._deploymentId:
                # just needs to reassociate stage_name with an already
                # existing deployment
                ret['publish'] = True
    # no rest api resolved yet (or ret flags set above): hand back as-is
    return ret
def publish_api(self, ret, stage_variables):
    '''
    this method tie the given stage_name to a deployment matching the given swagger_file

    ret
        a dictionary for returning status to Saltstack
    stage_variables
        dict of stage variables to set on the (re)published stage
    '''
    stage_desc = dict()
    stage_desc['current_deployment_label'] = self.deployment_label
    stage_desc_json = _dict_to_json_pretty(stage_desc)

    if self._deploymentId:
        # just do a reassociate of stage_name to an already existing deployment
        res = self._set_current_deployment(stage_desc_json, stage_variables)
        if not res.get('set'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'publish_api (reassociate deployment, set stage_variables)',
                               res.get('response'))
    else:
        # no deployment existed for the given swagger_file for this Swagger object
        res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                stageName=self._stage_name,
                                                                stageDescription=stage_desc_json,
                                                                description=self.deployment_label_json,
                                                                variables=stage_variables,
                                                                **self._common_aws_args)
        if not res.get('created'):
            ret['abort'] = True
            ret['result'] = False
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
    return ret
def _cleanup_api(self):
    '''
    Helper method to clean up resources and models if we detected a change in the swagger file
    for a stage.  Returns a dict with 'deleted': True on full success, otherwise the first
    failing delete result.
    '''
    resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                   **self._common_aws_args)
    if resources.get('resources'):
        # skip the root resource ('/') and delete children deepest-first
        res = resources.get('resources')[1:]
        res.reverse()
        for resource in res:
            delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                      path=resource.get('path'),
                                                                      **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
    if models.get('models'):
        for model in models.get('models'):
            delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                  modelName=model.get('name'),
                                                                  **self._common_aws_args)
            if not delres.get('deleted'):
                return delres

    return {'deleted': True}
def deploy_api(self, ret):
    '''
    Create the top level rest api in AWS apigateway, or — when an api of
    the same name already exists — clean out its resources and models so
    the current swagger content can be redeployed onto the same api id.

    ret
        a dictionary for returning status to Saltstack
    '''
    if self.restApiId:
        res = self._cleanup_api()
        if not res.get('deleted'):
            # fixed typo in the user-facing message: 'restAreId' -> 'restApiId'
            ret['comment'] = 'Failed to cleanup restApiId {0}'.format(self.restApiId)
            ret['abort'] = True
            ret['result'] = False
            return ret
        return ret

    response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                      description=_Swagger.AWS_API_DESCRIPTION,
                                                      **self._common_aws_args)

    if not response.get('created'):
        ret['result'] = False
        ret['abort'] = True
        if 'error' in response:
            ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
        return ret

    self.restApiId = response.get('restapi', {}).get('id')

    return _log_changes(ret, 'deploy_api', response.get('restapi'))
def delete_api(self, ret):
    '''
    Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

    ret
        a dictionary for returning status to Saltstack
    '''
    exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                             description=_Swagger.AWS_API_DESCRIPTION,
                                                             **self._common_aws_args)
    if exists_response.get('exists'):
        # honor salt's test mode: report but do not delete
        if __opts__['test']:
            ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
            ret['result'] = None
            ret['abort'] = True
            return ret

        delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                                     **self._common_aws_args)
        if not delete_api_response.get('deleted'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in delete_api_response:
                ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
            return ret

        ret = _log_changes(ret, 'delete_api', delete_api_response)
    else:
        ret['comment'] = ('api already absent for swagger file: '
                          '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

    return ret
def _aws_model_ref_from_swagger_ref(self, r):
'''
Helper function to reference models created on aws apigw
'''
model_name = r.split('/')[-1]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
def _update_schema_to_aws_notation(self, schema):
    '''
    Helper function to map model schema to aws notation: every '$ref'
    value is rewritten (recursively) to the full API Gateway model URL.
    Returns a new dict; the input schema is not modified.
    '''
    result = {}
    for k, v in schema.items():
        if k == '$ref':
            v = self._aws_model_ref_from_swagger_ref(v)
        if isinstance(v, dict):
            # recurse into nested schema objects
            v = self._update_schema_to_aws_notation(v)
        result[k] = v
    return result
def _build_dependent_model_list(self, obj_schema):
    '''
    Helper function to build the list of models the given object schema is referencing,
    recursing through array items, $refs and object properties.  Returns a
    de-duplicated list of model names.
    '''
    dep_models_list = []

    if obj_schema:
        # note: mutates obj_schema by defaulting 'type' to 'object'
        obj_schema['type'] = obj_schema.get('type', 'object')

        if obj_schema['type'] == 'array':
            dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        else:
            ref = obj_schema.get('$ref')
            if ref:
                ref_obj_model = ref.split("/")[-1]
                ref_obj_schema = self._models().get(ref_obj_model)
                # include the referenced model's own dependencies as well
                dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                dep_models_list.extend([ref_obj_model])
            else:
                # need to walk each property object
                properties = obj_schema.get('properties')
                if properties:
                    for _, prop_obj_schema in six.iteritems(properties):
                        dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
    return list(set(dep_models_list))
def _build_all_dependencies(self):
    '''
    Helper function to build a map of model to their list of model reference dependencies,
    covering every model under /definitions
    '''
    ret = {}
    for model, schema in six.iteritems(self._models()):
        dep_list = self._build_dependent_model_list(schema)
        ret[model] = dep_list
    return ret
def _get_model_without_dependencies(self, models_dict):
'''
Helper function to find the next model that should be created
'''
next_model = None
if not models_dict:
return next_model
for model, dependencies in six.iteritems(models_dict):
if dependencies == []:
next_model = model
break
if next_model is None:
raise ValueError('incomplete model definitions, models in dependency '
'list not defined: {0}'.format(models_dict))
# remove the model from other depednencies before returning
models_dict.pop(next_model)
for model, dep_list in six.iteritems(models_dict):
if next_model in dep_list:
dep_list.remove(next_model)
return next_model
def deploy_models(self, ret):
    '''
    Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

    ret
        a dictionary for returning status to Saltstack
    '''
    # self.models() yields models in dependency order, as AWS requires
    for model, schema in self.models():
        # add in a few attributes into the model schema that AWS expects
        _schema = self._update_schema_to_aws_notation(schema)
        _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                        'title': '{0} Schema'.format(model)})

        # check to see if model already exists, aws has 2 default models [Empty, Error]
        # which may need update with data from swagger file
        model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                             modelName=model,
                                                                             **self._common_aws_args)

        if model_exists_response.get('exists'):
            update_model_schema_response = (
                __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                    modelName=model,
                                                                    schema=_dict_to_json_pretty(_schema),
                                                                    **self._common_aws_args))
            if not update_model_schema_response.get('updated'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in update_model_schema_response:
                    ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          update_model_schema_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
        else:
            create_model_response = (
                __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
                                                             modelDescription=model,
                                                             schema=_dict_to_json_pretty(_schema),
                                                             contentType='application/json',
                                                             **self._common_aws_args))

            if not create_model_response.get('created'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in create_model_response:
                    ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                      'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                          create_model_response['error']['message']))
                return ret

            ret = _log_changes(ret, 'deploy_models', create_model_response)

    return ret
def _lambda_name(self, resourcePath, httpMethod):
'''
Helper method to construct lambda name based on the rule specified in doc string of
boto_apigateway.api_present function
'''
lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
api=self.rest_api_name,
resource=resourcePath,
method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub(r'{|}', '', lambda_name)
lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
return re.sub(r'_+', '_', lambda_name)
def _lambda_uri(self, lambda_name, lambda_region):
    '''
    Helper Method to construct the lambda uri for use in method integration

    :raises ValueError: when the lambda function cannot be found
    '''
    profile = self._common_aws_args.get('profile')
    region = self._common_aws_args.get('region')

    lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
    apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

    lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    # NOTE(review): this retry passes exactly the same arguments as the
    # first lookup (no region override), so it cannot actually search the
    # apigateway region as the comment below implies -- confirm intent
    if lambda_region != apigw_region:
        if not lambda_desc.get('function'):
            # try look up in the same region as the apigateway as well if previous lookup failed
            lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

    if not lambda_desc.get('function'):
        raise ValueError('Could not find lambda function {0} in '
                         'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

    lambda_arn = lambda_desc.get('function').get('FunctionArn')
    # the integration endpoint is always addressed in the apigateway's region
    lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                  '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
    return lambda_uri
def _parse_method_data(self, method_name, method_data):
    '''
    Helper function to construct the method request params, models, request_templates and
    integration_type values needed to configure method request integration/mappings.

    method_name
        lower-cased http method name ('get', 'options', ...)
    method_data
        the operation object for this method from the swagger file
    '''
    method_params = {}
    method_models = {}
    if 'parameters' in method_data:
        for param in method_data['parameters']:
            p = _Swagger.SwaggerParameter(param)
            if p.name:
                # header/query/path parameters are marked as required
                method_params[p.name] = True
            if p.schema:
                method_models['application/json'] = p.schema

    # OPTIONS (CORS preflight) gets a MOCK integration with a static
    # template; everything else integrates with lambda ('AWS')
    request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
    integration_type = "MOCK" if method_name == 'options' else "AWS"

    return {'params': method_params,
            'models': method_models,
            'request_templates': request_templates,
            'integration_type': integration_type}
def _find_patterns(self, o):
result = []
if isinstance(o, dict):
for k, v in six.iteritems(o):
if isinstance(v, dict):
result.extend(self._find_patterns(v))
else:
if k == 'pattern':
result.append(v)
return result
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    returns the pattern specified in a response schema, falling back to
    '.+' for error (4xx/5xx) statuses and '.*' otherwise
    '''
    defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    model = self._models().get(schema_name)
    patterns = self._find_patterns(model)
    # only the first pattern found is used as the AWS selection pattern
    return patterns[0] if patterns else defaultPattern
def _get_response_template(self, method_name, http_status):
if method_name == 'options' or not self._is_http_error_rescode(http_status):
response_templates = {'application/json': self._response_template} \
if self._response_template else self.RESPONSE_OPTION_TEMPLATE
else:
response_templates = {'application/json': self._error_response_template} \
if self._error_response_template else self.RESPONSE_TEMPLATE
return response_templates
def _parse_method_response(self, method_name, method_response, httpStatus):
    '''
    Helper function to construct the method response params, models, and integration_params
    values needed to configure method response integration/mappings.

    method_response
        a _Swagger.SwaggerMethodResponse wrapping the swagger response object
    '''
    method_response_models = {}
    method_response_pattern = '.*'
    if method_response.schema:
        method_response_models['application/json'] = method_response.schema
        # the schema's 'pattern' (if declared) becomes the AWS selection pattern
        method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

    method_response_params = {}
    method_integration_response_params = {}
    for header in method_response.headers:
        response_header = 'method.response.header.{0}'.format(header)
        method_response_params[response_header] = False
        header_data = method_response.headers.get(header)
        # header mapped to its declared default, or '*' when none given
        method_integration_response_params[response_header] = (
            "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

    response_templates = self._get_response_template(method_name, httpStatus)

    return {'params': method_response_params,
            'models': method_response_models,
            'integration_params': method_integration_response_params,
            'pattern': method_response_pattern,
            'response_templates': response_templates}
def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                   lambda_integration_role, lambda_region, authorization_type):
    '''
    Method to create a method for the given resource path, along with its associated
    request and response integrations.

    ret
        a dictionary for returning status to Saltstack

    resource_path
        the full resource path where the named method_name will be associated with.

    method_name
        a string that is one of the following values: 'delete', 'get', 'head', 'options',
        'patch', 'post', 'put'

    method_data
        the value dictionary for this method in the swagger definition file.

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'

    :raises ValueError: when the swagger operation declares no responses
    '''
    method = self._parse_method_data(method_name.lower(), method_data)

    # for options method to enable CORS, api_key_required will be set to False always.
    # authorization_type will be set to 'NONE' always.
    if method_name.lower() == 'options':
        api_key_required = False
        authorization_type = 'NONE'

    # 1) create the method on the resource
    m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                      resourcePath=resource_path,
                                                      httpMethod=method_name.upper(),
                                                      authorizationType=authorization_type,
                                                      apiKeyRequired=api_key_required,
                                                      requestParameters=method.get('params'),
                                                      requestModels=method.get('models'),
                                                      **self._common_aws_args)
    if not m.get('created'):
        ret = _log_error_and_abort(ret, m)
        return ret

    ret = _log_changes(ret, '_deploy_method.create_api_method', m)

    # 2) wire up the request integration (MOCK for options, lambda otherwise)
    lambda_uri = ""
    if method_name.lower() != 'options':
        lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                      lambda_region=lambda_region)

    # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
    # about the intent of the call. HTTP method will be passed to lambda as part of the API gateway context
    integration = (
        __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                           resourcePath=resource_path,
                                                           httpMethod=method_name.upper(),
                                                           integrationType=method.get('integration_type'),
                                                           integrationHttpMethod='POST',
                                                           uri=lambda_uri,
                                                           credentials=lambda_integration_role,
                                                           requestTemplates=method.get('request_templates'),
                                                           **self._common_aws_args))
    if not integration.get('created'):
        ret = _log_error_and_abort(ret, integration)
        return ret
    ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

    # 3) create a method response + integration response per declared status code
    if 'responses' in method_data:
        for response, response_data in six.iteritems(method_data['responses']):
            httpStatus = str(response)  # future lint: disable=blacklisted-function
            method_response = self._parse_method_response(method_name.lower(),
                                                          _Swagger.SwaggerMethodResponse(response_data), httpStatus)

            mr = __salt__['boto_apigateway.create_api_method_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                responseParameters=method_response.get('params'),
                responseModels=method_response.get('models'),
                **self._common_aws_args)
            if not mr.get('created'):
                ret = _log_error_and_abort(ret, mr)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

            mir = __salt__['boto_apigateway.create_api_integration_response'](
                restApiId=self.restApiId,
                resourcePath=resource_path,
                httpMethod=method_name.upper(),
                statusCode=httpStatus,
                selectionPattern=method_response.get('pattern'),
                responseParameters=method_response.get('integration_params'),
                responseTemplates=method_response.get('response_templates'),
                **self._common_aws_args)
            if not mir.get('created'):
                ret = _log_error_and_abort(ret, mir)
                return ret
            ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
    else:
        raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

    return ret
|
saltstack/salt
|
salt/states/pagerduty.py
|
create_event
|
python
|
def create_event(name, details, service_key, profile):
'''
Create an event on the PagerDuty service
.. code-block:: yaml
server-warning-message:
pagerduty.create_event:
- name: 'This is a server warning message'
- details: 'This is a much more detailed message'
- service_key: 9abcd123456789efabcde362783cdbaf
- profile: my-pagerduty-account
The following parameters are required:
name
This is a short description of the event.
details
This can be a more detailed description of the event.
service_key
This key can be found by using pagerduty.list_services.
profile
This refers to the configuration profile to use to connect to the
PagerDuty service.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if __opts__['test']:
ret['comment'] = 'Need to create event: {0}'.format(name)
return ret
__salt__['pagerduty.create_event'](
description=name,
details=details,
service_key=service_key,
profile=profile,
)
ret['result'] = True
ret['comment'] = 'Created event: {0}'.format(name)
return ret
|
Create an event on the PagerDuty service
.. code-block:: yaml
server-warning-message:
pagerduty.create_event:
- name: 'This is a server warning message'
- details: 'This is a much more detailed message'
- service_key: 9abcd123456789efabcde362783cdbaf
- profile: my-pagerduty-account
The following parameters are required:
name
This is a short description of the event.
details
This can be a more detailed description of the event.
service_key
This key can be found by using pagerduty.list_services.
profile
This refers to the configuration profile to use to connect to the
PagerDuty service.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pagerduty.py#L32-L75
| null |
# -*- coding: utf-8 -*-
'''
Create an Event in PagerDuty
============================
.. versionadded:: 2014.1.0
This state is useful for creating events on the PagerDuty service during state
runs.
.. code-block:: yaml
server-warning-message:
pagerduty.create_event:
- name: 'This is a server warning message'
- details: 'This is a much more detailed message'
- service_key: 9abcd123456789efabcde362783cdbaf
- profile: my-pagerduty-account
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
def __virtual__():
'''
Only load if the pygerduty module is available in __salt__
'''
return 'pagerduty' if 'pagerduty.create_event' in __salt__ else False
|
saltstack/salt
|
salt/states/mysql_query.py
|
run_file
|
python
|
def run_file(name,
database,
query_file=None,
output=None,
grain=None,
key=None,
overwrite=True,
saltenv=None,
check_db_exists=True,
**connection_args):
'''
Execute an arbitrary query on the specified database
.. versionadded:: 2017.7.0
name
Used only as an ID
database
The name of the database to execute the query_file on
query_file
The file of mysql commands to run
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
saltenv:
The saltenv to pull the query_file from
check_db_exists:
The state run will check that the specified database exists (default=True)
before running any queries
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(database)}
if any([query_file.startswith(proto) for proto in ['http://', 'https://', 'salt://', 's3://', 'swift://']]):
query_file = __salt__['cp.cache_file'](query_file, saltenv=saltenv or __env__)
if not os.path.exists(query_file):
ret['comment'] = 'File {0} does not exist'.format(query_file)
ret['result'] = False
return ret
# check if database exists
if check_db_exists and not __salt__['mysql.db_exists'](database, **connection_args):
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
ret['result'] = None
ret['comment'] = ('Database {0} is not present'
).format(database)
return ret
# Check if execution needed
if output == 'grain':
if grain is not None and key is None:
if not overwrite and grain in __salt__['grains.ls']():
ret['comment'] = 'No execution needed. Grain ' + grain\
+ ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'grain: ' + grain
return ret
elif grain is not None:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
if not overwrite and key in grain_value:
ret['comment'] = 'No execution needed. Grain ' + grain\
+ ':' + key + ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'grain: ' + grain + ':' + key
return ret
else:
ret['result'] = False
ret['comment'] = "Error: output type 'grain' needs the grain "\
+ "parameter\n"
return ret
elif output is not None:
if not overwrite and os.path.isfile(output):
ret['comment'] = 'No execution needed. File ' + output\
+ ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'file: ' + output
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, not storing result'
return ret
# The database is present, execute the query
query_result = __salt__['mysql.file_query'](database, query_file, **connection_args)
if query_result is False:
ret['result'] = False
return ret
mapped_results = []
if 'results' in query_result:
for res in query_result['results']:
mapped_line = {}
for idx, col in enumerate(query_result['columns']):
mapped_line[col] = res[idx]
mapped_results.append(mapped_line)
query_result['results'] = mapped_results
ret['comment'] = six.text_type(query_result)
if output == 'grain':
if grain is not None and key is None:
__salt__['grains.setval'](grain, query_result)
ret['changes']['query'] = "Executed. Output into grain: "\
+ grain
elif grain is not None:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
grain_value[key] = query_result
__salt__['grains.setval'](grain, grain_value)
ret['changes']['query'] = "Executed. Output into grain: "\
+ grain + ":" + key
elif output is not None:
ret['changes']['query'] = "Executed. Output into " + output
with salt.utils.files.fopen(output, 'w') as output_file:
if 'results' in query_result:
for res in query_result['results']:
for col, val in six.iteritems(res):
output_file.write(
salt.utils.stringutils.to_str(
col + ':' + val + '\n'
)
)
else:
output_file.write(
salt.utils.stringutils.to_str(query_result)
)
else:
ret['changes']['query'] = "Executed"
return ret
|
Execute an arbitrary query on the specified database
.. versionadded:: 2017.7.0
name
Used only as an ID
database
The name of the database to execute the query_file on
query_file
The file of mysql commands to run
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
saltenv:
The saltenv to pull the query_file from
check_db_exists:
The state run will check that the specified database exists (default=True)
before running any queries
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/mysql_query.py#L53-L222
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n",
"def _get_mysql_error():\n '''\n Look in module context for a MySQL error. Eventually we should make a less\n ugly way of doing this.\n '''\n return sys.modules[\n __salt__['test.ping'].__module__\n ].__context__.pop('mysql.error', None)\n"
] |
# -*- coding: utf-8 -*-
'''
Execution of MySQL queries
==========================
.. versionadded:: 2014.7.0
:depends: - MySQLdb Python module
:configuration: See :py:mod:`salt.modules.mysql` for setup instructions.
The mysql_query module is used to execute queries on MySQL databases.
Its output may be stored in a file or in a grain.
.. code-block:: yaml
query_id:
mysql_query.run
- database: my_database
- query: "SELECT * FROM table;"
- output: "/tmp/query_id.txt"
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import sys
import os.path
# Import Salt libs
import salt.utils.files
import salt.utils.stringutils
# Import 3rd-party libs
from salt.ext import six
def __virtual__():
'''
Only load if the mysql module is available in __salt__
'''
return 'mysql.query' in __salt__
def _get_mysql_error():
'''
Look in module context for a MySQL error. Eventually we should make a less
ugly way of doing this.
'''
return sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('mysql.error', None)
def run(name,
database,
query,
output=None,
grain=None,
key=None,
overwrite=True,
check_db_exists=True,
**connection_args):
'''
Execute an arbitrary query on the specified database
name
Used only as an ID
database
The name of the database to execute the query on
query
The query to execute
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
check_db_exists:
The state run will check that the specified database exists (default=True)
before running any queries
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(database)}
# check if database exists
if check_db_exists and not __salt__['mysql.db_exists'](database, **connection_args):
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
ret['result'] = None
ret['comment'] = ('Database {0} is not present'
).format(name)
return ret
# Check if execution needed
if output == 'grain':
if grain is not None and key is None:
if not overwrite and grain in __salt__['grains.ls']():
ret['comment'] = 'No execution needed. Grain ' + grain\
+ ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'grain: ' + grain
return ret
elif grain is not None:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
if not overwrite and key in grain_value:
ret['comment'] = 'No execution needed. Grain ' + grain\
+ ':' + key + ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'grain: ' + grain + ':' + key
return ret
else:
ret['result'] = False
ret['comment'] = "Error: output type 'grain' needs the grain "\
+ "parameter\n"
return ret
elif output is not None:
if not overwrite and os.path.isfile(output):
ret['comment'] = 'No execution needed. File ' + output\
+ ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'file: ' + output
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, not storing result'
return ret
# The database is present, execute the query
query_result = __salt__['mysql.query'](database, query, **connection_args)
mapped_results = []
if 'results' in query_result:
for res in query_result['results']:
mapped_line = {}
for idx, col in enumerate(query_result['columns']):
mapped_line[col] = res[idx]
mapped_results.append(mapped_line)
query_result['results'] = mapped_results
ret['comment'] = six.text_type(query_result)
if output == 'grain':
if grain is not None and key is None:
__salt__['grains.setval'](grain, query_result)
ret['changes']['query'] = "Executed. Output into grain: "\
+ grain
elif grain is not None:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
grain_value[key] = query_result
__salt__['grains.setval'](grain, grain_value)
ret['changes']['query'] = "Executed. Output into grain: "\
+ grain + ":" + key
elif output is not None:
ret['changes']['query'] = "Executed. Output into " + output
with salt.utils.files.fopen(output, 'w') as output_file:
if 'results' in query_result:
for res in query_result['results']:
for col, val in six.iteritems(res):
output_file.write(
salt.utils.stringutils.to_str(
col + ':' + val + '\n'
)
)
else:
if isinstance(query_result, six.text_type):
output_file.write(
salt.utils.stringutils.to_str(query_result)
)
else:
for col, val in six.iteritems(query_result):
output_file.write(
salt.utils.stringutils.to_str(
'{0}:{1}\n'.format(col, val)
)
)
else:
ret['changes']['query'] = "Executed"
return ret
|
saltstack/salt
|
salt/states/mysql_query.py
|
run
|
python
|
def run(name,
database,
query,
output=None,
grain=None,
key=None,
overwrite=True,
check_db_exists=True,
**connection_args):
'''
Execute an arbitrary query on the specified database
name
Used only as an ID
database
The name of the database to execute the query on
query
The query to execute
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
check_db_exists:
The state run will check that the specified database exists (default=True)
before running any queries
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(database)}
# check if database exists
if check_db_exists and not __salt__['mysql.db_exists'](database, **connection_args):
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
ret['result'] = None
ret['comment'] = ('Database {0} is not present'
).format(name)
return ret
# Check if execution needed
if output == 'grain':
if grain is not None and key is None:
if not overwrite and grain in __salt__['grains.ls']():
ret['comment'] = 'No execution needed. Grain ' + grain\
+ ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'grain: ' + grain
return ret
elif grain is not None:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
if not overwrite and key in grain_value:
ret['comment'] = 'No execution needed. Grain ' + grain\
+ ':' + key + ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'grain: ' + grain + ':' + key
return ret
else:
ret['result'] = False
ret['comment'] = "Error: output type 'grain' needs the grain "\
+ "parameter\n"
return ret
elif output is not None:
if not overwrite and os.path.isfile(output):
ret['comment'] = 'No execution needed. File ' + output\
+ ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'file: ' + output
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, not storing result'
return ret
# The database is present, execute the query
query_result = __salt__['mysql.query'](database, query, **connection_args)
mapped_results = []
if 'results' in query_result:
for res in query_result['results']:
mapped_line = {}
for idx, col in enumerate(query_result['columns']):
mapped_line[col] = res[idx]
mapped_results.append(mapped_line)
query_result['results'] = mapped_results
ret['comment'] = six.text_type(query_result)
if output == 'grain':
if grain is not None and key is None:
__salt__['grains.setval'](grain, query_result)
ret['changes']['query'] = "Executed. Output into grain: "\
+ grain
elif grain is not None:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
grain_value[key] = query_result
__salt__['grains.setval'](grain, grain_value)
ret['changes']['query'] = "Executed. Output into grain: "\
+ grain + ":" + key
elif output is not None:
ret['changes']['query'] = "Executed. Output into " + output
with salt.utils.files.fopen(output, 'w') as output_file:
if 'results' in query_result:
for res in query_result['results']:
for col, val in six.iteritems(res):
output_file.write(
salt.utils.stringutils.to_str(
col + ':' + val + '\n'
)
)
else:
if isinstance(query_result, six.text_type):
output_file.write(
salt.utils.stringutils.to_str(query_result)
)
else:
for col, val in six.iteritems(query_result):
output_file.write(
salt.utils.stringutils.to_str(
'{0}:{1}\n'.format(col, val)
)
)
else:
ret['changes']['query'] = "Executed"
return ret
|
Execute an arbitrary query on the specified database
name
Used only as an ID
database
The name of the database to execute the query on
query
The query to execute
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
check_db_exists:
The state run will check that the specified database exists (default=True)
before running any queries
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/mysql_query.py#L225-L381
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n",
"def _get_mysql_error():\n '''\n Look in module context for a MySQL error. Eventually we should make a less\n ugly way of doing this.\n '''\n return sys.modules[\n __salt__['test.ping'].__module__\n ].__context__.pop('mysql.error', None)\n"
] |
# -*- coding: utf-8 -*-
'''
Execution of MySQL queries
==========================
.. versionadded:: 2014.7.0
:depends: - MySQLdb Python module
:configuration: See :py:mod:`salt.modules.mysql` for setup instructions.
The mysql_query module is used to execute queries on MySQL databases.
Its output may be stored in a file or in a grain.
.. code-block:: yaml
query_id:
mysql_query.run
- database: my_database
- query: "SELECT * FROM table;"
- output: "/tmp/query_id.txt"
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import sys
import os.path
# Import Salt libs
import salt.utils.files
import salt.utils.stringutils
# Import 3rd-party libs
from salt.ext import six
def __virtual__():
'''
Only load if the mysql module is available in __salt__
'''
return 'mysql.query' in __salt__
def _get_mysql_error():
'''
Look in module context for a MySQL error. Eventually we should make a less
ugly way of doing this.
'''
return sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('mysql.error', None)
def run_file(name,
database,
query_file=None,
output=None,
grain=None,
key=None,
overwrite=True,
saltenv=None,
check_db_exists=True,
**connection_args):
'''
Execute an arbitrary query on the specified database
.. versionadded:: 2017.7.0
name
Used only as an ID
database
The name of the database to execute the query_file on
query_file
The file of mysql commands to run
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
saltenv:
The saltenv to pull the query_file from
check_db_exists:
The state run will check that the specified database exists (default=True)
before running any queries
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(database)}
if any([query_file.startswith(proto) for proto in ['http://', 'https://', 'salt://', 's3://', 'swift://']]):
query_file = __salt__['cp.cache_file'](query_file, saltenv=saltenv or __env__)
if not os.path.exists(query_file):
ret['comment'] = 'File {0} does not exist'.format(query_file)
ret['result'] = False
return ret
# check if database exists
if check_db_exists and not __salt__['mysql.db_exists'](database, **connection_args):
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
ret['result'] = None
ret['comment'] = ('Database {0} is not present'
).format(database)
return ret
# Check if execution needed
if output == 'grain':
if grain is not None and key is None:
if not overwrite and grain in __salt__['grains.ls']():
ret['comment'] = 'No execution needed. Grain ' + grain\
+ ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'grain: ' + grain
return ret
elif grain is not None:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
if not overwrite and key in grain_value:
ret['comment'] = 'No execution needed. Grain ' + grain\
+ ':' + key + ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'grain: ' + grain + ':' + key
return ret
else:
ret['result'] = False
ret['comment'] = "Error: output type 'grain' needs the grain "\
+ "parameter\n"
return ret
elif output is not None:
if not overwrite and os.path.isfile(output):
ret['comment'] = 'No execution needed. File ' + output\
+ ' already set'
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, storing result in '\
+ 'file: ' + output
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Query would execute, not storing result'
return ret
# The database is present, execute the query
query_result = __salt__['mysql.file_query'](database, query_file, **connection_args)
if query_result is False:
ret['result'] = False
return ret
mapped_results = []
if 'results' in query_result:
for res in query_result['results']:
mapped_line = {}
for idx, col in enumerate(query_result['columns']):
mapped_line[col] = res[idx]
mapped_results.append(mapped_line)
query_result['results'] = mapped_results
ret['comment'] = six.text_type(query_result)
if output == 'grain':
if grain is not None and key is None:
__salt__['grains.setval'](grain, query_result)
ret['changes']['query'] = "Executed. Output into grain: "\
+ grain
elif grain is not None:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
grain_value[key] = query_result
__salt__['grains.setval'](grain, grain_value)
ret['changes']['query'] = "Executed. Output into grain: "\
+ grain + ":" + key
elif output is not None:
ret['changes']['query'] = "Executed. Output into " + output
with salt.utils.files.fopen(output, 'w') as output_file:
if 'results' in query_result:
for res in query_result['results']:
for col, val in six.iteritems(res):
output_file.write(
salt.utils.stringutils.to_str(
col + ':' + val + '\n'
)
)
else:
output_file.write(
salt.utils.stringutils.to_str(query_result)
)
else:
ret['changes']['query'] = "Executed"
return ret
|
saltstack/salt
|
salt/utils/s3.py
|
query
|
python
|
def query(key, keyid, method='GET', params=None, headers=None,
          requesturl=None, return_url=False, bucket=None, service_url=None,
          path='', return_bin=False, action=None, local_file=None,
          verify_ssl=True, full_headers=False, kms_keyid=None,
          location=None, role_arn=None, chunk_size=16384, path_style=False,
          https_enable=True):
    '''
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    If keyid or key is not specified, an attempt to fetch them from EC2 IAM
    metadata service will be made.

    A service_url may also be specified in the configuration:

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    Path style can be enabled:

        s3.path_style: True

    This can be useful if you need to use salt with a proxy for an s3 compatible storage

    You can use either https protocol or http protocol:

        s3.https_enable: True

    SSL verification may also be turned off in the configuration:

        s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.

    A region may be specified:

        s3.location: eu-central-1

    If region is not specified, an attempt to fetch the region from EC2 IAM
    metadata service will be made. Failing that, default is us-east-1
    '''
    # Only log here; the requests.request() call below will raise NameError
    # anyway if the library is genuinely missing.
    if not HAS_REQUESTS:
        log.error('There was an error: requests is required for s3 access')
    if not headers:
        headers = {}
    if not params:
        params = {}
    if not service_url:
        service_url = 's3.amazonaws.com'
    # Virtual-host style ("bucket.service") unless path-style is requested
    # or there is no bucket at all.
    if not bucket or path_style:
        endpoint = service_url
    else:
        endpoint = '{0}.{1}'.format(bucket, service_url)
    # Path-style addressing embeds the bucket in the URL path instead.
    if path_style and bucket:
        path = '{0}/{1}'.format(bucket, path)
    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key:
        key = salt.utils.aws.IROLE_CODE
    if not keyid:
        keyid = salt.utils.aws.IROLE_CODE
    # Server-side encryption headers only make sense on uploads.
    if kms_keyid is not None and method in ('PUT', 'POST'):
        headers['x-amz-server-side-encryption'] = 'aws:kms'
        headers['x-amz-server-side-encryption-aws-kms-key-id'] = kms_keyid
    if not location:
        location = salt.utils.aws.get_location()
    data = ''
    fh = None
    payload_hash = None
    if method == 'PUT':
        if local_file:
            # SigV4 needs the SHA-256 of the request body; hash the file
            # up front so the whole file is not kept in memory for signing.
            payload_hash = salt.utils.hashutils.get_hash(local_file, form='sha256')
    if path is None:
        path = ''
    path = _quote(path)
    if not requesturl:
        requesturl = (('https' if https_enable else 'http')+'://{0}/{1}').format(endpoint, path)
    # Sign the request (AWS Signature Version 4); sig4 may rewrite the URL
    # to append the canonical query string.
    headers, requesturl = salt.utils.aws.sig4(
        method,
        endpoint,
        params,
        data=data,
        uri='/{0}'.format(path),
        prov_dict={'id': keyid, 'key': key},
        role_arn=role_arn,
        location=location,
        product='s3',
        requesturl=requesturl,
        headers=headers,
        payload_hash=payload_hash,
    )
    log.debug('S3 Request: %s', requesturl)
    log.debug('S3 Headers::')
    log.debug(' Authorization: %s', headers['Authorization'])
    if not data:
        data = None
    try:
        if method == 'PUT':
            if local_file:
                # NOTE: the whole file body is read into memory here; the
                # payload hash above was computed from the same file.
                fh = salt.utils.files.fopen(local_file, 'rb')  # pylint: disable=resource-leakage
                data = fh.read()  # pylint: disable=resource-leakage
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      stream=True,
                                      timeout=300)
        elif method == 'GET' and local_file and not return_bin:
            # Stream downloads destined for a local file (written in
            # chunks further below).
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      stream=True,
                                      timeout=300)
        else:
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      timeout=300)
    finally:
        # Close the upload source even if the request raised.
        if fh is not None:
            fh.close()
    err_code = None
    err_msg = None
    if result.status_code >= 400:
        # On error the S3 API response should contain error message
        err_text = result.content or 'Unknown error'
        log.debug(' Response content: %s', err_text)
        # Try to get err info from response xml
        try:
            err_data = xml.to_dict(ET.fromstring(err_text))
            err_code = err_data['Code']
            err_msg = err_data['Message']
        except (KeyError, ET.ParseError) as err:
            log.debug(
                'Failed to parse s3 err response. %s: %s',
                type(err).__name__, err
            )
            # Fall back to a synthetic code built from the HTTP status.
            err_code = 'http-{0}'.format(result.status_code)
            err_msg = err_text
    log.debug('S3 Response Status Code: %s', result.status_code)
    if method == 'PUT':
        # PUT covers both object upload (local_file set) and bucket creation.
        if result.status_code != 200:
            if local_file:
                raise CommandExecutionError(
                    'Failed to upload from {0} to {1}. {2}: {3}'.format(
                        local_file, path, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to create bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))
        if local_file:
            log.debug('Uploaded from %s to %s', local_file, path)
        else:
            log.debug('Created bucket %s', bucket)
        return
    if method == 'DELETE':
        # Any 2xx counts as success for deletes (e.g. 204 No Content).
        if not six.text_type(result.status_code).startswith('2'):
            if path:
                raise CommandExecutionError(
                    'Failed to delete {0} from bucket {1}. {2}: {3}'.format(
                        path, bucket, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to delete bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))
        if path:
            log.debug('Deleted %s from bucket %s', path, bucket)
        else:
            log.debug('Deleted bucket %s', bucket)
        return
    # This can be used to save a binary object to disk
    if local_file and method == 'GET':
        if result.status_code < 200 or result.status_code >= 300:
            raise CommandExecutionError(
                'Failed to get file. {0}: {1}'.format(err_code, err_msg))
        log.debug('Saving to local file: %s', local_file)
        with salt.utils.files.fopen(local_file, 'wb') as out:
            for chunk in result.iter_content(chunk_size=chunk_size):
                out.write(chunk)
        return 'Saved to local file: {0}'.format(local_file)
    if result.status_code < 200 or result.status_code >= 300:
        raise CommandExecutionError(
            'Failed s3 operation. {0}: {1}'.format(err_code, err_msg))
    # This can be used to return a binary object wholesale
    if return_bin:
        return result.content
    if result.content:
        # Typical list-type responses: parse the XML body into dicts.
        items = ET.fromstring(result.content)
        ret = []
        for item in items:
            ret.append(xml.to_dict(item))
        if return_url is True:
            return ret, requesturl
    else:
        # Empty body (e.g. HEAD-like results): surface the response headers.
        if result.status_code != requests.codes.ok:
            return
        ret = {'headers': []}
        if full_headers:
            ret['headers'] = dict(result.headers)
        else:
            for header in result.headers:
                ret['headers'].append(header.strip())
    return ret
|
Perform a query against an S3-like API. This function requires that a
secret key and the id for that key are passed in. For instance:
s3.keyid: GKTADJGHEIQSXMKKRBJ08H
s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
If keyid or key is not specified, an attempt to fetch them from EC2 IAM
metadata service will be made.
A service_url may also be specified in the configuration:
s3.service_url: s3.amazonaws.com
If a service_url is not specified, the default is s3.amazonaws.com. This
may appear in various documentation as an "endpoint". A comprehensive list
for Amazon S3 may be found at::
http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
The service_url will form the basis for the final endpoint that is used to
query the service.
Path style can be enabled:
s3.path_style: True
This can be useful if you need to use salt with a proxy for an s3 compatible storage
You can use either https protocol or http protocol:
s3.https_enable: True
SSL verification may also be turned off in the configuration:
s3.verify_ssl: False
This is required if using S3 bucket names that contain a period, as
these will not match Amazon's S3 wildcard certificates. Certificate
verification is enabled by default.
A region may be specified:
s3.location: eu-central-1
If region is not specified, an attempt to fetch the region from EC2 IAM
metadata service will be made. Failing that, default is us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/s3.py#L32-L279
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def get_location(opts=None, provider=None):\n '''\n Return the region to use, in this order:\n opts['location']\n provider['location']\n get_region_from_metadata()\n DEFAULT_LOCATION\n '''\n if opts is None:\n opts = {}\n ret = opts.get('location')\n if ret is None and provider is not None:\n ret = provider.get('location')\n if ret is None:\n ret = get_region_from_metadata()\n if ret is None:\n ret = DEFAULT_LOCATION\n return ret\n",
"def sig4(method, endpoint, params, prov_dict,\n aws_api_version=DEFAULT_AWS_API_VERSION, location=None,\n product='ec2', uri='/', requesturl=None, data='', headers=None,\n role_arn=None, payload_hash=None):\n '''\n Sign a query against AWS services using Signature Version 4 Signing\n Process. This is documented at:\n\n http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html\n http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html\n http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html\n '''\n timenow = datetime.utcnow()\n\n # Retrieve access credentials from meta-data, or use provided\n if role_arn is None:\n access_key_id, secret_access_key, token = creds(prov_dict)\n else:\n access_key_id, secret_access_key, token = assumed_creds(prov_dict, role_arn, location=location)\n\n if location is None:\n location = get_region_from_metadata()\n if location is None:\n location = DEFAULT_LOCATION\n\n params_with_headers = params.copy()\n if product not in ('s3', 'ssm'):\n params_with_headers['Version'] = aws_api_version\n keys = sorted(params_with_headers.keys())\n values = list(map(params_with_headers.get, keys))\n querystring = urlencode(list(zip(keys, values))).replace('+', '%20')\n\n amzdate = timenow.strftime('%Y%m%dT%H%M%SZ')\n datestamp = timenow.strftime('%Y%m%d')\n new_headers = {}\n if isinstance(headers, dict):\n new_headers = headers.copy()\n\n # Create payload hash (hash of the request body content). 
For GET\n # requests, the payload is an empty string ('').\n if not payload_hash:\n payload_hash = salt.utils.hashutils.sha256_digest(data)\n\n new_headers['X-Amz-date'] = amzdate\n new_headers['host'] = endpoint\n new_headers['x-amz-content-sha256'] = payload_hash\n a_canonical_headers = []\n a_signed_headers = []\n\n if token != '':\n new_headers['X-Amz-security-token'] = token\n\n for header in sorted(new_headers.keys(), key=six.text_type.lower):\n lower_header = header.lower()\n a_canonical_headers.append('{0}:{1}'.format(lower_header, new_headers[header].strip()))\n a_signed_headers.append(lower_header)\n canonical_headers = '\\n'.join(a_canonical_headers) + '\\n'\n signed_headers = ';'.join(a_signed_headers)\n\n algorithm = 'AWS4-HMAC-SHA256'\n\n # Combine elements to create create canonical request\n canonical_request = '\\n'.join((\n method,\n uri,\n querystring,\n canonical_headers,\n signed_headers,\n payload_hash\n ))\n\n # Create the string to sign\n credential_scope = '/'.join((datestamp, location, product, 'aws4_request'))\n string_to_sign = '\\n'.join((\n algorithm,\n amzdate,\n credential_scope,\n salt.utils.hashutils.sha256_digest(canonical_request)\n ))\n\n # Create the signing key using the function defined above.\n signing_key = _sig_key(\n secret_access_key,\n datestamp,\n location,\n product\n )\n\n # Sign the string_to_sign using the signing_key\n signature = hmac.new(\n signing_key,\n string_to_sign.encode('utf-8'),\n hashlib.sha256).hexdigest()\n\n # Add signing information to the request\n authorization_header = (\n '{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}'\n ).format(\n algorithm,\n access_key_id,\n credential_scope,\n signed_headers,\n signature,\n )\n\n new_headers['Authorization'] = authorization_header\n\n requesturl = '{0}?{1}'.format(requesturl, querystring)\n return new_headers, requesturl\n",
"def to_dict(xmltree, attr=False):\n '''\n Convert an XML tree into a dict. The tree that is passed in must be an\n ElementTree object.\n Args:\n xmltree: An ElementTree object.\n attr: If true, attributes will be parsed. If false, they will be ignored.\n\n '''\n if attr:\n return _to_full_dict(xmltree)\n else:\n return _to_dict(xmltree)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for Amazon S3
:depends: requests
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
# Import 3rd-party libs
try:
import requests
HAS_REQUESTS = True # pylint: disable=W0612
except ImportError:
HAS_REQUESTS = False # pylint: disable=W0612
# Import Salt libs
import salt.utils.aws
import salt.utils.files
import salt.utils.hashutils
import salt.utils.xmlutil as xml
from salt._compat import ElementTree as ET
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext import six
log = logging.getLogger(__name__)
|
saltstack/salt
|
salt/utils/atomicfile.py
|
atomic_open
|
python
|
def atomic_open(filename, mode='w'):
    '''
    Open *filename* for writing through a temporary file that is moved
    over the target when closed, making the update appear atomic. The
    returned object behaves like a regular Python file object.

    Read and append modes are rejected: the temporary file starts out
    empty, so those modes could not see the existing contents.
    '''
    forbidden_modes = ('r', 'rb', 'r+', 'rb+', 'a', 'ab')
    if mode in forbidden_modes:
        raise TypeError('Read or append modes don\'t work with atomic_open')
    tmp_kwargs = {
        'prefix': '.___atomic_write',
        'dir': os.path.dirname(filename),
        'delete': False,
    }
    # On Python 3 text mode, disable newline translation so the bytes
    # written match what the caller provides.
    if six.PY3 and 'b' not in mode:
        tmp_kwargs['newline'] = ''
    tmp_file = tempfile.NamedTemporaryFile(mode, **tmp_kwargs)
    return _AtomicWFile(tmp_file, tmp_file.name, filename)
|
Works like a regular `open()` but writes updates into a temporary
file instead of the given file and moves it over when the file is
closed. The file returned behaves as if it was a regular Python file object.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/atomicfile.py#L153-L169
| null |
# -*- coding: utf-8 -*-
'''
A module written originally by Armin Ronacher to manage file transfers in an
atomic way
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import tempfile
import sys
import errno
import time
import random
import shutil
from salt.ext import six
# Import salt libs
import salt.utils.win_dacl
CAN_RENAME_OPEN_FILE = False
# On Windows, os.rename() cannot replace an existing file, so build
# (pseudo-)atomic replacements on top of the Win32 API via ctypes.
if os.name == 'nt':  # pragma: no cover
    # Safe fallbacks in case ctypes setup below fails: always report failure
    # so atomic_rename() falls through to its portable code path.
    _rename = lambda src, dst: False  # pylint: disable=C0103
    _rename_atomic = lambda src, dst: False  # pylint: disable=C0103
    try:
        import ctypes
        _MOVEFILE_REPLACE_EXISTING = 0x1
        _MOVEFILE_WRITE_THROUGH = 0x8
        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW  # pylint: disable=C0103

        def _rename(src, dst):  # pylint: disable=E0102
            # MoveFileExW needs unicode paths; decode using the fs encoding.
            if not isinstance(src, six.text_type):
                src = six.text_type(src, sys.getfilesystemencoding())
            if not isinstance(dst, six.text_type):
                dst = six.text_type(dst, sys.getfilesystemencoding())
            # Prefer the truly-atomic transacted rename when available.
            if _rename_atomic(src, dst):
                return True
            # Retry briefly: the destination may be transiently locked
            # (e.g. by antivirus or indexing services).
            retry = 0
            rval = False
            while not rval and retry < 100:
                rval = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
                                   _MOVEFILE_WRITE_THROUGH)
                if not rval:
                    time.sleep(0.001)
                    retry += 1
            return rval

        # new in Vista and Windows Server 2008
        # pylint: disable=C0103
        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
        _CloseHandle = ctypes.windll.kernel32.CloseHandle
        # pylint: enable=C0103
        CAN_RENAME_OPEN_FILE = True

        def _rename_atomic(src, dst):  # pylint: disable=E0102
            # Transacted file move: either the whole rename commits or
            # nothing changes on disk.
            tra = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Atomic rename')
            if tra == -1:
                return False
            try:
                retry = 0
                rval = False
                while not rval and retry < 100:
                    rval = _MoveFileTransacted(src, dst, None, None,
                                               _MOVEFILE_REPLACE_EXISTING |
                                               _MOVEFILE_WRITE_THROUGH, tra)
                    if rval:
                        rval = _CommitTransaction(tra)
                        break
                    else:
                        time.sleep(0.001)
                        retry += 1
                return rval
            finally:
                # Always release the kernel transaction handle.
                _CloseHandle(tra)
    except Exception:
        # ctypes / ktmw32 unavailable: keep the lambda fallbacks above.
        pass
def atomic_rename(src, dst):
    '''
    Rename ``src`` over ``dst``, replacing any existing destination.

    First tries the (pseudo-)atomic Win32 helpers; if those report
    failure, falls back to moving the existing destination aside,
    renaming the source into place, and deleting the displaced file.
    '''
    # Try atomic or pseudo-atomic rename
    if _rename(src, dst):
        return
    # Fall back to "move away and replace"
    try:
        os.rename(src, dst)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
        # BUGFIX: sys.maxint was removed in Python 3; sys.maxsize exists
        # on both Python 2 and 3 and serves the same purpose here (a large
        # random suffix making the temporary name unique).
        old = '{0}-{1:08x}'.format(dst, random.randint(0, sys.maxsize))
        os.rename(dst, old)
        os.rename(src, dst)
        try:
            os.unlink(old)
        except Exception:
            pass
else:
atomic_rename = os.rename # pylint: disable=C0103
CAN_RENAME_OPEN_FILE = True
class _AtomicWFile(object):
    '''
    Helper class for :func:`atomic_open`.

    Wraps a NamedTemporaryFile; a successful close() copies the target's
    permissions (and, where available, Windows security descriptors) onto
    the temporary file and then atomically renames it over the target.
    Leaving the ``with`` block via an exception discards the temp file.
    '''
    def __init__(self, fhanle, tmp_filename, filename):
        # NOTE(review): 'fhanle' looks like a typo for 'fhandle'; kept
        # unchanged since renaming would break keyword callers.
        self._fh = fhanle
        self._tmp_filename = tmp_filename
        self._filename = filename

    def __getattr__(self, attr):
        # Delegate all other file operations (write, flush, ...) to the
        # underlying temporary file object.
        return getattr(self._fh, attr)

    def __enter__(self):
        return self

    def close(self):
        if self._fh.closed:
            return
        self._fh.close()
        if os.path.isfile(self._filename):
            # Carry the existing target's permissions/ownership over to
            # the temp file so the rename does not change them.
            if salt.utils.win_dacl.HAS_WIN32:
                salt.utils.win_dacl.copy_security(
                    source=self._filename, target=self._tmp_filename)
            else:
                shutil.copymode(self._filename, self._tmp_filename)
                st = os.stat(self._filename)
                os.chown(self._tmp_filename, st.st_uid, st.st_gid)
        atomic_rename(self._tmp_filename, self._filename)

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            self.close()
        else:
            # An exception occurred: abandon the update and remove the
            # partially written temporary file.
            self._fh.close()
            try:
                os.remove(self._tmp_filename)
            except OSError:
                pass

    def __repr__(self):
        return '<{0} {1}{2}, mode {3}>'.format(
            self.__class__.__name__,
            self._fh.closed and 'closed ' or '',
            self._filename,
            self._fh.mode
        )
|
saltstack/salt
|
salt/pillar/nodegroups.py
|
ext_pillar
|
python
|
def ext_pillar(minion_id, pillar, pillar_name=None):
    '''
    A salt external pillar which provides the list of nodegroups of which the minion is a member.

    :param minion_id: used for compound matching nodegroups
    :param pillar: provided by salt, but not used by nodegroups ext_pillar
    :param pillar_name: optional name to use for the pillar, defaults to 'nodegroups'
    :return: a dictionary which is included by the salt master in the pillars returned to the minion
    '''
    result_key = pillar_name or 'nodegroups'
    configured_groups = __opts__['nodegroups']
    member_of = []
    # Build the matcher lazily: only when at least one nodegroup exists.
    matcher = None
    for group_name in six.iterkeys(configured_groups):
        if matcher is None:
            matcher = CkMinions(__opts__)
        check = matcher.check_minions(configured_groups[group_name], 'compound')
        if minion_id in check['minions']:
            member_of.append(group_name)
    return {result_key: member_of}
|
A salt external pillar which provides the list of nodegroups of which the minion is a member.
:param minion_id: used for compound matching nodegroups
:param pillar: provided by salt, but not used by nodegroups ext_pillar
:param pillar_name: optional name to use for the pillar, defaults to 'nodegroups'
:return: a dictionary which is included by the salt master in the pillars returned to the minion
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/nodegroups.py#L49-L73
|
[
"def iterkeys(d, **kw):\n return d.iterkeys(**kw)\n"
] |
# -*- coding: utf-8 -*-
'''
Nodegroups Pillar
=================
Introspection: to which nodegroups does my minion belong?
Provides a pillar with the default name of `nodegroups`
which contains a list of nodegroups which match for a given minion.
.. versionadded:: 2016.11.0
Command Line
------------
.. code-block:: bash
salt-call pillar.get nodegroups
local:
- class_infra
- colo_sj
- state_active
- country_US
- type_saltmaster
Configuring Nodegroups Pillar
-----------------------------
.. code-block:: yaml
extension_modules: /srv/salt/ext
ext_pillar:
- nodegroups:
pillar_name: 'nodegroups'
'''
# Import futures
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.utils.minions import CkMinions
# Import 3rd-party libs
from salt.ext import six
__version__ = '0.0.2'
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
_compute_signature
|
python
|
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html

    The signature is an HMAC-SHA256 of "METHOD\\npath\\n" followed by the
    sorted, url-encoded parameters, base64-encoded.
    '''
    parameters['signature_method'] = 'HmacSHA256'

    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    # Parameters must be sorted by key and percent-encoded before signing.
    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs

    # BUGFIX: on Python 3, hmac.new() requires a bytes key and update()
    # requires bytes input; passing str raised TypeError. Encoding is a
    # no-op change on Python 2 for the ASCII content produced above.
    if not isinstance(access_key_secret, bytes):
        access_key_secret = access_key_secret.encode('utf-8')
    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign.encode('utf-8'))

    signature = base64.b64encode(h.digest()).strip()

    return signature
|
Generate an API request signature. Detailed document can be found at:
https://docs.qingcloud.com/api/common/signature.html
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L105-L128
| null |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Load this cloud module only when the QingCloud provider is configured
    and its third-party dependencies are importable.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured QingCloud provider instance.
    '''
    required_keys = ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys,
    )
def get_dependencies():
    '''
    Warn if the required third-party dependencies are missing.
    '''
    deps = {'requests': HAS_REQUESTS}
    return config.check_driver_dependencies(__virtualname__, deps)
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.

    Builds the common request parameters, flattens any list/dict values
    into QingCloud's dotted "key.N.subkey" form, signs the request, and
    returns the decoded JSON response. Raises SaltCloudSystemExit on a
    non-200 HTTP status or a non-zero API ret_code.
    '''
    path = 'https://api.qingcloud.com/iaas/'
    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )
    # public interface parameters
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }
    # include action or function parameters
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                # Lists become key.1, key.2, ... (1-based per the API);
                # dict elements expand further to key.N.subkey, with
                # nested containers JSON-encoded.
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value
    # Calculate the string for Signature
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature
    # print('parameters:')
    # pprint.pprint(real_parameters)
    # NOTE(review): verify=False disables TLS certificate verification for
    # this API call — confirm this is intentional.
    request = requests.get(path, params=real_parameters, verify=False)
    # print('url:')
    # print(request.url)
    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )
    log.debug(request.url)
    content = request.text
    result = salt.utils.json.loads(content)
    # print('response:')
    # pprint.pprint(result)
    # ret_code 0 means success at the API level even when HTTP is 200.
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )
    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations (zones) on the provider with
    relevant data.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    response = query(params={'action': 'DescribeZones'})

    result = {}
    for region in response['zone_set']:
        zone_id = region['zone_id']
        # Stringify every attribute for salt-cloud's output layer.
        result[zone_id] = {key: six.text_type(region[key]) for key in region}
    return result
def _get_location(vm_=None):
    '''
    Return the VM's location (zone). Used by create().

    Raises SaltCloudNotFound when no zone is configured or the configured
    zone is unknown to the provider.
    '''
    known_locations = avail_locations()

    requested = six.text_type(config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    ))

    if not requested:
        raise SaltCloudNotFound('No location specified for this VM.')

    if requested not in known_locations:
        raise SaltCloudNotFound(
            'The specified location, \'{0}\', could not be found.'.format(
                requested
            )
        )
    return requested
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the system images available on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    response = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    result = {}
    for image in response['image_set']:
        result[image['image_id']] = {key: image[key] for key in image}
    return result
def _get_image(vm_):
    '''
    Return the VM's image. Used by create().

    Raises SaltCloudNotFound when no image is configured or the configured
    image is unknown to the provider.
    '''
    available = avail_images()

    requested = six.text_type(config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    ))

    if not requested:
        raise SaltCloudNotFound('No image specified for this VM.')

    if requested in available:
        return requested

    raise SaltCloudNotFound(
        'The specified image, \'{0}\', could not be found.'.format(requested)
    )
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning one or more images.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # 'image' may be a comma-separated list of image ids.
    image_list = kwargs['image'].split(',')

    response = query(params={
        'action': 'DescribeImages',
        'images': image_list,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    if not response['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')

    result = {}
    for image in response['image_set']:
        result[image['image_id']] = {key: image[key] for key in image}
    return result
# QingCloud doesn't provide an API for getting instance sizes, so the
# tables are hard-coded per zone. 'ap1' and 'gd1' reuse the 'pek2' table
# via the alias assignments below.
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return the instance sizes available in the selected zone.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Shallow-copy each size entry so callers cannot mutate the table.
    return {
        size_key: dict(attributes)
        for size_key, attributes in QINGCLOUD_SIZES[zone].items()
    }
def _get_size(vm_):
    '''
    Return the VM's size. Used by create().

    Raises SaltCloudNotFound when no size is configured or when the
    configured size does not exist in the zone's size table.
    '''
    sizes = avail_sizes()
    vm_size = config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    )
    # Check for a missing value *before* stringifying: six.text_type(None)
    # yields the truthy string 'None', which used to defeat this guard.
    if not vm_size:
        raise SaltCloudNotFound('No size specified for this instance.')
    vm_size = six.text_type(vm_size)
    if vm_size in sizes:
        return vm_size
    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    )
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()
    items = query(params={
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    })
    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    result = {}
    if items['total_count'] == 0:
        return result

    for node in items['instance_set']:
        # Merge the normalized fields into the raw API record.
        node.update(_show_normalized_node(node))
        result[node['instance_id']] = node

    # Strip a possible '<alias>:<driver>' suffix before caching.
    provider = __active_provider_name__ or 'qingcloud'
    provider = provider.split(':')[0]
    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)

    return result
def list_nodes(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    # Trim the full node records down to the standard salt-cloud fields.
    wanted = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')
    ret = {}
    for instance_id, node in list_nodes_full().items():
        ret[instance_id] = {field: node[field] for field in wanted}
    return ret
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only a list of
    instances names, and their state, is returned.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        instance_id: {
            'name': node['instance_name'],
            'status': node['status'],
        }
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with selected
    fields.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    # 'function' is passed so list_nodes_full() does not reject the call.
    full_nodes = list_nodes_full('function')
    return salt.utils.cloud.list_nodes_select(
        full_nodes,
        __opts__['query.selection'],
        call,
    )
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    items = query(params={
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    })

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    # Merge the normalized fields into the raw API record and return it.
    node = items['instance_set'][0]
    node.update(_show_normalized_node(node))
    return node
def _query_node_data(instance_id):
    '''
    Polling callback for salt.utils.cloud.wait_for_ip().

    Returns the node data once a private IP is present, False when the
    instance cannot be described, and None (implicitly) to keep waiting.
    '''
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    if data.get('private_ips'):
        return data
def create(vm_):
    '''
    Create a single instance from a data dict.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'qingcloud',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])

    # Parameters for the RunInstances API call.
    run_params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', run_params, list(run_params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = query(run_params)
    new_instance_id = result['instances'][0]

    try:
        # Poll until the instance reports a private IP or we time out.
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    private_ip = data['private_ips'][0]
    log.debug('VM %s is now running', private_ip)
    vm_['ssh_host'] = private_ip

    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return data
def script(vm_):
    '''
    Return the script deployment object.
    '''
    # Render the minion config to YAML and hand it to the OS bootstrap
    # script builder.
    minion_yaml = salt.utils.cloud.salt_config_to_yaml(
        salt.utils.cloud.minion_config(__opts__, vm_)
    )
    return salt.utils.cloud.os_script(
        config.get_cloud_config_value('script', vm_, __opts__),
        vm_,
        __opts__,
        minion_yaml,
    )
def start(instance_id, call=None):
    '''
    Start an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        # Fixed copy/paste error: the message previously said 'stop'.
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    log.info('Starting instance %s', instance_id)
    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    return result
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    log.info('Stopping instance %s', instance_id)
    # 'force' is sent as 0/1; a forced stop is the equivalent of cutting
    # power to the instance.
    return query({
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        'force': int(force),
    })
def reboot(instance_id, call=None):
    '''
    Reboot an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        # Fixed copy/paste error: the message previously said 'stop'.
        raise SaltCloudSystemExit(
            'The reboot action must be called with -a or --action.'
        )

    log.info('Rebooting instance %s', instance_id)
    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    return result
def destroy(instance_id, call=None):
    '''
    Destroy an instance.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Look the instance up first so events can carry its display name.
    name = show_instance(instance_id, call='action')['instance_name']

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = query({
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    })

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
query
|
python
|
def query(params=None):
'''
Make a web call to QingCloud IaaS API.

Signs the request per QingCloud signature v1, issues a GET, and returns
the decoded JSON response. Raises SaltCloudSystemExit on a non-200 HTTP
status or a non-zero QingCloud ret_code.
'''
path = 'https://api.qingcloud.com/iaas/'
access_key_id = config.get_cloud_config_value(
'access_key_id', get_configured_provider(), __opts__, search_global=False
)
access_key_secret = config.get_cloud_config_value(
'secret_access_key', get_configured_provider(), __opts__, search_global=False
)
# public interface parameters
real_parameters = {
'access_key_id': access_key_id,
'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'version': DEFAULT_QINGCLOUD_API_VERSION,
}
# include action or function parameters
# List values are flattened into QingCloud's indexed form, e.g.
# 'instances': ['a', 'b'] becomes instances.1=a, instances.2=b; dict
# elements become key.N.subkey entries (nested values JSON-encoded).
if params:
for key, value in params.items():
if isinstance(value, list):
for i in range(1, len(value) + 1):
if isinstance(value[i - 1], dict):
for sk, sv in value[i - 1].items():
if isinstance(sv, dict) or isinstance(sv, list):
sv = salt.utils.json.dumps(sv, separators=(',', ':'))
real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
else:
real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
else:
real_parameters[key] = value
# Calculate the string for Signature
signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
real_parameters['signature'] = signature
# print('parameters:')
# pprint.pprint(real_parameters)
# NOTE(review): verify=False disables TLS certificate validation, which
# permits man-in-the-middle attacks on the API channel — this should be
# made configurable rather than hard-coded off.
request = requests.get(path, params=real_parameters, verify=False)
# print('url:')
# print(request.url)
if request.status_code != 200:
raise SaltCloudSystemExit(
'An error occurred while querying QingCloud. HTTP Code: {0} '
'Error: \'{1}\''.format(
request.status_code,
request.text
)
)
log.debug(request.url)
content = request.text
result = salt.utils.json.loads(content)
# print('response:')
# pprint.pprint(result)
# QingCloud signals API-level failure via ret_code even on HTTP 200.
if result['ret_code'] != 0:
raise SaltCloudSystemExit(
pprint.pformat(result.get('message', {}))
)
return result
|
Make a web call to QingCloud IaaS API.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L131-L201
|
[
"def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n",
"def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n",
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('access_key_id', 'secret_access_key', 'zone', 'key_filename')\n )\n",
"def _compute_signature(parameters, access_key_secret, method, path):\n '''\n Generate an API request signature. Detailed document can be found at:\n\n https://docs.qingcloud.com/api/common/signature.html\n '''\n parameters['signature_method'] = 'HmacSHA256'\n\n string_to_sign = '{0}\\n{1}\\n'.format(method.upper(), path)\n\n keys = sorted(parameters.keys())\n pairs = []\n for key in keys:\n val = six.text_type(parameters[key]).encode('utf-8')\n pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))\n qs = '&'.join(pairs)\n string_to_sign += qs\n\n h = hmac.new(access_key_secret, digestmod=sha256)\n h.update(string_to_sign)\n\n signature = base64.b64encode(h.digest()).strip()\n\n return signature\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Check for QingCloud configurations.
    '''
    # Refuse to load unless the provider is configured and the
    # third-party 'requests' dependency is importable.
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # All four settings are mandatory for this driver to operate.
    required_keys = ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys,
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    # The driver's only third-party requirement is the 'requests' library.
    deps = {'requests': HAS_REQUESTS}
    return config.check_driver_dependencies(__virtualname__, deps)
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html

    Mutates ``parameters`` by inserting ``signature_method``, then returns
    the base64-encoded HMAC-SHA256 of the canonical query string.
    '''
    parameters['signature_method'] = 'HmacSHA256'

    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    # Canonicalize: keys sorted, values percent-encoded ('-', '_', '~' safe).
    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs

    # hmac.new() requires bytes for both the key and the message on
    # Python 3; passing str raised TypeError here.
    if isinstance(access_key_secret, six.text_type):
        access_key_secret = access_key_secret.encode('utf-8')
    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign.encode('utf-8'))

    signature = base64.b64encode(h.digest()).strip()

    return signature
def avail_locations(call=None):
    '''
    Return a dict of all available locations on the provider with
    relevant data.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    items = query(params={'action': 'DescribeZones'})

    # Stringify every attribute of each zone record for display.
    result = {}
    for zone in items['zone_set']:
        result[zone['zone_id']] = {
            key: six.text_type(value) for key, value in zone.items()
        }
    return result
def _get_location(vm_=None):
    '''
    Return the VM's location (zone). Used by create().

    Raises SaltCloudNotFound when no zone is configured or when the
    configured zone is not offered by the provider.
    '''
    locations = avail_locations()
    vm_location = config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    )
    # Check for a missing value *before* stringifying: six.text_type(None)
    # yields the truthy string 'None', which used to defeat this guard.
    if not vm_location:
        raise SaltCloudNotFound('No location specified for this VM.')
    vm_location = six.text_type(vm_location)
    if vm_location in locations:
        return vm_location
    raise SaltCloudNotFound(
        'The specified location, \'{0}\', could not be found.'.format(
            vm_location
        )
    )
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the images that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # 'provider: system' restricts the listing to QingCloud's own images.
    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    result = {}
    for image in items['image_set']:
        result[image['image_id']] = dict(image)
    return result
def _get_image(vm_):
    '''
    Return the VM's image. Used by create().

    Raises SaltCloudNotFound when no image is configured or when the
    configured image is not available on the provider.
    '''
    images = avail_images()
    vm_image = config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    )
    # Check for a missing value *before* stringifying: six.text_type(None)
    # yields the truthy string 'None', which used to defeat this guard.
    if not vm_image:
        raise SaltCloudNotFound('No image specified for this VM.')
    vm_image = six.text_type(vm_image)
    if vm_image in images:
        return vm_image
    raise SaltCloudNotFound(
        'The specified image, \'{0}\', could not be found.'.format(vm_image)
    )
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning an image.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # Fail with a clear salt-cloud error instead of a raw KeyError when
    # the required 'image' argument is missing.
    if 'image' not in kwargs:
        raise SaltCloudSystemExit(
            'The show_image function requires an \'image\' argument.'
        )
    images = kwargs['image'].split(',')

    params = {
        'action': 'DescribeImages',
        'images': images,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    }
    items = query(params=params)

    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')

    result = {}
    for image in items['image_set']:
        result[image['image_id']] = dict(image)
    return result
# QingCloud doesn't provide an API of geting instance sizes
# Static size table used by avail_sizes()/_get_size(), keyed first by
# zone id and then by instance-type name. Each entry records the vCPU
# count and memory string for that type.
QINGCLOUD_SIZES = {
'pek2': {
'c1m1': {'cpu': 1, 'memory': '1G'},
'c1m2': {'cpu': 1, 'memory': '2G'},
'c1m4': {'cpu': 1, 'memory': '4G'},
'c2m2': {'cpu': 2, 'memory': '2G'},
'c2m4': {'cpu': 2, 'memory': '4G'},
'c2m8': {'cpu': 2, 'memory': '8G'},
'c4m4': {'cpu': 4, 'memory': '4G'},
'c4m8': {'cpu': 4, 'memory': '8G'},
'c4m16': {'cpu': 4, 'memory': '16G'},
},
'pek1': {
'small_b': {'cpu': 1, 'memory': '1G'},
'small_c': {'cpu': 1, 'memory': '2G'},
'medium_a': {'cpu': 2, 'memory': '2G'},
'medium_b': {'cpu': 2, 'memory': '4G'},
'medium_c': {'cpu': 2, 'memory': '8G'},
'large_a': {'cpu': 4, 'memory': '4G'},
'large_b': {'cpu': 4, 'memory': '8G'},
'large_c': {'cpu': 4, 'memory': '16G'},
},
}
# The ap1 and gd1 zones offer the same instance types as pek2, so they
# alias (share) pek2's table rather than duplicating it.
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return a list of the instance sizes that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Copy the static table for the requested zone so callers cannot
    # mutate the module-level QINGCLOUD_SIZES mapping.
    result = {}
    for size_name, attributes in QINGCLOUD_SIZES[zone].items():
        result[size_name] = dict(attributes)
    return result
def _get_size(vm_):
    '''
    Return the VM's size. Used by create().

    Raises SaltCloudNotFound when no size is configured or when the
    configured size does not exist in the zone's size table.
    '''
    sizes = avail_sizes()
    vm_size = config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    )
    # Check for a missing value *before* stringifying: six.text_type(None)
    # yields the truthy string 'None', which used to defeat this guard.
    if not vm_size:
        raise SaltCloudNotFound('No size specified for this instance.')
    vm_size = six.text_type(vm_size)
    if vm_size in sizes:
        return vm_size
    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    )
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()
    items = query(params={
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    })
    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    result = {}
    if items['total_count'] == 0:
        return result

    for node in items['instance_set']:
        # Merge the normalized fields into the raw API record.
        node.update(_show_normalized_node(node))
        result[node['instance_id']] = node

    # Strip a possible '<alias>:<driver>' suffix before caching.
    provider = __active_provider_name__ or 'qingcloud'
    provider = provider.split(':')[0]
    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)

    return result
def list_nodes(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    # Trim the full node records down to the standard salt-cloud fields.
    wanted = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')
    ret = {}
    for instance_id, node in list_nodes_full().items():
        ret[instance_id] = {field: node[field] for field in wanted}
    return ret
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only a list of
    instances names, and their state, is returned.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        instance_id: {
            'name': node['instance_name'],
            'status': node['status'],
        }
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with selected
    fields.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    # 'function' is passed so list_nodes_full() does not reject the call.
    full_nodes = list_nodes_full('function')
    return salt.utils.cloud.list_nodes_select(
        full_nodes,
        __opts__['query.selection'],
        call,
    )
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    items = query(params={
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    })

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    # Merge the normalized fields into the raw API record and return it.
    node = items['instance_set'][0]
    node.update(_show_normalized_node(node))
    return node
def _query_node_data(instance_id):
    '''
    Polling callback for salt.utils.cloud.wait_for_ip().

    Returns the node data once a private IP is present, False when the
    instance cannot be described, and None (implicitly) to keep waiting.
    '''
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    if data.get('private_ips'):
        return data
def create(vm_):
    '''
    Create a single instance from a data dict.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'qingcloud',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])

    # Parameters for the RunInstances API call.
    run_params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', run_params, list(run_params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = query(run_params)
    new_instance_id = result['instances'][0]

    try:
        # Poll until the instance reports a private IP or we time out.
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    private_ip = data['private_ips'][0]
    log.debug('VM %s is now running', private_ip)
    vm_['ssh_host'] = private_ip

    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return data
def script(vm_):
    '''
    Return the script deployment object.
    '''
    # Render the minion config to YAML and hand it to the OS bootstrap
    # script builder.
    minion_yaml = salt.utils.cloud.salt_config_to_yaml(
        salt.utils.cloud.minion_config(__opts__, vm_)
    )
    return salt.utils.cloud.os_script(
        config.get_cloud_config_value('script', vm_, __opts__),
        vm_,
        __opts__,
        minion_yaml,
    )
def start(instance_id, call=None):
    '''
    Start an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        # Fixed copy/paste error: the message previously said 'stop'.
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    log.info('Starting instance %s', instance_id)
    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    return result
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance, optionally forcing the shutdown.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    log.info('Stopping instance %s', instance_id)

    request = {
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        # The API expects 0/1 rather than a boolean.
        'force': int(force),
    }
    return query(request)
def reboot(instance_id, call=None):
    '''
    Reboot an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            # Fixed copy/paste error: this is the reboot action, not "stop".
            'The reboot action must be called with -a or --action.'
        )

    log.info('Rebooting instance %s', instance_id)

    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
def destroy(instance_id, call=None):
    '''
    Destroy an instance.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Resolve the human-readable name for the destroy events.
    name = show_instance(instance_id, call='action')['instance_name']

    fire = __utils__['cloud.fire_event']
    fire(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = query({
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    })

    fire(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
avail_locations
|
python
|
def avail_locations(call=None):
'''
Return a dict of all available locations on the provider with
relevant data.
CLI Examples:
.. code-block:: bash
salt-cloud --list-locations my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
params = {
'action': 'DescribeZones',
}
items = query(params=params)
result = {}
for region in items['zone_set']:
result[region['zone_id']] = {}
for key in region:
result[region['zone_id']][key] = six.text_type(region[key])
return result
|
Return a dict of all available locations on the provider with
relevant data.
CLI Examples:
.. code-block:: bash
salt-cloud --list-locations my-qingcloud
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L204-L232
|
[
"def query(params=None):\n '''\n Make a web call to QingCloud IaaS API.\n '''\n path = 'https://api.qingcloud.com/iaas/'\n\n access_key_id = config.get_cloud_config_value(\n 'access_key_id', get_configured_provider(), __opts__, search_global=False\n )\n access_key_secret = config.get_cloud_config_value(\n 'secret_access_key', get_configured_provider(), __opts__, search_global=False\n )\n\n # public interface parameters\n real_parameters = {\n 'access_key_id': access_key_id,\n 'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,\n 'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'version': DEFAULT_QINGCLOUD_API_VERSION,\n }\n\n # include action or function parameters\n if params:\n for key, value in params.items():\n if isinstance(value, list):\n for i in range(1, len(value) + 1):\n if isinstance(value[i - 1], dict):\n for sk, sv in value[i - 1].items():\n if isinstance(sv, dict) or isinstance(sv, list):\n sv = salt.utils.json.dumps(sv, separators=(',', ':'))\n real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv\n else:\n real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]\n else:\n real_parameters[key] = value\n\n # Calculate the string for Signature\n signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')\n real_parameters['signature'] = signature\n\n # print('parameters:')\n # pprint.pprint(real_parameters)\n\n request = requests.get(path, params=real_parameters, verify=False)\n\n # print('url:')\n # print(request.url)\n\n if request.status_code != 200:\n raise SaltCloudSystemExit(\n 'An error occurred while querying QingCloud. HTTP Code: {0} '\n 'Error: \\'{1}\\''.format(\n request.status_code,\n request.text\n )\n )\n\n log.debug(request.url)\n\n content = request.text\n result = salt.utils.json.loads(content)\n\n # print('response:')\n # pprint.pprint(result)\n\n if result['ret_code'] != 0:\n raise SaltCloudSystemExit(\n pprint.pformat(result.get('message', {}))\n )\n\n return result\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
'''
Check for QingCloud configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('access_key_id', 'secret_access_key', 'zone', 'key_filename')
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
return config.check_driver_dependencies(
__virtualname__,
{'requests': HAS_REQUESTS}
)
def _compute_signature(parameters, access_key_secret, method, path):
'''
Generate an API request signature. Detailed document can be found at:
https://docs.qingcloud.com/api/common/signature.html
'''
parameters['signature_method'] = 'HmacSHA256'
string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)
keys = sorted(parameters.keys())
pairs = []
for key in keys:
val = six.text_type(parameters[key]).encode('utf-8')
pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
h = hmac.new(access_key_secret, digestmod=sha256)
h.update(string_to_sign)
signature = base64.b64encode(h.digest()).strip()
return signature
def query(params=None):
'''
Make a web call to QingCloud IaaS API.
'''
path = 'https://api.qingcloud.com/iaas/'
access_key_id = config.get_cloud_config_value(
'access_key_id', get_configured_provider(), __opts__, search_global=False
)
access_key_secret = config.get_cloud_config_value(
'secret_access_key', get_configured_provider(), __opts__, search_global=False
)
# public interface parameters
real_parameters = {
'access_key_id': access_key_id,
'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'version': DEFAULT_QINGCLOUD_API_VERSION,
}
# include action or function parameters
if params:
for key, value in params.items():
if isinstance(value, list):
for i in range(1, len(value) + 1):
if isinstance(value[i - 1], dict):
for sk, sv in value[i - 1].items():
if isinstance(sv, dict) or isinstance(sv, list):
sv = salt.utils.json.dumps(sv, separators=(',', ':'))
real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
else:
real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
else:
real_parameters[key] = value
# Calculate the string for Signature
signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
real_parameters['signature'] = signature
# print('parameters:')
# pprint.pprint(real_parameters)
request = requests.get(path, params=real_parameters, verify=False)
# print('url:')
# print(request.url)
if request.status_code != 200:
raise SaltCloudSystemExit(
'An error occurred while querying QingCloud. HTTP Code: {0} '
'Error: \'{1}\''.format(
request.status_code,
request.text
)
)
log.debug(request.url)
content = request.text
result = salt.utils.json.loads(content)
# print('response:')
# pprint.pprint(result)
if result['ret_code'] != 0:
raise SaltCloudSystemExit(
pprint.pformat(result.get('message', {}))
)
return result
def _get_location(vm_=None):
'''
Return the VM's location. Used by create().
'''
locations = avail_locations()
vm_location = six.text_type(config.get_cloud_config_value(
'zone', vm_, __opts__, search_global=False
))
if not vm_location:
raise SaltCloudNotFound('No location specified for this VM.')
if vm_location in locations:
return vm_location
raise SaltCloudNotFound(
'The specified location, \'{0}\', could not be found.'.format(
vm_location
)
)
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the images that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    # Index the image records by image_id, copying every attribute.
    return {
        image['image_id']: {attr: image[attr] for attr in image}
        for image in items['image_set']
    }
def _get_image(vm_):
'''
Return the VM's image. Used by create().
'''
images = avail_images()
vm_image = six.text_type(config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
))
if not vm_image:
raise SaltCloudNotFound('No image specified for this VM.')
if vm_image in images:
return vm_image
raise SaltCloudNotFound(
'The specified image, \'{0}\', could not be found.'.format(vm_image)
)
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning an image.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # ``image`` may be a comma-separated list of image ids.
    image_ids = kwargs['image'].split(',')

    items = query(params={
        'action': 'DescribeImages',
        'images': image_ids,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')

    return {
        image['image_id']: {attr: image[attr] for attr in image}
        for image in items['image_set']
    }
# QingCloud doesn't provide an API for getting instance sizes, so the
# known size tables are hard-coded here, keyed by zone id.
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
# The ap1 and gd1 zones share the pek2 size table (aliased, not copied).
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return a list of the instance sizes that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Copy the static size table so callers cannot mutate the module-level
    # QINGCLOUD_SIZES dict through the returned value.
    return {
        size: dict(attributes)
        for size, attributes in QINGCLOUD_SIZES[zone].items()
    }
def _get_size(vm_):
    '''
    Return the VM's size. Used by create().
    '''
    available = avail_sizes()

    requested = six.text_type(config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No size specified for this instance.')

    if requested in available.keys():
        return requested

    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(requested)
    )
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    Queries all non-terminated instances in the configured zone, merges
    the normalized fields into each raw record, and refreshes the salt
    cloud node-list cache as a side effect.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()

    params = {
        'action': 'DescribeInstances',
        'zone': zone,
        # Terminated/ceased instances are deliberately excluded.
        'status': ['pending', 'running', 'stopped', 'suspended'],
    }
    items = query(params=params)

    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    result = {}

    if items['total_count'] == 0:
        return result

    for node in items['instance_set']:
        # Overlay the normalized keys (id/image/size/state/ips) onto the
        # raw API record so both views are available to callers.
        normalized_node = _show_normalized_node(node)
        node.update(normalized_node)

        result[node['instance_id']] = node

    # Strip any ":driver" suffix from the active provider name before
    # caching the node list under it.
    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]

    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)

    return result
def list_nodes(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    # Reduce each full record to the standard summary fields.
    wanted = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')
    return {
        instance_id: {field: node[field] for field in wanted}
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only a list of
    instances names, and their state, is returned.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        instance_id: {
            'name': node['instance_name'],
            'status': node['status'],
        }
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with selected
    fields.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    # Delegates field filtering to the generic cloud helper; the field
    # list comes from the ``query.selection`` option.
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full('function'),
        __opts__['query.selection'],
        call,
    )
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    items = query(params={
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    })

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    # Merge the normalized fields into the raw API record.
    node = items['instance_set'][0]
    node.update(_show_normalized_node(node))
    return node
def _query_node_data(instance_id):
    # Poll helper for wait_for_ip(): returns the instance data once a
    # private IP is assigned, False when no data came back, and None
    # (falsy) while the IP is still pending.
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    return data if data.get('private_ips', []) else None
def create(vm_):
    '''
    Create a single instance from a data dict.

    Fires the standard salt-cloud lifecycle events, runs the instance,
    waits for a private IP, then bootstraps the minion over SSH.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'qingcloud',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])

    # params
    # NOTE(review): 'vxnets', 'login_mode' and 'login_keypair' are read
    # without defaults — presumably mandatory profile keys; confirm.
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = query(params)

    new_instance_id = result['instances'][0]

    try:
        # Poll until the instance reports a private IP, or time out.
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            # NOTE(review): destroy() is passed the VM *name* here while it
            # queries by instance id elsewhere — confirm this is intended.
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    private_ip = data['private_ips'][0]

    log.debug('VM %s is now running', private_ip)

    # Bootstrap over the private address.
    vm_['ssh_host'] = private_ip

    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return data
def script(vm_):
'''
Return the script deployment object.
'''
deploy_script = salt.utils.cloud.os_script(
config.get_cloud_config_value('script', vm_, __opts__),
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
return deploy_script
def start(instance_id, call=None):
'''
Start an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a start i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Starting instance %s', instance_id)
params = {
'action': 'StartInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
return result
def stop(instance_id, force=False, call=None):
'''
Stop an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a stop i-2f733r5n
salt-cloud -a stop i-2f733r5n force=True
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Stopping instance %s', instance_id)
params = {
'action': 'StopInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
'force': int(force),
}
result = query(params)
return result
def reboot(instance_id, call=None):
'''
Reboot an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a reboot i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Rebooting instance %s', instance_id)
params = {
'action': 'RestartInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
return result
def destroy(instance_id, call=None):
'''
Destroy an instance.
CLI Example:
.. code-block:: bash
salt-cloud -a destroy i-2f733r5n
salt-cloud -d i-2f733r5n
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
instance_data = show_instance(instance_id, call='action')
name = instance_data['instance_name']
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
params = {
'action': 'TerminateInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
avail_images
|
python
|
def avail_images(kwargs=None, call=None):
'''
Return a list of the images that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-images my-qingcloud
salt-cloud -f avail_images my-qingcloud zone=gd1
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not isinstance(kwargs, dict):
kwargs = {}
params = {
'action': 'DescribeImages',
'provider': 'system',
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result
|
Return a list of the images that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-images my-qingcloud
salt-cloud -f avail_images my-qingcloud zone=gd1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L271-L304
|
[
"def query(params=None):\n '''\n Make a web call to QingCloud IaaS API.\n '''\n path = 'https://api.qingcloud.com/iaas/'\n\n access_key_id = config.get_cloud_config_value(\n 'access_key_id', get_configured_provider(), __opts__, search_global=False\n )\n access_key_secret = config.get_cloud_config_value(\n 'secret_access_key', get_configured_provider(), __opts__, search_global=False\n )\n\n # public interface parameters\n real_parameters = {\n 'access_key_id': access_key_id,\n 'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,\n 'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'version': DEFAULT_QINGCLOUD_API_VERSION,\n }\n\n # include action or function parameters\n if params:\n for key, value in params.items():\n if isinstance(value, list):\n for i in range(1, len(value) + 1):\n if isinstance(value[i - 1], dict):\n for sk, sv in value[i - 1].items():\n if isinstance(sv, dict) or isinstance(sv, list):\n sv = salt.utils.json.dumps(sv, separators=(',', ':'))\n real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv\n else:\n real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]\n else:\n real_parameters[key] = value\n\n # Calculate the string for Signature\n signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')\n real_parameters['signature'] = signature\n\n # print('parameters:')\n # pprint.pprint(real_parameters)\n\n request = requests.get(path, params=real_parameters, verify=False)\n\n # print('url:')\n # print(request.url)\n\n if request.status_code != 200:\n raise SaltCloudSystemExit(\n 'An error occurred while querying QingCloud. HTTP Code: {0} '\n 'Error: \\'{1}\\''.format(\n request.status_code,\n request.text\n )\n )\n\n log.debug(request.url)\n\n content = request.text\n result = salt.utils.json.loads(content)\n\n # print('response:')\n # pprint.pprint(result)\n\n if result['ret_code'] != 0:\n raise SaltCloudSystemExit(\n pprint.pformat(result.get('message', {}))\n )\n\n return result\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('access_key_id', 'secret_access_key', 'zone', 'key_filename')\n )\n",
"def _get_specified_zone(kwargs=None, provider=None):\n if provider is None:\n provider = get_configured_provider()\n\n if isinstance(kwargs, dict):\n zone = kwargs.get('zone', None)\n if zone is not None:\n return zone\n\n zone = provider['zone']\n return zone\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Only load this module when the QingCloud provider is configured and
    its dependencies are available.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # All four options are mandatory; is_provider_configured() returns
    # False when any of them is missing from the provider config.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    # The driver only needs the third-party ``requests`` library.
    return config.check_driver_dependencies(
        __virtualname__,
        {'requests': HAS_REQUESTS}
    )
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html

    parameters
        dict of request parameters; mutated in place to record the
        signature method.
    access_key_secret
        account secret used as the HMAC-SHA256 key.
    method
        HTTP method, e.g. ``GET``.
    path
        request path, e.g. ``/iaas/``.
    '''
    parameters['signature_method'] = 'HmacSHA256'

    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    # Canonical query string: keys sorted, values percent-encoded.
    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs

    # hmac.new() and HMAC.update() require bytes on Python 3; the previous
    # code passed text objects and raised a TypeError there.
    if isinstance(access_key_secret, six.text_type):
        access_key_secret = access_key_secret.encode('utf-8')
    if isinstance(string_to_sign, six.text_type):
        string_to_sign = string_to_sign.encode('utf-8')

    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign)

    signature = base64.b64encode(h.digest()).strip()

    return signature
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.

    params
        optional dict of action/function parameters merged into the
        common request parameters before signing.

    Returns the decoded JSON response as a dict.

    Raises SaltCloudSystemExit on a non-200 HTTP status or a non-zero
    QingCloud ``ret_code``.
    '''
    path = 'https://api.qingcloud.com/iaas/'

    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )

    # public interface parameters
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }

    # include action or function parameters
    # List values are flattened into the "key.N" / "key.N.subkey" form the
    # QingCloud API expects; deeper nested dicts/lists are JSON-encoded.
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value

    # Calculate the string for Signature
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature

    # print('parameters:')
    # pprint.pprint(real_parameters)

    # NOTE(review): TLS certificate verification is disabled here
    # (verify=False) — confirm whether this is intentional.
    request = requests.get(path, params=real_parameters, verify=False)

    # print('url:')
    # print(request.url)

    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )

    log.debug(request.url)

    content = request.text
    result = salt.utils.json.loads(content)

    # print('response:')
    # pprint.pprint(result)

    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )

    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations on the provider with
    relevant data.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    zones = query(params={'action': 'DescribeZones'})

    # Index zone records by zone_id, stringifying every attribute.
    return {
        zone['zone_id']: {
            attr: six.text_type(value) for attr, value in zone.items()
        }
        for zone in zones['zone_set']
    }
def _get_location(vm_=None):
    '''
    Return the VM's location. Used by create().
    '''
    requested = six.text_type(config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No location specified for this VM.')

    if requested in avail_locations():
        return requested

    raise SaltCloudNotFound(
        'The specified location, \'{0}\', could not be found.'.format(
            requested
        )
    )
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def _get_image(vm_):
    '''
    Return the VM's image. Used by create().
    '''
    requested = six.text_type(config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No image specified for this VM.')

    if requested in avail_images():
        return requested

    raise SaltCloudNotFound(
        'The specified image, \'{0}\', could not be found.'.format(requested)
    )
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning an image.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        # Fixed: the message previously referred to 'show_images'.
        raise SaltCloudSystemExit(
            'The show_image function must be called with '
            '-f or --function'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    if 'image' not in kwargs:
        # Fail with a clear message instead of a bare KeyError.
        raise SaltCloudSystemExit(
            'The show_image function requires an \'image\' argument.'
        )

    # Accept a single image id or a comma-separated list of ids.
    images = kwargs['image'].split(',')

    params = {
        'action': 'DescribeImages',
        'images': images,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    }
    items = query(params=params)

    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')

    result = {}
    for image in items['image_set']:
        # Copy each image record so callers cannot mutate the response.
        result[image['image_id']] = dict(image)
    return result
# QingCloud does not provide an API for querying instance sizes, so the
# known flavors are hard-coded per zone below ('cpu' count and 'memory').
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
# The ap1 and gd1 zones share the pek2 flavor table (aliased, not copied).
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return a list of the instance sizes that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Deep-copy the static size table so callers cannot mutate it.
    return {
        name: dict(attributes)
        for name, attributes in QINGCLOUD_SIZES[zone].items()
    }
def _get_size(vm_):
    '''
    Return the VM's size. Used by create().
    '''
    requested = six.text_type(config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No size specified for this instance.')

    if requested in avail_sizes():
        return requested

    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(requested)
    )
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()
    items = query(params={
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    })
    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    result = {}
    if items['total_count'] == 0:
        return result

    for node in items['instance_set']:
        # Merge the normalized keys into the raw node data.
        node.update(_show_normalized_node(node))
        result[node['instance_id']] = node

    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        provider = provider.split(':')[0]

    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)

    return result
def list_nodes(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    # Keep only the standard salt-cloud summary fields per instance.
    wanted = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')
    return {
        instance_id: {field: full_node[field] for field in wanted}
        for instance_id, full_node in list_nodes_full().items()
    }
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only a list of
    instances names, and their state, is returned.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        instance_id: {
            'name': node['instance_name'],
            'status': node['status'],
        }
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with selected
    fields.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    selection = __opts__['query.selection']
    nodes = list_nodes_full('function')
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    items = query(params={
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    })

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    # Merge the normalized keys into the raw instance record.
    node = items['instance_set'][0]
    node.update(_show_normalized_node(node))
    return node
def _query_node_data(instance_id):
    '''
    Poll helper for salt.utils.cloud.wait_for_ip(): return the instance data
    once it has at least one private IP, ``False`` when no data came back,
    and ``None`` (falsy, i.e. "retry") while the IP is still pending.
    '''
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    return data if data.get('private_ips', []) else None
def create(vm_):
    '''
    Create a single instance from a data dict.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'qingcloud',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Announce on the salt event bus that provisioning has started.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    # Build the RunInstances request from the profile/config values.
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    result = query(params)
    new_instance_id = result['instances'][0]
    try:
        # Block until the new instance reports a private IP, or time out.
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            # NOTE(review): destroy() takes an instance id, but the VM *name*
            # is passed here -- confirm this cleanup path actually works.
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    private_ip = data['private_ips'][0]
    log.debug('VM %s is now running', private_ip)
    vm_['ssh_host'] = private_ip
    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return data
def script(vm_):
    '''
    Return the script deployment object.
    '''
    minion_yaml = salt.utils.cloud.salt_config_to_yaml(
        salt.utils.cloud.minion_config(__opts__, vm_)
    )
    script_name = config.get_cloud_config_value('script', vm_, __opts__)
    return salt.utils.cloud.os_script(script_name, vm_, __opts__, minion_yaml)
def start(instance_id, call=None):
    '''
    Start an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        # Fixed: the message previously referred to the 'stop' action.
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    log.info('Starting instance %s', instance_id)

    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    return query(params)
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    log.info('Stopping instance %s', instance_id)

    # 'force' is sent as 0/1 per the API's integer-boolean convention.
    return query({
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        'force': int(force),
    })
def reboot(instance_id, call=None):
    '''
    Reboot an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        # Fixed: the message previously referred to the 'stop' action.
        raise SaltCloudSystemExit(
            'The reboot action must be called with -a or --action.'
        )

    log.info('Rebooting instance %s', instance_id)

    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    return query(params)
def destroy(instance_id, call=None):
    '''
    Destroy an instance.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Look up the instance first so events can carry its display name.
    instance_data = show_instance(instance_id, call='action')
    name = instance_data['instance_name']

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    response = query({
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    })

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return response
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
show_image
|
python
|
def show_image(kwargs, call=None):
'''
Show the details from QingCloud concerning an image.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_image my-qingcloud image=trustysrvx64c
salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_images function must be called with '
'-f or --function'
)
if not isinstance(kwargs, dict):
kwargs = {}
images = kwargs['image']
images = images.split(',')
params = {
'action': 'DescribeImages',
'images': images,
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
if not items['image_set']:
raise SaltCloudNotFound('The specified image could not be found.')
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result
|
Show the details from QingCloud concerning an image.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_image my-qingcloud image=trustysrvx64c
salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L327-L368
|
[
"def query(params=None):\n '''\n Make a web call to QingCloud IaaS API.\n '''\n path = 'https://api.qingcloud.com/iaas/'\n\n access_key_id = config.get_cloud_config_value(\n 'access_key_id', get_configured_provider(), __opts__, search_global=False\n )\n access_key_secret = config.get_cloud_config_value(\n 'secret_access_key', get_configured_provider(), __opts__, search_global=False\n )\n\n # public interface parameters\n real_parameters = {\n 'access_key_id': access_key_id,\n 'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,\n 'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'version': DEFAULT_QINGCLOUD_API_VERSION,\n }\n\n # include action or function parameters\n if params:\n for key, value in params.items():\n if isinstance(value, list):\n for i in range(1, len(value) + 1):\n if isinstance(value[i - 1], dict):\n for sk, sv in value[i - 1].items():\n if isinstance(sv, dict) or isinstance(sv, list):\n sv = salt.utils.json.dumps(sv, separators=(',', ':'))\n real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv\n else:\n real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]\n else:\n real_parameters[key] = value\n\n # Calculate the string for Signature\n signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')\n real_parameters['signature'] = signature\n\n # print('parameters:')\n # pprint.pprint(real_parameters)\n\n request = requests.get(path, params=real_parameters, verify=False)\n\n # print('url:')\n # print(request.url)\n\n if request.status_code != 200:\n raise SaltCloudSystemExit(\n 'An error occurred while querying QingCloud. HTTP Code: {0} '\n 'Error: \\'{1}\\''.format(\n request.status_code,\n request.text\n )\n )\n\n log.debug(request.url)\n\n content = request.text\n result = salt.utils.json.loads(content)\n\n # print('response:')\n # pprint.pprint(result)\n\n if result['ret_code'] != 0:\n raise SaltCloudSystemExit(\n pprint.pformat(result.get('message', {}))\n )\n\n return result\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('access_key_id', 'secret_access_key', 'zone', 'key_filename')\n )\n",
"def _get_specified_zone(kwargs=None, provider=None):\n if provider is None:\n provider = get_configured_provider()\n\n if isinstance(kwargs, dict):\n zone = kwargs.get('zone', None)\n if zone is not None:\n return zone\n\n zone = provider['zone']\n return zone\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Check for QingCloud configurations.
    '''
    # Load only when both the provider config and the deps are present.
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    required_keys = ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys,
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    deps = {'requests': HAS_REQUESTS}
    return config.check_driver_dependencies(__virtualname__, deps)
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html

    :param parameters: request parameters; mutated to add signature_method.
    :param access_key_secret: HMAC key (text or bytes).
    :param method: HTTP method, e.g. ``'GET'``.
    :param path: request path, e.g. ``'/iaas/'``.
    :return: the base64-encoded HMAC-SHA256 signature.
    '''
    parameters['signature_method'] = 'HmacSHA256'

    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    # Build the canonical, lexically sorted query string.
    pairs = []
    for key in sorted(parameters.keys()):
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    string_to_sign += '&'.join(pairs)

    # hmac requires a bytes key and message on Python 3; config values
    # arrive as text, so encode them before signing.
    if isinstance(access_key_secret, six.text_type):
        access_key_secret = access_key_secret.encode('utf-8')

    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign.encode('utf-8'))

    return base64.b64encode(h.digest()).strip()
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.

    :param params: action-specific request parameters; list values are
        flattened into the ``key.N`` / ``key.N.subkey`` form the API expects.
    :return: the decoded JSON response body as a dict.
    :raises SaltCloudSystemExit: on a non-200 HTTP status or a non-zero
        QingCloud ``ret_code``.
    '''
    path = 'https://api.qingcloud.com/iaas/'
    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )
    # public interface parameters
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }
    # include action or function parameters
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                # Lists become key.1, key.2, ... (1-based); dict elements
                # expand further into key.N.subkey entries, with nested
                # containers serialized as compact JSON.
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value
    # Calculate the string for Signature
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature
    # SECURITY NOTE(review): TLS certificate verification is disabled here
    # (verify=False), which permits man-in-the-middle attacks -- confirm
    # whether this is intentional.
    request = requests.get(path, params=real_parameters, verify=False)
    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )
    log.debug(request.url)
    content = request.text
    result = salt.utils.json.loads(content)
    # A non-zero ret_code signals an API-level error; surface its message.
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )
    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations on the provider with
    relevant data.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    zones = query(params={'action': 'DescribeZones'})

    # Stringify every zone attribute for display.
    return {
        zone['zone_id']: {attr: six.text_type(zone[attr]) for attr in zone}
        for zone in zones['zone_set']
    }
def _get_location(vm_=None):
    '''
    Return the VM's location. Used by create().
    '''
    requested = six.text_type(config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No location specified for this VM.')

    if requested in avail_locations():
        return requested

    raise SaltCloudNotFound(
        'The specified location, \'{0}\', could not be found.'.format(
            requested
        )
    )
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the images that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    # Copy each image record so callers cannot mutate the response.
    return {
        image['image_id']: dict(image)
        for image in items['image_set']
    }
def _get_image(vm_):
    '''
    Return the VM's image. Used by create().
    '''
    requested = six.text_type(config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No image specified for this VM.')

    if requested in avail_images():
        return requested

    raise SaltCloudNotFound(
        'The specified image, \'{0}\', could not be found.'.format(requested)
    )
# QingCloud does not provide an API for querying instance sizes, so the
# known flavors are hard-coded per zone below ('cpu' count and 'memory').
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
# The ap1 and gd1 zones share the pek2 flavor table (aliased, not copied).
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return a list of the instance sizes that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Deep-copy the static size table so callers cannot mutate it.
    return {
        name: dict(attributes)
        for name, attributes in QINGCLOUD_SIZES[zone].items()
    }
def _get_size(vm_):
    '''
    Return the VM's size. Used by create().
    '''
    requested = six.text_type(config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No size specified for this instance.')

    if requested in avail_sizes():
        return requested

    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(requested)
    )
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()
    items = query(params={
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    })
    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    result = {}
    if items['total_count'] == 0:
        return result

    for node in items['instance_set']:
        # Merge the normalized keys into the raw node data.
        node.update(_show_normalized_node(node))
        result[node['instance_id']] = node

    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        provider = provider.split(':')[0]

    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)

    return result
def list_nodes(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    # Keep only the standard salt-cloud summary fields per instance.
    wanted = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')
    return {
        instance_id: {field: full_node[field] for field in wanted}
        for instance_id, full_node in list_nodes_full().items()
    }
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only a list of
    instances names, and their state, is returned.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        instance_id: {
            'name': node['instance_name'],
            'status': node['status'],
        }
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with selected
    fields.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    selection = __opts__['query.selection']
    nodes = list_nodes_full('function')
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    items = query(params={
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    })

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    # Merge the normalized keys into the raw instance record.
    node = items['instance_set'][0]
    node.update(_show_normalized_node(node))
    return node
def _query_node_data(instance_id):
    '''
    Poll helper for salt.utils.cloud.wait_for_ip(): return the instance data
    once it has at least one private IP, ``False`` when no data came back,
    and ``None`` (falsy, i.e. "retry") while the IP is still pending.
    '''
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    return data if data.get('private_ips', []) else None
def create(vm_):
    '''
    Create a single instance from a data dict.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'qingcloud',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Announce on the salt event bus that provisioning has started.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    # Build the RunInstances request from the profile/config values.
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    result = query(params)
    new_instance_id = result['instances'][0]
    try:
        # Block until the new instance reports a private IP, or time out.
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            # NOTE(review): destroy() takes an instance id, but the VM *name*
            # is passed here -- confirm this cleanup path actually works.
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    private_ip = data['private_ips'][0]
    log.debug('VM %s is now running', private_ip)
    vm_['ssh_host'] = private_ip
    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return data
def script(vm_):
    '''
    Build and return the deployment script object for the given VM dict.
    '''
    # Render the minion configuration to YAML first, then hand everything
    # to the shared os_script helper.
    minion_yaml = salt.utils.cloud.salt_config_to_yaml(
        salt.utils.cloud.minion_config(__opts__, vm_)
    )
    script_name = config.get_cloud_config_value('script', vm_, __opts__)
    return salt.utils.cloud.os_script(script_name, vm_, __opts__, minion_yaml)
def start(instance_id, call=None):
    '''
    Start an instance.
    :param instance_id: the QingCloud instance id, e.g. ``i-2f733r5n``.
    :param call: salt-cloud invocation type; must be ``'action'``.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            # Fixed copy/paste error: this guard is for start, not stop.
            'The start action must be called with -a or --action.'
        )
    log.info('Starting instance %s', instance_id)
    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    return result
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance, optionally forcing an immediate halt.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )
    log.info('Stopping instance %s', instance_id)
    # The API expects force as 0/1, not a boolean.
    return query({
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        'force': int(force),
    })
def reboot(instance_id, call=None):
    '''
    Reboot an instance.
    :param instance_id: the QingCloud instance id, e.g. ``i-2f733r5n``.
    :param call: salt-cloud invocation type; must be ``'action'``.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            # Fixed copy/paste error: this guard is for reboot, not stop.
            'The reboot action must be called with -a or --action.'
        )
    log.info('Rebooting instance %s', instance_id)
    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    return result
def destroy(instance_id, call=None):
    '''
    Destroy (terminate) an instance.
    :param instance_id: the QingCloud instance id, e.g. ``i-2f733r5n``.
    :param call: salt-cloud invocation type; must not be ``'function'``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Look up the instance first so the fired events can carry its
    # human-readable name rather than the raw id.
    instance_data = show_instance(instance_id, call='action')
    name = instance_data['instance_name']
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    params = {
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
avail_sizes
|
python
|
def avail_sizes(kwargs=None, call=None):
'''
Return a list of the instance sizes that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-sizes my-qingcloud
salt-cloud -f avail_sizes my-qingcloud zone=pek2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
zone = _get_specified_zone(kwargs, get_configured_provider())
result = {}
for size_key in QINGCLOUD_SIZES[zone]:
result[size_key] = {}
for attribute_key in QINGCLOUD_SIZES[zone][size_key]:
result[size_key][attribute_key] = QINGCLOUD_SIZES[zone][size_key][attribute_key]
return result
|
Return a list of the instance sizes that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-sizes my-qingcloud
salt-cloud -f avail_sizes my-qingcloud zone=pek2
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L399-L424
|
[
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('access_key_id', 'secret_access_key', 'zone', 'key_filename')\n )\n",
"def _get_specified_zone(kwargs=None, provider=None):\n if provider is None:\n provider = get_configured_provider()\n\n if isinstance(kwargs, dict):\n zone = kwargs.get('zone', None)\n if zone is not None:\n return zone\n\n zone = provider['zone']\n return zone\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Only load this module when both the provider configuration and the
    required third-party libraries are available.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    The provider configuration must supply ``access_key_id``,
    ``secret_access_key``, ``zone`` and ``key_filename``.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    Delegates to the shared driver-dependency check for the ``requests``
    library, whose availability was probed at import time (HAS_REQUESTS).
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'requests': HAS_REQUESTS}
    )
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:
    https://docs.qingcloud.com/api/common/signature.html
    :param parameters: dict of request parameters; mutated in place to add
        ``signature_method``.
    :param access_key_secret: the account's secret key (text or bytes).
    :param method: HTTP method, e.g. ``'GET'``.
    :param path: request path, e.g. ``'/iaas/'``.
    :return: the base64-encoded HMAC-SHA256 signature.
    '''
    parameters['signature_method'] = 'HmacSHA256'
    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)
    # Parameters are joined in sorted key order; values keep '-', '_' and
    # '~' unescaped, per the QingCloud signing spec.
    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs
    # hmac.new() requires bytes for both key and message on Python 3; the
    # previous code passed text here and raised TypeError.
    if isinstance(access_key_secret, six.text_type):
        access_key_secret = access_key_secret.encode('utf-8')
    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign.encode('utf-8'))
    signature = base64.b64encode(h.digest()).strip()
    return signature
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.
    :param params: optional dict of action parameters. List values are
        flattened into the ``key.N`` / ``key.N.subkey`` form the API
        expects; nested dicts/lists inside are JSON-encoded.
    :return: the decoded JSON response dict (``ret_code`` == 0).
    :raises SaltCloudSystemExit: on a non-200 HTTP status or a non-zero
        ``ret_code`` in the response body.
    '''
    path = 'https://api.qingcloud.com/iaas/'
    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )
    # public interface parameters common to every request
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }
    # include action or function parameters, flattening lists into the
    # API's indexed 'key.N' (and 'key.N.subkey' for dict items) notation
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value
    # Sign the flattened parameter set (adds 'signature_method' too).
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature
    # SECURITY NOTE(review): verify=False disables TLS certificate
    # verification for the API endpoint -- confirm this is intentional.
    request = requests.get(path, params=real_parameters, verify=False)
    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )
    log.debug(request.url)
    content = request.text
    result = salt.utils.json.loads(content)
    # A 200 response can still carry an application-level error code.
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )
    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations (zones) on the provider with
    relevant data, keyed by zone id.
    CLI Examples:
    .. code-block:: bash
        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )
    items = query(params={'action': 'DescribeZones'})
    result = {}
    for region in items['zone_set']:
        # Stringify every attribute of the zone record.
        result[region['zone_id']] = dict(
            (key, six.text_type(value)) for key, value in region.items()
        )
    return result
def _get_location(vm_=None):
    '''
    Look up and validate the VM's target zone. Used by create().
    :raises SaltCloudNotFound: if no zone is configured or the configured
        zone is not offered by the provider.
    '''
    locations = avail_locations()
    vm_location = six.text_type(config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    ))
    if not vm_location:
        raise SaltCloudNotFound('No location specified for this VM.')
    if vm_location not in locations:
        raise SaltCloudNotFound(
            'The specified location, \'{0}\', could not be found.'.format(
                vm_location
            )
        )
    return vm_location
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the images that are on the provider, keyed by image id.
    CLI Examples:
    .. code-block:: bash
        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )
    if not isinstance(kwargs, dict):
        kwargs = {}
    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })
    # Shallow-copy each image record into the result map.
    return dict(
        (image['image_id'], dict(image)) for image in items['image_set']
    )
def _get_image(vm_):
    '''
    Look up and validate the VM's image. Used by create().
    :raises SaltCloudNotFound: if no image is configured or the configured
        image is not offered by the provider.
    '''
    images = avail_images()
    vm_image = six.text_type(config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    ))
    if not vm_image:
        raise SaltCloudNotFound('No image specified for this VM.')
    if vm_image not in images:
        raise SaltCloudNotFound(
            'The specified image, \'{0}\', could not be found.'.format(vm_image)
        )
    return vm_image
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning one or more images.
    :param kwargs: must contain an ``image`` key holding a single image id
        or a comma-separated list of ids; may contain ``zone``.
    :raises SaltCloudNotFound: if none of the images exist.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )
    if not isinstance(kwargs, dict):
        kwargs = {}
    # NOTE(review): raises a bare KeyError when 'image' is omitted --
    # confirm whether a SaltCloudSystemExit would be friendlier here.
    images = kwargs['image']
    images = images.split(',')
    params = {
        'action': 'DescribeImages',
        'images': images,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    }
    items = query(params=params)
    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')
    result = {}
    for image in items['image_set']:
        result[image['image_id']] = {}
        for key in image:
            result[image['image_id']][key] = image[key]
    return result
# QingCloud doesn't provide an API for getting instance sizes, so the
# known flavors per zone are hard-coded here.
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
# 'ap1' and 'gd1' offer the same flavors as 'pek2'. Note these aliases share
# the same underlying dict object, so a mutation through one key is visible
# through the others.
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def _get_size(vm_):
    '''
    Look up and validate the VM's instance size. Used by create().
    :raises SaltCloudNotFound: if no size is configured or the configured
        size is not offered in the provider's zone.
    '''
    sizes = avail_sizes()
    vm_size = six.text_type(config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    ))
    if not vm_size:
        raise SaltCloudNotFound('No size specified for this instance.')
    # Membership test directly on the dict; '.keys()' needlessly built a
    # list on Python 2.
    if vm_size in sizes:
        return vm_size
    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    )
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return the full records of the instances that are on the provider,
    keyed by instance id, with the normalized salt-cloud fields merged in.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )
    zone = _get_specified_zone()
    params = {
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    }
    items = query(params=params)
    log.debug('Total %s instances found in zone %s', items['total_count'], zone)
    result = {}
    if items['total_count'] == 0:
        return result
    for node in items['instance_set']:
        # Merge the normalized salt-cloud fields into the raw API record.
        normalized_node = _show_normalized_node(node)
        node.update(normalized_node)
        result[node['instance_id']] = node
    # Strip any ':driver' suffix from the active provider name.
    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    # NOTE(review): this mutates the global opts for the rest of the run --
    # confirm forcing cache updates here is intentional.
    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)
    return result
def list_nodes(call=None):
    '''
    Return a list of the instances that are on the provider.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )
    # Trim each full record down to the standard salt-cloud node fields.
    wanted = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')
    ret = {}
    for instance_id, full_node in list_nodes_full().items():
        ret[instance_id] = dict((field, full_node[field]) for field in wanted)
    return ret
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only a list of
    instances names, and their state, is returned.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    # Keep only the name and raw API status for each instance.
    return dict(
        (instance_id, {
            'name': full_node['instance_name'],
            'status': full_node['status'],
        })
        for instance_id, full_node in list_nodes_full().items()
    )
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with the
    fields selected by the ``query.selection`` option.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -S my-qingcloud
    '''
    # list_nodes_full is invoked with 'function' so its own call-type check
    # passes; the caller's 'call' is forwarded for the helper to validate.
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full('function'),
        __opts__['query.selection'],
        call,
    )
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.
    :param instance_id: the QingCloud instance id, e.g. ``i-2f733r5n``.
    :param call: salt-cloud invocation type; must be ``'action'``.
    :param kwargs: unused; present for the salt-cloud action signature.
    :raises SaltCloudNotFound: if the instance does not exist in the zone.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )
    params = {
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    }
    items = query(params=params)
    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )
    # Fold the normalized salt-cloud fields into the raw API record.
    full_node = items['instance_set'][0]
    normalized_node = _show_normalized_node(full_node)
    full_node.update(normalized_node)
    result = full_node
    return result
def _query_node_data(instance_id):
    '''
    Poll helper for wait_for_ip(): return the instance data once it has at
    least one private IP, False when the lookup returned nothing, and None
    (keep polling) while the address is still pending.
    '''
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    return data if data.get('private_ips', []) else None
def create(vm_):
    '''
    Create a single instance from a data dict.
    :param vm_: the salt-cloud profile/VM definition dict; must provide
        ``name``, ``vxnets``, ``login_mode`` and ``login_keypair`` in
        addition to the usual zone/size/image settings.
    :return: the queried node data dict for the new instance, or False if
        the profile failed validation.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'qingcloud',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    # RunInstances request parameters; zone/size/image lookups also validate
    # the configured values against what the provider offers.
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    result = query(params)
    new_instance_id = result['instances'][0]
    try:
        # Poll until the new instance reports a private IP or we time out.
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            # NOTE(review): destroy() expects an instance id but is handed
            # the VM name here -- confirm show_instance() accepts names,
            # otherwise this cleanup path cannot work.
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    private_ip = data['private_ips'][0]
    log.debug('VM %s is now running', private_ip)
    vm_['ssh_host'] = private_ip
    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return data
def script(vm_):
    '''
    Build and return the deployment script object for the given VM dict.
    '''
    # Render the minion configuration to YAML first, then hand everything
    # to the shared os_script helper.
    minion_yaml = salt.utils.cloud.salt_config_to_yaml(
        salt.utils.cloud.minion_config(__opts__, vm_)
    )
    script_name = config.get_cloud_config_value('script', vm_, __opts__)
    return salt.utils.cloud.os_script(script_name, vm_, __opts__, minion_yaml)
def start(instance_id, call=None):
    '''
    Start an instance.
    :param instance_id: the QingCloud instance id, e.g. ``i-2f733r5n``.
    :param call: salt-cloud invocation type; must be ``'action'``.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            # Fixed copy/paste error: this guard is for start, not stop.
            'The start action must be called with -a or --action.'
        )
    log.info('Starting instance %s', instance_id)
    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    return result
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance, optionally forcing an immediate halt.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )
    log.info('Stopping instance %s', instance_id)
    # The API expects force as 0/1, not a boolean.
    return query({
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        'force': int(force),
    })
def reboot(instance_id, call=None):
    '''
    Reboot an instance.
    :param instance_id: the QingCloud instance id, e.g. ``i-2f733r5n``.
    :param call: salt-cloud invocation type; must be ``'action'``.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            # Fixed copy/paste error: this guard is for reboot, not stop.
            'The reboot action must be called with -a or --action.'
        )
    log.info('Rebooting instance %s', instance_id)
    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    return result
def destroy(instance_id, call=None):
    '''
    Destroy (terminate) an instance.
    :param instance_id: the QingCloud instance id, e.g. ``i-2f733r5n``.
    :param call: salt-cloud invocation type; must not be ``'function'``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Look up the instance first so the fired events can carry its
    # human-readable name rather than the raw id.
    instance_data = show_instance(instance_id, call='action')
    name = instance_data['instance_name']
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    params = {
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
_show_normalized_node
|
python
|
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
|
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L448-L473
| null |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Only load this module when both the provider configuration and the
    required third-party libraries are available.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    The provider configuration must supply ``access_key_id``,
    ``secret_access_key``, ``zone`` and ``key_filename``.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    Delegates to the shared driver-dependency check for the ``requests``
    library, whose availability was probed at import time (HAS_REQUESTS).
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'requests': HAS_REQUESTS}
    )
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:
    https://docs.qingcloud.com/api/common/signature.html
    :param parameters: dict of request parameters; mutated in place to add
        ``signature_method``.
    :param access_key_secret: the account's secret key (text or bytes).
    :param method: HTTP method, e.g. ``'GET'``.
    :param path: request path, e.g. ``'/iaas/'``.
    :return: the base64-encoded HMAC-SHA256 signature.
    '''
    parameters['signature_method'] = 'HmacSHA256'
    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)
    # Parameters are joined in sorted key order; values keep '-', '_' and
    # '~' unescaped, per the QingCloud signing spec.
    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs
    # hmac.new() requires bytes for both key and message on Python 3; the
    # previous code passed text here and raised TypeError.
    if isinstance(access_key_secret, six.text_type):
        access_key_secret = access_key_secret.encode('utf-8')
    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign.encode('utf-8'))
    signature = base64.b64encode(h.digest()).strip()
    return signature
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.
    :param params: optional dict of action parameters. List values are
        flattened into the ``key.N`` / ``key.N.subkey`` form the API
        expects; nested dicts/lists inside are JSON-encoded.
    :return: the decoded JSON response dict (``ret_code`` == 0).
    :raises SaltCloudSystemExit: on a non-200 HTTP status or a non-zero
        ``ret_code`` in the response body.
    '''
    path = 'https://api.qingcloud.com/iaas/'
    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )
    # public interface parameters common to every request
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }
    # include action or function parameters, flattening lists into the
    # API's indexed 'key.N' (and 'key.N.subkey' for dict items) notation
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value
    # Sign the flattened parameter set (adds 'signature_method' too).
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature
    # SECURITY NOTE(review): verify=False disables TLS certificate
    # verification for the API endpoint -- confirm this is intentional.
    request = requests.get(path, params=real_parameters, verify=False)
    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )
    log.debug(request.url)
    content = request.text
    result = salt.utils.json.loads(content)
    # A 200 response can still carry an application-level error code.
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )
    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations (zones) on the provider with
    relevant data, keyed by zone id.
    CLI Examples:
    .. code-block:: bash
        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )
    items = query(params={'action': 'DescribeZones'})
    result = {}
    for region in items['zone_set']:
        # Stringify every attribute of the zone record.
        result[region['zone_id']] = dict(
            (key, six.text_type(value)) for key, value in region.items()
        )
    return result
def _get_location(vm_=None):
    '''
    Look up and validate the VM's target zone. Used by create().
    :raises SaltCloudNotFound: if no zone is configured or the configured
        zone is not offered by the provider.
    '''
    locations = avail_locations()
    vm_location = six.text_type(config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    ))
    if not vm_location:
        raise SaltCloudNotFound('No location specified for this VM.')
    if vm_location not in locations:
        raise SaltCloudNotFound(
            'The specified location, \'{0}\', could not be found.'.format(
                vm_location
            )
        )
    return vm_location
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the images that are on the provider, keyed by image id.
    CLI Examples:
    .. code-block:: bash
        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )
    if not isinstance(kwargs, dict):
        kwargs = {}
    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })
    # Shallow-copy each image record into the result map.
    return dict(
        (image['image_id'], dict(image)) for image in items['image_set']
    )
def _get_image(vm_):
    '''
    Look up and validate the VM's image. Used by create().
    :raises SaltCloudNotFound: if no image is configured or the configured
        image is not offered by the provider.
    '''
    images = avail_images()
    vm_image = six.text_type(config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    ))
    if not vm_image:
        raise SaltCloudNotFound('No image specified for this VM.')
    if vm_image not in images:
        raise SaltCloudNotFound(
            'The specified image, \'{0}\', could not be found.'.format(vm_image)
        )
    return vm_image
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning one or more images.
    :param kwargs: must contain an ``image`` key holding a single image id
        or a comma-separated list of ids; may contain ``zone``.
    :raises SaltCloudNotFound: if none of the images exist.
    CLI Examples:
    .. code-block:: bash
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )
    if not isinstance(kwargs, dict):
        kwargs = {}
    # NOTE(review): raises a bare KeyError when 'image' is omitted --
    # confirm whether a SaltCloudSystemExit would be friendlier here.
    images = kwargs['image']
    images = images.split(',')
    params = {
        'action': 'DescribeImages',
        'images': images,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    }
    items = query(params=params)
    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')
    result = {}
    for image in items['image_set']:
        result[image['image_id']] = {}
        for key in image:
            result[image['image_id']][key] = image[key]
    return result
# QingCloud doesn't provide an API for getting instance sizes, so the
# known flavors per zone are hard-coded here.
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
# 'ap1' and 'gd1' offer the same flavors as 'pek2'. Note these aliases share
# the same underlying dict object, so a mutation through one key is visible
# through the others.
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
'''
Return a list of the instance sizes that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-sizes my-qingcloud
salt-cloud -f avail_sizes my-qingcloud zone=pek2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
zone = _get_specified_zone(kwargs, get_configured_provider())
result = {}
for size_key in QINGCLOUD_SIZES[zone]:
result[size_key] = {}
for attribute_key in QINGCLOUD_SIZES[zone][size_key]:
result[size_key][attribute_key] = QINGCLOUD_SIZES[zone][size_key][attribute_key]
return result
def _get_size(vm_):
'''
Return the VM's size. Used by create().
'''
sizes = avail_sizes()
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
if not vm_size:
raise SaltCloudNotFound('No size specified for this instance.')
if vm_size in sizes.keys():
return vm_size
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
)
def list_nodes_full(call=None):
'''
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -F my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
zone = _get_specified_zone()
params = {
'action': 'DescribeInstances',
'zone': zone,
'status': ['pending', 'running', 'stopped', 'suspended'],
}
items = query(params=params)
log.debug('Total %s instances found in zone %s', items['total_count'], zone)
result = {}
if items['total_count'] == 0:
return result
for node in items['instance_set']:
normalized_node = _show_normalized_node(node)
node.update(normalized_node)
result[node['instance_id']] = node
provider = __active_provider_name__ or 'qingcloud'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
__opts__['update_cachedir'] = True
__utils__['cloud.cache_node_list'](result, provider, __opts__)
return result
def list_nodes(call=None):
'''
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -Q my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
nodes = list_nodes_full()
ret = {}
for instance_id, full_node in nodes.items():
ret[instance_id] = {
'id': full_node['id'],
'image': full_node['image'],
'size': full_node['size'],
'state': full_node['state'],
'public_ips': full_node['public_ips'],
'private_ips': full_node['private_ips'],
}
return ret
def list_nodes_min(call=None):
'''
Return a list of the instances that are on the provider. Only a list of
instances names, and their state, is returned.
CLI Examples:
.. code-block:: bash
salt-cloud -f list_nodes_min my-qingcloud
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
nodes = list_nodes_full()
result = {}
for instance_id, full_node in nodes.items():
result[instance_id] = {
'name': full_node['instance_name'],
'status': full_node['status'],
}
return result
def list_nodes_select(call=None):
'''
Return a list of the instances that are on the provider, with selected
fields.
CLI Examples:
.. code-block:: bash
salt-cloud -S my-qingcloud
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full('function'),
__opts__['query.selection'],
call,
)
def show_instance(instance_id, call=None, kwargs=None):
'''
Show the details from QingCloud concerning an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a show_instance i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
params = {
'action': 'DescribeInstances',
'instances.1': instance_id,
'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
}
items = query(params=params)
if items['total_count'] == 0:
raise SaltCloudNotFound(
'The specified instance, \'{0}\', could not be found.'.format(instance_id)
)
full_node = items['instance_set'][0]
normalized_node = _show_normalized_node(full_node)
full_node.update(normalized_node)
result = full_node
return result
def _query_node_data(instance_id):
data = show_instance(instance_id, call='action')
if not data:
return False
if data.get('private_ips', []):
return data
def create(vm_):
'''
Create a single instance from a data dict.
CLI Examples:
.. code-block:: bash
salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
salt-cloud -m /path/to/mymap.sls -P
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'qingcloud',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
# params
params = {
'action': 'RunInstances',
'instance_name': vm_['name'],
'zone': _get_location(vm_),
'instance_type': _get_size(vm_),
'image_id': _get_image(vm_),
'vxnets.1': vm_['vxnets'],
'login_mode': vm_['login_mode'],
'login_keypair': vm_['login_keypair'],
}
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
result = query(params)
new_instance_id = result['instances'][0]
try:
data = salt.utils.cloud.wait_for_ip(
_query_node_data,
update_args=(new_instance_id,),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10
),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
private_ip = data['private_ips'][0]
log.debug('VM %s is now running', private_ip)
vm_['ssh_host'] = private_ip
# The instance is booted and accessible, let's Salt it!
__utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return data
def script(vm_):
'''
Return the script deployment object.
'''
deploy_script = salt.utils.cloud.os_script(
config.get_cloud_config_value('script', vm_, __opts__),
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
return deploy_script
def start(instance_id, call=None):
'''
Start an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a start i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Starting instance %s', instance_id)
params = {
'action': 'StartInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
return result
def stop(instance_id, force=False, call=None):
'''
Stop an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a stop i-2f733r5n
salt-cloud -a stop i-2f733r5n force=True
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Stopping instance %s', instance_id)
params = {
'action': 'StopInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
'force': int(force),
}
result = query(params)
return result
def reboot(instance_id, call=None):
'''
Reboot an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a reboot i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Rebooting instance %s', instance_id)
params = {
'action': 'RestartInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
return result
def destroy(instance_id, call=None):
'''
Destroy an instance.
CLI Example:
.. code-block:: bash
salt-cloud -a destroy i-2f733r5n
salt-cloud -d i-2f733r5n
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
instance_data = show_instance(instance_id, call='action')
name = instance_data['instance_name']
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
params = {
'action': 'TerminateInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
list_nodes_full
|
python
|
def list_nodes_full(call=None):
'''
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -F my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
zone = _get_specified_zone()
params = {
'action': 'DescribeInstances',
'zone': zone,
'status': ['pending', 'running', 'stopped', 'suspended'],
}
items = query(params=params)
log.debug('Total %s instances found in zone %s', items['total_count'], zone)
result = {}
if items['total_count'] == 0:
return result
for node in items['instance_set']:
normalized_node = _show_normalized_node(node)
node.update(normalized_node)
result[node['instance_id']] = node
provider = __active_provider_name__ or 'qingcloud'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
__opts__['update_cachedir'] = True
__utils__['cloud.cache_node_list'](result, provider, __opts__)
return result
|
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -F my-qingcloud
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L476-L521
|
[
"def query(params=None):\n '''\n Make a web call to QingCloud IaaS API.\n '''\n path = 'https://api.qingcloud.com/iaas/'\n\n access_key_id = config.get_cloud_config_value(\n 'access_key_id', get_configured_provider(), __opts__, search_global=False\n )\n access_key_secret = config.get_cloud_config_value(\n 'secret_access_key', get_configured_provider(), __opts__, search_global=False\n )\n\n # public interface parameters\n real_parameters = {\n 'access_key_id': access_key_id,\n 'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,\n 'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'version': DEFAULT_QINGCLOUD_API_VERSION,\n }\n\n # include action or function parameters\n if params:\n for key, value in params.items():\n if isinstance(value, list):\n for i in range(1, len(value) + 1):\n if isinstance(value[i - 1], dict):\n for sk, sv in value[i - 1].items():\n if isinstance(sv, dict) or isinstance(sv, list):\n sv = salt.utils.json.dumps(sv, separators=(',', ':'))\n real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv\n else:\n real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]\n else:\n real_parameters[key] = value\n\n # Calculate the string for Signature\n signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')\n real_parameters['signature'] = signature\n\n # print('parameters:')\n # pprint.pprint(real_parameters)\n\n request = requests.get(path, params=real_parameters, verify=False)\n\n # print('url:')\n # print(request.url)\n\n if request.status_code != 200:\n raise SaltCloudSystemExit(\n 'An error occurred while querying QingCloud. HTTP Code: {0} '\n 'Error: \\'{1}\\''.format(\n request.status_code,\n request.text\n )\n )\n\n log.debug(request.url)\n\n content = request.text\n result = salt.utils.json.loads(content)\n\n # print('response:')\n # pprint.pprint(result)\n\n if result['ret_code'] != 0:\n raise SaltCloudSystemExit(\n pprint.pformat(result.get('message', {}))\n )\n\n return result\n",
"def _get_specified_zone(kwargs=None, provider=None):\n if provider is None:\n provider = get_configured_provider()\n\n if isinstance(kwargs, dict):\n zone = kwargs.get('zone', None)\n if zone is not None:\n return zone\n\n zone = provider['zone']\n return zone\n",
"def _show_normalized_node(full_node):\n '''\n Normalize the QingCloud instance data. Used by list_nodes()-related\n functions.\n '''\n public_ips = full_node.get('eip', [])\n if public_ips:\n public_ip = public_ips['eip_addr']\n public_ips = [public_ip, ]\n\n private_ips = []\n for vxnet in full_node.get('vxnets', []):\n private_ip = vxnet.get('private_ip', None)\n if private_ip:\n private_ips.append(private_ip)\n\n normalized_node = {\n 'id': full_node['instance_id'],\n 'image': full_node['image']['image_id'],\n 'size': full_node['instance_type'],\n 'state': full_node['status'],\n 'private_ips': private_ips,\n 'public_ips': public_ips,\n }\n\n return normalized_node\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
'''
Check for QingCloud configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('access_key_id', 'secret_access_key', 'zone', 'key_filename')
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
return config.check_driver_dependencies(
__virtualname__,
{'requests': HAS_REQUESTS}
)
def _compute_signature(parameters, access_key_secret, method, path):
'''
Generate an API request signature. Detailed document can be found at:
https://docs.qingcloud.com/api/common/signature.html
'''
parameters['signature_method'] = 'HmacSHA256'
string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)
keys = sorted(parameters.keys())
pairs = []
for key in keys:
val = six.text_type(parameters[key]).encode('utf-8')
pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
h = hmac.new(access_key_secret, digestmod=sha256)
h.update(string_to_sign)
signature = base64.b64encode(h.digest()).strip()
return signature
def query(params=None):
'''
Make a web call to QingCloud IaaS API.
'''
path = 'https://api.qingcloud.com/iaas/'
access_key_id = config.get_cloud_config_value(
'access_key_id', get_configured_provider(), __opts__, search_global=False
)
access_key_secret = config.get_cloud_config_value(
'secret_access_key', get_configured_provider(), __opts__, search_global=False
)
# public interface parameters
real_parameters = {
'access_key_id': access_key_id,
'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'version': DEFAULT_QINGCLOUD_API_VERSION,
}
# include action or function parameters
if params:
for key, value in params.items():
if isinstance(value, list):
for i in range(1, len(value) + 1):
if isinstance(value[i - 1], dict):
for sk, sv in value[i - 1].items():
if isinstance(sv, dict) or isinstance(sv, list):
sv = salt.utils.json.dumps(sv, separators=(',', ':'))
real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
else:
real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
else:
real_parameters[key] = value
# Calculate the string for Signature
signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
real_parameters['signature'] = signature
# print('parameters:')
# pprint.pprint(real_parameters)
request = requests.get(path, params=real_parameters, verify=False)
# print('url:')
# print(request.url)
if request.status_code != 200:
raise SaltCloudSystemExit(
'An error occurred while querying QingCloud. HTTP Code: {0} '
'Error: \'{1}\''.format(
request.status_code,
request.text
)
)
log.debug(request.url)
content = request.text
result = salt.utils.json.loads(content)
# print('response:')
# pprint.pprint(result)
if result['ret_code'] != 0:
raise SaltCloudSystemExit(
pprint.pformat(result.get('message', {}))
)
return result
def avail_locations(call=None):
'''
Return a dict of all available locations on the provider with
relevant data.
CLI Examples:
.. code-block:: bash
salt-cloud --list-locations my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
params = {
'action': 'DescribeZones',
}
items = query(params=params)
result = {}
for region in items['zone_set']:
result[region['zone_id']] = {}
for key in region:
result[region['zone_id']][key] = six.text_type(region[key])
return result
def _get_location(vm_=None):
'''
Return the VM's location. Used by create().
'''
locations = avail_locations()
vm_location = six.text_type(config.get_cloud_config_value(
'zone', vm_, __opts__, search_global=False
))
if not vm_location:
raise SaltCloudNotFound('No location specified for this VM.')
if vm_location in locations:
return vm_location
raise SaltCloudNotFound(
'The specified location, \'{0}\', could not be found.'.format(
vm_location
)
)
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
'''
Return a list of the images that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-images my-qingcloud
salt-cloud -f avail_images my-qingcloud zone=gd1
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not isinstance(kwargs, dict):
kwargs = {}
params = {
'action': 'DescribeImages',
'provider': 'system',
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result
def _get_image(vm_):
'''
Return the VM's image. Used by create().
'''
images = avail_images()
vm_image = six.text_type(config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
))
if not vm_image:
raise SaltCloudNotFound('No image specified for this VM.')
if vm_image in images:
return vm_image
raise SaltCloudNotFound(
'The specified image, \'{0}\', could not be found.'.format(vm_image)
)
def show_image(kwargs, call=None):
'''
Show the details from QingCloud concerning an image.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_image my-qingcloud image=trustysrvx64c
salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_images function must be called with '
'-f or --function'
)
if not isinstance(kwargs, dict):
kwargs = {}
images = kwargs['image']
images = images.split(',')
params = {
'action': 'DescribeImages',
'images': images,
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
if not items['image_set']:
raise SaltCloudNotFound('The specified image could not be found.')
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result
# QingCloud doesn't provide an API of geting instance sizes
QINGCLOUD_SIZES = {
'pek2': {
'c1m1': {'cpu': 1, 'memory': '1G'},
'c1m2': {'cpu': 1, 'memory': '2G'},
'c1m4': {'cpu': 1, 'memory': '4G'},
'c2m2': {'cpu': 2, 'memory': '2G'},
'c2m4': {'cpu': 2, 'memory': '4G'},
'c2m8': {'cpu': 2, 'memory': '8G'},
'c4m4': {'cpu': 4, 'memory': '4G'},
'c4m8': {'cpu': 4, 'memory': '8G'},
'c4m16': {'cpu': 4, 'memory': '16G'},
},
'pek1': {
'small_b': {'cpu': 1, 'memory': '1G'},
'small_c': {'cpu': 1, 'memory': '2G'},
'medium_a': {'cpu': 2, 'memory': '2G'},
'medium_b': {'cpu': 2, 'memory': '4G'},
'medium_c': {'cpu': 2, 'memory': '8G'},
'large_a': {'cpu': 4, 'memory': '4G'},
'large_b': {'cpu': 4, 'memory': '8G'},
'large_c': {'cpu': 4, 'memory': '16G'},
},
}
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
'''
Return a list of the instance sizes that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-sizes my-qingcloud
salt-cloud -f avail_sizes my-qingcloud zone=pek2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
zone = _get_specified_zone(kwargs, get_configured_provider())
result = {}
for size_key in QINGCLOUD_SIZES[zone]:
result[size_key] = {}
for attribute_key in QINGCLOUD_SIZES[zone][size_key]:
result[size_key][attribute_key] = QINGCLOUD_SIZES[zone][size_key][attribute_key]
return result
def _get_size(vm_):
'''
Return the VM's size. Used by create().
'''
sizes = avail_sizes()
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
if not vm_size:
raise SaltCloudNotFound('No size specified for this instance.')
if vm_size in sizes.keys():
return vm_size
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
)
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes(call=None):
'''
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -Q my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
nodes = list_nodes_full()
ret = {}
for instance_id, full_node in nodes.items():
ret[instance_id] = {
'id': full_node['id'],
'image': full_node['image'],
'size': full_node['size'],
'state': full_node['state'],
'public_ips': full_node['public_ips'],
'private_ips': full_node['private_ips'],
}
return ret
def list_nodes_min(call=None):
'''
Return a list of the instances that are on the provider. Only a list of
instances names, and their state, is returned.
CLI Examples:
.. code-block:: bash
salt-cloud -f list_nodes_min my-qingcloud
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
nodes = list_nodes_full()
result = {}
for instance_id, full_node in nodes.items():
result[instance_id] = {
'name': full_node['instance_name'],
'status': full_node['status'],
}
return result
def list_nodes_select(call=None):
'''
Return a list of the instances that are on the provider, with selected
fields.
CLI Examples:
.. code-block:: bash
salt-cloud -S my-qingcloud
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full('function'),
__opts__['query.selection'],
call,
)
def show_instance(instance_id, call=None, kwargs=None):
'''
Show the details from QingCloud concerning an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a show_instance i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
params = {
'action': 'DescribeInstances',
'instances.1': instance_id,
'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
}
items = query(params=params)
if items['total_count'] == 0:
raise SaltCloudNotFound(
'The specified instance, \'{0}\', could not be found.'.format(instance_id)
)
full_node = items['instance_set'][0]
normalized_node = _show_normalized_node(full_node)
full_node.update(normalized_node)
result = full_node
return result
def _query_node_data(instance_id):
data = show_instance(instance_id, call='action')
if not data:
return False
if data.get('private_ips', []):
return data
def create(vm_):
'''
Create a single instance from a data dict.
CLI Examples:
.. code-block:: bash
salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
salt-cloud -m /path/to/mymap.sls -P
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'qingcloud',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
# params
params = {
'action': 'RunInstances',
'instance_name': vm_['name'],
'zone': _get_location(vm_),
'instance_type': _get_size(vm_),
'image_id': _get_image(vm_),
'vxnets.1': vm_['vxnets'],
'login_mode': vm_['login_mode'],
'login_keypair': vm_['login_keypair'],
}
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
result = query(params)
new_instance_id = result['instances'][0]
try:
data = salt.utils.cloud.wait_for_ip(
_query_node_data,
update_args=(new_instance_id,),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10
),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
private_ip = data['private_ips'][0]
log.debug('VM %s is now running', private_ip)
vm_['ssh_host'] = private_ip
# The instance is booted and accessible, let's Salt it!
__utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return data
def script(vm_):
'''
Return the script deployment object.
'''
deploy_script = salt.utils.cloud.os_script(
config.get_cloud_config_value('script', vm_, __opts__),
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
return deploy_script
def start(instance_id, call=None):
'''
Start an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a start i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Starting instance %s', instance_id)
params = {
'action': 'StartInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
return result
def stop(instance_id, force=False, call=None):
'''
Stop an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a stop i-2f733r5n
salt-cloud -a stop i-2f733r5n force=True
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Stopping instance %s', instance_id)
params = {
'action': 'StopInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
'force': int(force),
}
result = query(params)
return result
def reboot(instance_id, call=None):
'''
Reboot an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a reboot i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Rebooting instance %s', instance_id)
params = {
'action': 'RestartInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
return result
def destroy(instance_id, call=None):
'''
Destroy an instance.
CLI Example:
.. code-block:: bash
salt-cloud -a destroy i-2f733r5n
salt-cloud -d i-2f733r5n
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
instance_data = show_instance(instance_id, call='action')
name = instance_data['instance_name']
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
params = {
'action': 'TerminateInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
list_nodes
|
python
|
def list_nodes(call=None):
'''
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -Q my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
nodes = list_nodes_full()
ret = {}
for instance_id, full_node in nodes.items():
ret[instance_id] = {
'id': full_node['id'],
'image': full_node['image'],
'size': full_node['size'],
'state': full_node['state'],
'public_ips': full_node['public_ips'],
'private_ips': full_node['private_ips'],
}
return ret
|
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -Q my-qingcloud
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L524-L552
|
[
"def list_nodes_full(call=None):\n '''\n Return a list of the instances that are on the provider.\n\n CLI Examples:\n\n .. code-block:: bash\n\n salt-cloud -F my-qingcloud\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The list_nodes_full function must be called with -f or --function.'\n )\n\n zone = _get_specified_zone()\n\n params = {\n 'action': 'DescribeInstances',\n 'zone': zone,\n 'status': ['pending', 'running', 'stopped', 'suspended'],\n }\n items = query(params=params)\n\n log.debug('Total %s instances found in zone %s', items['total_count'], zone)\n\n result = {}\n\n if items['total_count'] == 0:\n return result\n\n for node in items['instance_set']:\n normalized_node = _show_normalized_node(node)\n node.update(normalized_node)\n\n result[node['instance_id']] = node\n\n provider = __active_provider_name__ or 'qingcloud'\n if ':' in provider:\n comps = provider.split(':')\n provider = comps[0]\n\n __opts__['update_cachedir'] = True\n __utils__['cloud.cache_node_list'](result, provider, __opts__)\n\n return result\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Check for QingCloud configurations.

    Only load this cloud module when the provider configuration
    (``access_key_id``, ``secret_access_key``, ``zone``, ``key_filename``)
    is present and the ``requests`` dependency is importable.
    '''
    if get_configured_provider() is False:
        return False

    if get_dependencies() is False:
        return False

    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.

    Looks up the active provider configuration in ``__opts__``; all of
    ``access_key_id``, ``secret_access_key``, ``zone`` and
    ``key_filename`` must be set, otherwise ``False`` is returned.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.

    The driver needs the third-party ``requests`` library for all HTTP
    calls to the QingCloud API; returns ``False`` when it is missing.
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'requests': HAS_REQUESTS}
    )
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. The detailed specification can be
    found at:

    https://docs.qingcloud.com/api/common/signature.html

    parameters
        dict of request parameters; ``signature_method`` is injected here
        as a side effect.

    access_key_secret
        the QingCloud secret key, used as the HMAC key.

    method
        HTTP method; upper-cased into the string to sign.

    path
        request path, e.g. ``/iaas/``.

    Returns the base64-encoded HMAC-SHA256 signature as text.
    '''
    parameters['signature_method'] = 'HmacSHA256'

    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    # Parameters must be joined in sorted-key order, with keys and values
    # percent-encoded, before signing.
    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs

    # hmac requires bytes for both the key and the message on Python 3;
    # passing text here raised a TypeError and broke every API call.
    if isinstance(access_key_secret, six.text_type):
        access_key_secret = access_key_secret.encode('utf-8')
    if isinstance(string_to_sign, six.text_type):
        string_to_sign = string_to_sign.encode('utf-8')

    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign)

    # Decode so the signature can be embedded in a text query string.
    signature = base64.b64encode(h.digest()).strip().decode('utf-8')
    return signature
def query(params=None):
    '''
    Make a web call to the QingCloud IaaS API.

    Builds the common signed-request parameters, merges in the
    action-specific ``params``, signs the query with HMAC-SHA256 and
    issues a GET request.

    params
        dict of action parameters. List values are flattened into the
        1-based ``key.N`` (and ``key.N.subkey`` for dicts inside lists)
        form the API expects.

    Returns the decoded JSON response as a dict.

    Raises ``SaltCloudSystemExit`` on a non-200 HTTP status or a
    non-zero ``ret_code`` in the API response body.
    '''
    path = 'https://api.qingcloud.com/iaas/'

    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )

    # public interface parameters, common to every request
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }

    # include action or function parameters, flattening lists (and dicts
    # inside lists) into the API's ``key.N[.subkey]`` notation
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                # Nested structures are serialized as
                                # compact JSON strings.
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value

    # Calculate the string for Signature
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature

    # NOTE(review): verify=False disables TLS certificate validation on
    # every API call -- presumably a workaround for the endpoint's
    # certificate chain; confirm whether it can be re-enabled.
    request = requests.get(path, params=real_parameters, verify=False)

    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )

    log.debug(request.url)

    content = request.text
    result = salt.utils.json.loads(content)

    # The API also reports failures in-band via a non-zero ret_code.
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )

    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations on the provider with
    relevant data.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    items = query(params={'action': 'DescribeZones'})

    # Stringify every attribute of every zone, keyed by zone id.
    result = {}
    for zone in items['zone_set']:
        result[zone['zone_id']] = {
            key: six.text_type(value) for key, value in zone.items()
        }

    return result
def _get_location(vm_=None):
    '''
    Return the VM's location (zone id). Used by create().

    Raises ``SaltCloudNotFound`` when no zone is configured or the
    configured zone is not offered by the provider.
    '''
    # Validate before converting to text: six.text_type(None) is the
    # truthy string 'None', so the original missing-value check could
    # never fire.
    vm_location = config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    )

    if not vm_location:
        raise SaltCloudNotFound('No location specified for this VM.')

    vm_location = six.text_type(vm_location)

    locations = avail_locations()
    if vm_location in locations:
        return vm_location

    raise SaltCloudNotFound(
        'The specified location, \'{0}\', could not be found.'.format(
            vm_location
        )
    )
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the images that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    # Key each image's full attribute dict by its image id.
    result = {}
    for image in items['image_set']:
        result[image['image_id']] = dict(image)

    return result
def _get_image(vm_):
    '''
    Return the VM's image. Used by create().

    Raises ``SaltCloudNotFound`` when no image is configured or the
    configured image is not offered by the provider.
    '''
    # Validate before converting to text: six.text_type(None) is the
    # truthy string 'None', so the original missing-value check could
    # never fire.
    vm_image = config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    )

    if not vm_image:
        raise SaltCloudNotFound('No image specified for this VM.')

    vm_image = six.text_type(vm_image)

    images = avail_images()
    if vm_image in images:
        return vm_image

    raise SaltCloudNotFound(
        'The specified image, \'{0}\', could not be found.'.format(vm_image)
    )
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning one or more images.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # A bare kwargs['image'] lookup used to surface as an opaque
    # KeyError when the argument was omitted; fail with a usable
    # message instead.
    if 'image' not in kwargs:
        raise SaltCloudSystemExit(
            'The show_image function requires an \'image\' argument, '
            'e.g. image=trustysrvx64c'
        )

    # Multiple images may be requested as a comma-separated list.
    images = kwargs['image'].split(',')

    params = {
        'action': 'DescribeImages',
        'images': images,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    }
    items = query(params=params)

    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')

    result = {}
    for image in items['image_set']:
        result[image['image_id']] = {}
        for key in image:
            result[image['image_id']][key] = image[key]

    return result
# QingCloud doesn't provide an API for getting instance sizes, so the
# known instance types are hard-coded per zone.
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
# The ap1 and gd1 zones offer the same instance types as pek2. Note
# these aliases share the pek2 dict object rather than copying it.
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return a list of the instance sizes that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Copy the hard-coded size table so callers cannot mutate it.
    return {
        size: dict(attributes)
        for size, attributes in QINGCLOUD_SIZES[zone].items()
    }
def _get_size(vm_):
    '''
    Return the VM's size (instance type). Used by create().

    Raises ``SaltCloudNotFound`` when no size is configured or the
    configured size is not offered in the configured zone.
    '''
    # Validate before converting to text: six.text_type(None) is the
    # truthy string 'None', so the original missing-value check could
    # never fire.
    vm_size = config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    )

    if not vm_size:
        raise SaltCloudNotFound('No size specified for this instance.')

    vm_size = six.text_type(vm_size)

    sizes = avail_sizes()
    # Membership test on the dict directly, consistent with _get_image.
    if vm_size in sizes:
        return vm_size

    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    )
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    Queries DescribeInstances for the configured zone, merges the
    normalized salt-cloud fields into each raw record, caches the node
    list on disk, and returns a dict keyed by instance id.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()

    # Terminated/ceased instances are deliberately excluded.
    params = {
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    }
    items = query(params=params)

    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    result = {}

    if items['total_count'] == 0:
        return result

    for node in items['instance_set']:
        # Merge the normalized salt-cloud fields into the raw record.
        normalized_node = _show_normalized_node(node)
        node.update(normalized_node)

        result[node['instance_id']] = node

    # Strip any ':driver' suffix from the active provider name before
    # using it as the cache key.
    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]

    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)

    return result
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only a list of
    instances names, and their state, is returned.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        instance_id: {
            'name': node['instance_name'],
            'status': node['status'],
        }
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with selected
    fields.

    The selected fields come from the ``query.selection`` option; the
    filtering itself is delegated to salt.utils.cloud.list_nodes_select.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full('function'),
        __opts__['query.selection'],
        call,
    )
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    Returns the raw DescribeInstances record for ``instance_id`` merged
    with the normalized salt-cloud fields.

    Raises ``SaltCloudNotFound`` when the instance does not exist.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    params = {
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    }
    items = query(params=params)

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    # Merge the normalized salt-cloud fields into the raw API record.
    full_node = items['instance_set'][0]
    normalized_node = _show_normalized_node(full_node)
    full_node.update(normalized_node)

    result = full_node

    return result
def _query_node_data(instance_id):
    '''
    Poll helper for salt.utils.cloud.wait_for_ip: return the instance
    data once the instance reports at least one private IP, otherwise a
    falsy value so the caller keeps waiting.
    '''
    data = show_instance(instance_id, call='action')

    if not data:
        return False

    if data.get('private_ips', []):
        return data

    # Not ready yet. Previously this fell through and returned None
    # implicitly; return False explicitly for consistency.
    return False
def create(vm_):
    '''
    Create a single instance from a data dict.

    Fires the standard salt-cloud lifecycle events, requests the
    instance via RunInstances, waits for a private IP, then bootstraps
    the minion over SSH using that private address.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'qingcloud',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])

    # RunInstances parameters assembled from the profile configuration.
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = query(params)
    new_instance_id = result['instances'][0]

    try:
        # Poll until the instance reports a private IP, or time out.
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            # NOTE(review): destroy() takes an instance id but receives
            # vm_['name'] here -- confirm this cleanup path works.
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    private_ip = data['private_ips'][0]
    log.debug('VM %s is now running', private_ip)

    # Bootstrap connects over the private address.
    vm_['ssh_host'] = private_ip

    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return data
def script(vm_):
    '''
    Return the script deployment object.

    Renders the configured deploy script for the VM's OS, embedding the
    generated minion configuration as YAML.
    '''
    deploy_script = salt.utils.cloud.os_script(
        config.get_cloud_config_value('script', vm_, __opts__),
        vm_,
        __opts__,
        salt.utils.cloud.salt_config_to_yaml(
            salt.utils.cloud.minion_config(__opts__, vm_)
        )
    )
    return deploy_script
def start(instance_id, call=None):
    '''
    Start an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        # The original message referred to the 'stop' action -- a
        # copy/paste error that made the usage hint misleading.
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    log.info('Starting instance %s', instance_id)

    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    log.info('Stopping instance %s', instance_id)

    # force is sent as 0/1; 1 powers off without a clean shutdown.
    return query({
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        'force': int(force),
    })
def reboot(instance_id, call=None):
    '''
    Reboot an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        # The original message referred to the 'stop' action -- a
        # copy/paste error that made the usage hint misleading.
        raise SaltCloudSystemExit(
            'The reboot action must be called with -a or --action.'
        )

    log.info('Rebooting instance %s', instance_id)

    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
def destroy(instance_id, call=None):
    '''
    Destroy an instance.

    Fires the destroying/destroyed salt-cloud events around a
    TerminateInstances API call and returns the raw API response.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Look up the human-readable name for the lifecycle events; this
    # also validates that the instance exists before terminating.
    instance_data = show_instance(instance_id, call='action')
    name = instance_data['instance_name']

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    params = {
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
list_nodes_min
|
python
|
def list_nodes_min(call=None):
'''
Return a list of the instances that are on the provider. Only a list of
instances names, and their state, is returned.
CLI Examples:
.. code-block:: bash
salt-cloud -f list_nodes_min my-qingcloud
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
nodes = list_nodes_full()
result = {}
for instance_id, full_node in nodes.items():
result[instance_id] = {
'name': full_node['instance_name'],
'status': full_node['status'],
}
return result
|
Return a list of the instances that are on the provider. Only a list of
instances names, and their state, is returned.
CLI Examples:
.. code-block:: bash
salt-cloud -f list_nodes_min my-qingcloud
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L555-L580
|
[
"def list_nodes_full(call=None):\n '''\n Return a list of the instances that are on the provider.\n\n CLI Examples:\n\n .. code-block:: bash\n\n salt-cloud -F my-qingcloud\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The list_nodes_full function must be called with -f or --function.'\n )\n\n zone = _get_specified_zone()\n\n params = {\n 'action': 'DescribeInstances',\n 'zone': zone,\n 'status': ['pending', 'running', 'stopped', 'suspended'],\n }\n items = query(params=params)\n\n log.debug('Total %s instances found in zone %s', items['total_count'], zone)\n\n result = {}\n\n if items['total_count'] == 0:\n return result\n\n for node in items['instance_set']:\n normalized_node = _show_normalized_node(node)\n node.update(normalized_node)\n\n result[node['instance_id']] = node\n\n provider = __active_provider_name__ or 'qingcloud'\n if ':' in provider:\n comps = provider.split(':')\n provider = comps[0]\n\n __opts__['update_cachedir'] = True\n __utils__['cloud.cache_node_list'](result, provider, __opts__)\n\n return result\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
'''
Check for QingCloud configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('access_key_id', 'secret_access_key', 'zone', 'key_filename')
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
return config.check_driver_dependencies(
__virtualname__,
{'requests': HAS_REQUESTS}
)
def _compute_signature(parameters, access_key_secret, method, path):
'''
Generate an API request signature. Detailed document can be found at:
https://docs.qingcloud.com/api/common/signature.html
'''
parameters['signature_method'] = 'HmacSHA256'
string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)
keys = sorted(parameters.keys())
pairs = []
for key in keys:
val = six.text_type(parameters[key]).encode('utf-8')
pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
h = hmac.new(access_key_secret, digestmod=sha256)
h.update(string_to_sign)
signature = base64.b64encode(h.digest()).strip()
return signature
def query(params=None):
'''
Make a web call to QingCloud IaaS API.
'''
path = 'https://api.qingcloud.com/iaas/'
access_key_id = config.get_cloud_config_value(
'access_key_id', get_configured_provider(), __opts__, search_global=False
)
access_key_secret = config.get_cloud_config_value(
'secret_access_key', get_configured_provider(), __opts__, search_global=False
)
# public interface parameters
real_parameters = {
'access_key_id': access_key_id,
'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'version': DEFAULT_QINGCLOUD_API_VERSION,
}
# include action or function parameters
if params:
for key, value in params.items():
if isinstance(value, list):
for i in range(1, len(value) + 1):
if isinstance(value[i - 1], dict):
for sk, sv in value[i - 1].items():
if isinstance(sv, dict) or isinstance(sv, list):
sv = salt.utils.json.dumps(sv, separators=(',', ':'))
real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
else:
real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
else:
real_parameters[key] = value
# Calculate the string for Signature
signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
real_parameters['signature'] = signature
# print('parameters:')
# pprint.pprint(real_parameters)
request = requests.get(path, params=real_parameters, verify=False)
# print('url:')
# print(request.url)
if request.status_code != 200:
raise SaltCloudSystemExit(
'An error occurred while querying QingCloud. HTTP Code: {0} '
'Error: \'{1}\''.format(
request.status_code,
request.text
)
)
log.debug(request.url)
content = request.text
result = salt.utils.json.loads(content)
# print('response:')
# pprint.pprint(result)
if result['ret_code'] != 0:
raise SaltCloudSystemExit(
pprint.pformat(result.get('message', {}))
)
return result
def avail_locations(call=None):
'''
Return a dict of all available locations on the provider with
relevant data.
CLI Examples:
.. code-block:: bash
salt-cloud --list-locations my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
params = {
'action': 'DescribeZones',
}
items = query(params=params)
result = {}
for region in items['zone_set']:
result[region['zone_id']] = {}
for key in region:
result[region['zone_id']][key] = six.text_type(region[key])
return result
def _get_location(vm_=None):
'''
Return the VM's location. Used by create().
'''
locations = avail_locations()
vm_location = six.text_type(config.get_cloud_config_value(
'zone', vm_, __opts__, search_global=False
))
if not vm_location:
raise SaltCloudNotFound('No location specified for this VM.')
if vm_location in locations:
return vm_location
raise SaltCloudNotFound(
'The specified location, \'{0}\', could not be found.'.format(
vm_location
)
)
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
'''
Return a list of the images that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-images my-qingcloud
salt-cloud -f avail_images my-qingcloud zone=gd1
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not isinstance(kwargs, dict):
kwargs = {}
params = {
'action': 'DescribeImages',
'provider': 'system',
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result
def _get_image(vm_):
'''
Return the VM's image. Used by create().
'''
images = avail_images()
vm_image = six.text_type(config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
))
if not vm_image:
raise SaltCloudNotFound('No image specified for this VM.')
if vm_image in images:
return vm_image
raise SaltCloudNotFound(
'The specified image, \'{0}\', could not be found.'.format(vm_image)
)
def show_image(kwargs, call=None):
'''
Show the details from QingCloud concerning an image.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_image my-qingcloud image=trustysrvx64c
salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_images function must be called with '
'-f or --function'
)
if not isinstance(kwargs, dict):
kwargs = {}
images = kwargs['image']
images = images.split(',')
params = {
'action': 'DescribeImages',
'images': images,
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
if not items['image_set']:
raise SaltCloudNotFound('The specified image could not be found.')
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result
# QingCloud doesn't provide an API of geting instance sizes
QINGCLOUD_SIZES = {
'pek2': {
'c1m1': {'cpu': 1, 'memory': '1G'},
'c1m2': {'cpu': 1, 'memory': '2G'},
'c1m4': {'cpu': 1, 'memory': '4G'},
'c2m2': {'cpu': 2, 'memory': '2G'},
'c2m4': {'cpu': 2, 'memory': '4G'},
'c2m8': {'cpu': 2, 'memory': '8G'},
'c4m4': {'cpu': 4, 'memory': '4G'},
'c4m8': {'cpu': 4, 'memory': '8G'},
'c4m16': {'cpu': 4, 'memory': '16G'},
},
'pek1': {
'small_b': {'cpu': 1, 'memory': '1G'},
'small_c': {'cpu': 1, 'memory': '2G'},
'medium_a': {'cpu': 2, 'memory': '2G'},
'medium_b': {'cpu': 2, 'memory': '4G'},
'medium_c': {'cpu': 2, 'memory': '8G'},
'large_a': {'cpu': 4, 'memory': '4G'},
'large_b': {'cpu': 4, 'memory': '8G'},
'large_c': {'cpu': 4, 'memory': '16G'},
},
}
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
'''
Return a list of the instance sizes that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-sizes my-qingcloud
salt-cloud -f avail_sizes my-qingcloud zone=pek2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
zone = _get_specified_zone(kwargs, get_configured_provider())
result = {}
for size_key in QINGCLOUD_SIZES[zone]:
result[size_key] = {}
for attribute_key in QINGCLOUD_SIZES[zone][size_key]:
result[size_key][attribute_key] = QINGCLOUD_SIZES[zone][size_key][attribute_key]
return result
def _get_size(vm_):
'''
Return the VM's size. Used by create().
'''
sizes = avail_sizes()
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
if not vm_size:
raise SaltCloudNotFound('No size specified for this instance.')
if vm_size in sizes.keys():
return vm_size
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
)
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
'''
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -F my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
zone = _get_specified_zone()
params = {
'action': 'DescribeInstances',
'zone': zone,
'status': ['pending', 'running', 'stopped', 'suspended'],
}
items = query(params=params)
log.debug('Total %s instances found in zone %s', items['total_count'], zone)
result = {}
if items['total_count'] == 0:
return result
for node in items['instance_set']:
normalized_node = _show_normalized_node(node)
node.update(normalized_node)
result[node['instance_id']] = node
provider = __active_provider_name__ or 'qingcloud'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
__opts__['update_cachedir'] = True
__utils__['cloud.cache_node_list'](result, provider, __opts__)
return result
def list_nodes(call=None):
'''
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -Q my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
nodes = list_nodes_full()
ret = {}
for instance_id, full_node in nodes.items():
ret[instance_id] = {
'id': full_node['id'],
'image': full_node['image'],
'size': full_node['size'],
'state': full_node['state'],
'public_ips': full_node['public_ips'],
'private_ips': full_node['private_ips'],
}
return ret
def list_nodes_select(call=None):
'''
Return a list of the instances that are on the provider, with selected
fields.
CLI Examples:
.. code-block:: bash
salt-cloud -S my-qingcloud
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full('function'),
__opts__['query.selection'],
call,
)
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    instance_id
        The QingCloud instance id, e.g. ``i-2f733r5n``.
    kwargs
        Optional extra arguments; a ``zone`` key overrides the provider's
        configured zone.

    Raises SaltCloudNotFound when no instance matches.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    params = {
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        # Bug fix: kwargs was previously hard-coded to None here, so a
        # caller-supplied zone was silently ignored.
        'zone': _get_specified_zone(kwargs=kwargs, provider=get_configured_provider()),
    }
    items = query(params=params)

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    full_node = items['instance_set'][0]
    normalized_node = _show_normalized_node(full_node)
    full_node.update(normalized_node)

    return full_node
def _query_node_data(instance_id):
data = show_instance(instance_id, call='action')
if not data:
return False
if data.get('private_ips', []):
return data
def create(vm_):
'''
Create a single instance from a data dict.
CLI Examples:
.. code-block:: bash
salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
salt-cloud -m /path/to/mymap.sls -P
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'qingcloud',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
# params
params = {
'action': 'RunInstances',
'instance_name': vm_['name'],
'zone': _get_location(vm_),
'instance_type': _get_size(vm_),
'image_id': _get_image(vm_),
'vxnets.1': vm_['vxnets'],
'login_mode': vm_['login_mode'],
'login_keypair': vm_['login_keypair'],
}
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
result = query(params)
new_instance_id = result['instances'][0]
try:
data = salt.utils.cloud.wait_for_ip(
_query_node_data,
update_args=(new_instance_id,),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10
),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
private_ip = data['private_ips'][0]
log.debug('VM %s is now running', private_ip)
vm_['ssh_host'] = private_ip
# The instance is booted and accessible, let's Salt it!
__utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return data
def script(vm_):
'''
Return the script deployment object.
'''
deploy_script = salt.utils.cloud.os_script(
config.get_cloud_config_value('script', vm_, __opts__),
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
return deploy_script
def start(instance_id, call=None):
    '''
    Start an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        # Fixed copy/paste error: the message previously said "stop".
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    log.info('Starting instance %s', instance_id)

    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
def stop(instance_id, force=False, call=None):
'''
Stop an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a stop i-2f733r5n
salt-cloud -a stop i-2f733r5n force=True
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Stopping instance %s', instance_id)
params = {
'action': 'StopInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
'force': int(force),
}
result = query(params)
return result
def reboot(instance_id, call=None):
    '''
    Reboot an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        # Fixed copy/paste error: the message previously said "stop".
        raise SaltCloudSystemExit(
            'The reboot action must be called with -a or --action.'
        )

    log.info('Rebooting instance %s', instance_id)

    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
def destroy(instance_id, call=None):
'''
Destroy an instance.
CLI Example:
.. code-block:: bash
salt-cloud -a destroy i-2f733r5n
salt-cloud -d i-2f733r5n
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
instance_data = show_instance(instance_id, call='action')
name = instance_data['instance_name']
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
params = {
'action': 'TerminateInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
show_instance
|
python
|
def show_instance(instance_id, call=None, kwargs=None):
'''
Show the details from QingCloud concerning an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a show_instance i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
params = {
'action': 'DescribeInstances',
'instances.1': instance_id,
'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
}
items = query(params=params)
if items['total_count'] == 0:
raise SaltCloudNotFound(
'The specified instance, \'{0}\', could not be found.'.format(instance_id)
)
full_node = items['instance_set'][0]
normalized_node = _show_normalized_node(full_node)
full_node.update(normalized_node)
result = full_node
return result
|
Show the details from QingCloud concerning an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a show_instance i-2f733r5n
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L601-L634
|
[
"def query(params=None):\n '''\n Make a web call to QingCloud IaaS API.\n '''\n path = 'https://api.qingcloud.com/iaas/'\n\n access_key_id = config.get_cloud_config_value(\n 'access_key_id', get_configured_provider(), __opts__, search_global=False\n )\n access_key_secret = config.get_cloud_config_value(\n 'secret_access_key', get_configured_provider(), __opts__, search_global=False\n )\n\n # public interface parameters\n real_parameters = {\n 'access_key_id': access_key_id,\n 'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,\n 'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'version': DEFAULT_QINGCLOUD_API_VERSION,\n }\n\n # include action or function parameters\n if params:\n for key, value in params.items():\n if isinstance(value, list):\n for i in range(1, len(value) + 1):\n if isinstance(value[i - 1], dict):\n for sk, sv in value[i - 1].items():\n if isinstance(sv, dict) or isinstance(sv, list):\n sv = salt.utils.json.dumps(sv, separators=(',', ':'))\n real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv\n else:\n real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]\n else:\n real_parameters[key] = value\n\n # Calculate the string for Signature\n signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')\n real_parameters['signature'] = signature\n\n # print('parameters:')\n # pprint.pprint(real_parameters)\n\n request = requests.get(path, params=real_parameters, verify=False)\n\n # print('url:')\n # print(request.url)\n\n if request.status_code != 200:\n raise SaltCloudSystemExit(\n 'An error occurred while querying QingCloud. HTTP Code: {0} '\n 'Error: \\'{1}\\''.format(\n request.status_code,\n request.text\n )\n )\n\n log.debug(request.url)\n\n content = request.text\n result = salt.utils.json.loads(content)\n\n # print('response:')\n # pprint.pprint(result)\n\n if result['ret_code'] != 0:\n raise SaltCloudSystemExit(\n pprint.pformat(result.get('message', {}))\n )\n\n return result\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('access_key_id', 'secret_access_key', 'zone', 'key_filename')\n )\n",
"def _get_specified_zone(kwargs=None, provider=None):\n if provider is None:\n provider = get_configured_provider()\n\n if isinstance(kwargs, dict):\n zone = kwargs.get('zone', None)\n if zone is not None:\n return zone\n\n zone = provider['zone']\n return zone\n",
"def _show_normalized_node(full_node):\n '''\n Normalize the QingCloud instance data. Used by list_nodes()-related\n functions.\n '''\n public_ips = full_node.get('eip', [])\n if public_ips:\n public_ip = public_ips['eip_addr']\n public_ips = [public_ip, ]\n\n private_ips = []\n for vxnet in full_node.get('vxnets', []):\n private_ip = vxnet.get('private_ip', None)\n if private_ip:\n private_ips.append(private_ip)\n\n normalized_node = {\n 'id': full_node['instance_id'],\n 'image': full_node['image']['image_id'],\n 'size': full_node['instance_type'],\n 'state': full_node['status'],\n 'private_ips': private_ips,\n 'public_ips': public_ips,\n }\n\n return normalized_node\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Only load this driver when both the QingCloud provider configuration
    and the required third-party libraries are available.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance of this provider.

    Requires the ``access_key_id``, ``secret_access_key``, ``zone`` and
    ``key_filename`` settings to be present in the provider configuration;
    ``config.is_provider_configured`` returns False when they are not.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    )
def get_dependencies():
    '''
    Warn if the third-party requirements (requests) aren't met.
    '''
    deps = {'requests': HAS_REQUESTS}
    return config.check_driver_dependencies(__virtualname__, deps)
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html

    parameters
        Dict of request parameters; mutated to add ``signature_method``.
    access_key_secret
        The account secret used as the HMAC key.
    method / path
        HTTP verb and request path included in the string to sign.
    '''
    parameters['signature_method'] = 'HmacSHA256'

    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    # Canonical query string: keys sorted, values percent-encoded.
    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs

    # hmac.new() requires bytes for both key and message on Python 3;
    # the previous code passed text and raised TypeError there.
    if isinstance(access_key_secret, six.text_type):
        access_key_secret = access_key_secret.encode('utf-8')
    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign.encode('utf-8'))

    signature = base64.b64encode(h.digest()).strip()

    return signature
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.

    params
        Dict of action parameters, e.g. ``{'action': 'DescribeZones'}``.
        List values are flattened into the QingCloud ``key.N`` /
        ``key.N.subkey`` form; nested dicts/lists inside list items are
        JSON-encoded.

    Raises SaltCloudSystemExit on a non-200 HTTP status or a non-zero
    ``ret_code`` in the API response.
    '''
    path = 'https://api.qingcloud.com/iaas/'

    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )

    # public interface parameters common to every request
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }

    # include action or function parameters, flattening lists into the
    # indexed key.N (and key.N.subkey for dict items) wire format
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value

    # Calculate the string for Signature
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature

    # NOTE(review): verify=False disables TLS certificate verification and
    # exposes the API credentials to MITM -- flagged, not changed here.
    request = requests.get(path, params=real_parameters, verify=False)

    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )

    log.debug(request.url)

    content = request.text
    result = salt.utils.json.loads(content)

    # ret_code 0 means success; anything else carries an error message
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )

    return result
def avail_locations(call=None):
    '''
    Return a dict of all available zones on the provider, with each zone's
    attributes coerced to text.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    items = query(params={'action': 'DescribeZones'})

    return {
        zone['zone_id']: {attr: six.text_type(val) for attr, val in zone.items()}
        for zone in items['zone_set']
    }
def _get_location(vm_=None):
    '''
    Return the VM's location (zone). Used by create().

    Raises SaltCloudNotFound when no zone is configured or the configured
    zone is not among the provider's available zones.
    '''
    locations = avail_locations()

    vm_location = config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    )

    # Check for a missing value BEFORE the text conversion: the previous
    # code converted first, turning None into the truthy string 'None' and
    # making this check unreachable.
    if not vm_location:
        raise SaltCloudNotFound('No location specified for this VM.')
    vm_location = six.text_type(vm_location)

    if vm_location in locations:
        return vm_location

    raise SaltCloudNotFound(
        'The specified location, \'{0}\', could not be found.'.format(
            vm_location
        )
    )
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return the system-provided images available in the selected zone.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    response = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    # Shallow-copy each raw image record, keyed by its image id.
    return {image['image_id']: dict(image) for image in response['image_set']}
def _get_image(vm_):
    '''
    Return the VM's image. Used by create().

    Raises SaltCloudNotFound when no image is configured or the configured
    image is not among the provider's available images.
    '''
    images = avail_images()

    vm_image = config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    )

    # Check for a missing value BEFORE the text conversion: the previous
    # code converted first, turning None into the truthy string 'None' and
    # making this check unreachable.
    if not vm_image:
        raise SaltCloudNotFound('No image specified for this VM.')
    vm_image = six.text_type(vm_image)

    if vm_image in images:
        return vm_image

    raise SaltCloudNotFound(
        'The specified image, \'{0}\', could not be found.'.format(vm_image)
    )
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning one or more images.

    kwargs
        Must contain ``image`` (a single id or a comma-separated list);
        may contain ``zone`` to override the configured zone.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # Fail with a clear message instead of a raw KeyError traceback when
    # the mandatory "image" argument was not supplied.
    if 'image' not in kwargs:
        raise SaltCloudSystemExit(
            'The image parameter is required. Example: image=trustysrvx64c'
        )

    images = kwargs['image'].split(',')

    params = {
        'action': 'DescribeImages',
        'images': images,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    }

    items = query(params=params)

    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')

    result = {}
    for image in items['image_set']:
        result[image['image_id']] = {}
        for key in image:
            result[image['image_id']][key] = image[key]

    return result
# QingCloud doesn't provide an API for getting instance sizes, so the known
# instance types are hard-coded here, keyed by zone id.
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
# ap1 and gd1 offer the same instance types as pek2; note these aliases
# share the *same* dict object, not copies.
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return the instance sizes known for the selected zone.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Return shallow copies of the per-size attribute dicts.
    return {
        size_name: dict(attributes)
        for size_name, attributes in QINGCLOUD_SIZES[zone].items()
    }
def _get_size(vm_):
    '''
    Return the VM's size. Used by create().

    Raises SaltCloudNotFound when no size is configured or the configured
    size is not among the provider's available sizes.
    '''
    sizes = avail_sizes()

    vm_size = config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    )

    # Check for a missing value BEFORE the text conversion: the previous
    # code converted first, turning None into the truthy string 'None' and
    # making this check unreachable.
    if not vm_size:
        raise SaltCloudNotFound('No size specified for this instance.')
    vm_size = six.text_type(vm_size)

    # Membership test directly on the dict (was: `in sizes.keys()`).
    if vm_size in sizes:
        return vm_size

    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    )
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    Queries every instance in the configured zone with status pending,
    running, stopped or suspended, merges the normalized salt-cloud fields
    into each raw record, and caches the resulting node list.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()

    params = {
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    }
    items = query(params=params)

    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    result = {}

    if items['total_count'] == 0:
        return result

    for node in items['instance_set']:
        # Overlay the normalized fields (id/image/size/state/ips) onto the
        # raw API record so callers get both views in one dict.
        normalized_node = _show_normalized_node(node)
        node.update(normalized_node)

        result[node['instance_id']] = node

    # Strip any ':driver' suffix from the active provider name.
    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]

    # NOTE(review): force-enables the node-list cache by mutating the
    # global __opts__ -- pre-existing side effect, left unchanged.
    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)

    return result
def list_nodes(call=None):
    '''
    Return a brief listing of the instances on the provider, keeping only
    the standard salt-cloud fields.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    fields = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')

    return {
        instance_id: {field: node[field] for field in fields}
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only the
    instance names and their states are returned.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        instance_id: {
            'name': node['instance_name'],
            'status': node['status'],
        }
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with selected
    fields.

    The field selection comes from the ``query.selection`` option in the
    salt-cloud configuration.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full('function'),
        __opts__['query.selection'],
        call,
    )
def _query_node_data(instance_id):
    '''
    Polling helper for salt.utils.cloud.wait_for_ip(): return the instance
    data once the instance reports a private IP, a falsy value otherwise.
    '''
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    return data if data.get('private_ips', []) else None
def create(vm_):
    '''
    Create a single instance from a data dict.

    Fires the salt-cloud lifecycle events (creating / requesting /
    created), launches the instance via the RunInstances API call, waits
    until it reports a private IP, then bootstraps Salt onto it over that
    private address. On wait timeout/failure the instance is destroyed.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'qingcloud',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])

    # RunInstances request parameters; zone/size/image are validated
    # against the provider's available values by their helpers.
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = query(params)
    new_instance_id = result['instances'][0]

    try:
        # Poll until the instance reports a private IP (or time out).
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    private_ip = data['private_ips'][0]

    log.debug('VM %s is now running', private_ip)

    # Bootstrap happens over the private address.
    vm_['ssh_host'] = private_ip

    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)

    log.info('Created Cloud VM \'%s\'', vm_['name'])

    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return data
def script(vm_):
    '''
    Return the script deployment object.

    Renders the OS-appropriate deploy script for *vm_*, embedding the
    minion configuration serialized as YAML.
    '''
    deploy_script = salt.utils.cloud.os_script(
        config.get_cloud_config_value('script', vm_, __opts__),
        vm_,
        __opts__,
        salt.utils.cloud.salt_config_to_yaml(
            salt.utils.cloud.minion_config(__opts__, vm_)
        )
    )
    return deploy_script
def start(instance_id, call=None):
    '''
    Start an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        # Fixed copy/paste error: the message previously said "stop".
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    log.info('Starting instance %s', instance_id)

    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    log.info('Stopping instance %s', instance_id)

    # The API expects force as an integer flag (0/1).
    request = {
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        'force': int(force),
    }

    return query(request)
def reboot(instance_id, call=None):
    '''
    Reboot an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        # Fixed copy/paste error: the message previously said "stop".
        raise SaltCloudSystemExit(
            'The reboot action must be called with -a or --action.'
        )

    log.info('Rebooting instance %s', instance_id)

    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
def destroy(instance_id, call=None):
    '''
    Destroy an instance.

    Fires the destroying/destroyed salt-cloud events around a
    TerminateInstances API call.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Look up the instance first so events can carry its human-readable name.
    instance_data = show_instance(instance_id, call='action')
    name = instance_data['instance_name']

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    params = {
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
create
|
python
|
def create(vm_):
    '''
    Create a single instance from a data dict.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'qingcloud',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Standard salt-cloud lifecycle event: creation is starting.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])

    # Build the RunInstances request from the profile/VM settings.
    # NOTE(review): 'vxnets', 'login_mode' and 'login_keypair' are read
    # directly from vm_ and will raise KeyError if missing -- confirm they
    # are documented as required profile options.
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = query(params)
    new_instance_id = result['instances'][0]

    # Poll until the instance reports a private IP, or give up after the
    # configured timeout.
    try:
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            # Re-raise the original timeout/failure as a system exit.
            raise SaltCloudSystemExit(six.text_type(exc))

    private_ip = data['private_ips'][0]
    log.debug('VM %s is now running', private_ip)

    # Bootstrap over the private address.
    vm_['ssh_host'] = private_ip

    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))

    # Final lifecycle event: creation finished.
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return data
|
Create a single instance from a data dict.
CLI Examples:
.. code-block:: bash
salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
salt-cloud -m /path/to/mymap.sls -P
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L647-L747
|
[
"def destroy(instance_id, call=None):\n '''\n Destroy an instance.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -a destroy i-2f733r5n\n salt-cloud -d i-2f733r5n\n '''\n if call == 'function':\n raise SaltCloudSystemExit(\n 'The destroy action must be called with -d, --destroy, '\n '-a or --action.'\n )\n\n instance_data = show_instance(instance_id, call='action')\n name = instance_data['instance_name']\n\n __utils__['cloud.fire_event'](\n 'event',\n 'destroying instance',\n 'salt/cloud/{0}/destroying'.format(name),\n args={'name': name},\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n\n params = {\n 'action': 'TerminateInstances',\n 'zone': _get_specified_zone(provider=get_configured_provider()),\n 'instances.1': instance_id,\n }\n result = query(params)\n\n __utils__['cloud.fire_event'](\n 'event',\n 'destroyed instance',\n 'salt/cloud/{0}/destroyed'.format(name),\n args={'name': name},\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n\n return result\n",
"def query(params=None):\n '''\n Make a web call to QingCloud IaaS API.\n '''\n path = 'https://api.qingcloud.com/iaas/'\n\n access_key_id = config.get_cloud_config_value(\n 'access_key_id', get_configured_provider(), __opts__, search_global=False\n )\n access_key_secret = config.get_cloud_config_value(\n 'secret_access_key', get_configured_provider(), __opts__, search_global=False\n )\n\n # public interface parameters\n real_parameters = {\n 'access_key_id': access_key_id,\n 'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,\n 'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'version': DEFAULT_QINGCLOUD_API_VERSION,\n }\n\n # include action or function parameters\n if params:\n for key, value in params.items():\n if isinstance(value, list):\n for i in range(1, len(value) + 1):\n if isinstance(value[i - 1], dict):\n for sk, sv in value[i - 1].items():\n if isinstance(sv, dict) or isinstance(sv, list):\n sv = salt.utils.json.dumps(sv, separators=(',', ':'))\n real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv\n else:\n real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]\n else:\n real_parameters[key] = value\n\n # Calculate the string for Signature\n signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')\n real_parameters['signature'] = signature\n\n # print('parameters:')\n # pprint.pprint(real_parameters)\n\n request = requests.get(path, params=real_parameters, verify=False)\n\n # print('url:')\n # print(request.url)\n\n if request.status_code != 200:\n raise SaltCloudSystemExit(\n 'An error occurred while querying QingCloud. HTTP Code: {0} '\n 'Error: \\'{1}\\''.format(\n request.status_code,\n request.text\n )\n )\n\n log.debug(request.url)\n\n content = request.text\n result = salt.utils.json.loads(content)\n\n # print('response:')\n # pprint.pprint(result)\n\n if result['ret_code'] != 0:\n raise SaltCloudSystemExit(\n pprint.pformat(result.get('message', {}))\n )\n\n return result\n",
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def is_profile_configured(opts, provider, profile_name, vm_=None):\n '''\n Check if the requested profile contains the minimum required parameters for\n a profile.\n\n Required parameters include image and provider for all drivers, while some\n drivers also require size keys.\n\n .. versionadded:: 2015.8.0\n '''\n # Standard dict keys required by all drivers.\n required_keys = ['provider']\n alias, driver = provider.split(':')\n\n # Most drivers need an image to be specified, but some do not.\n non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone', 'profitbricks']\n\n # Most drivers need a size, but some do not.\n non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',\n 'softlayer', 'softlayer_hw', 'vmware', 'vsphere',\n 'virtualbox', 'libvirt', 'oneandone', 'profitbricks']\n\n provider_key = opts['providers'][alias][driver]\n profile_key = opts['providers'][alias][driver]['profiles'][profile_name]\n\n # If cloning on Linode, size and image are not necessary.\n # They are obtained from the to-be-cloned VM.\n if driver == 'linode' and profile_key.get('clonefrom', False):\n non_image_drivers.append('linode')\n non_size_drivers.append('linode')\n elif driver == 'gce' and 'sourceImage' in six.text_type(vm_.get('ex_disks_gce_struct')):\n non_image_drivers.append('gce')\n\n # If cloning on VMware, specifying image is not necessary.\n if driver == 'vmware' and 'image' not in list(profile_key.keys()):\n non_image_drivers.append('vmware')\n\n if driver not in non_image_drivers:\n required_keys.append('image')\n if driver == 'vmware':\n required_keys.append('datastore')\n elif driver in ['linode', 'virtualbox']:\n required_keys.append('clonefrom')\n elif driver == 'nova':\n nova_image_keys = ['image', 'block_device_mapping', 'block_device', 'boot_volume']\n if not any([key in provider_key for key in nova_image_keys]) and not any([key in profile_key for key in nova_image_keys]):\n required_keys.extend(nova_image_keys)\n\n if driver not 
in non_size_drivers:\n required_keys.append('size')\n\n # Check if required fields are supplied in the provider config. If they\n # are present, remove it from the required_keys list.\n for item in list(required_keys):\n if item in provider_key:\n required_keys.remove(item)\n\n # If a vm_ dict was passed in, use that information to get any other configs\n # that we might have missed thus far, such as a option provided in a map file.\n if vm_:\n for item in list(required_keys):\n if item in vm_:\n required_keys.remove(item)\n\n # Check for remaining required parameters in the profile config.\n for item in required_keys:\n if profile_key.get(item, None) is None:\n # There's at least one required configuration item which is not set.\n log.error(\n \"The required '%s' configuration setting is missing from \"\n \"the '%s' profile, which is configured under the '%s' alias.\",\n item, profile_name, alias\n )\n return False\n\n return True\n",
"def wait_for_ip(update_callback,\n update_args=None,\n update_kwargs=None,\n timeout=5 * 60,\n interval=5,\n interval_multiplier=1,\n max_failures=10):\n '''\n Helper function that waits for an IP address for a specific maximum amount\n of time.\n\n :param update_callback: callback function which queries the cloud provider\n for the VM ip address. It must return None if the\n required data, IP included, is not available yet.\n :param update_args: Arguments to pass to update_callback\n :param update_kwargs: Keyword arguments to pass to update_callback\n :param timeout: The maximum amount of time(in seconds) to wait for the IP\n address.\n :param interval: The looping interval, i.e., the amount of time to sleep\n before the next iteration.\n :param interval_multiplier: Increase the interval by this multiplier after\n each request; helps with throttling\n :param max_failures: If update_callback returns ``False`` it's considered\n query failure. This value is the amount of failures\n accepted before giving up.\n :returns: The update_callback returned data\n :raises: SaltCloudExecutionTimeout\n\n '''\n if update_args is None:\n update_args = ()\n if update_kwargs is None:\n update_kwargs = {}\n\n duration = timeout\n while True:\n log.debug(\n 'Waiting for VM IP. Giving up in 00:%02d:%02d.',\n int(timeout // 60), int(timeout % 60)\n )\n data = update_callback(*update_args, **update_kwargs)\n if data is False:\n log.debug(\n '\\'update_callback\\' has returned \\'False\\', which is '\n 'considered a failure. 
Remaining Failures: %s.', max_failures\n )\n max_failures -= 1\n if max_failures <= 0:\n raise SaltCloudExecutionFailure(\n 'Too many failures occurred while waiting for '\n 'the IP address.'\n )\n elif data is not None:\n return data\n\n if timeout < 0:\n raise SaltCloudExecutionTimeout(\n 'Unable to get IP for 00:{0:02d}:{1:02d}.'.format(\n int(duration // 60),\n int(duration % 60)\n )\n )\n time.sleep(interval)\n timeout -= interval\n\n if interval_multiplier > 1:\n interval *= interval_multiplier\n if interval > timeout:\n interval = timeout + 1\n log.info('Interval multiplier in effect; interval is '\n 'now %ss.', interval)\n",
"def _get_size(vm_):\n '''\n Return the VM's size. Used by create().\n '''\n sizes = avail_sizes()\n\n vm_size = six.text_type(config.get_cloud_config_value(\n 'size', vm_, __opts__, search_global=False\n ))\n\n if not vm_size:\n raise SaltCloudNotFound('No size specified for this instance.')\n\n if vm_size in sizes.keys():\n return vm_size\n\n raise SaltCloudNotFound(\n 'The specified size, \\'{0}\\', could not be found.'.format(vm_size)\n )\n",
"def _get_location(vm_=None):\n '''\n Return the VM's location. Used by create().\n '''\n locations = avail_locations()\n\n vm_location = six.text_type(config.get_cloud_config_value(\n 'zone', vm_, __opts__, search_global=False\n ))\n\n if not vm_location:\n raise SaltCloudNotFound('No location specified for this VM.')\n\n if vm_location in locations:\n return vm_location\n\n raise SaltCloudNotFound(\n 'The specified location, \\'{0}\\', could not be found.'.format(\n vm_location\n )\n )\n",
"def _get_image(vm_):\n '''\n Return the VM's image. Used by create().\n '''\n images = avail_images()\n vm_image = six.text_type(config.get_cloud_config_value(\n 'image', vm_, __opts__, search_global=False\n ))\n\n if not vm_image:\n raise SaltCloudNotFound('No image specified for this VM.')\n\n if vm_image in images:\n return vm_image\n\n raise SaltCloudNotFound(\n 'The specified image, \\'{0}\\', could not be found.'.format(vm_image)\n )\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameters to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Only load this module when the QingCloud provider is configured and
    all driver dependencies are importable.
    '''
    # Short-circuit: skip the dependency warning when the provider itself
    # is not configured (matching the original evaluation order).
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured QingCloud provider instance, or False when
    the required settings are missing.
    '''
    required = ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    provider_name = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(__opts__, provider_name, required)
def get_dependencies():
    '''
    Warn (via salt.config) when the third-party ``requests`` dependency
    is not importable.
    '''
    deps = {'requests': HAS_REQUESTS}
    return config.check_driver_dependencies(__virtualname__, deps)
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html

    The ``signature_method`` key is added to ``parameters`` as a side
    effect (callers rely on it being part of the signed query string).
    Returns the base64 signature as a text string.
    '''
    parameters['signature_method'] = 'HmacSHA256'

    # Canonical request: METHOD \n PATH \n sorted, URL-encoded query string.
    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    pairs = []
    for key in sorted(parameters.keys()):
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    string_to_sign += '&'.join(pairs)

    # hmac requires bytes for both the key and the message on Python 3;
    # the previous implementation passed text objects and raised a
    # TypeError inside hmac.new()/update().
    if not isinstance(access_key_secret, bytes):
        access_key_secret = access_key_secret.encode('utf-8')
    digest = hmac.new(
        access_key_secret,
        string_to_sign.encode('utf-8'),
        digestmod=sha256,
    ).digest()

    signature = base64.b64encode(digest).strip()
    if isinstance(signature, bytes):
        # b64encode returns bytes on Python 3; callers embed the value in
        # a text query string, so decode to ASCII text.
        signature = signature.decode('ascii')
    return signature
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.

    params
        Optional dict of action parameters. List values are flattened
        into the 1-based ``key.N`` (and ``key.N.subkey``) form that the
        QingCloud API expects.

    Returns the decoded JSON response body as a dict.

    Raises SaltCloudSystemExit on a non-200 HTTP status or a non-zero
    QingCloud ``ret_code``.
    '''
    path = 'https://api.qingcloud.com/iaas/'

    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )

    # public interface parameters common to every request
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }

    # include action or function parameters
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                # Lists become 1-based indexed keys: key.1, key.2, ...
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        # Dict elements expand to key.N.subkey entries;
                        # nested containers are serialized as compact JSON.
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value

    # Calculate the string for Signature (also adds 'signature_method').
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature

    # NOTE(review): TLS certificate verification is disabled here
    # (verify=False), which weakens transport security -- confirm whether
    # this is still required before keeping it.
    request = requests.get(path, params=real_parameters, verify=False)

    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )

    log.debug(request.url)

    content = request.text
    result = salt.utils.json.loads(content)

    # The API signals application-level failure via a non-zero ret_code.
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )

    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations (zones) on the provider,
    keyed by zone id, with every attribute rendered as text.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    items = query(params={'action': 'DescribeZones'})

    ret = {}
    for zone in items['zone_set']:
        ret[zone['zone_id']] = dict(
            (attr, six.text_type(zone[attr])) for attr in zone
        )
    return ret
def _get_location(vm_=None):
    '''
    Return the VM's zone, validated against the zones the provider
    reports. Used by create().
    '''
    known_zones = avail_locations()

    vm_location = six.text_type(config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    ))

    if not vm_location:
        raise SaltCloudNotFound('No location specified for this VM.')

    if vm_location not in known_zones:
        raise SaltCloudNotFound(
            'The specified location, \'{0}\', could not be found.'.format(
                vm_location
            )
        )
    return vm_location
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return the system-provided images available in a zone, keyed by
    image id.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    # Shallow-copy each image record into the result map.
    return dict(
        (image['image_id'], dict(image)) for image in items['image_set']
    )
def _get_image(vm_):
    '''
    Return the VM's image id, validated against the images available in
    the configured zone. Used by create().
    '''
    available = avail_images()

    vm_image = six.text_type(config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    ))

    if not vm_image:
        raise SaltCloudNotFound('No image specified for this VM.')

    if vm_image not in available:
        raise SaltCloudNotFound(
            'The specified image, \'{0}\', could not be found.'.format(vm_image)
        )
    return vm_image
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning one or more images.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # A comma-separated 'image' value selects several images at once.
    image_ids = kwargs['image'].split(',')

    items = query(params={
        'action': 'DescribeImages',
        'images': image_ids,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')

    return dict(
        (image['image_id'], dict(image)) for image in items['image_set']
    )
# QingCloud doesn't provide an API for getting instance sizes, so the
# known size names (CPU count / memory) are hard-coded per zone below.
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}

# The ap1 and gd1 zones share the pek2 size catalogue (aliased, not copied).
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return the instance sizes known for the selected zone.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Return a two-level copy so callers cannot mutate the shared table.
    return dict(
        (size, dict(attrs)) for size, attrs in QINGCLOUD_SIZES[zone].items()
    )
def _get_size(vm_):
    '''
    Return the VM's size name, validated against the sizes known for the
    configured zone. Used by create().
    '''
    known_sizes = avail_sizes()

    vm_size = six.text_type(config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    ))

    if not vm_size:
        raise SaltCloudNotFound('No size specified for this instance.')

    if vm_size not in known_sizes:
        raise SaltCloudNotFound(
            'The specified size, \'{0}\', could not be found.'.format(vm_size)
        )
    return vm_size
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return full details for every instance on the provider, keyed by
    instance id, and refresh the salt-cloud node cache.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()
    items = query(params={
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    })
    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    ret = {}
    if items['total_count'] == 0:
        return ret

    for node in items['instance_set']:
        # Merge the normalized fields into the raw record so both views
        # are available to callers.
        node.update(_show_normalized_node(node))
        ret[node['instance_id']] = node

    # Strip a '<alias>:<driver>' provider down to the alias for caching.
    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        provider = provider.split(':')[0]

    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)

    return ret
def list_nodes(call=None):
    '''
    Return a short summary of every instance on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    summary_fields = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')
    return dict(
        (instance_id, dict((field, node[field]) for field in summary_fields))
        for instance_id, node in list_nodes_full().items()
    )
def list_nodes_min(call=None):
    '''
    Return only the name and state of every instance on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return dict(
        (instance_id, {'name': node['instance_name'], 'status': node['status']})
        for instance_id, node in list_nodes_full().items()
    )
def list_nodes_select(call=None):
    '''
    Return selected fields for every instance, as configured by the
    ``query.selection`` option.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    selection = __opts__['query.selection']
    nodes = list_nodes_full('function')
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning a single instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    items = query(params={
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    })

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    # Merge the normalized fields into the raw record before returning.
    node = items['instance_set'][0]
    node.update(_show_normalized_node(node))
    return node
def _query_node_data(instance_id):
    '''
    Callback for salt.utils.cloud.wait_for_ip: return the node data once
    a private IP is assigned, False on lookup failure, and None while the
    address is still pending.
    '''
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    if not data.get('private_ips', []):
        # No address yet -- None tells wait_for_ip to keep polling.
        return None
    return data
def script(vm_):
    '''
    Return the deploy-script object for this VM, with the rendered minion
    configuration embedded.
    '''
    script_name = config.get_cloud_config_value('script', vm_, __opts__)
    minion_yaml = salt.utils.cloud.salt_config_to_yaml(
        salt.utils.cloud.minion_config(__opts__, vm_)
    )
    return salt.utils.cloud.os_script(script_name, vm_, __opts__, minion_yaml)
def start(instance_id, call=None):
    '''
    Start an instance.

    instance_id
        The QingCloud instance id (e.g. ``i-2f733r5n``).

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        # Fixed: the message previously said 'stop' for the start action.
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    log.info('Starting instance %s', instance_id)

    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    return query(params)
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance.

    force
        When True, request a hard shutdown (sent to the API as ``force=1``).

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    log.info('Stopping instance %s', instance_id)

    return query({
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        'force': int(force),
    })
def reboot(instance_id, call=None):
    '''
    Reboot an instance.

    instance_id
        The QingCloud instance id (e.g. ``i-2f733r5n``).

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        # Fixed: the message previously said 'stop' for the reboot action.
        raise SaltCloudSystemExit(
            'The reboot action must be called with -a or --action.'
        )

    log.info('Rebooting instance %s', instance_id)

    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    return query(params)
def destroy(instance_id, call=None):
    '''
    Destroy an instance.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Resolve the instance name first so the lifecycle events can report it.
    instance_data = show_instance(instance_id, call='action')
    name = instance_data['instance_name']

    # Standard salt-cloud lifecycle event: destruction is starting.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    params = {
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    # ...and the matching event once the API call has returned.
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
start
|
python
|
def start(instance_id, call=None):
'''
Start an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a start i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Starting instance %s', instance_id)
params = {
'action': 'StartInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
return result
|
Start an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a start i-2f733r5n
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L766-L790
|
[
"def query(params=None):\n '''\n Make a web call to QingCloud IaaS API.\n '''\n path = 'https://api.qingcloud.com/iaas/'\n\n access_key_id = config.get_cloud_config_value(\n 'access_key_id', get_configured_provider(), __opts__, search_global=False\n )\n access_key_secret = config.get_cloud_config_value(\n 'secret_access_key', get_configured_provider(), __opts__, search_global=False\n )\n\n # public interface parameters\n real_parameters = {\n 'access_key_id': access_key_id,\n 'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,\n 'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'version': DEFAULT_QINGCLOUD_API_VERSION,\n }\n\n # include action or function parameters\n if params:\n for key, value in params.items():\n if isinstance(value, list):\n for i in range(1, len(value) + 1):\n if isinstance(value[i - 1], dict):\n for sk, sv in value[i - 1].items():\n if isinstance(sv, dict) or isinstance(sv, list):\n sv = salt.utils.json.dumps(sv, separators=(',', ':'))\n real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv\n else:\n real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]\n else:\n real_parameters[key] = value\n\n # Calculate the string for Signature\n signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')\n real_parameters['signature'] = signature\n\n # print('parameters:')\n # pprint.pprint(real_parameters)\n\n request = requests.get(path, params=real_parameters, verify=False)\n\n # print('url:')\n # print(request.url)\n\n if request.status_code != 200:\n raise SaltCloudSystemExit(\n 'An error occurred while querying QingCloud. HTTP Code: {0} '\n 'Error: \\'{1}\\''.format(\n request.status_code,\n request.text\n )\n )\n\n log.debug(request.url)\n\n content = request.text\n result = salt.utils.json.loads(content)\n\n # print('response:')\n # pprint.pprint(result)\n\n if result['ret_code'] != 0:\n raise SaltCloudSystemExit(\n pprint.pformat(result.get('message', {}))\n )\n\n return result\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('access_key_id', 'secret_access_key', 'zone', 'key_filename')\n )\n",
"def _get_specified_zone(kwargs=None, provider=None):\n if provider is None:\n provider = get_configured_provider()\n\n if isinstance(kwargs, dict):\n zone = kwargs.get('zone', None)\n if zone is not None:\n return zone\n\n zone = provider['zone']\n return zone\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Only load this module when both the provider configuration and the
    required third-party dependencies are present.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.

    Delegates to ``config.is_provider_configured`` with the keys this
    driver requires; returns False when they are not all present.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.

    Reports whether the third-party ``requests`` library imported
    successfully; delegates to ``config.check_driver_dependencies``.
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'requests': HAS_REQUESTS}
    )
def _compute_signature(parameters, access_key_secret, method, path):
'''
Generate an API request signature. Detailed document can be found at:
https://docs.qingcloud.com/api/common/signature.html
'''
parameters['signature_method'] = 'HmacSHA256'
string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)
keys = sorted(parameters.keys())
pairs = []
for key in keys:
val = six.text_type(parameters[key]).encode('utf-8')
pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
h = hmac.new(access_key_secret, digestmod=sha256)
h.update(string_to_sign)
signature = base64.b64encode(h.digest()).strip()
return signature
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.

    params
        Optional dict of action-specific parameters. List values are
        flattened into the 1-indexed ``key.N`` / ``key.N.subkey`` form
        that the QingCloud API expects.

    Returns the decoded JSON response as a dict. Raises
    SaltCloudSystemExit on a non-200 HTTP status or a non-zero
    ``ret_code`` in the response body.
    '''
    path = 'https://api.qingcloud.com/iaas/'
    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )
    # public interface parameters
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }
    # include action or function parameters
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                # QingCloud list parameters are 1-indexed: key.1, key.2, ...
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                # Nested structures are serialized as compact JSON.
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value
    # Calculate the string for Signature
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature
    # print('parameters:')
    # pprint.pprint(real_parameters)
    # NOTE(review): TLS certificate verification is disabled here
    # (verify=False) -- confirm whether this is intentional.
    request = requests.get(path, params=real_parameters, verify=False)
    # print('url:')
    # print(request.url)
    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )
    log.debug(request.url)
    content = request.text
    result = salt.utils.json.loads(content)
    # print('response:')
    # pprint.pprint(result)
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )
    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations on the provider with
    relevant data.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    items = query(params={'action': 'DescribeZones'})

    # Stringify every attribute of every zone, keyed by zone id.
    return {
        zone['zone_id']: {attr: six.text_type(zone[attr]) for attr in zone}
        for zone in items['zone_set']
    }
def _get_location(vm_=None):
    '''
    Return the VM's location. Used by create().
    '''
    valid_locations = avail_locations()

    requested = six.text_type(config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No location specified for this VM.')

    if requested not in valid_locations:
        raise SaltCloudNotFound(
            'The specified location, \'{0}\', could not be found.'.format(
                requested
            )
        )

    return requested
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the images that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    # Index a shallow copy of each image record by its image id.
    return {image['image_id']: dict(image) for image in items['image_set']}
def _get_image(vm_):
    '''
    Return the VM's image. Used by create().
    '''
    available = avail_images()

    requested = six.text_type(config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No image specified for this VM.')

    if requested not in available:
        raise SaltCloudNotFound(
            'The specified image, \'{0}\', could not be found.'.format(requested)
        )

    return requested
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning an image.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # 'image' may be a comma-separated list of image ids.
    image_ids = kwargs['image'].split(',')

    items = query(params={
        'action': 'DescribeImages',
        'images': image_ids,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')

    return {image['image_id']: dict(image) for image in items['image_set']}
# QingCloud doesn't provide an API for getting instance sizes, so the
# table below is hard-coded, keyed by zone id then by instance type name.
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
# The ap1 and gd1 zones offer the same size catalogue as pek2.
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return a list of the instance sizes that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Copy each size entry so callers cannot mutate the shared table.
    return {
        size: dict(attributes)
        for size, attributes in QINGCLOUD_SIZES[zone].items()
    }
def _get_size(vm_):
    '''
    Return the VM's size. Used by create().
    '''
    sizes = avail_sizes()

    requested = six.text_type(config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No size specified for this instance.')

    if requested not in sizes:
        raise SaltCloudNotFound(
            'The specified size, \'{0}\', could not be found.'.format(requested)
        )

    return requested
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    Queries every non-terminated instance in the configured zone,
    merges the normalized salt-cloud fields into each raw record and
    refreshes the node cache for the active provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )
    zone = _get_specified_zone()
    params = {
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    }
    items = query(params=params)
    log.debug('Total %s instances found in zone %s', items['total_count'], zone)
    result = {}
    if items['total_count'] == 0:
        return result
    for node in items['instance_set']:
        normalized_node = _show_normalized_node(node)
        node.update(normalized_node)
        result[node['instance_id']] = node
    # Strip a driver suffix (e.g. 'my-qingcloud:qingcloud') down to the
    # bare provider name before updating the node cache.
    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)
    return result
def list_nodes(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    # Keep only the generic salt-cloud fields from each full record.
    fields = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')
    return {
        instance_id: {field: node[field] for field in fields}
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only a list of
    instances names, and their state, is returned.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        instance_id: {
            'name': node['instance_name'],
            'status': node['status'],
        }
        for instance_id, node in list_nodes_full().items()
    }
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with selected
    fields.

    The selected fields come from the ``query.selection`` option in the
    cloud configuration.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full('function'),
        __opts__['query.selection'],
        call,
    )
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    items = query(params={
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
    })

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    # Merge the normalized salt-cloud fields into the raw record.
    instance = items['instance_set'][0]
    instance.update(_show_normalized_node(instance))

    return instance
def _query_node_data(instance_id):
    # Poll helper for salt.utils.cloud.wait_for_ip(): returns the instance
    # data once at least one private IP is present, False when the lookup
    # yields nothing, and implicitly None (treated as "not ready yet")
    # while no private IP has been assigned.
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    if data.get('private_ips', []):
        return data
def create(vm_):
    '''
    Create a single instance from a data dict.

    vm_
        Profile/VM definition dict; must provide name, zone, size,
        image, vxnets, login_mode and login_keypair.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'qingcloud',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    # params
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    result = query(params)
    new_instance_id = result['instances'][0]
    try:
        # Block until the new instance reports a private IP, or time out.
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            # NOTE(review): destroy() expects an instance id but is given
            # the VM name here -- confirm this cleanup path actually works.
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    private_ip = data['private_ips'][0]
    log.debug('VM %s is now running', private_ip)
    vm_['ssh_host'] = private_ip
    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return data
def script(vm_):
    '''
    Return the script deployment object.

    Renders the configured deploy script for the VM, embedding a minion
    configuration generated from the master opts as YAML.
    '''
    deploy_script = salt.utils.cloud.os_script(
        config.get_cloud_config_value('script', vm_, __opts__),
        vm_,
        __opts__,
        salt.utils.cloud.salt_config_to_yaml(
            salt.utils.cloud.minion_config(__opts__, vm_)
        )
    )
    return deploy_script
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    log.info('Stopping instance %s', instance_id)

    # QingCloud expects ``force`` as an integer flag (0/1), not a boolean.
    request_params = {
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        'force': int(force),
    }

    return query(request_params)
def reboot(instance_id, call=None):
    '''
    Reboot an instance.

    instance_id
        The QingCloud instance id to restart.
    call
        Salt-cloud invocation mode; must be ``'action'``.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        # Fixed copy-pasted message: this is the reboot action, not stop.
        raise SaltCloudSystemExit(
            'The reboot action must be called with -a or --action.'
        )

    log.info('Rebooting instance %s', instance_id)

    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
def destroy(instance_id, call=None):
    '''
    Destroy (terminate) an instance.

    instance_id
        The QingCloud instance id to terminate.
    call
        Salt-cloud invocation mode; must not be ``'function'``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Look up the instance first so the fired events can carry its name.
    instance_data = show_instance(instance_id, call='action')
    name = instance_data['instance_name']
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    params = {
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
stop
|
python
|
def stop(instance_id, force=False, call=None):
'''
Stop an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a stop i-2f733r5n
salt-cloud -a stop i-2f733r5n force=True
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Stopping instance %s', instance_id)
params = {
'action': 'StopInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
'force': int(force),
}
result = query(params)
return result
|
Stop an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a stop i-2f733r5n
salt-cloud -a stop i-2f733r5n force=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L793-L819
|
[
"def query(params=None):\n '''\n Make a web call to QingCloud IaaS API.\n '''\n path = 'https://api.qingcloud.com/iaas/'\n\n access_key_id = config.get_cloud_config_value(\n 'access_key_id', get_configured_provider(), __opts__, search_global=False\n )\n access_key_secret = config.get_cloud_config_value(\n 'secret_access_key', get_configured_provider(), __opts__, search_global=False\n )\n\n # public interface parameters\n real_parameters = {\n 'access_key_id': access_key_id,\n 'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,\n 'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'version': DEFAULT_QINGCLOUD_API_VERSION,\n }\n\n # include action or function parameters\n if params:\n for key, value in params.items():\n if isinstance(value, list):\n for i in range(1, len(value) + 1):\n if isinstance(value[i - 1], dict):\n for sk, sv in value[i - 1].items():\n if isinstance(sv, dict) or isinstance(sv, list):\n sv = salt.utils.json.dumps(sv, separators=(',', ':'))\n real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv\n else:\n real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]\n else:\n real_parameters[key] = value\n\n # Calculate the string for Signature\n signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')\n real_parameters['signature'] = signature\n\n # print('parameters:')\n # pprint.pprint(real_parameters)\n\n request = requests.get(path, params=real_parameters, verify=False)\n\n # print('url:')\n # print(request.url)\n\n if request.status_code != 200:\n raise SaltCloudSystemExit(\n 'An error occurred while querying QingCloud. HTTP Code: {0} '\n 'Error: \\'{1}\\''.format(\n request.status_code,\n request.text\n )\n )\n\n log.debug(request.url)\n\n content = request.text\n result = salt.utils.json.loads(content)\n\n # print('response:')\n # pprint.pprint(result)\n\n if result['ret_code'] != 0:\n raise SaltCloudSystemExit(\n pprint.pformat(result.get('message', {}))\n )\n\n return result\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('access_key_id', 'secret_access_key', 'zone', 'key_filename')\n )\n",
"def _get_specified_zone(kwargs=None, provider=None):\n if provider is None:\n provider = get_configured_provider()\n\n if isinstance(kwargs, dict):\n zone = kwargs.get('zone', None)\n if zone is not None:\n return zone\n\n zone = provider['zone']\n return zone\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Only load this module when both the provider configuration and the
    required third-party dependencies are present.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.

    Delegates to ``config.is_provider_configured`` with the keys this
    driver requires; returns False when they are not all present.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.

    Reports whether the third-party ``requests`` library imported
    successfully; delegates to ``config.check_driver_dependencies``.
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'requests': HAS_REQUESTS}
    )
def _compute_signature(parameters, access_key_secret, method, path):
'''
Generate an API request signature. Detailed document can be found at:
https://docs.qingcloud.com/api/common/signature.html
'''
parameters['signature_method'] = 'HmacSHA256'
string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)
keys = sorted(parameters.keys())
pairs = []
for key in keys:
val = six.text_type(parameters[key]).encode('utf-8')
pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
h = hmac.new(access_key_secret, digestmod=sha256)
h.update(string_to_sign)
signature = base64.b64encode(h.digest()).strip()
return signature
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.

    params
        Optional dict of action-specific parameters. List values are
        flattened into the 1-indexed ``key.N`` / ``key.N.subkey`` form
        that the QingCloud API expects.

    Returns the decoded JSON response as a dict. Raises
    SaltCloudSystemExit on a non-200 HTTP status or a non-zero
    ``ret_code`` in the response body.
    '''
    path = 'https://api.qingcloud.com/iaas/'
    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )
    # public interface parameters
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }
    # include action or function parameters
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                # QingCloud list parameters are 1-indexed: key.1, key.2, ...
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                # Nested structures are serialized as compact JSON.
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value
    # Calculate the string for Signature
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature
    # print('parameters:')
    # pprint.pprint(real_parameters)
    # NOTE(review): TLS certificate verification is disabled here
    # (verify=False) -- confirm whether this is intentional.
    request = requests.get(path, params=real_parameters, verify=False)
    # print('url:')
    # print(request.url)
    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )
    log.debug(request.url)
    content = request.text
    result = salt.utils.json.loads(content)
    # print('response:')
    # pprint.pprint(result)
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )
    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations on the provider with
    relevant data.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    items = query(params={'action': 'DescribeZones'})

    # Stringify every attribute of every zone, keyed by zone id.
    return {
        zone['zone_id']: {attr: six.text_type(zone[attr]) for attr in zone}
        for zone in items['zone_set']
    }
def _get_location(vm_=None):
    '''
    Return the VM's location. Used by create().
    '''
    valid_locations = avail_locations()

    requested = six.text_type(config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No location specified for this VM.')

    if requested not in valid_locations:
        raise SaltCloudNotFound(
            'The specified location, \'{0}\', could not be found.'.format(
                requested
            )
        )

    return requested
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the images that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    # Index a shallow copy of each image record by its image id.
    return {image['image_id']: dict(image) for image in items['image_set']}
def _get_image(vm_):
    '''
    Return the VM's image. Used by create().
    '''
    available = avail_images()

    requested = six.text_type(config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    ))
    if not requested:
        raise SaltCloudNotFound('No image specified for this VM.')

    if requested not in available:
        raise SaltCloudNotFound(
            'The specified image, \'{0}\', could not be found.'.format(requested)
        )

    return requested
def show_image(kwargs, call=None):
'''
Show the details from QingCloud concerning an image.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_image my-qingcloud image=trustysrvx64c
salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_images function must be called with '
'-f or --function'
)
if not isinstance(kwargs, dict):
kwargs = {}
images = kwargs['image']
images = images.split(',')
params = {
'action': 'DescribeImages',
'images': images,
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
if not items['image_set']:
raise SaltCloudNotFound('The specified image could not be found.')
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result
# QingCloud doesn't provide an API for getting instance sizes
QINGCLOUD_SIZES = {
'pek2': {
'c1m1': {'cpu': 1, 'memory': '1G'},
'c1m2': {'cpu': 1, 'memory': '2G'},
'c1m4': {'cpu': 1, 'memory': '4G'},
'c2m2': {'cpu': 2, 'memory': '2G'},
'c2m4': {'cpu': 2, 'memory': '4G'},
'c2m8': {'cpu': 2, 'memory': '8G'},
'c4m4': {'cpu': 4, 'memory': '4G'},
'c4m8': {'cpu': 4, 'memory': '8G'},
'c4m16': {'cpu': 4, 'memory': '16G'},
},
'pek1': {
'small_b': {'cpu': 1, 'memory': '1G'},
'small_c': {'cpu': 1, 'memory': '2G'},
'medium_a': {'cpu': 2, 'memory': '2G'},
'medium_b': {'cpu': 2, 'memory': '4G'},
'medium_c': {'cpu': 2, 'memory': '8G'},
'large_a': {'cpu': 4, 'memory': '4G'},
'large_b': {'cpu': 4, 'memory': '8G'},
'large_c': {'cpu': 4, 'memory': '16G'},
},
}
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
'''
Return a list of the instance sizes that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-sizes my-qingcloud
salt-cloud -f avail_sizes my-qingcloud zone=pek2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
zone = _get_specified_zone(kwargs, get_configured_provider())
result = {}
for size_key in QINGCLOUD_SIZES[zone]:
result[size_key] = {}
for attribute_key in QINGCLOUD_SIZES[zone][size_key]:
result[size_key][attribute_key] = QINGCLOUD_SIZES[zone][size_key][attribute_key]
return result
def _get_size(vm_):
'''
Return the VM's size. Used by create().
'''
sizes = avail_sizes()
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
if not vm_size:
raise SaltCloudNotFound('No size specified for this instance.')
if vm_size in sizes.keys():
return vm_size
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
)
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
'''
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -F my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
zone = _get_specified_zone()
params = {
'action': 'DescribeInstances',
'zone': zone,
'status': ['pending', 'running', 'stopped', 'suspended'],
}
items = query(params=params)
log.debug('Total %s instances found in zone %s', items['total_count'], zone)
result = {}
if items['total_count'] == 0:
return result
for node in items['instance_set']:
normalized_node = _show_normalized_node(node)
node.update(normalized_node)
result[node['instance_id']] = node
provider = __active_provider_name__ or 'qingcloud'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
__opts__['update_cachedir'] = True
__utils__['cloud.cache_node_list'](result, provider, __opts__)
return result
def list_nodes(call=None):
'''
Return a list of the instances that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud -Q my-qingcloud
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
nodes = list_nodes_full()
ret = {}
for instance_id, full_node in nodes.items():
ret[instance_id] = {
'id': full_node['id'],
'image': full_node['image'],
'size': full_node['size'],
'state': full_node['state'],
'public_ips': full_node['public_ips'],
'private_ips': full_node['private_ips'],
}
return ret
def list_nodes_min(call=None):
'''
Return a list of the instances that are on the provider. Only a list of
instances names, and their state, is returned.
CLI Examples:
.. code-block:: bash
salt-cloud -f list_nodes_min my-qingcloud
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
nodes = list_nodes_full()
result = {}
for instance_id, full_node in nodes.items():
result[instance_id] = {
'name': full_node['instance_name'],
'status': full_node['status'],
}
return result
def list_nodes_select(call=None):
'''
Return a list of the instances that are on the provider, with selected
fields.
CLI Examples:
.. code-block:: bash
salt-cloud -S my-qingcloud
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full('function'),
__opts__['query.selection'],
call,
)
def show_instance(instance_id, call=None, kwargs=None):
'''
Show the details from QingCloud concerning an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a show_instance i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
params = {
'action': 'DescribeInstances',
'instances.1': instance_id,
'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),
}
items = query(params=params)
if items['total_count'] == 0:
raise SaltCloudNotFound(
'The specified instance, \'{0}\', could not be found.'.format(instance_id)
)
full_node = items['instance_set'][0]
normalized_node = _show_normalized_node(full_node)
full_node.update(normalized_node)
result = full_node
return result
def _query_node_data(instance_id):
data = show_instance(instance_id, call='action')
if not data:
return False
if data.get('private_ips', []):
return data
def create(vm_):
'''
Create a single instance from a data dict.
CLI Examples:
.. code-block:: bash
salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
salt-cloud -m /path/to/mymap.sls -P
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'qingcloud',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
# params
params = {
'action': 'RunInstances',
'instance_name': vm_['name'],
'zone': _get_location(vm_),
'instance_type': _get_size(vm_),
'image_id': _get_image(vm_),
'vxnets.1': vm_['vxnets'],
'login_mode': vm_['login_mode'],
'login_keypair': vm_['login_keypair'],
}
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
result = query(params)
new_instance_id = result['instances'][0]
try:
data = salt.utils.cloud.wait_for_ip(
_query_node_data,
update_args=(new_instance_id,),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10
),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
private_ip = data['private_ips'][0]
log.debug('VM %s is now running', private_ip)
vm_['ssh_host'] = private_ip
# The instance is booted and accessible, let's Salt it!
__utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return data
def script(vm_):
'''
Return the script deployment object.
'''
deploy_script = salt.utils.cloud.os_script(
config.get_cloud_config_value('script', vm_, __opts__),
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
return deploy_script
def start(instance_id, call=None):
'''
Start an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a start i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Starting instance %s', instance_id)
params = {
'action': 'StartInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
return result
def reboot(instance_id, call=None):
'''
Reboot an instance.
CLI Examples:
.. code-block:: bash
salt-cloud -a reboot i-2f733r5n
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Rebooting instance %s', instance_id)
params = {
'action': 'RestartInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
return result
def destroy(instance_id, call=None):
    '''
    Destroy an instance.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a destroy i-2f733r5n
        salt-cloud -d i-2f733r5n
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Resolve the human-readable name for the lifecycle events below.
    name = show_instance(instance_id, call='action')['instance_name']

    fire_event = __utils__['cloud.fire_event']
    event_kwargs = {
        'args': {'name': name},
        'sock_dir': __opts__['sock_dir'],
        'transport': __opts__['transport'],
    }

    fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        **event_kwargs
    )

    result = query({
        'action': 'TerminateInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    })

    fire_event(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        **event_kwargs
    )

    return result
|
saltstack/salt
|
salt/cloud/clouds/qingcloud.py
|
destroy
|
python
|
def destroy(instance_id, call=None):
'''
Destroy an instance.
CLI Example:
.. code-block:: bash
salt-cloud -a destroy i-2f733r5n
salt-cloud -d i-2f733r5n
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
instance_data = show_instance(instance_id, call='action')
name = instance_data['instance_name']
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
params = {
'action': 'TerminateInstances',
'zone': _get_specified_zone(provider=get_configured_provider()),
'instances.1': instance_id,
}
result = query(params)
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return result
|
Destroy an instance.
CLI Example:
.. code-block:: bash
salt-cloud -a destroy i-2f733r5n
salt-cloud -d i-2f733r5n
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L849-L894
|
[
"def query(params=None):\n '''\n Make a web call to QingCloud IaaS API.\n '''\n path = 'https://api.qingcloud.com/iaas/'\n\n access_key_id = config.get_cloud_config_value(\n 'access_key_id', get_configured_provider(), __opts__, search_global=False\n )\n access_key_secret = config.get_cloud_config_value(\n 'secret_access_key', get_configured_provider(), __opts__, search_global=False\n )\n\n # public interface parameters\n real_parameters = {\n 'access_key_id': access_key_id,\n 'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,\n 'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n 'version': DEFAULT_QINGCLOUD_API_VERSION,\n }\n\n # include action or function parameters\n if params:\n for key, value in params.items():\n if isinstance(value, list):\n for i in range(1, len(value) + 1):\n if isinstance(value[i - 1], dict):\n for sk, sv in value[i - 1].items():\n if isinstance(sv, dict) or isinstance(sv, list):\n sv = salt.utils.json.dumps(sv, separators=(',', ':'))\n real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv\n else:\n real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]\n else:\n real_parameters[key] = value\n\n # Calculate the string for Signature\n signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')\n real_parameters['signature'] = signature\n\n # print('parameters:')\n # pprint.pprint(real_parameters)\n\n request = requests.get(path, params=real_parameters, verify=False)\n\n # print('url:')\n # print(request.url)\n\n if request.status_code != 200:\n raise SaltCloudSystemExit(\n 'An error occurred while querying QingCloud. HTTP Code: {0} '\n 'Error: \\'{1}\\''.format(\n request.status_code,\n request.text\n )\n )\n\n log.debug(request.url)\n\n content = request.text\n result = salt.utils.json.loads(content)\n\n # print('response:')\n # pprint.pprint(result)\n\n if result['ret_code'] != 0:\n raise SaltCloudSystemExit(\n pprint.pformat(result.get('message', {}))\n )\n\n return result\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('access_key_id', 'secret_access_key', 'zone', 'key_filename')\n )\n",
"def show_instance(instance_id, call=None, kwargs=None):\n '''\n Show the details from QingCloud concerning an instance.\n\n CLI Examples:\n\n .. code-block:: bash\n\n salt-cloud -a show_instance i-2f733r5n\n '''\n if call != 'action':\n raise SaltCloudSystemExit(\n 'The show_instance action must be called with -a or --action.'\n )\n\n params = {\n 'action': 'DescribeInstances',\n 'instances.1': instance_id,\n 'zone': _get_specified_zone(kwargs=None, provider=get_configured_provider()),\n }\n items = query(params=params)\n\n if items['total_count'] == 0:\n raise SaltCloudNotFound(\n 'The specified instance, \\'{0}\\', could not be found.'.format(instance_id)\n )\n\n full_node = items['instance_set'][0]\n normalized_node = _show_normalized_node(full_node)\n full_node.update(normalized_node)\n\n result = full_node\n\n return result\n",
"def _get_specified_zone(kwargs=None, provider=None):\n if provider is None:\n provider = get_configured_provider()\n\n if isinstance(kwargs, dict):\n zone = kwargs.get('zone', None)\n if zone is not None:\n return zone\n\n zone = provider['zone']\n return zone\n"
] |
# -*- coding: utf-8 -*-
'''
QingCloud Cloud Module
======================
.. versionadded:: 2015.8.0
The QingCloud cloud module is used to control access to the QingCloud.
http://www.qingcloud.com/
Use of this module requires the ``access_key_id``, ``secret_access_key``,
``zone`` and ``key_filename`` parameter to be set.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/qingcloud.conf``:
.. code-block:: yaml
my-qingcloud:
driver: qingcloud
access_key_id: AKIDMRTGYONNLTFFRBQJ
secret_access_key: clYwH21U5UOmcov4aNV2V2XocaHCG3JZGcxEczFu
zone: pek2
key_filename: /path/to/your.pem
:depends: requests
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
import hmac
import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.data
import salt.utils.json
import salt.config as config
from salt.exceptions import (
SaltCloudNotFound,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'qingcloud'
DEFAULT_QINGCLOUD_API_VERSION = 1
DEFAULT_QINGCLOUD_SIGNATURE_VERSION = 1
# Only load in this module if the qingcloud configurations are in place
def __virtual__():
    '''
    Only load this driver when both the QingCloud provider configuration
    and its third-party dependencies are available.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.

    Looks up this driver's provider entry in the cloud configuration and
    returns it only when all required keys (``access_key_id``,
    ``secret_access_key``, ``zone``, ``key_filename``) are present;
    otherwise ``is_provider_configured`` returns False.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('access_key_id', 'secret_access_key', 'zone', 'key_filename')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.

    Currently only the ``requests`` library is required.
    '''
    deps = {'requests': HAS_REQUESTS}
    return config.check_driver_dependencies(__virtualname__, deps)
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html

    parameters
        Dict of request parameters; ``signature_method`` is injected here.
    access_key_secret
        The provider secret used as the HMAC key.
    method / path
        HTTP verb and request path included in the string to sign.
    '''
    parameters['signature_method'] = 'HmacSHA256'

    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    # Parameters must be sorted by key and percent-encoded before signing.
    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs

    # hmac requires bytes for both key and message on Python 3; the old
    # code passed str (unicode_literals) and raised TypeError there.
    if isinstance(access_key_secret, six.text_type):
        access_key_secret = access_key_secret.encode('utf-8')
    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign.encode('utf-8'))

    signature = base64.b64encode(h.digest()).strip()

    return signature
def query(params=None):
    '''
    Make a web call to QingCloud IaaS API.

    Builds the common public parameters, merges in the action-specific
    *params*, signs the request, performs a GET, and returns the decoded
    JSON body. Raises SaltCloudSystemExit on HTTP errors or a non-zero
    QingCloud ``ret_code``.
    '''
    path = 'https://api.qingcloud.com/iaas/'

    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__, search_global=False
    )
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__, search_global=False
    )

    # public interface parameters
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }

    # include action or function parameters
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                # Lists are flattened into QingCloud's 1-based dotted form,
                # e.g. instances.1, instances.2, ...
                for i in range(1, len(value) + 1):
                    if isinstance(value[i - 1], dict):
                        for sk, sv in value[i - 1].items():
                            if isinstance(sv, dict) or isinstance(sv, list):
                                # Nested structures are JSON-encoded inline.
                                sv = salt.utils.json.dumps(sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = value[i - 1]
            else:
                real_parameters[key] = value

    # Calculate the string for Signature
    signature = _compute_signature(real_parameters, access_key_secret, 'GET', '/iaas/')
    real_parameters['signature'] = signature

    # print('parameters:')
    # pprint.pprint(real_parameters)

    # NOTE(review): verify=False disables TLS certificate validation and
    # exposes credentials to MITM attacks — consider making this
    # configurable and defaulting to True.
    request = requests.get(path, params=real_parameters, verify=False)

    # print('url:')
    # print(request.url)

    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                request.text
            )
        )

    log.debug(request.url)

    content = request.text
    result = salt.utils.json.loads(content)

    # print('response:')
    # pprint.pprint(result)

    # QingCloud signals application-level failures via ret_code.
    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('message', {}))
        )

    return result
def avail_locations(call=None):
    '''
    Return a dict of all available locations on the provider with
    relevant data.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-locations my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    items = query(params={'action': 'DescribeZones'})

    return {
        region['zone_id']: {
            key: six.text_type(region[key]) for key in region
        }
        for region in items['zone_set']
    }
def _get_location(vm_=None):
    '''
    Return the VM's location (zone). Used by create().

    Raises SaltCloudNotFound when no zone is configured or the configured
    zone does not exist on the provider.
    '''
    locations = avail_locations()

    vm_location = config.get_cloud_config_value(
        'zone', vm_, __opts__, search_global=False
    )
    # Check the raw value before stringifying: six.text_type(None) yields
    # the truthy string 'None' and would defeat this check.
    if not vm_location:
        raise SaltCloudNotFound('No location specified for this VM.')
    vm_location = six.text_type(vm_location)

    if vm_location in locations:
        return vm_location

    raise SaltCloudNotFound(
        'The specified location, \'{0}\', could not be found.'.format(
            vm_location
        )
    )
def _get_specified_zone(kwargs=None, provider=None):
if provider is None:
provider = get_configured_provider()
if isinstance(kwargs, dict):
zone = kwargs.get('zone', None)
if zone is not None:
return zone
zone = provider['zone']
return zone
def avail_images(kwargs=None, call=None):
    '''
    Return a list of the images that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-images my-qingcloud
        salt-cloud -f avail_images my-qingcloud zone=gd1
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    items = query(params={
        'action': 'DescribeImages',
        'provider': 'system',
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    })

    # Shallow-copy each image record, keyed by image id.
    return {image['image_id']: dict(image) for image in items['image_set']}
def _get_image(vm_):
    '''
    Return the VM's image. Used by create().

    Raises SaltCloudNotFound when no image is configured or the configured
    image does not exist on the provider.
    '''
    images = avail_images()

    vm_image = config.get_cloud_config_value(
        'image', vm_, __opts__, search_global=False
    )
    # Validate the raw value first: six.text_type(None) is the truthy
    # string 'None', which would slip past this check.
    if not vm_image:
        raise SaltCloudNotFound('No image specified for this VM.')
    vm_image = six.text_type(vm_image)

    if vm_image in images:
        return vm_image

    raise SaltCloudNotFound(
        'The specified image, \'{0}\', could not be found.'.format(vm_image)
    )
def show_image(kwargs, call=None):
    '''
    Show the details from QingCloud concerning one or more images.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_image my-qingcloud image=trustysrvx64c
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
        salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_images function must be called with '
            '-f or --function'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    # Fail with a clear message instead of a bare KeyError when the
    # required ``image`` argument is missing.
    if 'image' not in kwargs:
        raise SaltCloudSystemExit(
            'The show_image function requires an \'image\' argument, '
            'e.g. image=trustysrvx64c'
        )

    images = kwargs['image'].split(',')

    params = {
        'action': 'DescribeImages',
        'images': images,
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    }
    items = query(params=params)

    if not items['image_set']:
        raise SaltCloudNotFound('The specified image could not be found.')

    result = {}
    for image in items['image_set']:
        result[image['image_id']] = dict(image)
    return result
# QingCloud doesn't provide an API for getting instance sizes, so the
# known size tables are hard-coded here, keyed by zone id.
QINGCLOUD_SIZES = {
    'pek2': {
        'c1m1': {'cpu': 1, 'memory': '1G'},
        'c1m2': {'cpu': 1, 'memory': '2G'},
        'c1m4': {'cpu': 1, 'memory': '4G'},
        'c2m2': {'cpu': 2, 'memory': '2G'},
        'c2m4': {'cpu': 2, 'memory': '4G'},
        'c2m8': {'cpu': 2, 'memory': '8G'},
        'c4m4': {'cpu': 4, 'memory': '4G'},
        'c4m8': {'cpu': 4, 'memory': '8G'},
        'c4m16': {'cpu': 4, 'memory': '16G'},
    },
    'pek1': {
        'small_b': {'cpu': 1, 'memory': '1G'},
        'small_c': {'cpu': 1, 'memory': '2G'},
        'medium_a': {'cpu': 2, 'memory': '2G'},
        'medium_b': {'cpu': 2, 'memory': '4G'},
        'medium_c': {'cpu': 2, 'memory': '8G'},
        'large_a': {'cpu': 4, 'memory': '4G'},
        'large_b': {'cpu': 4, 'memory': '8G'},
        'large_c': {'cpu': 4, 'memory': '16G'},
    },
}
# NOTE: 'ap1' and 'gd1' alias the *same* dict object as 'pek2' — mutating
# one zone's table mutates all three.
QINGCLOUD_SIZES['ap1'] = QINGCLOUD_SIZES['pek2']
QINGCLOUD_SIZES['gd1'] = QINGCLOUD_SIZES['pek2']
def avail_sizes(kwargs=None, call=None):
    '''
    Return a list of the instance sizes that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud --list-sizes my-qingcloud
        salt-cloud -f avail_sizes my-qingcloud zone=pek2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    zone = _get_specified_zone(kwargs, get_configured_provider())

    # Copy each size entry so callers cannot mutate the module-level table.
    return {
        size_key: dict(attributes)
        for size_key, attributes in QINGCLOUD_SIZES[zone].items()
    }
def _get_size(vm_):
    '''
    Return the VM's size. Used by create().

    Raises SaltCloudNotFound when no size is configured or the configured
    size is not available in the zone.
    '''
    sizes = avail_sizes()

    vm_size = config.get_cloud_config_value(
        'size', vm_, __opts__, search_global=False
    )
    # Validate the raw value first: six.text_type(None) is the truthy
    # string 'None', which would slip past this check.
    if not vm_size:
        raise SaltCloudNotFound('No size specified for this instance.')
    vm_size = six.text_type(vm_size)

    if vm_size in sizes:
        return vm_size

    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    )
def _show_normalized_node(full_node):
'''
Normalize the QingCloud instance data. Used by list_nodes()-related
functions.
'''
public_ips = full_node.get('eip', [])
if public_ips:
public_ip = public_ips['eip_addr']
public_ips = [public_ip, ]
private_ips = []
for vxnet in full_node.get('vxnets', []):
private_ip = vxnet.get('private_ip', None)
if private_ip:
private_ips.append(private_ip)
normalized_node = {
'id': full_node['instance_id'],
'image': full_node['image']['image_id'],
'size': full_node['instance_type'],
'state': full_node['status'],
'private_ips': private_ips,
'public_ips': public_ips,
}
return normalized_node
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()
    params = {
        'action': 'DescribeInstances',
        'zone': zone,
        # Terminated/ceased instances are deliberately excluded.
        'status': ['pending', 'running', 'stopped', 'suspended'],
    }
    items = query(params=params)
    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    result = {}
    if items['total_count'] == 0:
        return result

    for node in items['instance_set']:
        # Merge the normalized salt-cloud fields into the raw API record.
        normalized_node = _show_normalized_node(node)
        node.update(normalized_node)
        result[node['instance_id']] = node

    # Strip any ':driver' suffix so the node cache is keyed by provider alias.
    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]

    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)

    return result
def list_nodes(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -Q my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    # Project each full node record onto the standard summary fields.
    fields = ('id', 'image', 'size', 'state', 'public_ips', 'private_ips')
    return {
        instance_id: {field: full_node[field] for field in fields}
        for instance_id, full_node in list_nodes_full().items()
    }
def list_nodes_min(call=None):
    '''
    Return a list of the instances that are on the provider. Only a list of
    instances names, and their state, is returned.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-qingcloud
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        instance_id: {
            'name': full_node['instance_name'],
            'status': full_node['status'],
        }
        for instance_id, full_node in list_nodes_full().items()
    }
def list_nodes_select(call=None):
    '''
    Return a list of the instances that are on the provider, with selected
    fields.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -S my-qingcloud
    '''
    full_nodes = list_nodes_full('function')
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(full_nodes, selection, call)
def show_instance(instance_id, call=None, kwargs=None):
    '''
    Show the details from QingCloud concerning an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a show_instance i-2f733r5n
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    params = {
        'action': 'DescribeInstances',
        'instances.1': instance_id,
        # Pass the caller's kwargs through so an explicit zone=... wins;
        # the old code hard-coded kwargs=None and silently ignored it.
        'zone': _get_specified_zone(kwargs, get_configured_provider()),
    }
    items = query(params=params)

    if items['total_count'] == 0:
        raise SaltCloudNotFound(
            'The specified instance, \'{0}\', could not be found.'.format(instance_id)
        )

    # Merge the normalized salt-cloud fields into the raw API record.
    full_node = items['instance_set'][0]
    full_node.update(_show_normalized_node(full_node))

    return full_node
def _query_node_data(instance_id):
    '''
    Poll helper for salt.utils.cloud.wait_for_ip(): return the instance
    data once a private IP is assigned, False when no data came back,
    and None (keep waiting) otherwise.
    '''
    data = show_instance(instance_id, call='action')
    if not data:
        return False
    return data if data.get('private_ips') else None
def create(vm_):
    '''
    Create a single instance from a data dict.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'qingcloud',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])

    # params for the RunInstances API call; zone/size/image are validated
    # against the provider by the _get_* helpers, which raise
    # SaltCloudNotFound on bad values.
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params, list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    result = query(params)
    new_instance_id = result['instances'][0]

    # Block until the instance reports a private IP (or time out).
    try:
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id,),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    # Bootstrap over the private IP.
    private_ip = data['private_ips'][0]
    log.debug('VM %s is now running', private_ip)
    vm_['ssh_host'] = private_ip

    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return data
def script(vm_):
    '''
    Build and return the deployment script object for the given VM profile.
    '''
    # Render the minion configuration to YAML first, then hand it to the
    # OS-specific deploy-script builder together with the profile options.
    minion_yaml = salt.utils.cloud.salt_config_to_yaml(
        salt.utils.cloud.minion_config(__opts__, vm_)
    )
    return salt.utils.cloud.os_script(
        config.get_cloud_config_value('script', vm_, __opts__),
        vm_,
        __opts__,
        minion_yaml,
    )
def start(instance_id, call=None):
    '''
    Start an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a start i-2f733r5n
    '''
    if call != 'action':
        # BUG FIX: the message previously said "stop action" (copy-paste
        # from stop()); this is the start action.
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    log.info('Starting instance %s', instance_id)

    # Build the QingCloud API request; the zone comes from the configured
    # provider rather than from the caller.
    params = {
        'action': 'StartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
def stop(instance_id, force=False, call=None):
    '''
    Stop an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a stop i-2f733r5n
        salt-cloud -a stop i-2f733r5n force=True
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    log.info('Stopping instance %s', instance_id)

    # ``force`` is sent to the API as 0/1.
    request = {
        'action': 'StopInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
        'force': int(force),
    }
    return query(request)
def reboot(instance_id, call=None):
    '''
    Reboot an instance.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a reboot i-2f733r5n
    '''
    if call != 'action':
        # BUG FIX: the message previously said "stop action" (copy-paste
        # from stop()); this is the reboot action.
        raise SaltCloudSystemExit(
            'The reboot action must be called with -a or --action.'
        )

    log.info('Rebooting instance %s', instance_id)

    params = {
        'action': 'RestartInstances',
        'zone': _get_specified_zone(provider=get_configured_provider()),
        'instances.1': instance_id,
    }
    result = query(params)

    return result
|
saltstack/salt
|
salt/states/grafana4_org.py
|
present
|
python
|
def present(name,
            users=None,
            theme=None,
            home_dashboard_id=None,
            timezone=None,
            address1=None,
            address2=None,
            city=None,
            zip_code=None,
            address_state=None,
            country=None,
            profile='grafana'):
    '''
    Ensure that an organization is present.

    name
        Name of the org.

    users
        Optional - Dict of user/role associated with the org. Mapping a
        username to ``False`` removes that user from the org. Example:

        .. code-block:: yaml

            users:
              foo: Viewer
              bar: Editor

    theme
        Optional - Selected theme for the org.

    home_dashboard_id
        Optional - Home dashboard for the org.

    timezone
        Optional - Timezone for the org (one of: "browser", "utc", or "").

    address1
        Optional - address1 of the org.

    address2
        Optional - address2 of the org.

    city
        Optional - city of the org.

    zip_code
        Optional - zip_code of the org.

    address_state
        Optional - state of the org.

    country
        Optional - country of the org.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    '''
    if isinstance(profile, string_types):
        # Resolve a named profile from the minion/master configuration.
        profile = __salt__['config.option'](profile)
    ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
    create = False
    # A 404 from the Grafana API means the org does not exist yet; any
    # other HTTP error is unexpected and re-raised.
    try:
        org = __salt__['grafana4.get_org'](name, profile)
    except HTTPError as e:
        if e.response.status_code == 404:
            create = True
        else:
            raise
    if create:
        if __opts__['test']:
            ret['comment'] = 'Org {0} will be created'.format(name)
            return ret
        __salt__['grafana4.create_org'](profile=profile, name=name)
        # Re-fetch so the remaining sync steps diff against live data.
        org = __salt__['grafana4.get_org'](name, profile)
        ret['changes'] = org
        ret['comment'] = 'New org {0} added'.format(name)
    # Sync the org address. Unspecified (None) fields keep their current
    # server-side values via the defaults mechanism in _get_json_data.
    data = _get_json_data(address1=address1, address2=address2,
        city=city, zipCode=zip_code, state=address_state, country=country,
        defaults=org['address'])
    if data != org['address']:
        if __opts__['test']:
            ret['comment'] = 'Org {0} address will be updated'.format(name)
            return ret
        __salt__['grafana4.update_org_address'](name, profile=profile, **data)
        if create:
            dictupdate.update(ret['changes']['address'], data)
        else:
            dictupdate.update(ret['changes'], deep_diff(org['address'], data))
    # Sync org preferences (theme, home dashboard, timezone).
    prefs = __salt__['grafana4.get_org_prefs'](name, profile=profile)
    data = _get_json_data(theme=theme, homeDashboardId=home_dashboard_id,
        timezone=timezone, defaults=prefs)
    if data != prefs:
        if __opts__['test']:
            ret['comment'] = 'Org {0} prefs will be updated'.format(name)
            return ret
        __salt__['grafana4.update_org_prefs'](name, profile=profile, **data)
        if create:
            dictupdate.update(ret['changes'], data)
        else:
            dictupdate.update(ret['changes'], deep_diff(prefs, data))
    if users:
        # Current org membership, keyed by login name.
        db_users = {}
        for item in __salt__['grafana4.get_org_users'](name, profile=profile):
            db_users[item['login']] = {
                'userId': item['userId'],
                'role': item['role'],
            }
        for username, role in users.items():
            if username in db_users:
                if role is False:
                    # role False requests removal of the user from the org.
                    if __opts__['test']:
                        ret['comment'] = 'Org {0} user {1} will be ' \
                            'deleted'.format(name, username)
                        return ret
                    __salt__['grafana4.delete_org_user'](
                        db_users[username]['userId'], profile=profile)
                elif role != db_users[username]['role']:
                    if __opts__['test']:
                        ret['comment'] = 'Org {0} user {1} role will be ' \
                            'updated'.format(name, username)
                        return ret
                    __salt__['grafana4.update_org_user'](
                        db_users[username]['userId'], loginOrEmail=username,
                        role=role, profile=profile)
            elif role:
                if __opts__['test']:
                    ret['comment'] = 'Org {0} user {1} will be created'.format(
                        name, username)
                    return ret
                __salt__['grafana4.create_org_user'](
                    loginOrEmail=username, role=role, profile=profile)
        # Re-read membership and record the resulting diff in changes.
        new_db_users = {}
        for item in __salt__['grafana4.get_org_users'](name, profile=profile):
            new_db_users[item['login']] = {
                'userId': item['userId'],
                'role': item['role'],
            }
        if create:
            dictupdate.update(ret['changes'], new_db_users)
        else:
            dictupdate.update(ret['changes'], deep_diff(db_users, new_db_users))
    ret['result'] = True
    if not create:
        if ret['changes']:
            ret['comment'] = 'Org {0} updated'.format(name)
        else:
            ret['changes'] = {}
            ret['comment'] = 'Org {0} already up-to-date'.format(name)
    return ret
|
Ensure that an organization is present.
name
Name of the org.
users
Optional - Dict of user/role associated with the org. Example:
.. code-block:: yaml
users:
foo: Viewer
bar: Editor
theme
Optional - Selected theme for the org.
home_dashboard_id
Optional - Home dashboard for the org.
timezone
Optional - Timezone for the org (one of: "browser", "utc", or "").
address1
Optional - address1 of the org.
address2
Optional - address2 of the org.
city
Optional - city of the org.
zip_code
Optional - zip_code of the org.
address_state
Optional - state of the org.
country
Optional - country of the org.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/grafana4_org.py#L60-L217
|
[
"def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n",
"def deep_diff(old, new, ignore=None):\n ignore = ignore or []\n res = {}\n old = copy.deepcopy(old)\n new = copy.deepcopy(new)\n stack = [(old, new, False)]\n\n while stack:\n tmps = []\n tmp_old, tmp_new, reentrant = stack.pop()\n for key in set(list(tmp_old) + list(tmp_new)):\n if key in tmp_old and key in tmp_new \\\n and tmp_old[key] == tmp_new[key]:\n del tmp_old[key]\n del tmp_new[key]\n continue\n if not reentrant:\n if key in tmp_old and key in ignore:\n del tmp_old[key]\n if key in tmp_new and key in ignore:\n del tmp_new[key]\n if isinstance(tmp_old.get(key), Mapping) \\\n and isinstance(tmp_new.get(key), Mapping):\n tmps.append((tmp_old[key], tmp_new[key], False))\n if tmps:\n stack.extend([(tmp_old, tmp_new, True)] + tmps)\n if old:\n res['old'] = old\n if new:\n res['new'] = new\n return res\n",
"def _get_json_data(defaults=None, **kwargs):\n if defaults is None:\n defaults = {}\n for k, v in kwargs.items():\n if v is None:\n kwargs[k] = defaults.get(k)\n return kwargs\n"
] |
# -*- coding: utf-8 -*-
'''
Manage Grafana v4.0 orgs
.. versionadded:: 2017.7.0
:configuration: This state requires a configuration profile to be configured
in the minion config, minion pillar, or master config. The module will use
the 'grafana' key by default, if defined.
Example configuration using basic authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_user: admin
grafana_password: admin
grafana_timeout: 3
Example configuration using token based authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_token: token
grafana_timeout: 3
.. code-block:: yaml
Ensure foobar org is present:
grafana4_org.present:
- name: foobar
- theme: ""
- home_dashboard_id: 0
- timezone: "utc"
- address1: ""
- address2: ""
- city: ""
- zip_code: ""
- state: ""
- country: ""
'''
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.dictupdate as dictupdate
from salt.utils.dictdiffer import deep_diff
from requests.exceptions import HTTPError
# Import 3rd-party libs
from salt.ext.six import string_types
def __virtual__():
    '''Only load this state module if the grafana4 execution module is available.'''
    return 'grafana4.get_org' in __salt__
def absent(name, profile='grafana'):
    '''
    Ensure that the named org is absent.

    name
        Name of the org to remove.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    '''
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)
    ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}

    existing = __salt__['grafana4.get_org'](name, profile)
    if not existing:
        # Already gone -- nothing to do.
        ret['result'] = True
        ret['comment'] = 'Org {0} already absent'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Org {0} will be deleted'.format(name)
        return ret

    __salt__['grafana4.delete_org'](existing['id'], profile=profile)
    ret['result'] = True
    ret['changes'][name] = 'Absent'
    ret['comment'] = 'Org {0} was deleted'.format(name)
    return ret
def _get_json_data(defaults=None, **kwargs):
if defaults is None:
defaults = {}
for k, v in kwargs.items():
if v is None:
kwargs[k] = defaults.get(k)
return kwargs
|
saltstack/salt
|
salt/states/grafana4_org.py
|
absent
|
python
|
def absent(name, profile='grafana'):
'''
Ensure that an org is absent.
name
Name of the org to remove.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
org = __salt__['grafana4.get_org'](name, profile)
if not org:
ret['result'] = True
ret['comment'] = 'Org {0} already absent'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Org {0} will be deleted'.format(name)
return ret
__salt__['grafana4.delete_org'](org['id'], profile=profile)
ret['result'] = True
ret['changes'][name] = 'Absent'
ret['comment'] = 'Org {0} was deleted'.format(name)
return ret
|
Ensure that an org is absent.
name
Name of the org to remove.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/grafana4_org.py#L220-L251
| null |
# -*- coding: utf-8 -*-
'''
Manage Grafana v4.0 orgs
.. versionadded:: 2017.7.0
:configuration: This state requires a configuration profile to be configured
in the minion config, minion pillar, or master config. The module will use
the 'grafana' key by default, if defined.
Example configuration using basic authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_user: admin
grafana_password: admin
grafana_timeout: 3
Example configuration using token based authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_token: token
grafana_timeout: 3
.. code-block:: yaml
Ensure foobar org is present:
grafana4_org.present:
- name: foobar
- theme: ""
- home_dashboard_id: 0
- timezone: "utc"
- address1: ""
- address2: ""
- city: ""
- zip_code: ""
- state: ""
- country: ""
'''
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.dictupdate as dictupdate
from salt.utils.dictdiffer import deep_diff
from requests.exceptions import HTTPError
# Import 3rd-party libs
from salt.ext.six import string_types
def __virtual__():
'''Only load if grafana4 module is available'''
return 'grafana4.get_org' in __salt__
def present(name,
users=None,
theme=None,
home_dashboard_id=None,
timezone=None,
address1=None,
address2=None,
city=None,
zip_code=None,
address_state=None,
country=None,
profile='grafana'):
'''
Ensure that an organization is present.
name
Name of the org.
users
Optional - Dict of user/role associated with the org. Example:
.. code-block:: yaml
users:
foo: Viewer
bar: Editor
theme
Optional - Selected theme for the org.
home_dashboard_id
Optional - Home dashboard for the org.
timezone
Optional - Timezone for the org (one of: "browser", "utc", or "").
address1
Optional - address1 of the org.
address2
Optional - address2 of the org.
city
Optional - city of the org.
zip_code
Optional - zip_code of the org.
address_state
Optional - state of the org.
country
Optional - country of the org.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
create = False
try:
org = __salt__['grafana4.get_org'](name, profile)
except HTTPError as e:
if e.response.status_code == 404:
create = True
else:
raise
if create:
if __opts__['test']:
ret['comment'] = 'Org {0} will be created'.format(name)
return ret
__salt__['grafana4.create_org'](profile=profile, name=name)
org = __salt__['grafana4.get_org'](name, profile)
ret['changes'] = org
ret['comment'] = 'New org {0} added'.format(name)
data = _get_json_data(address1=address1, address2=address2,
city=city, zipCode=zip_code, state=address_state, country=country,
defaults=org['address'])
if data != org['address']:
if __opts__['test']:
ret['comment'] = 'Org {0} address will be updated'.format(name)
return ret
__salt__['grafana4.update_org_address'](name, profile=profile, **data)
if create:
dictupdate.update(ret['changes']['address'], data)
else:
dictupdate.update(ret['changes'], deep_diff(org['address'], data))
prefs = __salt__['grafana4.get_org_prefs'](name, profile=profile)
data = _get_json_data(theme=theme, homeDashboardId=home_dashboard_id,
timezone=timezone, defaults=prefs)
if data != prefs:
if __opts__['test']:
ret['comment'] = 'Org {0} prefs will be updated'.format(name)
return ret
__salt__['grafana4.update_org_prefs'](name, profile=profile, **data)
if create:
dictupdate.update(ret['changes'], data)
else:
dictupdate.update(ret['changes'], deep_diff(prefs, data))
if users:
db_users = {}
for item in __salt__['grafana4.get_org_users'](name, profile=profile):
db_users[item['login']] = {
'userId': item['userId'],
'role': item['role'],
}
for username, role in users.items():
if username in db_users:
if role is False:
if __opts__['test']:
ret['comment'] = 'Org {0} user {1} will be ' \
'deleted'.format(name, username)
return ret
__salt__['grafana4.delete_org_user'](
db_users[username]['userId'], profile=profile)
elif role != db_users[username]['role']:
if __opts__['test']:
ret['comment'] = 'Org {0} user {1} role will be ' \
'updated'.format(name, username)
return ret
__salt__['grafana4.update_org_user'](
db_users[username]['userId'], loginOrEmail=username,
role=role, profile=profile)
elif role:
if __opts__['test']:
ret['comment'] = 'Org {0} user {1} will be created'.format(
name, username)
return ret
__salt__['grafana4.create_org_user'](
loginOrEmail=username, role=role, profile=profile)
new_db_users = {}
for item in __salt__['grafana4.get_org_users'](name, profile=profile):
new_db_users[item['login']] = {
'userId': item['userId'],
'role': item['role'],
}
if create:
dictupdate.update(ret['changes'], new_db_users)
else:
dictupdate.update(ret['changes'], deep_diff(db_users, new_db_users))
ret['result'] = True
if not create:
if ret['changes']:
ret['comment'] = 'Org {0} updated'.format(name)
else:
ret['changes'] = {}
ret['comment'] = 'Org {0} already up-to-date'.format(name)
return ret
def _get_json_data(defaults=None, **kwargs):
if defaults is None:
defaults = {}
for k, v in kwargs.items():
if v is None:
kwargs[k] = defaults.get(k)
return kwargs
|
saltstack/salt
|
salt/states/ansiblegate.py
|
_changes
|
python
|
def _changes(plays):
    '''
    Extract the per-play, per-task, per-host change data from an ansible
    playbook return structure, keeping only hosts that actually changed.
    '''
    changes = {}
    for play in plays['plays']:
        play_diff = {}
        for task in play['tasks']:
            changed_hosts = {
                host: info.get('diff', info.get('changes', {}))
                for host, info in task['hosts'].items()
                if info['changed'] is True
            }
            if changed_hosts:
                play_diff[task['task']['name']] = changed_hosts
        if play_diff:
            changes[play['play']['name']] = play_diff
    return changes
|
Find changes in ansible return data
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ansiblegate.py#L114-L130
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n"
] |
# -*- coding: utf-8 -*-
#
# Author: Bo Maryniuk <bo@suse.de>
#
# Copyright 2017 SUSE LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r'''
Execution of Ansible modules from within states
===============================================
With `ansible.call` these states allow individual Ansible module calls to be
made via states. To call an Ansible module function use a :mod:`module.run <salt.states.ansible.call>`
state:
.. code-block:: yaml
some_set_of_tasks:
ansible:
- system.ping
- packaging.os.zypper
- name: emacs
- state: installed
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import sys
# Import salt modules
import salt.fileclient
import salt.ext.six as six
from salt.utils.decorators import depends
import salt.utils.decorators.path
log = logging.getLogger(__name__)
__virtualname__ = 'ansible'
@depends('ansible')
class AnsibleState(object):
    '''
    Ansible state caller.

    Instances are callable: ``__call__`` dispatches each keyword entry
    to the matching ``ansible.<module>`` execution function.
    '''
    def get_args(self, argset):
        '''
        Split an argset into positional args and keyword args.

        :param argset: iterable of items (or None); dict items are merged
            into the kwargs, everything else is appended to the args.
        :return: tuple of (args list, kwargs dict)
        '''
        args = []
        kwargs = {}
        for element in argset or []:
            if isinstance(element, dict):
                kwargs.update(element)
            else:
                args.append(element)
        return args, kwargs

    def __call__(self, **kwargs):
        '''
        Call one or more Ansible modules and collect their outputs.

        :return: standard state return dict; ``result`` flips to False as
            soon as any module call raises.
        '''
        ret = {
            'name': kwargs.pop('name'),
            'changes': {},
            'comment': '',
            'result': True,
        }
        for mod_name, mod_params in kwargs.items():
            # NOTE: this rebinds the local name ``kwargs``; the for loop
            # keeps iterating the view obtained from the original dict.
            args, kwargs = self.get_args(mod_params)
            try:
                ans_mod_out = __salt__['ansible.{0}'.format(mod_name)](**{'__pub_arg': [args, kwargs]})
            except Exception as err:
                # Record the failure message as the module output rather
                # than aborting the whole state run.
                ans_mod_out = 'Module "{0}" failed. Error message: ({1}) {2}'.format(
                    mod_name, err.__class__.__name__, err)
                ret['result'] = False
            ret['changes'][mod_name] = ans_mod_out
        return ret
def __virtual__():
    '''
    Disable, if Ansible is not available around on the Minion.
    '''
    # Expose ``ansible.call`` lazily: the state function instantiates
    # AnsibleState, whose @depends decorator gates on ansible availability.
    setattr(sys.modules[__name__], 'call', lambda **kwargs: AnsibleState()(**kwargs))  # pylint: disable=W0108
    return __virtualname__
def _client():
    '''
    Get a fileclient built from the current (master/minion) opts.
    '''
    return salt.fileclient.get_file_client(__opts__)
@salt.utils.decorators.path.which('ansible-playbook')
def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=None):
    '''
    Run Ansible Playbooks

    :param name: path to playbook. This can be relative to rundir or the git repo
    :param rundir: location to run ansible-playbook from.
    :param git_repo: git repository to clone for ansible playbooks. This is cloned
                     using the `git.latest` state, and is cloned to the `rundir`
                     if specified, otherwise it is cloned to the `cache_dir`
    :param git_kwargs: extra kwargs to pass to `git.latest` state module besides
                       the `name` and `target`
    :param ansible_kwargs: extra kwargs to pass to `ansible.playbooks` execution
                           module besides the `name` and `target`
    :return: Ansible playbook output.

    .. code-block:: yaml

        run nginx install:
          ansible.playbooks:
            - name: install.yml
            - git_repo: git://github.com/gituser/playbook.git
            - git_kwargs:
                rev: master
    '''
    ret = {
        'result': False,
        'changes': {},
        'comment': 'Running playbook {0}'.format(name),
        'name': name,
    }
    if git_repo:
        # Without a usable rundir, clone into the file client's external
        # cache path and run from there.
        if not isinstance(rundir, six.text_type) or not os.path.isdir(rundir):
            rundir = _client()._extrn_path(git_repo, 'base')
            log.trace('rundir set to %s', rundir)
        if not isinstance(git_kwargs, dict):
            log.debug('Setting git_kwargs to empty dict: %s', git_kwargs)
            git_kwargs = {}
        __states__['git.latest'](
            name=git_repo,
            target=rundir,
            **git_kwargs
        )
    if not isinstance(ansible_kwargs, dict):
        log.debug('Setting ansible_kwargs to empty dict: %s', ansible_kwargs)
        ansible_kwargs = {}
    # Dry run first (check mode) to find out whether anything would change.
    checks = __salt__['ansible.playbooks'](name, rundir=rundir, check=True, diff=True, **ansible_kwargs)
    if all(not check['changed'] for check in six.itervalues(checks['stats'])):
        ret['comment'] = 'No changes to be made from playbook {0}'.format(name)
        ret['result'] = True
    elif __opts__['test']:
        ret['comment'] = 'Changes will be made from playbook {0}'.format(name)
        ret['result'] = None
        ret['changes'] = _changes(checks)
    else:
        results = __salt__['ansible.playbooks'](name, rundir=rundir, diff=True, **ansible_kwargs)
        ret['comment'] = 'Changes were made by playbook {0}'.format(name)
        ret['changes'] = _changes(results)
        # BUG FIX: success must be judged from the real run's stats
        # ('results'), not from the preceding dry run ('checks').
        ret['result'] = all(
            not check['failures'] and not check['unreachable']
            for check in six.itervalues(results['stats'])
        )
    return ret
|
saltstack/salt
|
salt/states/ansiblegate.py
|
playbooks
|
python
|
def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=None):
'''
Run Ansible Playbooks
:param name: path to playbook. This can be relative to rundir or the git repo
:param rundir: location to run ansible-playbook from.
:param git_repo: git repository to clone for ansible playbooks. This is cloned
using the `git.latest` state, and is cloned to the `rundir`
if specified, otherwise it is clone to the `cache_dir`
:param git_kwargs: extra kwargs to pass to `git.latest` state module besides
the `name` and `target`
:param ansible_kwargs: extra kwargs to pass to `ansible.playbooks` execution
module besides the `name` and `target`
:return: Ansible playbook output.
.. code-block:: yaml
run nginx install:
ansible.playbooks:
- name: install.yml
- git_repo: git://github.com/gituser/playbook.git
- git_kwargs:
rev: master
'''
ret = {
'result': False,
'changes': {},
'comment': 'Running playbook {0}'.format(name),
'name': name,
}
if git_repo:
if not isinstance(rundir, six.text_type) or not os.path.isdir(rundir):
rundir = _client()._extrn_path(git_repo, 'base')
log.trace('rundir set to %s', rundir)
if not isinstance(git_kwargs, dict):
log.debug('Setting git_kwargs to empty dict: %s', git_kwargs)
git_kwargs = {}
__states__['git.latest'](
name=git_repo,
target=rundir,
**git_kwargs
)
if not isinstance(ansible_kwargs, dict):
log.debug('Setting ansible_kwargs to empty dict: %s', ansible_kwargs)
ansible_kwargs = {}
checks = __salt__['ansible.playbooks'](name, rundir=rundir, check=True, diff=True, **ansible_kwargs)
if all(not check['changed'] for check in six.itervalues(checks['stats'])):
ret['comment'] = 'No changes to be made from playbook {0}'.format(name)
ret['result'] = True
elif __opts__['test']:
ret['comment'] = 'Changes will be made from playbook {0}'.format(name)
ret['result'] = None
ret['changes'] = _changes(checks)
else:
results = __salt__['ansible.playbooks'](name, rundir=rundir, diff=True, **ansible_kwargs)
ret['comment'] = 'Changes were made by playbook {0}'.format(name)
ret['changes'] = _changes(results)
ret['result'] = all(
not check['failures'] and not check['unreachable']
for check in six.itervalues(checks['stats'])
)
return ret
|
Run Ansible Playbooks
:param name: path to playbook. This can be relative to rundir or the git repo
:param rundir: location to run ansible-playbook from.
:param git_repo: git repository to clone for ansible playbooks. This is cloned
using the `git.latest` state, and is cloned to the `rundir`
if specified, otherwise it is cloned to the `cache_dir`
:param git_kwargs: extra kwargs to pass to `git.latest` state module besides
the `name` and `target`
:param ansible_kwargs: extra kwargs to pass to `ansible.playbooks` execution
module besides the `name` and `target`
:return: Ansible playbook output.
.. code-block:: yaml
run nginx install:
ansible.playbooks:
- name: install.yml
- git_repo: git://github.com/gituser/playbook.git
- git_kwargs:
rev: master
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ansiblegate.py#L134-L196
|
[
"def itervalues(d, **kw):\n return d.itervalues(**kw)\n",
"def _client():\n '''\n Get a fileclient\n '''\n return salt.fileclient.get_file_client(__opts__)\n",
"def _changes(plays):\n '''\n Find changes in ansible return data\n '''\n changes = {}\n for play in plays['plays']:\n task_changes = {}\n for task in play['tasks']:\n host_changes = {}\n for host, data in six.iteritems(task['hosts']):\n if data['changed'] is True:\n host_changes[host] = data.get('diff', data.get('changes', {}))\n if host_changes:\n task_changes[task['task']['name']] = host_changes\n if task_changes:\n changes[play['play']['name']] = task_changes\n return changes\n"
] |
# -*- coding: utf-8 -*-
#
# Author: Bo Maryniuk <bo@suse.de>
#
# Copyright 2017 SUSE LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r'''
Execution of Ansible modules from within states
===============================================
With `ansible.call` these states allow individual Ansible module calls to be
made via states. To call an Ansible module function use a :mod:`module.run <salt.states.ansible.call>`
state:
.. code-block:: yaml
some_set_of_tasks:
ansible:
- system.ping
- packaging.os.zypper
- name: emacs
- state: installed
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import sys
# Import salt modules
import salt.fileclient
import salt.ext.six as six
from salt.utils.decorators import depends
import salt.utils.decorators.path
log = logging.getLogger(__name__)
__virtualname__ = 'ansible'
@depends('ansible')
class AnsibleState(object):
'''
Ansible state caller.
'''
def get_args(self, argset):
'''
Get args and kwargs from the argset.
:param argset:
:return:
'''
args = []
kwargs = {}
for element in argset or []:
if isinstance(element, dict):
kwargs.update(element)
else:
args.append(element)
return args, kwargs
def __call__(self, **kwargs):
'''
Call Ansible module.
:return:
'''
ret = {
'name': kwargs.pop('name'),
'changes': {},
'comment': '',
'result': True,
}
for mod_name, mod_params in kwargs.items():
args, kwargs = self.get_args(mod_params)
try:
ans_mod_out = __salt__['ansible.{0}'.format(mod_name)](**{'__pub_arg': [args, kwargs]})
except Exception as err:
ans_mod_out = 'Module "{0}" failed. Error message: ({1}) {2}'.format(
mod_name, err.__class__.__name__, err)
ret['result'] = False
ret['changes'][mod_name] = ans_mod_out
return ret
def __virtual__():
'''
Disable, if Ansible is not available around on the Minion.
'''
setattr(sys.modules[__name__], 'call', lambda **kwargs: AnsibleState()(**kwargs)) # pylint: disable=W0108
return __virtualname__
def _client():
'''
Get a fileclient
'''
return salt.fileclient.get_file_client(__opts__)
def _changes(plays):
'''
Find changes in ansible return data
'''
changes = {}
for play in plays['plays']:
task_changes = {}
for task in play['tasks']:
host_changes = {}
for host, data in six.iteritems(task['hosts']):
if data['changed'] is True:
host_changes[host] = data.get('diff', data.get('changes', {}))
if host_changes:
task_changes[task['task']['name']] = host_changes
if task_changes:
changes[play['play']['name']] = task_changes
return changes
@salt.utils.decorators.path.which('ansible-playbook')
|
saltstack/salt
|
salt/states/netsnmp.py
|
_clear_community_details
|
python
|
def _clear_community_details(community_details):
    '''
    Normalize the community details in place: keep ``acl`` and ``mode``
    only when they are non-empty strings, map long-form modes (e.g.
    ``read-only``) to their short form, and default the mode to ``ro``.
    '''
    for key in ['acl', 'mode']:
        _str_elem(community_details, key)

    # BUG FIX: the original did ``community_details.get['mode'] = ...``,
    # subscripting the bound method (TypeError). Also, after _str_elem
    # drops an invalid value, 'mode' may be missing and ``.get('mode')``
    # returns None, so ``.lower()`` would raise -- use a '' default.
    _mode = community_details.get('mode', '').lower()
    community_details['mode'] = _mode

    if _mode in _COMMUNITY_MODE_MAP:
        community_details['mode'] = _COMMUNITY_MODE_MAP[_mode]

    if community_details['mode'] not in ['ro', 'rw']:
        community_details['mode'] = 'ro'  # default is read-only

    return community_details
|
Clears community details.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netsnmp.py#L113-L130
| null |
# -*- coding: utf-8 -*-
'''
Network SNMP
============
Manage the SNMP configuration on network devices.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm snmp management module (salt.modules.napalm_snmp) <salt.modules.napalm_snmp>`
.. versionadded: 2016.11.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# salt lib
from salt.utils.json import loads, dumps
from salt.ext import six
# import NAPALM utils
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netsnmp'
_COMMUNITY_MODE_MAP = {
'read-only': 'ro',
'readonly': 'ro',
'read-write': 'rw',
'write': 'rw'
}
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
'''
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _ordered_dict_to_dict(config):
'''
Forced the datatype to dict, in case OrderedDict is used.
'''
return loads(dumps(config))
def _expand_config(config, defaults):
'''
Completed the values of the expected config for the edge cases with the default values.
'''
defaults.update(config)
return defaults
def _valid_dict(dic):
'''
Valid dictionary?
'''
return isinstance(dic, dict) and len(dic) > 0
def _valid_str(value):
'''
Valid str?
'''
return isinstance(value, six.string_types) and len(value) > 0
def _community_defaults():
'''
Returns the default values of a community.
'''
return {
'mode': 'ro'
}
def _str_elem(config, key):
'''
Re-adds the value of a specific key in the dict, only in case of valid str value.
'''
_value = config.pop(key, '')
if _valid_str(_value):
config[key] = _value
def _check_config(config):
'''
Checks the desired config and clears interesting details.
'''
if not _valid_dict(config):
return True, ''
_community = config.get('community')
_community_tmp = {}
if not _community:
return False, 'Must specify at least a community.'
if _valid_str(_community):
_community_tmp[_community] = _community_defaults()
elif isinstance(_community, list):
# if the user specifies the communities as list
for _comm in _community:
if _valid_str(_comm):
# list of values
_community_tmp[_comm] = _community_defaults()
# default mode is read-only
if _valid_dict(_comm):
# list of dicts
for _comm_name, _comm_details in six.iteritems(_comm):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
elif _valid_dict(_community):
# directly as dict of communities
# recommended way...
for _comm_name, _comm_details in six.iteritems(_community):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
else:
return False, 'Please specify a community or a list of communities.'
if not _valid_dict(_community_tmp):
return False, 'Please specify at least a valid community!'
config['community'] = _community_tmp
for key in ['location', 'contact', 'chassis_id']:
# not mandatory, but should be here only if valid
_str_elem(config, key)
return True, ''
def _retrieve_device_config():
'''
Retrieves the SNMP config from the device.
'''
return __salt__['snmp.config']()
def _create_diff_action(diff, diff_key, key, value):
'''
DRY to build diff parts (added, removed, updated).
'''
if diff_key not in diff.keys():
diff[diff_key] = {}
diff[diff_key][key] = value
def _create_diff(diff, fun, key, prev, curr):
'''
Builds the diff dictionary.
'''
if not fun(prev):
_create_diff_action(diff, 'added', key, curr)
elif fun(prev) and not fun(curr):
_create_diff_action(diff, 'removed', key, prev)
elif not fun(curr):
_create_diff_action(diff, 'updated', key, curr)
def _compute_diff(existing, expected):
'''
Computes the differences between the existing and the expected SNMP config.
'''
diff = {}
for key in ['location', 'contact', 'chassis_id']:
if existing.get(key) != expected.get(key):
_create_diff(diff,
_valid_str,
key,
existing.get(key),
expected.get(key))
for key in ['community']: # for the moment only onen
if existing.get(key) != expected.get(key):
_create_diff(diff,
_valid_dict,
key,
existing.get(key),
expected.get(key))
return diff
def _configure(changes):
'''
Calls the configuration template to apply the configuration changes on the device.
'''
cfgred = True
reasons = []
fun = 'update_config'
for key in ['added', 'updated', 'removed']:
_updated_changes = changes.get(key, {})
if not _updated_changes:
continue
_location = _updated_changes.get('location', '')
_contact = _updated_changes.get('contact', '')
_community = _updated_changes.get('community', {})
_chassis_id = _updated_changes.get('chassis_id', '')
if key == 'removed':
fun = 'remove_config'
_ret = __salt__['snmp.{fun}'.format(fun=fun)](location=_location,
contact=_contact,
community=_community,
chassis_id=_chassis_id,
commit=False)
cfgred = cfgred and _ret.get('result')
if not _ret.get('result') and _ret.get('comment'):
reasons.append(_ret.get('comment'))
return {
'result': cfgred,
'comment': '\n'.join(reasons) if reasons else ''
}
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, config=None, defaults=None):
'''
Configures the SNMP on the device as specified in the SLS file.
SLS Example:
.. code-block:: yaml
snmp_example:
netsnmp.managed:
- config:
location: Honolulu, HI, US
- defaults:
contact: noc@cloudflare.com
Output example (for the SLS above, e.g. called snmp.sls under /router/):
.. code-block:: bash
$ sudo salt edge01.hnl01 state.sls router.snmp test=True
edge01.hnl01:
----------
ID: snmp_example
Function: snmp.managed
Result: None
Comment: Testing mode: configuration was not changed!
Started: 13:29:06.872363
Duration: 920.466 ms
Changes:
----------
added:
----------
chassis_id:
None
contact:
noc@cloudflare.com
location:
Honolulu, HI, US
Summary for edge01.hnl01
------------
Succeeded: 1 (unchanged=1, changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 920.466 ms
'''
result = False
comment = ''
changes = {}
ret = {
'name': name,
'changes': changes,
'result': result,
'comment': comment
}
# make sure we're working only with dict
config = _ordered_dict_to_dict(config)
defaults = _ordered_dict_to_dict(defaults)
expected_config = _expand_config(config, defaults)
if not isinstance(expected_config, dict):
ret['comment'] = 'User provided an empty SNMP config!'
return ret
valid, message = _check_config(expected_config)
if not valid: # check and clean
ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
return ret
# ----- Retrieve existing users configuration and determine differences ------------------------------------------->
_device_config = _retrieve_device_config()
if not _device_config.get('result'):
ret['comment'] = 'Cannot retrieve SNMP config from the device: {reason}'.format(
reason=_device_config.get('comment')
)
return ret
device_config = _device_config.get('out', {})
if device_config == expected_config:
ret.update({
'comment': 'SNMP already configured as needed.',
'result': True
})
return ret
diff = _compute_diff(device_config, expected_config)
changes.update(diff)
ret.update({
'changes': changes
})
if __opts__['test'] is True:
ret.update({
'result': None,
'comment': 'Testing mode: configuration was not changed!'
})
return ret
# <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
# ----- Call _set_users and _delete_users as needed ------------------------------------------------------->
expected_config_change = False
result = True
if diff:
_configured = _configure(diff)
if _configured.get('result'):
expected_config_change = True
else: # something went wrong...
result = False
comment = 'Cannot push new SNMP config: \n{reason}'.format(
reason=_configured.get('comment')
) + comment
if expected_config_change:
result, comment = __salt__['net.config_control']()
# <---- Call _set_users and _delete_users as needed --------------------------------------------------------
ret.update({
'result': result,
'comment': comment
})
return ret
|
saltstack/salt
|
salt/states/netsnmp.py
|
_str_elem
|
python
|
def _str_elem(config, key):
    '''
    Keep ``config[key]`` only when it holds a non-empty string.

    The key is always popped first; it is re-inserted with the same value
    when that value passes :func:`_valid_str`, and silently dropped otherwise.
    '''
    candidate = config.pop(key, '')
    if not _valid_str(candidate):
        return
    config[key] = candidate
|
Re-adds the value of a specific key in the dict, only in case of valid str value.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netsnmp.py#L133-L141
| null |
# -*- coding: utf-8 -*-
'''
Network SNMP
============
Manage the SNMP configuration on network devices.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm snmp management module (salt.modules.napalm_snmp) <salt.modules.napalm_snmp>`
.. versionadded: 2016.11.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# salt lib
from salt.utils.json import loads, dumps
from salt.ext import six
# import NAPALM utils
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netsnmp'
_COMMUNITY_MODE_MAP = {
'read-only': 'ro',
'readonly': 'ro',
'read-write': 'rw',
'write': 'rw'
}
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    NAPALM library must be installed for this module to work and run in a (proxy) minion.
    '''
    # Delegate the check to the shared NAPALM helper: it verifies the napalm
    # dependency and the (proxy) minion context, returning either the virtual
    # module name or (False, reason).
    return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _ordered_dict_to_dict(config):
'''
Forced the datatype to dict, in case OrderedDict is used.
'''
return loads(dumps(config))
def _expand_config(config, defaults):
'''
Completed the values of the expected config for the edge cases with the default values.
'''
defaults.update(config)
return defaults
def _valid_dict(dic):
'''
Valid dictionary?
'''
return isinstance(dic, dict) and len(dic) > 0
def _valid_str(value):
    '''
    Return ``True`` only for a non-empty string (any of ``six.string_types``,
    so both unicode and byte strings on Python 2).
    '''
    if not isinstance(value, six.string_types):
        return False
    return len(value) > 0
def _community_defaults():
'''
Returns the default values of a community.
'''
return {
'mode': 'ro'
}
def _clear_community_details(community_details):
    '''
    Normalise the details of one SNMP community.

    Keeps only valid string values for the ``acl`` and ``mode`` fields, then
    canonicalises the mode: lowercased, long forms (``read-only``,
    ``read-write``, ...) mapped through ``_COMMUNITY_MODE_MAP``, and anything
    missing or unrecognised falling back to ``ro`` (read-only).

    Returns the (mutated) ``community_details`` dict.
    '''
    for key in ['acl', 'mode']:
        _str_elem(community_details, key)
    # NOTE: the previous implementation did
    #     community_details.get['mode'] = ...
    # which raises TypeError (item assignment on the bound ``get`` method),
    # crashed with AttributeError when 'mode' was absent, and needlessly
    # re-ran the normalisation on every loop iteration above.
    _mode = community_details.get('mode', '').lower()
    _mode = _COMMUNITY_MODE_MAP.get(_mode, _mode)
    if _mode not in ['ro', 'rw']:
        _mode = 'ro'  # default is read-only
    community_details['mode'] = _mode
    return community_details
def _check_config(config):
    '''
    Validate and normalise the desired SNMP config, in place.

    Accepts the ``community`` entry in three shapes: a single string, a list
    (of strings and/or single-entry dicts), or a dict mapping community names
    to their details. On success, ``config['community']`` is rewritten as a
    dict of name -> normalised details, and the optional string fields are
    cleaned.

    Returns a ``(valid, error_message)`` tuple; an empty/non-dict ``config``
    is considered valid (nothing to check).
    '''
    if not _valid_dict(config):
        # nothing to validate -- treated as "no config requested"
        return True, ''
    _community = config.get('community')
    _community_tmp = {}
    if not _community:
        return False, 'Must specify at least a community.'
    if _valid_str(_community):
        # single community given as a plain string
        _community_tmp[_community] = _community_defaults()
    elif isinstance(_community, list):
        # if the user specifies the communities as list
        for _comm in _community:
            if _valid_str(_comm):
                # list of values
                _community_tmp[_comm] = _community_defaults()
                # default mode is read-only
            if _valid_dict(_comm):
                # list of dicts
                for _comm_name, _comm_details in six.iteritems(_comm):
                    if _valid_str(_comm_name):
                        _community_tmp[_comm_name] = _clear_community_details(_comm_details)
    elif _valid_dict(_community):
        # directly as dict of communities
        # recommended way...
        for _comm_name, _comm_details in six.iteritems(_community):
            if _valid_str(_comm_name):
                _community_tmp[_comm_name] = _clear_community_details(_comm_details)
    else:
        return False, 'Please specify a community or a list of communities.'
    if not _valid_dict(_community_tmp):
        return False, 'Please specify at least a valid community!'
    config['community'] = _community_tmp
    for key in ['location', 'contact', 'chassis_id']:
        # not mandatory, but should be here only if valid
        _str_elem(config, key)
    return True, ''
def _retrieve_device_config():
    '''
    Fetch the running SNMP configuration from the (proxy) minion device,
    as returned by the ``snmp.config`` execution function.
    '''
    snmp_config_fun = __salt__['snmp.config']
    return snmp_config_fun()
def _create_diff_action(diff, diff_key, key, value):
'''
DRY to build diff parts (added, removed, updated).
'''
if diff_key not in diff.keys():
diff[diff_key] = {}
diff[diff_key][key] = value
def _create_diff(diff, fun, key, prev, curr):
    '''
    Record one config difference into ``diff``, classified as ``added``,
    ``removed`` or ``updated``.

    ``fun`` is the validity predicate for this field (``_valid_str`` or
    ``_valid_dict``); callers only invoke this when ``prev != curr``.

    - previous value invalid/absent  -> ``added`` (with the new value)
    - new value invalid/absent       -> ``removed`` (with the old value)
    - both valid but different       -> ``updated`` (with the new value)

    BUGFIX: the original chain ended with ``elif not fun(curr)``, which is
    unreachable after ``elif fun(prev) and not fun(curr)`` -- so in-place
    changes were never recorded and never pushed to the device.
    '''
    if not fun(prev):
        _create_diff_action(diff, 'added', key, curr)
    elif not fun(curr):
        _create_diff_action(diff, 'removed', key, prev)
    else:
        _create_diff_action(diff, 'updated', key, curr)
def _compute_diff(existing, expected):
    '''
    Compute the differences between the existing and the expected SNMP
    config, as a dict keyed by ``added``/``removed``/``updated``.
    '''
    diff = {}
    # each top-level field with its validity predicate; order matters only
    # for reproducibility of the resulting diff
    field_validators = [
        ('location', _valid_str),
        ('contact', _valid_str),
        ('chassis_id', _valid_str),
        ('community', _valid_dict),  # for the moment the only dict field
    ]
    for field, validator in field_validators:
        old_value = existing.get(field)
        new_value = expected.get(field)
        if old_value != new_value:
            _create_diff(diff, validator, field, old_value, new_value)
    return diff
def _configure(changes):
    '''
    Apply the computed configuration changes on the device, without
    committing (the commit is driven later via ``net.config_control``).

    ``added``/``updated`` entries go through ``snmp.update_config``;
    ``removed`` entries go through ``snmp.remove_config``.

    Returns a dict with the overall ``result`` and the concatenated failure
    ``comment`` (empty string when everything succeeded).
    '''
    overall_result = True
    failure_reasons = []
    for change_type in ('added', 'updated', 'removed'):
        details = changes.get(change_type, {})
        if not details:
            continue
        fun = 'remove_config' if change_type == 'removed' else 'update_config'
        ret = __salt__['snmp.{fun}'.format(fun=fun)](
            location=details.get('location', ''),
            contact=details.get('contact', ''),
            community=details.get('community', {}),
            chassis_id=details.get('chassis_id', ''),
            commit=False)
        overall_result = overall_result and ret.get('result')
        if not ret.get('result') and ret.get('comment'):
            failure_reasons.append(ret.get('comment'))
    return {
        'result': overall_result,
        'comment': '\n'.join(failure_reasons) if failure_reasons else ''
    }
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, config=None, defaults=None):
    '''
    Configures the SNMP on the device as specified in the SLS file.

    name
        The name of the state (arbitrary).

    config
        Dictionary with the SNMP configuration details: ``community``
        (string, list or dict), ``location``, ``contact`` and ``chassis_id``.

    defaults
        Default values for the fields missing from ``config``.

    SLS Example:

    .. code-block:: yaml

        snmp_example:
            netsnmp.managed:
                - config:
                    location: Honolulu, HI, US
                - defaults:
                    contact: noc@cloudflare.com
    '''
    # standard Salt state return skeleton; ``changes`` is aliased into ``ret``
    # so later updates to it are reflected in the returned dict
    result = False
    comment = ''
    changes = {}
    ret = {
        'name': name,
        'changes': changes,
        'result': result,
        'comment': comment
    }
    # make sure we're working only with dict
    config = _ordered_dict_to_dict(config)
    defaults = _ordered_dict_to_dict(defaults)
    expected_config = _expand_config(config, defaults)
    if not isinstance(expected_config, dict):
        ret['comment'] = 'User provided an empty SNMP config!'
        return ret
    valid, message = _check_config(expected_config)
    if not valid:  # validation failed -- bail out with the reason
        ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
        return ret
    # ----- Retrieve existing users configuration and determine differences ------------------------------------------->
    _device_config = _retrieve_device_config()
    if not _device_config.get('result'):
        ret['comment'] = 'Cannot retrieve SNMP config from the device: {reason}'.format(
            reason=_device_config.get('comment')
        )
        return ret
    device_config = _device_config.get('out', {})
    if device_config == expected_config:
        # already in the desired state -- nothing to do
        ret.update({
            'comment': 'SNMP already configured as needed.',
            'result': True
        })
        return ret
    diff = _compute_diff(device_config, expected_config)
    changes.update(diff)
    ret.update({
        'changes': changes
    })
    if __opts__['test'] is True:
        # dry run: report the would-be changes without touching the device
        ret.update({
            'result': None,
            'comment': 'Testing mode: configuration was not changed!'
        })
        return ret
    # <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
    # ----- Call _set_users and _delete_users as needed ------------------------------------------------------->
    expected_config_change = False
    result = True
    if diff:
        _configured = _configure(diff)
        if _configured.get('result'):
            expected_config_change = True
        else:  # something went wrong...
            result = False
            comment = 'Cannot push new SNMP config: \n{reason}'.format(
                reason=_configured.get('comment')
            ) + comment
    if expected_config_change:
        # candidate config was loaded -- commit (or discard) via net.config_control
        result, comment = __salt__['net.config_control']()
    # <---- Call _set_users and _delete_users as needed --------------------------------------------------------
    ret.update({
        'result': result,
        'comment': comment
    })
    return ret
|
saltstack/salt
|
salt/states/netsnmp.py
|
_check_config
|
python
|
def _check_config(config):
'''
Checks the desired config and clears interesting details.
'''
if not _valid_dict(config):
return True, ''
_community = config.get('community')
_community_tmp = {}
if not _community:
return False, 'Must specify at least a community.'
if _valid_str(_community):
_community_tmp[_community] = _community_defaults()
elif isinstance(_community, list):
# if the user specifies the communities as list
for _comm in _community:
if _valid_str(_comm):
# list of values
_community_tmp[_comm] = _community_defaults()
# default mode is read-only
if _valid_dict(_comm):
# list of dicts
for _comm_name, _comm_details in six.iteritems(_comm):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
elif _valid_dict(_community):
# directly as dict of communities
# recommended way...
for _comm_name, _comm_details in six.iteritems(_community):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
else:
return False, 'Please specify a community or a list of communities.'
if not _valid_dict(_community_tmp):
return False, 'Please specify at least a valid community!'
config['community'] = _community_tmp
for key in ['location', 'contact', 'chassis_id']:
# not mandatory, but should be here only if valid
_str_elem(config, key)
return True, ''
|
Checks the desired config and clears interesting details.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netsnmp.py#L144-L189
| null |
# -*- coding: utf-8 -*-
'''
Network SNMP
============
Manage the SNMP configuration on network devices.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm snmp management module (salt.modules.napalm_snmp) <salt.modules.napalm_snmp>`
.. versionadded: 2016.11.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# salt lib
from salt.utils.json import loads, dumps
from salt.ext import six
# import NAPALM utils
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netsnmp'
_COMMUNITY_MODE_MAP = {
'read-only': 'ro',
'readonly': 'ro',
'read-write': 'rw',
'write': 'rw'
}
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
'''
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _ordered_dict_to_dict(config):
'''
Forced the datatype to dict, in case OrderedDict is used.
'''
return loads(dumps(config))
def _expand_config(config, defaults):
'''
Completed the values of the expected config for the edge cases with the default values.
'''
defaults.update(config)
return defaults
def _valid_dict(dic):
'''
Valid dictionary?
'''
return isinstance(dic, dict) and len(dic) > 0
def _valid_str(value):
'''
Valid str?
'''
return isinstance(value, six.string_types) and len(value) > 0
def _community_defaults():
'''
Returns the default values of a community.
'''
return {
'mode': 'ro'
}
def _clear_community_details(community_details):
    '''
    Normalise the details of one SNMP community.

    Keeps only valid string values for the ``acl`` and ``mode`` fields, then
    canonicalises the mode: lowercased, long forms (``read-only``,
    ``read-write``, ...) mapped through ``_COMMUNITY_MODE_MAP``, and anything
    missing or unrecognised falling back to ``ro`` (read-only).

    Returns the (mutated) ``community_details`` dict.
    '''
    for key in ['acl', 'mode']:
        _str_elem(community_details, key)
    # NOTE: the previous implementation did
    #     community_details.get['mode'] = ...
    # which raises TypeError (item assignment on the bound ``get`` method),
    # crashed with AttributeError when 'mode' was absent, and needlessly
    # re-ran the normalisation on every loop iteration above.
    _mode = community_details.get('mode', '').lower()
    _mode = _COMMUNITY_MODE_MAP.get(_mode, _mode)
    if _mode not in ['ro', 'rw']:
        _mode = 'ro'  # default is read-only
    community_details['mode'] = _mode
    return community_details
def _str_elem(config, key):
'''
Re-adds the value of a specific key in the dict, only in case of valid str value.
'''
_value = config.pop(key, '')
if _valid_str(_value):
config[key] = _value
def _retrieve_device_config():
'''
Retrieves the SNMP config from the device.
'''
return __salt__['snmp.config']()
def _create_diff_action(diff, diff_key, key, value):
'''
DRY to build diff parts (added, removed, updated).
'''
if diff_key not in diff.keys():
diff[diff_key] = {}
diff[diff_key][key] = value
def _create_diff(diff, fun, key, prev, curr):
'''
Builds the diff dictionary.
'''
if not fun(prev):
_create_diff_action(diff, 'added', key, curr)
elif fun(prev) and not fun(curr):
_create_diff_action(diff, 'removed', key, prev)
elif not fun(curr):
_create_diff_action(diff, 'updated', key, curr)
def _compute_diff(existing, expected):
'''
Computes the differences between the existing and the expected SNMP config.
'''
diff = {}
for key in ['location', 'contact', 'chassis_id']:
if existing.get(key) != expected.get(key):
_create_diff(diff,
_valid_str,
key,
existing.get(key),
expected.get(key))
for key in ['community']: # for the moment only onen
if existing.get(key) != expected.get(key):
_create_diff(diff,
_valid_dict,
key,
existing.get(key),
expected.get(key))
return diff
def _configure(changes):
'''
Calls the configuration template to apply the configuration changes on the device.
'''
cfgred = True
reasons = []
fun = 'update_config'
for key in ['added', 'updated', 'removed']:
_updated_changes = changes.get(key, {})
if not _updated_changes:
continue
_location = _updated_changes.get('location', '')
_contact = _updated_changes.get('contact', '')
_community = _updated_changes.get('community', {})
_chassis_id = _updated_changes.get('chassis_id', '')
if key == 'removed':
fun = 'remove_config'
_ret = __salt__['snmp.{fun}'.format(fun=fun)](location=_location,
contact=_contact,
community=_community,
chassis_id=_chassis_id,
commit=False)
cfgred = cfgred and _ret.get('result')
if not _ret.get('result') and _ret.get('comment'):
reasons.append(_ret.get('comment'))
return {
'result': cfgred,
'comment': '\n'.join(reasons) if reasons else ''
}
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, config=None, defaults=None):
'''
Configures the SNMP on the device as specified in the SLS file.
SLS Example:
.. code-block:: yaml
snmp_example:
netsnmp.managed:
- config:
location: Honolulu, HI, US
- defaults:
contact: noc@cloudflare.com
Output example (for the SLS above, e.g. called snmp.sls under /router/):
.. code-block:: bash
$ sudo salt edge01.hnl01 state.sls router.snmp test=True
edge01.hnl01:
----------
ID: snmp_example
Function: snmp.managed
Result: None
Comment: Testing mode: configuration was not changed!
Started: 13:29:06.872363
Duration: 920.466 ms
Changes:
----------
added:
----------
chassis_id:
None
contact:
noc@cloudflare.com
location:
Honolulu, HI, US
Summary for edge01.hnl01
------------
Succeeded: 1 (unchanged=1, changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 920.466 ms
'''
result = False
comment = ''
changes = {}
ret = {
'name': name,
'changes': changes,
'result': result,
'comment': comment
}
# make sure we're working only with dict
config = _ordered_dict_to_dict(config)
defaults = _ordered_dict_to_dict(defaults)
expected_config = _expand_config(config, defaults)
if not isinstance(expected_config, dict):
ret['comment'] = 'User provided an empty SNMP config!'
return ret
valid, message = _check_config(expected_config)
if not valid: # check and clean
ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
return ret
# ----- Retrieve existing users configuration and determine differences ------------------------------------------->
_device_config = _retrieve_device_config()
if not _device_config.get('result'):
ret['comment'] = 'Cannot retrieve SNMP config from the device: {reason}'.format(
reason=_device_config.get('comment')
)
return ret
device_config = _device_config.get('out', {})
if device_config == expected_config:
ret.update({
'comment': 'SNMP already configured as needed.',
'result': True
})
return ret
diff = _compute_diff(device_config, expected_config)
changes.update(diff)
ret.update({
'changes': changes
})
if __opts__['test'] is True:
ret.update({
'result': None,
'comment': 'Testing mode: configuration was not changed!'
})
return ret
# <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
# ----- Call _set_users and _delete_users as needed ------------------------------------------------------->
expected_config_change = False
result = True
if diff:
_configured = _configure(diff)
if _configured.get('result'):
expected_config_change = True
else: # something went wrong...
result = False
comment = 'Cannot push new SNMP config: \n{reason}'.format(
reason=_configured.get('comment')
) + comment
if expected_config_change:
result, comment = __salt__['net.config_control']()
# <---- Call _set_users and _delete_users as needed --------------------------------------------------------
ret.update({
'result': result,
'comment': comment
})
return ret
|
saltstack/salt
|
salt/states/netsnmp.py
|
_create_diff_action
|
python
|
def _create_diff_action(diff, diff_key, key, value):
'''
DRY to build diff parts (added, removed, updated).
'''
if diff_key not in diff.keys():
diff[diff_key] = {}
diff[diff_key][key] = value
|
DRY to build diff parts (added, removed, updated).
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netsnmp.py#L201-L209
| null |
# -*- coding: utf-8 -*-
'''
Network SNMP
============
Manage the SNMP configuration on network devices.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm snmp management module (salt.modules.napalm_snmp) <salt.modules.napalm_snmp>`
.. versionadded: 2016.11.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# salt lib
from salt.utils.json import loads, dumps
from salt.ext import six
# import NAPALM utils
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netsnmp'
_COMMUNITY_MODE_MAP = {
'read-only': 'ro',
'readonly': 'ro',
'read-write': 'rw',
'write': 'rw'
}
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
'''
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _ordered_dict_to_dict(config):
'''
Forced the datatype to dict, in case OrderedDict is used.
'''
return loads(dumps(config))
def _expand_config(config, defaults):
'''
Completed the values of the expected config for the edge cases with the default values.
'''
defaults.update(config)
return defaults
def _valid_dict(dic):
'''
Valid dictionary?
'''
return isinstance(dic, dict) and len(dic) > 0
def _valid_str(value):
'''
Valid str?
'''
return isinstance(value, six.string_types) and len(value) > 0
def _community_defaults():
'''
Returns the default values of a community.
'''
return {
'mode': 'ro'
}
def _clear_community_details(community_details):
    '''
    Normalise the details of one SNMP community.

    Keeps only valid string values for the ``acl`` and ``mode`` fields, then
    canonicalises the mode: lowercased, long forms (``read-only``,
    ``read-write``, ...) mapped through ``_COMMUNITY_MODE_MAP``, and anything
    missing or unrecognised falling back to ``ro`` (read-only).

    Returns the (mutated) ``community_details`` dict.
    '''
    for key in ['acl', 'mode']:
        _str_elem(community_details, key)
    # NOTE: the previous implementation did
    #     community_details.get['mode'] = ...
    # which raises TypeError (item assignment on the bound ``get`` method),
    # crashed with AttributeError when 'mode' was absent, and needlessly
    # re-ran the normalisation on every loop iteration above.
    _mode = community_details.get('mode', '').lower()
    _mode = _COMMUNITY_MODE_MAP.get(_mode, _mode)
    if _mode not in ['ro', 'rw']:
        _mode = 'ro'  # default is read-only
    community_details['mode'] = _mode
    return community_details
def _str_elem(config, key):
'''
Re-adds the value of a specific key in the dict, only in case of valid str value.
'''
_value = config.pop(key, '')
if _valid_str(_value):
config[key] = _value
def _check_config(config):
    '''
    Validate and normalise the user-supplied SNMP config, in place.

    The ``community`` entry may be given as a bare string, a list of
    strings, a list of single-entry dicts, or (recommended) a dict mapping
    community names to their details; it is always normalised to the dict
    form under ``config['community']``. Scalar fields ('location',
    'contact', 'chassis_id') are kept only when they are non-empty strings.

    Returns a ``(valid, error_message)`` tuple.
    '''
    if not _valid_dict(config):
        # NOTE(review): an empty/non-dict config is reported as valid here —
        # the caller treats "nothing to check" as a pass.
        return True, ''
    _community = config.get('community')
    _community_tmp = {}  # normalised {name: details} mapping built below
    if not _community:
        return False, 'Must specify at least a community.'
    if _valid_str(_community):
        # a single community given as a bare string; read-only by default
        _community_tmp[_community] = _community_defaults()
    elif isinstance(_community, list):
        # if the user specifies the communities as list
        for _comm in _community:
            if _valid_str(_comm):
                # list of values
                _community_tmp[_comm] = _community_defaults()
                # default mode is read-only
            if _valid_dict(_comm):
                # list of dicts: each entry maps name -> details
                for _comm_name, _comm_details in six.iteritems(_comm):
                    if _valid_str(_comm_name):
                        _community_tmp[_comm_name] = _clear_community_details(_comm_details)
    elif _valid_dict(_community):
        # directly as dict of communities
        # recommended way...
        for _comm_name, _comm_details in six.iteritems(_community):
            if _valid_str(_comm_name):
                _community_tmp[_comm_name] = _clear_community_details(_comm_details)
    else:
        return False, 'Please specify a community or a list of communities.'
    if not _valid_dict(_community_tmp):
        return False, 'Please specify at least a valid community!'
    config['community'] = _community_tmp
    for key in ['location', 'contact', 'chassis_id']:
        # not mandatory, but should be here only if valid
        _str_elem(config, key)
    return True, ''
def _retrieve_device_config():
    '''
    Fetch the running SNMP configuration from the device via the
    ``snmp.config`` execution function.
    '''
    snmp_config_fun = __salt__['snmp.config']
    return snmp_config_fun()
def _create_diff(diff, fun, key, prev, curr):
    '''
    Record in *diff* how *key* changed between the existing value *prev*
    and the expected value *curr*, using *fun* as the validity predicate
    (``_valid_str`` or ``_valid_dict``):

    - *prev* invalid                -> 'added' (with *curr*)
    - *prev* valid, *curr* invalid  -> 'removed' (with *prev*)
    - both valid (but different)    -> 'updated' (with *curr*)
    '''
    if not fun(prev):
        _create_diff_action(diff, 'added', key, curr)
    elif not fun(curr):
        _create_diff_action(diff, 'removed', key, prev)
    else:
        # Bug fix: the original third branch was ``elif not fun(curr)``,
        # which is unreachable after the branch above, so in-place value
        # changes were never reported as 'updated' (and never pushed).
        _create_diff_action(diff, 'updated', key, curr)
def _compute_diff(existing, expected):
    '''
    Compute the delta between the *existing* on-device SNMP config and the
    *expected* config, returned as a dict with up to three buckets
    ('added', 'removed', 'updated') as filed by ``_create_diff``.
    '''
    diff = {}
    # scalar fields: validated as non-empty strings
    for key in ['location', 'contact', 'chassis_id']:
        if existing.get(key) != expected.get(key):
            _create_diff(diff,
                         _valid_str,
                         key,
                         existing.get(key),
                         expected.get(key))
    for key in ['community']:  # for the moment only one dict-valued field
        if existing.get(key) != expected.get(key):
            _create_diff(diff,
                         _valid_dict,
                         key,
                         existing.get(key),
                         expected.get(key))
    return diff
def _configure(changes):
    '''
    Apply the computed SNMP diff on the device.

    For each diff bucket ('added'/'updated' -> ``snmp.update_config``,
    'removed' -> ``snmp.remove_config``) the matching execution function is
    called with ``commit=False``; the actual commit is left to the caller
    (``net.config_control`` in ``managed``).

    Returns a dict with the aggregated ``result`` and a newline-joined
    ``comment`` of any failure reasons.
    '''
    cfgred = True
    reasons = []
    fun = 'update_config'
    # 'removed' is deliberately last: once ``fun`` switches to
    # 'remove_config' it is never switched back.
    for key in ['added', 'updated', 'removed']:
        _updated_changes = changes.get(key, {})
        if not _updated_changes:
            continue
        _location = _updated_changes.get('location', '')
        _contact = _updated_changes.get('contact', '')
        _community = _updated_changes.get('community', {})
        _chassis_id = _updated_changes.get('chassis_id', '')
        if key == 'removed':
            fun = 'remove_config'
        _ret = __salt__['snmp.{fun}'.format(fun=fun)](location=_location,
                                                      contact=_contact,
                                                      community=_community,
                                                      chassis_id=_chassis_id,
                                                      commit=False)
        cfgred = cfgred and _ret.get('result')
        if not _ret.get('result') and _ret.get('comment'):
            reasons.append(_ret.get('comment'))
    return {
        'result': cfgred,
        'comment': '\n'.join(reasons) if reasons else ''
    }
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, config=None, defaults=None):
    '''
    Configures the SNMP on the device as specified in the SLS file.

    name
        The ID of the state block (not used on the device).

    config
        Desired SNMP configuration (``community``, ``location``,
        ``contact``, ``chassis_id``); explicit values win over *defaults*.

    defaults
        Fallback values merged underneath *config*.

    SLS Example:

    .. code-block:: yaml

        snmp_example:
            netsnmp.managed:
                - config:
                    location: Honolulu, HI, US
                - defaults:
                    contact: noc@cloudflare.com
    '''
    result = False
    comment = ''
    changes = {}
    ret = {
        'name': name,
        'changes': changes,
        'result': result,
        'comment': comment
    }
    # make sure we're working only with dict (YAML may hand us OrderedDicts)
    config = _ordered_dict_to_dict(config)
    defaults = _ordered_dict_to_dict(defaults)
    expected_config = _expand_config(config, defaults)
    if not isinstance(expected_config, dict):
        ret['comment'] = 'User provided an empty SNMP config!'
        return ret
    valid, message = _check_config(expected_config)
    if not valid:  # check and clean
        ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
        return ret
    # ----- Retrieve existing SNMP configuration and determine differences ------------------------------------------->
    _device_config = _retrieve_device_config()
    if not _device_config.get('result'):
        ret['comment'] = 'Cannot retrieve SNMP config from the device: {reason}'.format(
            reason=_device_config.get('comment')
        )
        return ret
    device_config = _device_config.get('out', {})
    if device_config == expected_config:
        # nothing to do: device already matches the desired state
        ret.update({
            'comment': 'SNMP already configured as needed.',
            'result': True
        })
        return ret
    diff = _compute_diff(device_config, expected_config)
    changes.update(diff)
    ret.update({
        'changes': changes
    })
    if __opts__['test'] is True:
        # dry-run: report the would-be changes without touching the device
        ret.update({
            'result': None,
            'comment': 'Testing mode: configuration was not changed!'
        })
        return ret
    # <---- Retrieve existing SNMP configuration and determine differences ---------------------------------------------
    # ----- Apply the diff and commit --------------------------------------------------------------------------------->
    expected_config_change = False
    result = True
    if diff:
        _configured = _configure(diff)
        if _configured.get('result'):
            expected_config_change = True
        else:  # something went wrong...
            result = False
            comment = 'Cannot push new SNMP config: \n{reason}'.format(
                reason=_configured.get('comment')
            ) + comment
    if expected_config_change:
        # commit the staged changes (candidate config) on the device
        result, comment = __salt__['net.config_control']()
    # <---- Apply the diff and commit ----------------------------------------------------------------------------------
    ret.update({
        'result': result,
        'comment': comment
    })
    return ret
|
saltstack/salt
|
salt/states/netsnmp.py
|
_create_diff
|
python
|
def _create_diff(diff, fun, key, prev, curr):
'''
Builds the diff dictionary.
'''
if not fun(prev):
_create_diff_action(diff, 'added', key, curr)
elif fun(prev) and not fun(curr):
_create_diff_action(diff, 'removed', key, prev)
elif not fun(curr):
_create_diff_action(diff, 'updated', key, curr)
|
Builds the diff dictionary.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netsnmp.py#L212-L223
| null |
# -*- coding: utf-8 -*-
'''
Network SNMP
============
Manage the SNMP configuration on network devices.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm snmp management module (salt.modules.napalm_snmp) <salt.modules.napalm_snmp>`
.. versionadded: 2016.11.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# salt lib
from salt.utils.json import loads, dumps
from salt.ext import six
# import NAPALM utils
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netsnmp'
_COMMUNITY_MODE_MAP = {
'read-only': 'ro',
'readonly': 'ro',
'read-write': 'rw',
'write': 'rw'
}
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
'''
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _ordered_dict_to_dict(config):
'''
Forced the datatype to dict, in case OrderedDict is used.
'''
return loads(dumps(config))
def _expand_config(config, defaults):
'''
Completed the values of the expected config for the edge cases with the default values.
'''
defaults.update(config)
return defaults
def _valid_dict(dic):
'''
Valid dictionary?
'''
return isinstance(dic, dict) and len(dic) > 0
def _valid_str(value):
'''
Valid str?
'''
return isinstance(value, six.string_types) and len(value) > 0
def _community_defaults():
'''
Returns the default values of a community.
'''
return {
'mode': 'ro'
}
def _clear_community_details(community_details):
    '''
    Normalise the details of a single SNMP community.

    Drops invalid ``acl``/``mode`` entries, lowercases the mode, maps
    long-form mode names (e.g. ``read-only``) to their short form via
    ``_COMMUNITY_MODE_MAP`` and falls back to read-only (``ro``) for
    anything unrecognised. Mutates and returns *community_details*.
    '''
    for key in ['acl', 'mode']:
        # keep only valid, non-empty string values
        _str_elem(community_details, key)
    # Bug fix: the original assigned through ``community_details.get['mode']``
    # (item assignment on the bound ``dict.get`` method), which raises
    # TypeError at runtime. Also default to '' because _str_elem above pops
    # an invalid/missing 'mode', so ``.get('mode')`` could return None.
    _mode = community_details.get('mode', '').lower()
    community_details['mode'] = _mode
    if _mode in _COMMUNITY_MODE_MAP:
        community_details['mode'] = _COMMUNITY_MODE_MAP[_mode]
    if community_details['mode'] not in ['ro', 'rw']:
        community_details['mode'] = 'ro'  # default is read-only
    return community_details
def _str_elem(config, key):
'''
Re-adds the value of a specific key in the dict, only in case of valid str value.
'''
_value = config.pop(key, '')
if _valid_str(_value):
config[key] = _value
def _check_config(config):
'''
Checks the desired config and clears interesting details.
'''
if not _valid_dict(config):
return True, ''
_community = config.get('community')
_community_tmp = {}
if not _community:
return False, 'Must specify at least a community.'
if _valid_str(_community):
_community_tmp[_community] = _community_defaults()
elif isinstance(_community, list):
# if the user specifies the communities as list
for _comm in _community:
if _valid_str(_comm):
# list of values
_community_tmp[_comm] = _community_defaults()
# default mode is read-only
if _valid_dict(_comm):
# list of dicts
for _comm_name, _comm_details in six.iteritems(_comm):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
elif _valid_dict(_community):
# directly as dict of communities
# recommended way...
for _comm_name, _comm_details in six.iteritems(_community):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
else:
return False, 'Please specify a community or a list of communities.'
if not _valid_dict(_community_tmp):
return False, 'Please specify at least a valid community!'
config['community'] = _community_tmp
for key in ['location', 'contact', 'chassis_id']:
# not mandatory, but should be here only if valid
_str_elem(config, key)
return True, ''
def _retrieve_device_config():
'''
Retrieves the SNMP config from the device.
'''
return __salt__['snmp.config']()
def _create_diff_action(diff, diff_key, key, value):
'''
DRY to build diff parts (added, removed, updated).
'''
if diff_key not in diff.keys():
diff[diff_key] = {}
diff[diff_key][key] = value
def _compute_diff(existing, expected):
'''
Computes the differences between the existing and the expected SNMP config.
'''
diff = {}
for key in ['location', 'contact', 'chassis_id']:
if existing.get(key) != expected.get(key):
_create_diff(diff,
_valid_str,
key,
existing.get(key),
expected.get(key))
for key in ['community']: # for the moment only onen
if existing.get(key) != expected.get(key):
_create_diff(diff,
_valid_dict,
key,
existing.get(key),
expected.get(key))
return diff
def _configure(changes):
'''
Calls the configuration template to apply the configuration changes on the device.
'''
cfgred = True
reasons = []
fun = 'update_config'
for key in ['added', 'updated', 'removed']:
_updated_changes = changes.get(key, {})
if not _updated_changes:
continue
_location = _updated_changes.get('location', '')
_contact = _updated_changes.get('contact', '')
_community = _updated_changes.get('community', {})
_chassis_id = _updated_changes.get('chassis_id', '')
if key == 'removed':
fun = 'remove_config'
_ret = __salt__['snmp.{fun}'.format(fun=fun)](location=_location,
contact=_contact,
community=_community,
chassis_id=_chassis_id,
commit=False)
cfgred = cfgred and _ret.get('result')
if not _ret.get('result') and _ret.get('comment'):
reasons.append(_ret.get('comment'))
return {
'result': cfgred,
'comment': '\n'.join(reasons) if reasons else ''
}
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, config=None, defaults=None):
'''
Configures the SNMP on the device as specified in the SLS file.
SLS Example:
.. code-block:: yaml
snmp_example:
netsnmp.managed:
- config:
location: Honolulu, HI, US
- defaults:
contact: noc@cloudflare.com
Output example (for the SLS above, e.g. called snmp.sls under /router/):
.. code-block:: bash
$ sudo salt edge01.hnl01 state.sls router.snmp test=True
edge01.hnl01:
----------
ID: snmp_example
Function: snmp.managed
Result: None
Comment: Testing mode: configuration was not changed!
Started: 13:29:06.872363
Duration: 920.466 ms
Changes:
----------
added:
----------
chassis_id:
None
contact:
noc@cloudflare.com
location:
Honolulu, HI, US
Summary for edge01.hnl01
------------
Succeeded: 1 (unchanged=1, changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 920.466 ms
'''
result = False
comment = ''
changes = {}
ret = {
'name': name,
'changes': changes,
'result': result,
'comment': comment
}
# make sure we're working only with dict
config = _ordered_dict_to_dict(config)
defaults = _ordered_dict_to_dict(defaults)
expected_config = _expand_config(config, defaults)
if not isinstance(expected_config, dict):
ret['comment'] = 'User provided an empty SNMP config!'
return ret
valid, message = _check_config(expected_config)
if not valid: # check and clean
ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
return ret
# ----- Retrieve existing users configuration and determine differences ------------------------------------------->
_device_config = _retrieve_device_config()
if not _device_config.get('result'):
ret['comment'] = 'Cannot retrieve SNMP config from the device: {reason}'.format(
reason=_device_config.get('comment')
)
return ret
device_config = _device_config.get('out', {})
if device_config == expected_config:
ret.update({
'comment': 'SNMP already configured as needed.',
'result': True
})
return ret
diff = _compute_diff(device_config, expected_config)
changes.update(diff)
ret.update({
'changes': changes
})
if __opts__['test'] is True:
ret.update({
'result': None,
'comment': 'Testing mode: configuration was not changed!'
})
return ret
# <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
# ----- Call _set_users and _delete_users as needed ------------------------------------------------------->
expected_config_change = False
result = True
if diff:
_configured = _configure(diff)
if _configured.get('result'):
expected_config_change = True
else: # something went wrong...
result = False
comment = 'Cannot push new SNMP config: \n{reason}'.format(
reason=_configured.get('comment')
) + comment
if expected_config_change:
result, comment = __salt__['net.config_control']()
# <---- Call _set_users and _delete_users as needed --------------------------------------------------------
ret.update({
'result': result,
'comment': comment
})
return ret
|
saltstack/salt
|
salt/states/netsnmp.py
|
_compute_diff
|
python
|
def _compute_diff(existing, expected):
'''
Computes the differences between the existing and the expected SNMP config.
'''
diff = {}
for key in ['location', 'contact', 'chassis_id']:
if existing.get(key) != expected.get(key):
_create_diff(diff,
_valid_str,
key,
existing.get(key),
expected.get(key))
for key in ['community']: # for the moment only onen
if existing.get(key) != expected.get(key):
_create_diff(diff,
_valid_dict,
key,
existing.get(key),
expected.get(key))
return diff
|
Computes the differences between the existing and the expected SNMP config.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netsnmp.py#L226-L250
| null |
# -*- coding: utf-8 -*-
'''
Network SNMP
============
Manage the SNMP configuration on network devices.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm snmp management module (salt.modules.napalm_snmp) <salt.modules.napalm_snmp>`
.. versionadded: 2016.11.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# salt lib
from salt.utils.json import loads, dumps
from salt.ext import six
# import NAPALM utils
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netsnmp'
_COMMUNITY_MODE_MAP = {
'read-only': 'ro',
'readonly': 'ro',
'read-write': 'rw',
'write': 'rw'
}
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
'''
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _ordered_dict_to_dict(config):
'''
Forced the datatype to dict, in case OrderedDict is used.
'''
return loads(dumps(config))
def _expand_config(config, defaults):
'''
Completed the values of the expected config for the edge cases with the default values.
'''
defaults.update(config)
return defaults
def _valid_dict(dic):
'''
Valid dictionary?
'''
return isinstance(dic, dict) and len(dic) > 0
def _valid_str(value):
'''
Valid str?
'''
return isinstance(value, six.string_types) and len(value) > 0
def _community_defaults():
'''
Returns the default values of a community.
'''
return {
'mode': 'ro'
}
def _clear_community_details(community_details):
    '''
    Normalise the details of a single SNMP community.

    Drops invalid ``acl``/``mode`` entries, lowercases the mode, maps
    long-form mode names (e.g. ``read-only``) to their short form via
    ``_COMMUNITY_MODE_MAP`` and falls back to read-only (``ro``) for
    anything unrecognised. Mutates and returns *community_details*.
    '''
    for key in ['acl', 'mode']:
        # keep only valid, non-empty string values
        _str_elem(community_details, key)
    # Bug fix: the original assigned through ``community_details.get['mode']``
    # (item assignment on the bound ``dict.get`` method), which raises
    # TypeError at runtime. Also default to '' because _str_elem above pops
    # an invalid/missing 'mode', so ``.get('mode')`` could return None.
    _mode = community_details.get('mode', '').lower()
    community_details['mode'] = _mode
    if _mode in _COMMUNITY_MODE_MAP:
        community_details['mode'] = _COMMUNITY_MODE_MAP[_mode]
    if community_details['mode'] not in ['ro', 'rw']:
        community_details['mode'] = 'ro'  # default is read-only
    return community_details
def _str_elem(config, key):
'''
Re-adds the value of a specific key in the dict, only in case of valid str value.
'''
_value = config.pop(key, '')
if _valid_str(_value):
config[key] = _value
def _check_config(config):
'''
Checks the desired config and clears interesting details.
'''
if not _valid_dict(config):
return True, ''
_community = config.get('community')
_community_tmp = {}
if not _community:
return False, 'Must specify at least a community.'
if _valid_str(_community):
_community_tmp[_community] = _community_defaults()
elif isinstance(_community, list):
# if the user specifies the communities as list
for _comm in _community:
if _valid_str(_comm):
# list of values
_community_tmp[_comm] = _community_defaults()
# default mode is read-only
if _valid_dict(_comm):
# list of dicts
for _comm_name, _comm_details in six.iteritems(_comm):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
elif _valid_dict(_community):
# directly as dict of communities
# recommended way...
for _comm_name, _comm_details in six.iteritems(_community):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
else:
return False, 'Please specify a community or a list of communities.'
if not _valid_dict(_community_tmp):
return False, 'Please specify at least a valid community!'
config['community'] = _community_tmp
for key in ['location', 'contact', 'chassis_id']:
# not mandatory, but should be here only if valid
_str_elem(config, key)
return True, ''
def _retrieve_device_config():
'''
Retrieves the SNMP config from the device.
'''
return __salt__['snmp.config']()
def _create_diff_action(diff, diff_key, key, value):
'''
DRY to build diff parts (added, removed, updated).
'''
if diff_key not in diff.keys():
diff[diff_key] = {}
diff[diff_key][key] = value
def _create_diff(diff, fun, key, prev, curr):
    '''
    Record in *diff* how *key* changed between the existing value *prev*
    and the expected value *curr*, using *fun* as the validity predicate
    (``_valid_str`` or ``_valid_dict``):

    - *prev* invalid                -> 'added' (with *curr*)
    - *prev* valid, *curr* invalid  -> 'removed' (with *prev*)
    - both valid (but different)    -> 'updated' (with *curr*)
    '''
    if not fun(prev):
        _create_diff_action(diff, 'added', key, curr)
    elif not fun(curr):
        _create_diff_action(diff, 'removed', key, prev)
    else:
        # Bug fix: the original third branch was ``elif not fun(curr)``,
        # which is unreachable after the branch above, so in-place value
        # changes were never reported as 'updated' (and never pushed).
        _create_diff_action(diff, 'updated', key, curr)
def _configure(changes):
'''
Calls the configuration template to apply the configuration changes on the device.
'''
cfgred = True
reasons = []
fun = 'update_config'
for key in ['added', 'updated', 'removed']:
_updated_changes = changes.get(key, {})
if not _updated_changes:
continue
_location = _updated_changes.get('location', '')
_contact = _updated_changes.get('contact', '')
_community = _updated_changes.get('community', {})
_chassis_id = _updated_changes.get('chassis_id', '')
if key == 'removed':
fun = 'remove_config'
_ret = __salt__['snmp.{fun}'.format(fun=fun)](location=_location,
contact=_contact,
community=_community,
chassis_id=_chassis_id,
commit=False)
cfgred = cfgred and _ret.get('result')
if not _ret.get('result') and _ret.get('comment'):
reasons.append(_ret.get('comment'))
return {
'result': cfgred,
'comment': '\n'.join(reasons) if reasons else ''
}
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, config=None, defaults=None):
'''
Configures the SNMP on the device as specified in the SLS file.
SLS Example:
.. code-block:: yaml
snmp_example:
netsnmp.managed:
- config:
location: Honolulu, HI, US
- defaults:
contact: noc@cloudflare.com
Output example (for the SLS above, e.g. called snmp.sls under /router/):
.. code-block:: bash
$ sudo salt edge01.hnl01 state.sls router.snmp test=True
edge01.hnl01:
----------
ID: snmp_example
Function: snmp.managed
Result: None
Comment: Testing mode: configuration was not changed!
Started: 13:29:06.872363
Duration: 920.466 ms
Changes:
----------
added:
----------
chassis_id:
None
contact:
noc@cloudflare.com
location:
Honolulu, HI, US
Summary for edge01.hnl01
------------
Succeeded: 1 (unchanged=1, changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 920.466 ms
'''
result = False
comment = ''
changes = {}
ret = {
'name': name,
'changes': changes,
'result': result,
'comment': comment
}
# make sure we're working only with dict
config = _ordered_dict_to_dict(config)
defaults = _ordered_dict_to_dict(defaults)
expected_config = _expand_config(config, defaults)
if not isinstance(expected_config, dict):
ret['comment'] = 'User provided an empty SNMP config!'
return ret
valid, message = _check_config(expected_config)
if not valid: # check and clean
ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
return ret
# ----- Retrieve existing users configuration and determine differences ------------------------------------------->
_device_config = _retrieve_device_config()
if not _device_config.get('result'):
ret['comment'] = 'Cannot retrieve SNMP config from the device: {reason}'.format(
reason=_device_config.get('comment')
)
return ret
device_config = _device_config.get('out', {})
if device_config == expected_config:
ret.update({
'comment': 'SNMP already configured as needed.',
'result': True
})
return ret
diff = _compute_diff(device_config, expected_config)
changes.update(diff)
ret.update({
'changes': changes
})
if __opts__['test'] is True:
ret.update({
'result': None,
'comment': 'Testing mode: configuration was not changed!'
})
return ret
# <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
# ----- Call _set_users and _delete_users as needed ------------------------------------------------------->
expected_config_change = False
result = True
if diff:
_configured = _configure(diff)
if _configured.get('result'):
expected_config_change = True
else: # something went wrong...
result = False
comment = 'Cannot push new SNMP config: \n{reason}'.format(
reason=_configured.get('comment')
) + comment
if expected_config_change:
result, comment = __salt__['net.config_control']()
# <---- Call _set_users and _delete_users as needed --------------------------------------------------------
ret.update({
'result': result,
'comment': comment
})
return ret
|
saltstack/salt
|
salt/states/netsnmp.py
|
_configure
|
python
|
def _configure(changes):
'''
Calls the configuration template to apply the configuration changes on the device.
'''
cfgred = True
reasons = []
fun = 'update_config'
for key in ['added', 'updated', 'removed']:
_updated_changes = changes.get(key, {})
if not _updated_changes:
continue
_location = _updated_changes.get('location', '')
_contact = _updated_changes.get('contact', '')
_community = _updated_changes.get('community', {})
_chassis_id = _updated_changes.get('chassis_id', '')
if key == 'removed':
fun = 'remove_config'
_ret = __salt__['snmp.{fun}'.format(fun=fun)](location=_location,
contact=_contact,
community=_community,
chassis_id=_chassis_id,
commit=False)
cfgred = cfgred and _ret.get('result')
if not _ret.get('result') and _ret.get('comment'):
reasons.append(_ret.get('comment'))
return {
'result': cfgred,
'comment': '\n'.join(reasons) if reasons else ''
}
|
Calls the configuration template to apply the configuration changes on the device.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netsnmp.py#L253-L285
| null |
# -*- coding: utf-8 -*-
'''
Network SNMP
============
Manage the SNMP configuration on network devices.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm snmp management module (salt.modules.napalm_snmp) <salt.modules.napalm_snmp>`
.. versionadded: 2016.11.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# salt lib
from salt.utils.json import loads, dumps
from salt.ext import six
# import NAPALM utils
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netsnmp'
_COMMUNITY_MODE_MAP = {
'read-only': 'ro',
'readonly': 'ro',
'read-write': 'rw',
'write': 'rw'
}
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
'''
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _ordered_dict_to_dict(config):
'''
Forced the datatype to dict, in case OrderedDict is used.
'''
return loads(dumps(config))
def _expand_config(config, defaults):
'''
Completed the values of the expected config for the edge cases with the default values.
'''
defaults.update(config)
return defaults
def _valid_dict(dic):
'''
Valid dictionary?
'''
return isinstance(dic, dict) and len(dic) > 0
def _valid_str(value):
'''
Valid str?
'''
return isinstance(value, six.string_types) and len(value) > 0
def _community_defaults():
'''
Returns the default values of a community.
'''
return {
'mode': 'ro'
}
def _clear_community_details(community_details):
    '''
    Normalise the details of a single SNMP community.

    Drops invalid ``acl``/``mode`` entries, lowercases the mode, maps
    long-form mode names (e.g. ``read-only``) to their short form via
    ``_COMMUNITY_MODE_MAP`` and falls back to read-only (``ro``) for
    anything unrecognised. Mutates and returns *community_details*.
    '''
    for key in ['acl', 'mode']:
        # keep only valid, non-empty string values
        _str_elem(community_details, key)
    # Bug fix: the original assigned through ``community_details.get['mode']``
    # (item assignment on the bound ``dict.get`` method), which raises
    # TypeError at runtime. Also default to '' because _str_elem above pops
    # an invalid/missing 'mode', so ``.get('mode')`` could return None.
    _mode = community_details.get('mode', '').lower()
    community_details['mode'] = _mode
    if _mode in _COMMUNITY_MODE_MAP:
        community_details['mode'] = _COMMUNITY_MODE_MAP[_mode]
    if community_details['mode'] not in ['ro', 'rw']:
        community_details['mode'] = 'ro'  # default is read-only
    return community_details
def _str_elem(config, key):
'''
Re-adds the value of a specific key in the dict, only in case of valid str value.
'''
_value = config.pop(key, '')
if _valid_str(_value):
config[key] = _value
def _check_config(config):
'''
Checks the desired config and clears interesting details.
'''
if not _valid_dict(config):
return True, ''
_community = config.get('community')
_community_tmp = {}
if not _community:
return False, 'Must specify at least a community.'
if _valid_str(_community):
_community_tmp[_community] = _community_defaults()
elif isinstance(_community, list):
# if the user specifies the communities as list
for _comm in _community:
if _valid_str(_comm):
# list of values
_community_tmp[_comm] = _community_defaults()
# default mode is read-only
if _valid_dict(_comm):
# list of dicts
for _comm_name, _comm_details in six.iteritems(_comm):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
elif _valid_dict(_community):
# directly as dict of communities
# recommended way...
for _comm_name, _comm_details in six.iteritems(_community):
if _valid_str(_comm_name):
_community_tmp[_comm_name] = _clear_community_details(_comm_details)
else:
return False, 'Please specify a community or a list of communities.'
if not _valid_dict(_community_tmp):
return False, 'Please specify at least a valid community!'
config['community'] = _community_tmp
for key in ['location', 'contact', 'chassis_id']:
# not mandatory, but should be here only if valid
_str_elem(config, key)
return True, ''
def _retrieve_device_config():
    '''
    Fetch the SNMP configuration currently running on the device.

    Delegates to the ``snmp.config`` execution function of the (proxy)
    minion; callers read the ``result``/``out``/``comment`` fields of the
    returned dict.
    '''
    snmp_config_fun = __salt__['snmp.config']
    return snmp_config_fun()
def _create_diff_action(diff, diff_key, key, value):
'''
DRY to build diff parts (added, removed, updated).
'''
if diff_key not in diff.keys():
diff[diff_key] = {}
diff[diff_key][key] = value
def _create_diff(diff, fun, key, prev, curr):
    '''
    Classify the change of ``key`` between the previous (device) value
    and the current (expected) value, and record it in ``diff``.

    ``fun`` is the validity predicate matching the value type
    (``_valid_str`` or ``_valid_dict``).

    - previous invalid                 -> ``added`` (new value)
    - previous valid, current invalid  -> ``removed`` (old value)
    - both valid                       -> ``updated`` (caller only invokes
      this helper when the two values actually differ)
    '''
    if not fun(prev):
        _create_diff_action(diff, 'added', key, curr)
    elif not fun(curr):
        _create_diff_action(diff, 'removed', key, prev)
    else:
        # Bugfix: the original third branch re-tested ``not fun(curr)``
        # and was therefore unreachable, so valid-to-valid changes were
        # never reported as ``updated`` (and never pushed by _configure).
        _create_diff_action(diff, 'updated', key, curr)
def _compute_diff(existing, expected):
    '''
    Compute the differences between the existing (device) and the
    expected (user-requested) SNMP config.

    :param existing: SNMP config currently on the device
    :param expected: SNMP config built from the SLS data
    :return: dict with optional ``added``/``removed``/``updated`` buckets
    '''
    diff = {}
    # plain string-valued fields
    for key in ['location', 'contact', 'chassis_id']:
        if existing.get(key) != expected.get(key):
            _create_diff(diff,
                         _valid_str,
                         key,
                         existing.get(key),
                         expected.get(key))
    for key in ['community']:  # for the moment only one dict-valued field
        if existing.get(key) != expected.get(key):
            _create_diff(diff,
                         _valid_dict,
                         key,
                         existing.get(key),
                         expected.get(key))
    return diff
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, config=None, defaults=None):
    '''
    Configures the SNMP on the device as specified in the SLS file.

    :param name: state identifier (not used for the configuration itself)
    :param config: desired SNMP configuration for this device
    :param defaults: default values, overridden by ``config`` entries

    SLS Example:

    .. code-block:: yaml

        snmp_example:
            netsnmp.managed:
                - config:
                    location: Honolulu, HI, US
                - defaults:
                    contact: noc@cloudflare.com
    '''
    result = False
    comment = ''
    changes = {}
    ret = {
        'name': name,
        'changes': changes,
        'result': result,
        'comment': comment
    }
    # make sure we're working only with dict (SLS rendering may hand us
    # OrderedDicts)
    config = _ordered_dict_to_dict(config)
    defaults = _ordered_dict_to_dict(defaults)
    # overlay the explicit config on top of the defaults
    expected_config = _expand_config(config, defaults)
    # NOTE(review): if ``config`` or ``defaults`` is None, the JSON
    # round-trip above yields None and _expand_config raises before this
    # guard can fire -- confirm whether None inputs are expected here.
    if not isinstance(expected_config, dict):
        ret['comment'] = 'User provided an empty SNMP config!'
        return ret
    valid, message = _check_config(expected_config)
    if not valid:  # check and clean
        ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
        return ret
    # ----- Retrieve existing SNMP configuration and determine differences ----->
    _device_config = _retrieve_device_config()
    if not _device_config.get('result'):
        ret['comment'] = 'Cannot retrieve SNMP config from the device: {reason}'.format(
            reason=_device_config.get('comment')
        )
        return ret
    device_config = _device_config.get('out', {})
    if device_config == expected_config:
        # nothing to do: device already matches the desired state
        ret.update({
            'comment': 'SNMP already configured as needed.',
            'result': True
        })
        return ret
    diff = _compute_diff(device_config, expected_config)
    changes.update(diff)
    ret.update({
        'changes': changes
    })
    if __opts__['test'] is True:
        # dry-run: report the computed diff without touching the device
        ret.update({
            'result': None,
            'comment': 'Testing mode: configuration was not changed!'
        })
        return ret
    # <---- Retrieve existing SNMP configuration and determine differences -----
    # ----- Load the candidate config and commit it if anything was staged ----->
    expected_config_change = False
    result = True
    if diff:
        _configured = _configure(diff)
        if _configured.get('result'):
            expected_config_change = True
        else:  # something went wrong...
            result = False
            comment = 'Cannot push new SNMP config: \n{reason}'.format(
                reason=_configured.get('comment')
            ) + comment
    if expected_config_change:
        # commit the staged changes on the device
        result, comment = __salt__['net.config_control']()
    # <---- Load the candidate config and commit it if anything was staged -----
    ret.update({
        'result': result,
        'comment': comment
    })
    return ret
|
saltstack/salt
|
salt/states/netsnmp.py
|
managed
|
python
|
def managed(name, config=None, defaults=None):
'''
Configures the SNMP on the device as specified in the SLS file.
SLS Example:
.. code-block:: yaml
snmp_example:
netsnmp.managed:
- config:
location: Honolulu, HI, US
- defaults:
contact: noc@cloudflare.com
Output example (for the SLS above, e.g. called snmp.sls under /router/):
.. code-block:: bash
$ sudo salt edge01.hnl01 state.sls router.snmp test=True
edge01.hnl01:
----------
ID: snmp_example
Function: snmp.managed
Result: None
Comment: Testing mode: configuration was not changed!
Started: 13:29:06.872363
Duration: 920.466 ms
Changes:
----------
added:
----------
chassis_id:
None
contact:
noc@cloudflare.com
location:
Honolulu, HI, US
Summary for edge01.hnl01
------------
Succeeded: 1 (unchanged=1, changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 920.466 ms
'''
result = False
comment = ''
changes = {}
ret = {
'name': name,
'changes': changes,
'result': result,
'comment': comment
}
# make sure we're working only with dict
config = _ordered_dict_to_dict(config)
defaults = _ordered_dict_to_dict(defaults)
expected_config = _expand_config(config, defaults)
if not isinstance(expected_config, dict):
ret['comment'] = 'User provided an empty SNMP config!'
return ret
valid, message = _check_config(expected_config)
if not valid: # check and clean
ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
return ret
# ----- Retrieve existing users configuration and determine differences ------------------------------------------->
_device_config = _retrieve_device_config()
if not _device_config.get('result'):
ret['comment'] = 'Cannot retrieve SNMP config from the device: {reason}'.format(
reason=_device_config.get('comment')
)
return ret
device_config = _device_config.get('out', {})
if device_config == expected_config:
ret.update({
'comment': 'SNMP already configured as needed.',
'result': True
})
return ret
diff = _compute_diff(device_config, expected_config)
changes.update(diff)
ret.update({
'changes': changes
})
if __opts__['test'] is True:
ret.update({
'result': None,
'comment': 'Testing mode: configuration was not changed!'
})
return ret
# <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
# ----- Call _set_users and _delete_users as needed ------------------------------------------------------->
expected_config_change = False
result = True
if diff:
_configured = _configure(diff)
if _configured.get('result'):
expected_config_change = True
else: # something went wrong...
result = False
comment = 'Cannot push new SNMP config: \n{reason}'.format(
reason=_configured.get('comment')
) + comment
if expected_config_change:
result, comment = __salt__['net.config_control']()
# <---- Call _set_users and _delete_users as needed --------------------------------------------------------
ret.update({
'result': result,
'comment': comment
})
return ret
|
Configures the SNMP on the device as specified in the SLS file.
SLS Example:
.. code-block:: yaml
snmp_example:
netsnmp.managed:
- config:
location: Honolulu, HI, US
- defaults:
contact: noc@cloudflare.com
Output example (for the SLS above, e.g. called snmp.sls under /router/):
.. code-block:: bash
$ sudo salt edge01.hnl01 state.sls router.snmp test=True
edge01.hnl01:
----------
ID: snmp_example
Function: snmp.managed
Result: None
Comment: Testing mode: configuration was not changed!
Started: 13:29:06.872363
Duration: 920.466 ms
Changes:
----------
added:
----------
chassis_id:
None
contact:
noc@cloudflare.com
location:
Honolulu, HI, US
Summary for edge01.hnl01
------------
Succeeded: 1 (unchanged=1, changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 920.466 ms
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netsnmp.py#L292-L426
|
[
"def _ordered_dict_to_dict(config):\n\n '''\n Forced the datatype to dict, in case OrderedDict is used.\n '''\n\n return loads(dumps(config))\n",
"def _expand_config(config, defaults):\n\n '''\n Completed the values of the expected config for the edge cases with the default values.\n '''\n\n defaults.update(config)\n return defaults\n",
"def _check_config(config):\n\n '''\n Checks the desired config and clears interesting details.\n '''\n\n if not _valid_dict(config):\n return True, ''\n\n _community = config.get('community')\n _community_tmp = {}\n if not _community:\n return False, 'Must specify at least a community.'\n if _valid_str(_community):\n _community_tmp[_community] = _community_defaults()\n elif isinstance(_community, list):\n # if the user specifies the communities as list\n for _comm in _community:\n if _valid_str(_comm):\n # list of values\n _community_tmp[_comm] = _community_defaults()\n # default mode is read-only\n if _valid_dict(_comm):\n # list of dicts\n for _comm_name, _comm_details in six.iteritems(_comm):\n if _valid_str(_comm_name):\n _community_tmp[_comm_name] = _clear_community_details(_comm_details)\n elif _valid_dict(_community):\n # directly as dict of communities\n # recommended way...\n for _comm_name, _comm_details in six.iteritems(_community):\n if _valid_str(_comm_name):\n _community_tmp[_comm_name] = _clear_community_details(_comm_details)\n else:\n return False, 'Please specify a community or a list of communities.'\n\n if not _valid_dict(_community_tmp):\n return False, 'Please specify at least a valid community!'\n\n config['community'] = _community_tmp\n\n for key in ['location', 'contact', 'chassis_id']:\n # not mandatory, but should be here only if valid\n _str_elem(config, key)\n\n return True, ''\n",
"def _retrieve_device_config():\n\n '''\n Retrieves the SNMP config from the device.\n '''\n\n return __salt__['snmp.config']()\n",
"def _compute_diff(existing, expected):\n\n '''\n Computes the differences between the existing and the expected SNMP config.\n '''\n\n diff = {}\n\n for key in ['location', 'contact', 'chassis_id']:\n if existing.get(key) != expected.get(key):\n _create_diff(diff,\n _valid_str,\n key,\n existing.get(key),\n expected.get(key))\n\n for key in ['community']: # for the moment only onen\n if existing.get(key) != expected.get(key):\n _create_diff(diff,\n _valid_dict,\n key,\n existing.get(key),\n expected.get(key))\n\n return diff\n",
"def _configure(changes):\n\n '''\n Calls the configuration template to apply the configuration changes on the device.\n '''\n\n cfgred = True\n reasons = []\n fun = 'update_config'\n\n for key in ['added', 'updated', 'removed']:\n _updated_changes = changes.get(key, {})\n if not _updated_changes:\n continue\n _location = _updated_changes.get('location', '')\n _contact = _updated_changes.get('contact', '')\n _community = _updated_changes.get('community', {})\n _chassis_id = _updated_changes.get('chassis_id', '')\n if key == 'removed':\n fun = 'remove_config'\n _ret = __salt__['snmp.{fun}'.format(fun=fun)](location=_location,\n contact=_contact,\n community=_community,\n chassis_id=_chassis_id,\n commit=False)\n cfgred = cfgred and _ret.get('result')\n if not _ret.get('result') and _ret.get('comment'):\n reasons.append(_ret.get('comment'))\n\n return {\n 'result': cfgred,\n 'comment': '\\n'.join(reasons) if reasons else ''\n }\n"
] |
# -*- coding: utf-8 -*-
'''
Network SNMP
============
Manage the SNMP configuration on network devices.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm snmp management module (salt.modules.napalm_snmp) <salt.modules.napalm_snmp>`
.. versionadded: 2016.11.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# salt lib
from salt.utils.json import loads, dumps
from salt.ext import six
# import NAPALM utils
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netsnmp'
_COMMUNITY_MODE_MAP = {
'read-only': 'ro',
'readonly': 'ro',
'read-write': 'rw',
'write': 'rw'
}
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    NAPALM library must be installed for this module to work and run in a (proxy) minion.

    Delegates the availability decision to ``salt.utils.napalm.virtual``.
    '''
    return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _ordered_dict_to_dict(config):
    '''
    Coerce (possibly nested) OrderedDict structures into plain dicts.

    Implemented as a JSON round-trip, which normalises every nested
    container coming from the SLS rendering in one pass.
    '''
    serialised = dumps(config)
    return loads(serialised)
def _expand_config(config, defaults):
'''
Completed the values of the expected config for the edge cases with the default values.
'''
defaults.update(config)
return defaults
def _valid_dict(dic):
'''
Valid dictionary?
'''
return isinstance(dic, dict) and len(dic) > 0
def _valid_str(value):
    '''
    Return True only for non-empty string values.
    '''
    if not isinstance(value, six.string_types):
        return False
    return len(value) > 0
def _community_defaults():
'''
Returns the default values of a community.
'''
return {
'mode': 'ro'
}
def _clear_community_details(community_details):
    '''
    Normalise the details dict of a single SNMP community.

    Only valid string values are kept for the ``acl`` and ``mode`` keys;
    verbose mode names (e.g. ``read-only``) are mapped to their canonical
    short forms via ``_COMMUNITY_MODE_MAP`` and anything unrecognised
    falls back to ``ro`` (read-only).

    :param community_details: details of one community (mutated in place)
    :return: the same dict with a guaranteed valid ``mode`` key
    '''
    for key in ['acl', 'mode']:
        # drop these keys unless they hold a valid (non-empty) string
        _str_elem(community_details, key)
    # Bugfix: ``community_details.get['mode'] = ...`` subscripted the bound
    # method ``get`` (TypeError at runtime); also guard against ``mode``
    # having been dropped above, which would make ``.lower()`` blow up.
    _mode = community_details.get('mode', '').lower()
    community_details['mode'] = _mode
    if _mode in _COMMUNITY_MODE_MAP:
        community_details['mode'] = _COMMUNITY_MODE_MAP[_mode]
    if community_details['mode'] not in ['ro', 'rw']:
        community_details['mode'] = 'ro'  # default is read-only
    return community_details
def _str_elem(config, key):
    '''
    Drop ``config[key]`` unless its value is a valid (non-empty) string.
    '''
    candidate = config.pop(key, '')
    if not _valid_str(candidate):
        return
    config[key] = candidate
def _check_config(config):
    '''
    Validate and normalise the desired SNMP configuration.

    Communities may be provided as a single string, as a list of strings
    and/or dicts, or as a dict mapping community names to their details;
    all forms are normalised into ``config['community']`` as a
    ``{name: details}`` dict.  ``config`` is mutated in place.

    :param config: desired SNMP configuration
    :return: tuple ``(valid, error_message)``
    '''
    if not _valid_dict(config):
        # an empty/non-dict config is treated as trivially valid here;
        # the caller guards against empty configs separately
        return True, ''
    _community = config.get('community')
    _community_tmp = {}
    if not _community:
        return False, 'Must specify at least a community.'
    if _valid_str(_community):
        # single community given as a plain string -> defaults apply
        _community_tmp[_community] = _community_defaults()
    elif isinstance(_community, list):
        # if the user specifies the communities as list
        for _comm in _community:
            if _valid_str(_comm):
                # list of values
                _community_tmp[_comm] = _community_defaults()
                # default mode is read-only
            if _valid_dict(_comm):
                # list of dicts
                for _comm_name, _comm_details in six.iteritems(_comm):
                    if _valid_str(_comm_name):
                        _community_tmp[_comm_name] = _clear_community_details(_comm_details)
    elif _valid_dict(_community):
        # directly as dict of communities
        # recommended way...
        for _comm_name, _comm_details in six.iteritems(_community):
            if _valid_str(_comm_name):
                _community_tmp[_comm_name] = _clear_community_details(_comm_details)
    else:
        return False, 'Please specify a community or a list of communities.'
    if not _valid_dict(_community_tmp):
        return False, 'Please specify at least a valid community!'
    config['community'] = _community_tmp
    for key in ['location', 'contact', 'chassis_id']:
        # not mandatory, but should be here only if valid
        _str_elem(config, key)
    return True, ''
def _retrieve_device_config():
    '''
    Retrieves the SNMP config from the device.

    Returns the dict produced by the ``snmp.config`` execution function;
    callers read its ``result``, ``out`` and ``comment`` fields.
    '''
    return __salt__['snmp.config']()
def _create_diff_action(diff, diff_key, key, value):
'''
DRY to build diff parts (added, removed, updated).
'''
if diff_key not in diff.keys():
diff[diff_key] = {}
diff[diff_key][key] = value
def _create_diff(diff, fun, key, prev, curr):
    '''
    Classify the change of ``key`` between the previous (device) value
    and the current (expected) value, and record it in ``diff``.

    ``fun`` is the validity predicate matching the value type
    (``_valid_str`` or ``_valid_dict``).

    - previous invalid                 -> ``added`` (new value)
    - previous valid, current invalid  -> ``removed`` (old value)
    - both valid                       -> ``updated`` (caller only invokes
      this helper when the two values actually differ)
    '''
    if not fun(prev):
        _create_diff_action(diff, 'added', key, curr)
    elif not fun(curr):
        _create_diff_action(diff, 'removed', key, prev)
    else:
        # Bugfix: the original third branch re-tested ``not fun(curr)``
        # and was therefore unreachable, so valid-to-valid changes were
        # never reported as ``updated`` (and never pushed by _configure).
        _create_diff_action(diff, 'updated', key, curr)
def _compute_diff(existing, expected):
    '''
    Compute the differences between the existing (device) and the
    expected (user-requested) SNMP config.

    :param existing: SNMP config currently on the device
    :param expected: SNMP config built from the SLS data
    :return: dict with optional ``added``/``removed``/``updated`` buckets
    '''
    diff = {}
    # plain string-valued fields
    for key in ['location', 'contact', 'chassis_id']:
        if existing.get(key) != expected.get(key):
            _create_diff(diff,
                         _valid_str,
                         key,
                         existing.get(key),
                         expected.get(key))
    for key in ['community']:  # for the moment only one dict-valued field
        if existing.get(key) != expected.get(key):
            _create_diff(diff,
                         _valid_dict,
                         key,
                         existing.get(key),
                         expected.get(key))
    return diff
def _configure(changes):
    '''
    Apply the computed configuration changes on the device.

    Iterates the ``added``/``updated``/``removed`` buckets of ``changes``
    and calls ``snmp.update_config`` (or ``snmp.remove_config`` for the
    ``removed`` bucket) with ``commit=False``; the actual commit is done
    later by the caller via ``net.config_control``.

    :param changes: diff dict as produced by ``_compute_diff``
    :return: dict with ``result`` (bool) and ``comment`` (joined errors)
    '''
    cfgred = True
    reasons = []
    fun = 'update_config'
    for key in ['added', 'updated', 'removed']:
        _updated_changes = changes.get(key, {})
        if not _updated_changes:
            continue
        _location = _updated_changes.get('location', '')
        _contact = _updated_changes.get('contact', '')
        _community = _updated_changes.get('community', {})
        _chassis_id = _updated_changes.get('chassis_id', '')
        if key == 'removed':
            # removals go through a different execution function
            fun = 'remove_config'
        _ret = __salt__['snmp.{fun}'.format(fun=fun)](location=_location,
                                                      contact=_contact,
                                                      community=_community,
                                                      chassis_id=_chassis_id,
                                                      commit=False)
        # a single failed call marks the whole operation as failed
        cfgred = cfgred and _ret.get('result')
        if not _ret.get('result') and _ret.get('comment'):
            reasons.append(_ret.get('comment'))
    return {
        'result': cfgred,
        'comment': '\n'.join(reasons) if reasons else ''
    }
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
|
saltstack/salt
|
salt/states/pip_state.py
|
_fulfills_version_spec
|
python
|
def _fulfills_version_spec(version, version_spec):
'''
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
'''
for oper, spec in version_spec:
if oper is None:
continue
if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec, cmp_func=_pep440_version_cmp):
return False
return True
|
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L92-L103
| null |
# -*- coding: utf-8 -*-
'''
Installation of Python Packages Using pip
=========================================
These states manage system installed python packages. Note that pip must be
installed for these states to be available, so pip states should include a
requisite to a pkg.installed state for the package which provides pip
(``python-pip`` in most cases). Example:
.. code-block:: yaml
python-pip:
pkg.installed
virtualenvwrapper:
pip.installed:
- require:
- pkg: python-pip
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
# Import salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import pip
HAS_PIP = True
except ImportError:
HAS_PIP = False
if HAS_PIP is True:
try:
from pip.req import InstallRequirement
_from_line = InstallRequirement.from_line
except ImportError:
# pip 10.0.0 move req module under pip._internal
try:
try:
from pip._internal.req import InstallRequirement
_from_line = InstallRequirement.from_line
except AttributeError:
from pip._internal.req.constructors import install_req_from_line as _from_line
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
try:
from pip.exceptions import InstallationError
except ImportError:
InstallationError = ValueError
# pylint: enable=import-error
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pip'
def __virtual__():
    '''
    Only load if the pip module is available in __salt__ and the
    ``pkg_resources`` library could be imported.
    '''
    if not HAS_PKG_RESOURCES:
        return False, 'The pkg_resources python library is not installed'
    return __virtualname__ if 'pip.list' in __salt__ else False
def _check_pkg_version_format(pkg):
    '''
    Takes a package name and version specification (if any) and checks it using
    the pip library.

    :param pkg: requirement string, e.g. ``django >= 1.6, <= 1.7`` or a
        vcs+URL requirement
    :return: dict with ``result`` (bool), ``comment`` (error text or None),
        ``prefix`` (project name, '' for bare URLs) and ``version_spec``
        (list of ``(operator, version)`` tuples)
    '''
    ret = {'result': False, 'comment': None,
           'prefix': None, 'version_spec': None}
    if not HAS_PIP:
        ret['comment'] = (
            'An importable Python 2 pip module is required but could not be '
            'found on your system. This usually means that the system\'s pip '
            'package is not installed properly.'
        )
        return ret
    from_vcs = False
    try:
        # Get the requirement object from the pip library
        try:
            # With pip < 1.2, the __version__ attribute does not exist and
            # vcs+URL urls are not properly parsed.
            # The next line is meant to trigger an AttributeError and
            # handle lower pip versions
            # NOTE(review): the module-level code deletes the ``pip`` name
            # after import, so this attribute access looks like it would
            # raise NameError (not AttributeError) -- confirm.
            log.debug('Installed pip version: %s', pip.__version__)
            install_req = _from_line(pkg)
        except AttributeError:
            log.debug('Installed pip version is lower than 1.2')
            supported_vcs = ('git', 'svn', 'hg', 'bzr')
            if pkg.startswith(supported_vcs):
                # strip the "<vcs>+" prefix before handing it to pip
                for vcs in supported_vcs:
                    if pkg.startswith(vcs):
                        from_vcs = True
                        install_req = _from_line(
                            pkg.split('{0}+'.format(vcs))[-1]
                        )
                        break
            else:
                install_req = _from_line(pkg)
    except (ValueError, InstallationError) as exc:
        ret['result'] = False
        if not from_vcs and '=' in pkg and '==' not in pkg:
            # common user mistake: single '=' instead of '=='
            ret['comment'] = (
                'Invalid version specification in package {0}. \'=\' is '
                'not supported, use \'==\' instead.'.format(pkg)
            )
            return ret
        ret['comment'] = (
            'pip raised an exception while parsing \'{0}\': {1}'.format(
                pkg, exc
            )
        )
        return ret
    if install_req.req is None:
        # This is most likely an url and there's no way to know what will
        # be installed before actually installing it.
        ret['result'] = True
        ret['prefix'] = ''
        ret['version_spec'] = []
    else:
        ret['result'] = True
        try:
            # older pip: requirement object exposes project_name/specs
            ret['prefix'] = install_req.req.project_name
            ret['version_spec'] = install_req.req.specs
        except Exception:
            # newer pip: derive the name and specifier set instead
            ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
            if hasattr(install_req, "specifier"):
                specifier = install_req.specifier
            else:
                specifier = install_req.req.specifier
            ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]
    return ret
def _check_if_installed(prefix,
                        state_pkg_name,
                        version_spec,
                        ignore_installed,
                        force_reinstall,
                        upgrade,
                        user,
                        cwd,
                        bin_env,
                        env_vars,
                        index_url,
                        extra_index_url,
                        pip_list=False,
                        **kwargs):
    '''
    Takes a package name and version specification (if any) and checks it is
    installed

    Keyword arguments include:
        pip_list: optional dict of installed pip packages, and their versions,
            to search through to check if the package is installed. If not
            provided, one will be generated in this function by querying the
            system.

    Returns:
        result: None means the command failed to run
        result: True means the package is installed
        result: False means the package is not installed
    '''
    ret = {'result': False, 'comment': None}
    # If we are not passed a pip list, get one:
    # (case-insensitive lookup, since pip normalises project names)
    pip_list = salt.utils.data.CaseInsensitiveDict(
        pip_list or __salt__['pip.list'](prefix, bin_env=bin_env,
                                         user=user, cwd=cwd,
                                         env_vars=env_vars, **kwargs)
    )
    # If the package was already installed, check
    # the ignore_installed and force_reinstall flags
    if ignore_installed is False and prefix in pip_list:
        if force_reinstall is False and not upgrade:
            # Check desired version (if any) against currently-installed
            if (
                    any(version_spec) and
                    _fulfills_version_spec(pip_list[prefix], version_spec)
            ) or (not any(version_spec)):
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
        if force_reinstall is False and upgrade:
            # Check desired version (if any) against currently-installed
            # Pre-releases are only considered if the version spec
            # explicitly mentions an alpha/beta/rc segment.
            include_alpha = False
            include_beta = False
            include_rc = False
            if any(version_spec):
                for spec in version_spec:
                    if 'a' in spec[1]:
                        include_alpha = True
                    if 'b' in spec[1]:
                        include_beta = True
                    if 'rc' in spec[1]:
                        include_rc = True
            available_versions = __salt__['pip.list_all_versions'](
                prefix, bin_env=bin_env, include_alpha=include_alpha,
                include_beta=include_beta, include_rc=include_rc, user=user,
                cwd=cwd, index_url=index_url, extra_index_url=extra_index_url)
            desired_version = ''
            if any(version_spec):
                # pick the highest available version satisfying the spec
                for version in reversed(available_versions):
                    if _fulfills_version_spec(version, version_spec):
                        desired_version = version
                        break
            else:
                # no spec given: upgrade target is simply the latest
                desired_version = available_versions[-1]
            if not desired_version:
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed and\nthe available upgrade '
                                  'doesn\'t fulfills the version '
                                  'requirements'.format(prefix))
                return ret
            if _pep440_version_cmp(pip_list[prefix], desired_version) == 0:
                # already at the upgrade target
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
    return ret
def _pep440_version_cmp(pkg1, pkg2, ignore_epoch=False):
    '''
    Compare two version strings using pkg_resources.parse_version.

    Return -1 if pkg1 < pkg2, 0 if the two versions are equal and 1 if
    pkg1 > pkg2.  Return None if the comparison could not be performed.
    '''
    def _normalize(version):
        # Optionally strip a PEP 440 epoch prefix ("N!").
        text = six.text_type(version)
        if ignore_epoch:
            return text.split('!', 1)[-1]
        return text

    pkg1 = _normalize(pkg1)
    pkg2 = _normalize(pkg2)
    try:
        parsed1 = pkg_resources.parse_version(pkg1)
        parsed2 = pkg_resources.parse_version(pkg2)
        if parsed1 < parsed2:
            return -1
        if parsed1 == parsed2:
            return 0
        if parsed1 > parsed2:
            return 1
    except Exception as exc:  # parse/compare may raise on garbage input
        log.exception(exc)
    return None
def installed(name,
pkgs=None,
pip_bin=None,
requirements=None,
bin_env=None,
use_wheel=False,
no_use_wheel=False,
log=None,
proxy=None,
timeout=None,
repo=None,
editable=None,
find_links=None,
index_url=None,
extra_index_url=None,
no_index=False,
mirrors=None,
build=None,
target=None,
download=None,
download_cache=None,
source=None,
upgrade=False,
force_reinstall=False,
ignore_installed=False,
exists_action=None,
no_deps=False,
no_install=False,
no_download=False,
install_options=None,
global_options=None,
user=None,
cwd=None,
pre_releases=False,
cert=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
process_dependency_links=False,
env_vars=None,
use_vt=False,
trusted_host=None,
no_cache_dir=False,
cache_dir=None,
no_binary=None,
extra_args=None,
**kwargs):
'''
Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_cache_dir:
Disable the cache.
cwd
Current working directory to run pip from
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
extra_args
pip keyword and positional arguments not yet implemented in salt
.. code-block:: yaml
pandas:
pip.installed:
- name: pandas
- extra_args:
- --latest-pip-kwarg: param
- --latest-pip-arg
.. warning::
If unsupported options are passed here that are not supported in a
minion's version of pip, a `No such option error` will be thrown.
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
'''
if pip_bin and not bin_env:
bin_env = pip_bin
# If pkgs is present, ignore name
if pkgs:
if not isinstance(pkgs, list):
return {'name': name,
'result': False,
'changes': {},
'comment': 'pkgs argument must be formatted as a list'}
else:
pkgs = [name]
# Assumption: If `pkg` is not an `string`, it's a `collections.OrderedDict`
# prepro = lambda pkg: pkg if type(pkg) == str else \
# ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';')))
# pkgs = ','.join([prepro(pkg) for pkg in pkgs])
prepro = lambda pkg: pkg if isinstance(pkg, six.string_types) else \
' '.join((six.iteritems(pkg)[0][0], six.iteritems(pkg)[0][1]))
pkgs = [prepro(pkg) for pkg in pkgs]
ret = {'name': ';'.join(pkgs), 'result': None,
'comment': '', 'changes': {}}
try:
cur_version = __salt__['pip.version'](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Get the packages parsed name and version from the pip library.
# This only is done when there is no requirements or editable parameter.
pkgs_details = []
if pkgs and not (requirements or editable):
comments = []
for pkg in iter(pkgs):
out = _check_pkg_version_format(pkg)
if out['result'] is False:
ret['result'] = False
comments.append(out['comment'])
elif out['result'] is True:
pkgs_details.append((out['prefix'], pkg, out['version_spec']))
if ret['result'] is False:
ret['comment'] = '\n'.join(comments)
return ret
# If a requirements file is specified, only install the contents of the
# requirements file. Similarly, using the --editable flag with pip should
# also ignore the "name" and "pkgs" parameters.
target_pkgs = []
already_installed_comments = []
if requirements or editable:
comments = []
# Append comments if this is a dry run.
if __opts__['test']:
ret['result'] = None
if requirements:
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append('Requirements file \'{0}\' will be '
'processed.'.format(requirements))
if editable:
comments.append(
'Package will be installed in editable mode (i.e. '
'setuptools "develop mode") from {0}.'.format(editable)
)
ret['comment'] = ' '.join(comments)
return ret
# No requirements case.
# Check pre-existence of the requested packages.
else:
# Attempt to pre-cache a the current pip list
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
# If we fail, then just send False, and we'll try again in the next function call
except Exception as exc:
log.exception(exc)
pip_list = False
for prefix, state_pkg_name, version_spec in pkgs_details:
if prefix:
state_pkg_name = state_pkg_name
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, env_vars,
index_url, extra_index_url, pip_list,
**kwargs)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
ret['result'] = False
ret['comment'] = out['comment']
return ret
else:
out = {'result': False, 'comment': None}
result = out['result']
# The package is not present. Add it to the pkgs to install.
if result is False:
# Replace commas (used for version ranges) with semicolons
# (which are not supported) in name so it does not treat
# them as multiple packages.
target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))
# Append comments if this is a dry run.
if __opts__['test']:
msg = 'Python package {0} is set to be installed'
ret['result'] = None
ret['comment'] = msg.format(state_pkg_name)
return ret
# The package is already present and will not be reinstalled.
elif result is True:
# Append comment stating its presence
already_installed_comments.append(out['comment'])
# The command pip.list failed. Abort.
elif result is None:
ret['result'] = None
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
# Call to install the package. Actual installation takes place here
pip_install_call = __salt__['pip.install'](
pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
log=log,
proxy=proxy,
timeout=timeout,
editable=editable,
find_links=find_links,
index_url=index_url,
extra_index_url=extra_index_url,
no_index=no_index,
mirrors=mirrors,
build=build,
target=target,
download=download,
download_cache=download_cache,
source=source,
upgrade=upgrade,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
exists_action=exists_action,
no_deps=no_deps,
no_install=no_install,
no_download=no_download,
install_options=install_options,
global_options=global_options,
user=user,
cwd=cwd,
pre_releases=pre_releases,
cert=cert,
allow_all_external=allow_all_external,
allow_external=allow_external,
allow_unverified=allow_unverified,
process_dependency_links=process_dependency_links,
saltenv=__env__,
env_vars=env_vars,
use_vt=use_vt,
trusted_host=trusted_host,
no_cache_dir=no_cache_dir,
extra_args=extra_args,
**kwargs
)
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
comments = []
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',
]
for line in pip_install_call.get('stdout', '').split('\n'):
if not any(
[
line.strip().startswith(x)
for x in PIP_REQUIREMENTS_NOCHANGE
]
):
ret['changes']['requirements'] = True
if ret['changes'].get('requirements'):
comments.append('Successfully processed requirements file '
'{0}.'.format(requirements))
else:
comments.append('Requirements were already installed.')
if editable:
comments.append('Package successfully installed from VCS '
'checkout {0}.'.format(editable))
ret['changes']['editable'] = True
ret['comment'] = ' '.join(comments)
else:
# Check that the packages set to be installed were installed.
# Create comments reporting success and failures
pkg_404_comms = []
already_installed_packages = set()
for line in pip_install_call.get('stdout', '').split('\n'):
# Output for already installed packages:
# 'Requirement already up-to-date: jinja2 in /usr/local/lib/python2.7/dist-packages\nCleaning up...'
if line.startswith('Requirement already up-to-date: '):
package = line.split(':', 1)[1].split()[0]
already_installed_packages.add(package.lower())
for prefix, state_name in target_pkgs:
# Case for packages that are not an URL
if prefix:
pipsearch = salt.utils.data.CaseInsensitiveDict(
__salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd,
env_vars=env_vars,
**kwargs)
)
# If we didn't find the package in the system after
# installing it report it
if not pipsearch:
pkg_404_comms.append(
'There was no error installing package \'{0}\' '
'although it does not show when calling '
'\'pip.freeze\'.'.format(pkg)
)
else:
if prefix in pipsearch \
and prefix.lower() not in already_installed_packages:
ver = pipsearch[prefix]
ret['changes']['{0}=={1}'.format(prefix, ver)] = 'Installed'
# Case for packages that are an URL
else:
ret['changes']['{0}==???'.format(state_name)] = 'Installed'
# Set comments
aicomms = '\n'.join(already_installed_comments)
succ_comm = 'All packages were successfully installed'\
if not pkg_404_comms else '\n'.join(pkg_404_comms)
ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm
return ret
elif pip_install_call:
ret['result'] = False
if 'stdout' in pip_install_call:
error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
pip_install_call['stderr'])
else:
error = 'Error: {0}'.format(pip_install_call['comment'])
if requirements or editable:
comments = []
if requirements:
comments.append('Unable to process requirements file '
'"{0}".'.format(requirements))
if editable:
comments.append('Unable to install from VCS checkout'
'{0}.'.format(editable))
comments.append(error)
ret['comment'] = ' '.join(comments)
else:
pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
aicomms = '\n'.join(already_installed_comments)
error_comm = ('Failed to install packages: {0}. '
'{1}'.format(pkgs_str, error))
ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
else:
ret['result'] = False
ret['comment'] = 'Could not install package'
return ret
def removed(name,
            requirements=None,
            bin_env=None,
            log=None,
            proxy=None,
            timeout=None,
            user=None,
            cwd=None,
            use_vt=False):
    '''
    Make sure that a package is not installed.

    name
        The name of the package to uninstall

    user
        The user under which to run pip

    bin_env : None
        the pip executable or virtualenv to use

    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    # Failing to even list the installed packages is a hard error for this
    # state -- pip itself is unusable.
    try:
        installed_pkgs = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret['result'] = False
        ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
        return ret

    # Already absent: nothing to do.
    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package is not installed.'
        return ret

    # Dry run: report the pending removal without touching anything.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Package {0} is set to be removed'.format(name)
        return ret

    uninstalled = __salt__['pip.uninstall'](pkgs=name,
                                            requirements=requirements,
                                            bin_env=bin_env,
                                            log=log,
                                            proxy=proxy,
                                            timeout=timeout,
                                            user=user,
                                            cwd=cwd,
                                            use_vt=use_vt)
    if uninstalled:
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package was successfully removed.'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not remove package.'
    return ret
def uptodate(name,
             bin_env=None,
             user=None,
             cwd=None,
             use_vt=False):
    '''
    .. versionadded:: 2015.5.0

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference

    user
        The user under which to run pip

    bin_env
        the pip executable or virtualenv to use

    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update.'}

    # Find out what can be upgraded; surface any error text as the comment.
    try:
        upgradable = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd)
    except Exception as e:
        ret['comment'] = six.text_type(e)
        return ret

    # Nothing upgradable: the system is already current.
    if not upgradable:
        ret['comment'] = 'System is already up-to-date.'
        ret['result'] = True
        return ret

    # Dry run: just announce that an update would happen.
    if __opts__['test']:
        ret['comment'] = 'System update will be performed'
        ret['result'] = None
        return ret

    upgrade_result = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt)
    if upgrade_result.get('result') is False:
        # Propagate the failure details from the execution module verbatim.
        ret.update(upgrade_result)
    elif upgrade_result:
        ret['changes'] = upgrade_result
        ret['comment'] = 'Upgrade successful.'
        ret['result'] = True
    else:
        ret['comment'] = 'Upgrade failed.'
    return ret
def mod_aggregate(low, chunks, running):
    '''
    Fold every compatible pip chunk found in the low chunks into a single
    ``pkgs`` reference on the present low data, so one pip invocation can
    handle all of them.
    '''
    # Aggregation only makes sense for these state functions.
    agg_enabled = ('installed', 'removed')
    if low.get('fun') not in agg_enabled:
        return low

    aggregated = []
    pkg_type = None
    for chunk in chunks:
        # Skip chunks whose state already ran this cycle.
        if __utils__['state.gen_tag'](chunk) in running:
            continue
        if chunk.get('state') != 'pip':
            continue
        # Skip chunks already folded into an earlier aggregation.
        if '__agg__' in chunk:
            continue
        # Only merge chunks that call the same state function.
        if chunk.get('fun') != low.get('fun'):
            continue
        # Check first if 'sources' was passed so we don't aggregate pkgs
        # and sources together.
        if pkg_type is None:
            pkg_type = 'pkgs'
        if pkg_type == 'pkgs':
            # Pull out the package names (with version if one was given).
            if 'pkgs' in chunk:
                aggregated.extend(chunk['pkgs'])
                chunk['__agg__'] = True
            elif 'name' in chunk:
                version = chunk.pop('version', None)
                if version is not None:
                    aggregated.append({chunk['name']: version})
                else:
                    aggregated.append(chunk['name'])
                chunk['__agg__'] = True

    if pkg_type is not None and aggregated:
        if pkg_type in low:
            low[pkg_type].extend(aggregated)
        else:
            low[pkg_type] = aggregated
    return low
|
saltstack/salt
|
salt/states/pip_state.py
|
_check_pkg_version_format
|
python
|
def _check_pkg_version_format(pkg):
    '''
    Takes a package name and version specification (if any) and checks it using
    the pip library.

    Returns a dict with:
        result: True if the spec parsed cleanly, False otherwise
        comment: error text when result is False
        prefix: the parsed project name ('' when ``pkg`` is a URL)
        version_spec: list of (operator, version) tuples
    '''
    ret = {'result': False, 'comment': None,
           'prefix': None, 'version_spec': None}
    # Without an importable pip module we cannot parse the requirement at all.
    if not HAS_PIP:
        ret['comment'] = (
            'An importable Python 2 pip module is required but could not be '
            'found on your system. This usually means that the system\'s pip '
            'package is not installed properly.'
        )
        return ret
    from_vcs = False
    try:
        # Get the requirement object from the pip library
        try:
            # With pip < 1.2, the __version__ attribute does not exist and
            # vcs+URL urls are not properly parsed.
            # The next line is meant to trigger an AttributeError and
            # handle lower pip versions
            log.debug('Installed pip version: %s', pip.__version__)
            install_req = _from_line(pkg)
        except AttributeError:
            log.debug('Installed pip version is lower than 1.2')
            supported_vcs = ('git', 'svn', 'hg', 'bzr')
            # Old pip cannot parse vcs+URL forms; strip the 'vcs+' prefix
            # ourselves before handing it over.
            if pkg.startswith(supported_vcs):
                for vcs in supported_vcs:
                    if pkg.startswith(vcs):
                        from_vcs = True
                        install_req = _from_line(
                            pkg.split('{0}+'.format(vcs))[-1]
                        )
                        break
            else:
                install_req = _from_line(pkg)
    except (ValueError, InstallationError) as exc:
        ret['result'] = False
        # A single '=' (instead of '==') is the most common user mistake;
        # give it a dedicated message.
        if not from_vcs and '=' in pkg and '==' not in pkg:
            ret['comment'] = (
                'Invalid version specification in package {0}. \'=\' is '
                'not supported, use \'==\' instead.'.format(pkg)
            )
            return ret
        ret['comment'] = (
            'pip raised an exception while parsing \'{0}\': {1}'.format(
                pkg, exc
            )
        )
        return ret
    if install_req.req is None:
        # This is most likely an url and there's no way to know what will
        # be installed before actually installing it.
        ret['result'] = True
        ret['prefix'] = ''
        ret['version_spec'] = []
    else:
        ret['result'] = True
        try:
            # pip < 8 exposes a pkg_resources Requirement with project_name
            # and specs attributes.
            ret['prefix'] = install_req.req.project_name
            ret['version_spec'] = install_req.req.specs
        except Exception:
            # Newer pip uses packaging.requirements; normalize the name and
            # rebuild the (operator, version) pairs from the specifier set.
            ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
            if hasattr(install_req, "specifier"):
                specifier = install_req.specifier
            else:
                specifier = install_req.req.specifier
            ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]
    return ret
|
Takes a package name and version specification (if any) and checks it using
the pip library.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L106-L181
| null |
# -*- coding: utf-8 -*-
'''
Installation of Python Packages Using pip
=========================================
These states manage system installed python packages. Note that pip must be
installed for these states to be available, so pip states should include a
requisite to a pkg.installed state for the package which provides pip
(``python-pip`` in most cases). Example:
.. code-block:: yaml
python-pip:
pkg.installed
virtualenvwrapper:
pip.installed:
- require:
- pkg: python-pip
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
# Import salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import pip
HAS_PIP = True
except ImportError:
HAS_PIP = False
if HAS_PIP is True:
try:
from pip.req import InstallRequirement
_from_line = InstallRequirement.from_line
except ImportError:
# pip 10.0.0 move req module under pip._internal
try:
try:
from pip._internal.req import InstallRequirement
_from_line = InstallRequirement.from_line
except AttributeError:
from pip._internal.req.constructors import install_req_from_line as _from_line
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
try:
from pip.exceptions import InstallationError
except ImportError:
InstallationError = ValueError
# pylint: enable=import-error
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pip'
def __virtual__():
    '''
    Load this state module only when pkg_resources is importable and the
    pip execution module is present in __salt__.
    '''
    if not HAS_PKG_RESOURCES:
        return False, 'The pkg_resources python library is not installed'
    return __virtualname__ if 'pip.list' in __salt__ else False
def _fulfills_version_spec(version, version_spec):
'''
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
'''
for oper, spec in version_spec:
if oper is None:
continue
if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec, cmp_func=_pep440_version_cmp):
return False
return True
def _check_if_installed(prefix,
                        state_pkg_name,
                        version_spec,
                        ignore_installed,
                        force_reinstall,
                        upgrade,
                        user,
                        cwd,
                        bin_env,
                        env_vars,
                        index_url,
                        extra_index_url,
                        pip_list=False,
                        **kwargs):
    '''
    Takes a package name and version specification (if any) and checks it is
    installed

    Keyword arguments include:
        pip_list: optional dict of installed pip packages, and their versions,
            to search through to check if the package is installed. If not
            provided, one will be generated in this function by querying the
            system.

    Returns:
        result: None means the command failed to run
        result: True means the package is installed
        result: False means the package is not installed
    '''
    ret = {'result': False, 'comment': None}

    # If we are not passed a pip list, get one:
    # Wrap it in a case-insensitive dict, since pip package names are
    # compared case-insensitively.
    pip_list = salt.utils.data.CaseInsensitiveDict(
        pip_list or __salt__['pip.list'](prefix, bin_env=bin_env,
                                         user=user, cwd=cwd,
                                         env_vars=env_vars, **kwargs)
    )

    # If the package was already installed, check
    # the ignore_installed and force_reinstall flags
    if ignore_installed is False and prefix in pip_list:
        if force_reinstall is False and not upgrade:
            # Check desired version (if any) against currently-installed
            if (
                any(version_spec) and
                _fulfills_version_spec(pip_list[prefix], version_spec)
            ) or (not any(version_spec)):
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
        if force_reinstall is False and upgrade:
            # Check desired version (if any) against currently-installed.
            # Pre-releases are only considered when the spec itself mentions
            # an alpha/beta/rc marker.
            include_alpha = False
            include_beta = False
            include_rc = False
            if any(version_spec):
                for spec in version_spec:
                    if 'a' in spec[1]:
                        include_alpha = True
                    if 'b' in spec[1]:
                        include_beta = True
                    if 'rc' in spec[1]:
                        include_rc = True
            # Query the index for every candidate version of this package.
            available_versions = __salt__['pip.list_all_versions'](
                prefix, bin_env=bin_env, include_alpha=include_alpha,
                include_beta=include_beta, include_rc=include_rc, user=user,
                cwd=cwd, index_url=index_url, extra_index_url=extra_index_url)
            desired_version = ''
            if any(version_spec):
                # Walk candidates newest-first and take the first one that
                # satisfies the version spec.
                # NOTE(review): assumes available_versions is sorted
                # oldest-to-newest -- confirm against pip.list_all_versions.
                for version in reversed(available_versions):
                    if _fulfills_version_spec(version, version_spec):
                        desired_version = version
                        break
            else:
                # No spec given: target the newest available version.
                desired_version = available_versions[-1]
            if not desired_version:
                # No candidate satisfies the spec; treat as already done.
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed and\nthe available upgrade '
                                  'doesn\'t fulfills the version '
                                  'requirements'.format(prefix))
                return ret
            # Already at the targeted version: nothing to upgrade.
            if _pep440_version_cmp(pip_list[prefix], desired_version) == 0:
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret

    # Fall through: package is absent, ignored, or needs (re)installation.
    return ret
def _pep440_version_cmp(pkg1, pkg2, ignore_epoch=False):
    '''
    Compares two version strings using pkg_resources.parse_version.
    Return -1 if version1 < version2, 0 if version1 ==version2,
    and 1 if version1 > version2. Return None if there was a problem
    making the comparison.
    '''
    def normalize(version):
        # Optionally strip the PEP 440 epoch ("N!") prefix before comparing.
        text = six.text_type(version)
        return text.split('!', 1)[-1] if ignore_epoch else text

    pkg1 = normalize(pkg1)
    pkg2 = normalize(pkg2)

    try:
        parsed1 = pkg_resources.parse_version(pkg1)
        parsed2 = pkg_resources.parse_version(pkg2)
        if parsed1 < parsed2:
            return -1
        if parsed1 == parsed2:
            return 0
        if parsed1 > parsed2:
            return 1
    except Exception as exc:
        log.exception(exc)
    # Unparseable or incomparable versions: signal failure to the caller.
    return None
def installed(name,
pkgs=None,
pip_bin=None,
requirements=None,
bin_env=None,
use_wheel=False,
no_use_wheel=False,
log=None,
proxy=None,
timeout=None,
repo=None,
editable=None,
find_links=None,
index_url=None,
extra_index_url=None,
no_index=False,
mirrors=None,
build=None,
target=None,
download=None,
download_cache=None,
source=None,
upgrade=False,
force_reinstall=False,
ignore_installed=False,
exists_action=None,
no_deps=False,
no_install=False,
no_download=False,
install_options=None,
global_options=None,
user=None,
cwd=None,
pre_releases=False,
cert=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
process_dependency_links=False,
env_vars=None,
use_vt=False,
trusted_host=None,
no_cache_dir=False,
cache_dir=None,
no_binary=None,
extra_args=None,
**kwargs):
'''
Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_cache_dir:
Disable the cache.
cwd
Current working directory to run pip from
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
extra_args
pip keyword and positional arguments not yet implemented in salt
.. code-block:: yaml
pandas:
pip.installed:
- name: pandas
- extra_args:
- --latest-pip-kwarg: param
- --latest-pip-arg
.. warning::
If unsupported options are passed here that are not supported in a
minion's version of pip, a `No such option error` will be thrown.
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
'''
if pip_bin and not bin_env:
bin_env = pip_bin
# If pkgs is present, ignore name
if pkgs:
if not isinstance(pkgs, list):
return {'name': name,
'result': False,
'changes': {},
'comment': 'pkgs argument must be formatted as a list'}
else:
pkgs = [name]
# Assumption: If `pkg` is not an `string`, it's a `collections.OrderedDict`
# prepro = lambda pkg: pkg if type(pkg) == str else \
# ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';')))
# pkgs = ','.join([prepro(pkg) for pkg in pkgs])
prepro = lambda pkg: pkg if isinstance(pkg, six.string_types) else \
' '.join((six.iteritems(pkg)[0][0], six.iteritems(pkg)[0][1]))
pkgs = [prepro(pkg) for pkg in pkgs]
ret = {'name': ';'.join(pkgs), 'result': None,
'comment': '', 'changes': {}}
try:
cur_version = __salt__['pip.version'](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Get the packages parsed name and version from the pip library.
# This only is done when there is no requirements or editable parameter.
pkgs_details = []
if pkgs and not (requirements or editable):
comments = []
for pkg in iter(pkgs):
out = _check_pkg_version_format(pkg)
if out['result'] is False:
ret['result'] = False
comments.append(out['comment'])
elif out['result'] is True:
pkgs_details.append((out['prefix'], pkg, out['version_spec']))
if ret['result'] is False:
ret['comment'] = '\n'.join(comments)
return ret
# If a requirements file is specified, only install the contents of the
# requirements file. Similarly, using the --editable flag with pip should
# also ignore the "name" and "pkgs" parameters.
target_pkgs = []
already_installed_comments = []
if requirements or editable:
comments = []
# Append comments if this is a dry run.
if __opts__['test']:
ret['result'] = None
if requirements:
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append('Requirements file \'{0}\' will be '
'processed.'.format(requirements))
if editable:
comments.append(
'Package will be installed in editable mode (i.e. '
'setuptools "develop mode") from {0}.'.format(editable)
)
ret['comment'] = ' '.join(comments)
return ret
# No requirements case.
# Check pre-existence of the requested packages.
else:
# Attempt to pre-cache a the current pip list
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
# If we fail, then just send False, and we'll try again in the next function call
except Exception as exc:
log.exception(exc)
pip_list = False
for prefix, state_pkg_name, version_spec in pkgs_details:
if prefix:
state_pkg_name = state_pkg_name
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, env_vars,
index_url, extra_index_url, pip_list,
**kwargs)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
ret['result'] = False
ret['comment'] = out['comment']
return ret
else:
out = {'result': False, 'comment': None}
result = out['result']
# The package is not present. Add it to the pkgs to install.
if result is False:
# Replace commas (used for version ranges) with semicolons
# (which are not supported) in name so it does not treat
# them as multiple packages.
target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))
# Append comments if this is a dry run.
if __opts__['test']:
msg = 'Python package {0} is set to be installed'
ret['result'] = None
ret['comment'] = msg.format(state_pkg_name)
return ret
# The package is already present and will not be reinstalled.
elif result is True:
# Append comment stating its presence
already_installed_comments.append(out['comment'])
# The command pip.list failed. Abort.
elif result is None:
ret['result'] = None
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
# Call to install the package. Actual installation takes place here
pip_install_call = __salt__['pip.install'](
pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
log=log,
proxy=proxy,
timeout=timeout,
editable=editable,
find_links=find_links,
index_url=index_url,
extra_index_url=extra_index_url,
no_index=no_index,
mirrors=mirrors,
build=build,
target=target,
download=download,
download_cache=download_cache,
source=source,
upgrade=upgrade,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
exists_action=exists_action,
no_deps=no_deps,
no_install=no_install,
no_download=no_download,
install_options=install_options,
global_options=global_options,
user=user,
cwd=cwd,
pre_releases=pre_releases,
cert=cert,
allow_all_external=allow_all_external,
allow_external=allow_external,
allow_unverified=allow_unverified,
process_dependency_links=process_dependency_links,
saltenv=__env__,
env_vars=env_vars,
use_vt=use_vt,
trusted_host=trusted_host,
no_cache_dir=no_cache_dir,
extra_args=extra_args,
**kwargs
)
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
comments = []
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',
]
for line in pip_install_call.get('stdout', '').split('\n'):
if not any(
[
line.strip().startswith(x)
for x in PIP_REQUIREMENTS_NOCHANGE
]
):
ret['changes']['requirements'] = True
if ret['changes'].get('requirements'):
comments.append('Successfully processed requirements file '
'{0}.'.format(requirements))
else:
comments.append('Requirements were already installed.')
if editable:
comments.append('Package successfully installed from VCS '
'checkout {0}.'.format(editable))
ret['changes']['editable'] = True
ret['comment'] = ' '.join(comments)
else:
# Check that the packages set to be installed were installed.
# Create comments reporting success and failures
pkg_404_comms = []
already_installed_packages = set()
for line in pip_install_call.get('stdout', '').split('\n'):
# Output for already installed packages:
# 'Requirement already up-to-date: jinja2 in /usr/local/lib/python2.7/dist-packages\nCleaning up...'
if line.startswith('Requirement already up-to-date: '):
package = line.split(':', 1)[1].split()[0]
already_installed_packages.add(package.lower())
for prefix, state_name in target_pkgs:
# Case for packages that are not an URL
if prefix:
pipsearch = salt.utils.data.CaseInsensitiveDict(
__salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd,
env_vars=env_vars,
**kwargs)
)
# If we didn't find the package in the system after
# installing it report it
if not pipsearch:
pkg_404_comms.append(
'There was no error installing package \'{0}\' '
'although it does not show when calling '
'\'pip.freeze\'.'.format(pkg)
)
else:
if prefix in pipsearch \
and prefix.lower() not in already_installed_packages:
ver = pipsearch[prefix]
ret['changes']['{0}=={1}'.format(prefix, ver)] = 'Installed'
# Case for packages that are an URL
else:
ret['changes']['{0}==???'.format(state_name)] = 'Installed'
# Set comments
aicomms = '\n'.join(already_installed_comments)
succ_comm = 'All packages were successfully installed'\
if not pkg_404_comms else '\n'.join(pkg_404_comms)
ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm
return ret
elif pip_install_call:
ret['result'] = False
if 'stdout' in pip_install_call:
error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
pip_install_call['stderr'])
else:
error = 'Error: {0}'.format(pip_install_call['comment'])
if requirements or editable:
comments = []
if requirements:
comments.append('Unable to process requirements file '
'"{0}".'.format(requirements))
if editable:
comments.append('Unable to install from VCS checkout'
'{0}.'.format(editable))
comments.append(error)
ret['comment'] = ' '.join(comments)
else:
pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
aicomms = '\n'.join(already_installed_comments)
error_comm = ('Failed to install packages: {0}. '
'{1}'.format(pkgs_str, error))
ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
else:
ret['result'] = False
ret['comment'] = 'Could not install package'
return ret
def removed(name,
            requirements=None,
            bin_env=None,
            log=None,
            proxy=None,
            timeout=None,
            user=None,
            cwd=None,
            use_vt=False):
    '''
    Make sure that a package is not installed.

    name
        The name of the package to uninstall
    user
        The user under which to run pip
    bin_env : None
        the pip executable or virtualenenv to use
    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    # Query the installed packages first; if pip itself cannot run, report
    # the failure as a state error instead of letting the exception bubble.
    try:
        installed_pkgs = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret['result'] = False
        ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
        return ret

    # Nothing to do when the package is already absent.
    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package is not installed.'
        return ret

    # Dry run: only report what would happen.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Package {0} is set to be removed'.format(name)
        return ret

    uninstalled = __salt__['pip.uninstall'](pkgs=name,
                                            requirements=requirements,
                                            bin_env=bin_env,
                                            log=log,
                                            proxy=proxy,
                                            timeout=timeout,
                                            user=user,
                                            cwd=cwd,
                                            use_vt=use_vt)
    if uninstalled:
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package was successfully removed.'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not remove package.'
    return ret
def uptodate(name,
             bin_env=None,
             user=None,
             cwd=None,
             use_vt=False):
    '''
    .. versionadded:: 2015.5.0

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference
    user
        The user under which to run pip
    bin_env
        the pip executable or virtualenenv to use
    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    # Pessimistic defaults; overwritten on every successful path below.
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update.'}

    try:
        outdated = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd)
    except Exception as e:
        ret['comment'] = six.text_type(e)
        return ret

    # No upgradable packages: success with nothing to change.
    if not outdated:
        ret['comment'] = 'System is already up-to-date.'
        ret['result'] = True
        return ret

    # Dry run: report the pending upgrade only.
    if __opts__['test']:
        ret['comment'] = 'System update will be performed'
        ret['result'] = None
        return ret

    upgrade_result = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt)
    if upgrade_result.get('result') is False:
        # The execution module reported failure: surface its payload verbatim.
        ret.update(upgrade_result)
    elif upgrade_result:
        ret['changes'] = upgrade_result
        ret['comment'] = 'Upgrade successful.'
        ret['result'] = True
    else:
        ret['comment'] = 'Upgrade failed.'
    return ret
def mod_aggregate(low, chunks, running):
    '''
    The mod_aggregate function which looks up all packages in the available
    low chunks and merges them into a single pkgs ref in the present low data
    '''
    # Aggregation is only meaningful for these state functions.
    if low.get('fun') not in ('installed', 'removed'):
        return low

    collected = []
    pkg_type = None
    for chunk in chunks:
        # Skip chunks whose state already ran.
        if __utils__['state.gen_tag'](chunk) in running:
            continue
        if chunk.get('state') != 'pip':
            continue
        # Already folded into another chunk.
        if '__agg__' in chunk:
            continue
        # Only merge chunks invoking the same function as this one.
        if chunk.get('fun') != low.get('fun'):
            continue
        if pkg_type is None:
            pkg_type = 'pkgs'
        if pkg_type != 'pkgs':
            continue
        # Pull out the pkg names!
        if 'pkgs' in chunk:
            collected.extend(chunk['pkgs'])
            chunk['__agg__'] = True
        elif 'name' in chunk:
            version = chunk.pop('version', None)
            if version is None:
                collected.append(chunk['name'])
            else:
                collected.append({chunk['name']: version})
            chunk['__agg__'] = True

    if pkg_type is not None and collected:
        if pkg_type in low:
            low[pkg_type].extend(collected)
        else:
            low[pkg_type] = collected
    return low
|
saltstack/salt
|
salt/states/pip_state.py
|
_check_if_installed
|
python
|
def _check_if_installed(prefix,
                        state_pkg_name,
                        version_spec,
                        ignore_installed,
                        force_reinstall,
                        upgrade,
                        user,
                        cwd,
                        bin_env,
                        env_vars,
                        index_url,
                        extra_index_url,
                        pip_list=False,
                        **kwargs):
    '''
    Takes a package name and version specification (if any) and checks it is
    installed

    Keyword arguments include:
        pip_list: optional dict of installed pip packages, and their versions,
            to search through to check if the package is installed. If not
            provided, one will be generated in this function by querying the
            system.

    Returns:
        result: None means the command failed to run
        result: True means the package is installed
        result: False means the package is not installed

    NOTE(review): this function itself only ever sets ``result`` to True or
    False; the documented None outcome presumably refers to failures seen by
    the caller — confirm before relying on it.
    '''
    ret = {'result': False, 'comment': None}

    # If we are not passed a pip list, get one:
    # PyPI package names are compared case-insensitively, so wrap the
    # listing in a case-insensitive dict for the lookups below.
    pip_list = salt.utils.data.CaseInsensitiveDict(
        pip_list or __salt__['pip.list'](prefix, bin_env=bin_env,
                                         user=user, cwd=cwd,
                                         env_vars=env_vars, **kwargs)
    )

    # If the package was already installed, check
    # the ignore_installed and force_reinstall flags
    if ignore_installed is False and prefix in pip_list:
        if force_reinstall is False and not upgrade:
            # Check desired version (if any) against currently-installed.
            # An empty spec (or one the installed version satisfies) means
            # the package is considered present as-is.
            if (
                    any(version_spec) and
                    _fulfills_version_spec(pip_list[prefix], version_spec)
            ) or (not any(version_spec)):
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
        if force_reinstall is False and upgrade:
            # Check desired version (if any) against currently-installed.
            # Pre-release candidates are only included when the requested
            # spec string itself mentions one ('a', 'b' or 'rc' substring).
            include_alpha = False
            include_beta = False
            include_rc = False
            if any(version_spec):
                for spec in version_spec:
                    if 'a' in spec[1]:
                        include_alpha = True
                    if 'b' in spec[1]:
                        include_beta = True
                    if 'rc' in spec[1]:
                        include_rc = True
            available_versions = __salt__['pip.list_all_versions'](
                prefix, bin_env=bin_env, include_alpha=include_alpha,
                include_beta=include_beta, include_rc=include_rc, user=user,
                cwd=cwd, index_url=index_url, extra_index_url=extra_index_url)
            desired_version = ''
            # Pick the newest available version satisfying the spec;
            # assumes pip.list_all_versions returns versions sorted
            # oldest-first — TODO confirm against the execution module.
            if any(version_spec):
                for version in reversed(available_versions):
                    if _fulfills_version_spec(version, version_spec):
                        desired_version = version
                        break
            else:
                desired_version = available_versions[-1]
            if not desired_version:
                # Upgrade requested but no available version matches the
                # spec: treat the installed package as satisfying the state.
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed and\nthe available upgrade '
                                  'doesn\'t fulfills the version '
                                  'requirements'.format(prefix))
                return ret
            # Already at the target version: nothing to upgrade.
            if _pep440_version_cmp(pip_list[prefix], desired_version) == 0:
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret

    return ret
|
Takes a package name and version specification (if any) and checks it is
installed
Keyword arguments include:
pip_list: optional dict of installed pip packages, and their versions,
to search through to check if the package is installed. If not
provided, one will be generated in this function by querying the
system.
Returns:
result: None means the command failed to run
result: True means the package is installed
result: False means the package is not installed
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L184-L273
| null |
# -*- coding: utf-8 -*-
'''
Installation of Python Packages Using pip
=========================================
These states manage system installed python packages. Note that pip must be
installed for these states to be available, so pip states should include a
requisite to a pkg.installed state for the package which provides pip
(``python-pip`` in most cases). Example:
.. code-block:: yaml
python-pip:
pkg.installed
virtualenvwrapper:
pip.installed:
- require:
- pkg: python-pip
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
# Import salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import pip
HAS_PIP = True
except ImportError:
HAS_PIP = False
if HAS_PIP is True:
try:
from pip.req import InstallRequirement
_from_line = InstallRequirement.from_line
except ImportError:
# pip 10.0.0 move req module under pip._internal
try:
try:
from pip._internal.req import InstallRequirement
_from_line = InstallRequirement.from_line
except AttributeError:
from pip._internal.req.constructors import install_req_from_line as _from_line
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
try:
from pip.exceptions import InstallationError
except ImportError:
InstallationError = ValueError
# pylint: enable=import-error
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pip'
def __virtual__():
    '''
    Only load if the pip module is available in __salt__
    '''
    # pkg_resources is needed for the version comparisons performed by
    # these states.
    if not HAS_PKG_RESOURCES:
        return False, 'The pkg_resources python library is not installed'
    # The pip execution module must have loaded for the states to work.
    return __virtualname__ if 'pip.list' in __salt__ else False
def _fulfills_version_spec(version, version_spec):
'''
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
'''
for oper, spec in version_spec:
if oper is None:
continue
if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec, cmp_func=_pep440_version_cmp):
return False
return True
def _check_pkg_version_format(pkg):
    '''
    Takes a package name and version specification (if any) and checks it using
    the pip library.

    Returns a dict with keys:
        result:       True when ``pkg`` parsed cleanly, False otherwise
        comment:      error text when ``result`` is False
        prefix:       parsed project name ('' for URL-style requirements)
        version_spec: list of (operator, version) tuples parsed from ``pkg``
    '''
    ret = {'result': False, 'comment': None,
           'prefix': None, 'version_spec': None}

    if not HAS_PIP:
        ret['comment'] = (
            'An importable Python 2 pip module is required but could not be '
            'found on your system. This usually means that the system\'s pip '
            'package is not installed properly.'
        )

        return ret

    # Tracks whether the requirement was recognized as a VCS URL, which
    # changes the error message produced on parse failure below.
    from_vcs = False

    try:
        # Get the requirement object from the pip library
        try:
            # With pip < 1.2, the __version__ attribute does not exist and
            # vcs+URL urls are not properly parsed.
            # The next line is meant to trigger an AttributeError and
            # handle lower pip versions
            log.debug('Installed pip version: %s', pip.__version__)
            install_req = _from_line(pkg)
        except AttributeError:
            log.debug('Installed pip version is lower than 1.2')
            supported_vcs = ('git', 'svn', 'hg', 'bzr')
            if pkg.startswith(supported_vcs):
                for vcs in supported_vcs:
                    if pkg.startswith(vcs):
                        from_vcs = True
                        # Strip the "<vcs>+" prefix before handing the URL
                        # to pip's requirement parser.
                        install_req = _from_line(
                            pkg.split('{0}+'.format(vcs))[-1]
                        )
                        break
            else:
                install_req = _from_line(pkg)
    except (ValueError, InstallationError) as exc:
        ret['result'] = False
        # Special-case the common mistake of writing '=' instead of '=='.
        if not from_vcs and '=' in pkg and '==' not in pkg:
            ret['comment'] = (
                'Invalid version specification in package {0}. \'=\' is '
                'not supported, use \'==\' instead.'.format(pkg)
            )
            return ret
        ret['comment'] = (
            'pip raised an exception while parsing \'{0}\': {1}'.format(
                pkg, exc
            )
        )
        return ret

    if install_req.req is None:
        # This is most likely an url and there's no way to know what will
        # be installed before actually installing it.
        ret['result'] = True
        ret['prefix'] = ''
        ret['version_spec'] = []
    else:
        ret['result'] = True
        try:
            # Older pip exposes project_name/specs directly on the wrapped
            # requirement object.
            ret['prefix'] = install_req.req.project_name
            ret['version_spec'] = install_req.req.specs
        except Exception:
            # Fallback for newer pip, where those attributes are gone —
            # presumably the requirement wraps a packaging.Requirement;
            # normalize the name and rebuild the spec list from the
            # SpecifierSet. TODO confirm across supported pip versions.
            ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
            if hasattr(install_req, "specifier"):
                specifier = install_req.specifier
            else:
                specifier = install_req.req.specifier
            ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]

    return ret
def _pep440_version_cmp(pkg1, pkg2, ignore_epoch=False):
    '''
    Compares two version strings using pkg_resources.parse_version.
    Return -1 if version1 < version2, 0 if version1 ==version2,
    and 1 if version1 > version2. Return None if there was a problem
    making the comparison.
    '''
    def _normalize(version):
        # Optionally strip a PEP 440 epoch prefix ("N!") before comparing.
        text = six.text_type(version)
        return text.split('!', 1)[-1] if ignore_epoch else text

    pkg1 = _normalize(pkg1)
    pkg2 = _normalize(pkg2)

    try:
        parsed1 = pkg_resources.parse_version(pkg1)
        parsed2 = pkg_resources.parse_version(pkg2)
        if parsed1 < parsed2:
            return -1
        if parsed1 == parsed2:
            return 0
        if parsed1 > parsed2:
            return 1
    except Exception as exc:
        # Unparseable/incomparable versions: log and signal "cannot compare".
        log.exception(exc)
    return None
def installed(name,
pkgs=None,
pip_bin=None,
requirements=None,
bin_env=None,
use_wheel=False,
no_use_wheel=False,
log=None,
proxy=None,
timeout=None,
repo=None,
editable=None,
find_links=None,
index_url=None,
extra_index_url=None,
no_index=False,
mirrors=None,
build=None,
target=None,
download=None,
download_cache=None,
source=None,
upgrade=False,
force_reinstall=False,
ignore_installed=False,
exists_action=None,
no_deps=False,
no_install=False,
no_download=False,
install_options=None,
global_options=None,
user=None,
cwd=None,
pre_releases=False,
cert=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
process_dependency_links=False,
env_vars=None,
use_vt=False,
trusted_host=None,
no_cache_dir=False,
cache_dir=None,
no_binary=None,
extra_args=None,
**kwargs):
'''
Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_cache_dir:
Disable the cache.
cwd
Current working directory to run pip from
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
extra_args
pip keyword and positional arguments not yet implemented in salt
.. code-block:: yaml
pandas:
pip.installed:
- name: pandas
- extra_args:
- --latest-pip-kwarg: param
- --latest-pip-arg
.. warning::
If unsupported options are passed here that are not supported in a
minion's version of pip, a `No such option error` will be thrown.
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
'''
if pip_bin and not bin_env:
bin_env = pip_bin
# If pkgs is present, ignore name
if pkgs:
if not isinstance(pkgs, list):
return {'name': name,
'result': False,
'changes': {},
'comment': 'pkgs argument must be formatted as a list'}
else:
pkgs = [name]
# Assumption: If `pkg` is not an `string`, it's a `collections.OrderedDict`
# prepro = lambda pkg: pkg if type(pkg) == str else \
# ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';')))
# pkgs = ','.join([prepro(pkg) for pkg in pkgs])
prepro = lambda pkg: pkg if isinstance(pkg, six.string_types) else \
' '.join((six.iteritems(pkg)[0][0], six.iteritems(pkg)[0][1]))
pkgs = [prepro(pkg) for pkg in pkgs]
ret = {'name': ';'.join(pkgs), 'result': None,
'comment': '', 'changes': {}}
try:
cur_version = __salt__['pip.version'](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Get the packages parsed name and version from the pip library.
# This only is done when there is no requirements or editable parameter.
pkgs_details = []
if pkgs and not (requirements or editable):
comments = []
for pkg in iter(pkgs):
out = _check_pkg_version_format(pkg)
if out['result'] is False:
ret['result'] = False
comments.append(out['comment'])
elif out['result'] is True:
pkgs_details.append((out['prefix'], pkg, out['version_spec']))
if ret['result'] is False:
ret['comment'] = '\n'.join(comments)
return ret
# If a requirements file is specified, only install the contents of the
# requirements file. Similarly, using the --editable flag with pip should
# also ignore the "name" and "pkgs" parameters.
target_pkgs = []
already_installed_comments = []
if requirements or editable:
comments = []
# Append comments if this is a dry run.
if __opts__['test']:
ret['result'] = None
if requirements:
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append('Requirements file \'{0}\' will be '
'processed.'.format(requirements))
if editable:
comments.append(
'Package will be installed in editable mode (i.e. '
'setuptools "develop mode") from {0}.'.format(editable)
)
ret['comment'] = ' '.join(comments)
return ret
# No requirements case.
# Check pre-existence of the requested packages.
else:
# Attempt to pre-cache a the current pip list
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
# If we fail, then just send False, and we'll try again in the next function call
except Exception as exc:
log.exception(exc)
pip_list = False
for prefix, state_pkg_name, version_spec in pkgs_details:
if prefix:
state_pkg_name = state_pkg_name
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, env_vars,
index_url, extra_index_url, pip_list,
**kwargs)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
ret['result'] = False
ret['comment'] = out['comment']
return ret
else:
out = {'result': False, 'comment': None}
result = out['result']
# The package is not present. Add it to the pkgs to install.
if result is False:
# Replace commas (used for version ranges) with semicolons
# (which are not supported) in name so it does not treat
# them as multiple packages.
target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))
# Append comments if this is a dry run.
if __opts__['test']:
msg = 'Python package {0} is set to be installed'
ret['result'] = None
ret['comment'] = msg.format(state_pkg_name)
return ret
# The package is already present and will not be reinstalled.
elif result is True:
# Append comment stating its presence
already_installed_comments.append(out['comment'])
# The command pip.list failed. Abort.
elif result is None:
ret['result'] = None
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
# Call to install the package. Actual installation takes place here
pip_install_call = __salt__['pip.install'](
pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
log=log,
proxy=proxy,
timeout=timeout,
editable=editable,
find_links=find_links,
index_url=index_url,
extra_index_url=extra_index_url,
no_index=no_index,
mirrors=mirrors,
build=build,
target=target,
download=download,
download_cache=download_cache,
source=source,
upgrade=upgrade,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
exists_action=exists_action,
no_deps=no_deps,
no_install=no_install,
no_download=no_download,
install_options=install_options,
global_options=global_options,
user=user,
cwd=cwd,
pre_releases=pre_releases,
cert=cert,
allow_all_external=allow_all_external,
allow_external=allow_external,
allow_unverified=allow_unverified,
process_dependency_links=process_dependency_links,
saltenv=__env__,
env_vars=env_vars,
use_vt=use_vt,
trusted_host=trusted_host,
no_cache_dir=no_cache_dir,
extra_args=extra_args,
**kwargs
)
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
comments = []
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',
]
for line in pip_install_call.get('stdout', '').split('\n'):
if not any(
[
line.strip().startswith(x)
for x in PIP_REQUIREMENTS_NOCHANGE
]
):
ret['changes']['requirements'] = True
if ret['changes'].get('requirements'):
comments.append('Successfully processed requirements file '
'{0}.'.format(requirements))
else:
comments.append('Requirements were already installed.')
if editable:
comments.append('Package successfully installed from VCS '
'checkout {0}.'.format(editable))
ret['changes']['editable'] = True
ret['comment'] = ' '.join(comments)
else:
# Check that the packages set to be installed were installed.
# Create comments reporting success and failures
pkg_404_comms = []
already_installed_packages = set()
for line in pip_install_call.get('stdout', '').split('\n'):
# Output for already installed packages:
# 'Requirement already up-to-date: jinja2 in /usr/local/lib/python2.7/dist-packages\nCleaning up...'
if line.startswith('Requirement already up-to-date: '):
package = line.split(':', 1)[1].split()[0]
already_installed_packages.add(package.lower())
for prefix, state_name in target_pkgs:
# Case for packages that are not an URL
if prefix:
pipsearch = salt.utils.data.CaseInsensitiveDict(
__salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd,
env_vars=env_vars,
**kwargs)
)
# If we didn't find the package in the system after
# installing it report it
if not pipsearch:
pkg_404_comms.append(
'There was no error installing package \'{0}\' '
'although it does not show when calling '
'\'pip.freeze\'.'.format(pkg)
)
else:
if prefix in pipsearch \
and prefix.lower() not in already_installed_packages:
ver = pipsearch[prefix]
ret['changes']['{0}=={1}'.format(prefix, ver)] = 'Installed'
# Case for packages that are an URL
else:
ret['changes']['{0}==???'.format(state_name)] = 'Installed'
# Set comments
aicomms = '\n'.join(already_installed_comments)
succ_comm = 'All packages were successfully installed'\
if not pkg_404_comms else '\n'.join(pkg_404_comms)
ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm
return ret
elif pip_install_call:
ret['result'] = False
if 'stdout' in pip_install_call:
error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
pip_install_call['stderr'])
else:
error = 'Error: {0}'.format(pip_install_call['comment'])
if requirements or editable:
comments = []
if requirements:
comments.append('Unable to process requirements file '
'"{0}".'.format(requirements))
if editable:
comments.append('Unable to install from VCS checkout'
'{0}.'.format(editable))
comments.append(error)
ret['comment'] = ' '.join(comments)
else:
pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
aicomms = '\n'.join(already_installed_comments)
error_comm = ('Failed to install packages: {0}. '
'{1}'.format(pkgs_str, error))
ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
else:
ret['result'] = False
ret['comment'] = 'Could not install package'
return ret
def removed(name,
            requirements=None,
            bin_env=None,
            log=None,
            proxy=None,
            timeout=None,
            user=None,
            cwd=None,
            use_vt=False):
    '''
    Make sure that a package is not installed.

    name
        The name of the package to uninstall

    user
        The user under which to run pip

    bin_env : None
        the pip executable or virtualenenv to use

    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    # Query the currently installed packages first; if we cannot even list
    # them, we cannot tell whether the package is present, so fail early.
    try:
        installed_pkgs = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret['result'] = False
        ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
        return ret

    # Nothing to do when the package is already absent.
    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package is not installed.'
        return ret

    # Dry run: report the pending removal without touching anything.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Package {0} is set to be removed'.format(name)
        return ret

    uninstalled = __salt__['pip.uninstall'](pkgs=name,
                                            requirements=requirements,
                                            bin_env=bin_env,
                                            log=log,
                                            proxy=proxy,
                                            timeout=timeout,
                                            user=user,
                                            cwd=cwd,
                                            use_vt=use_vt)
    if not uninstalled:
        ret['result'] = False
        ret['comment'] = 'Could not remove package.'
        return ret

    ret['result'] = True
    ret['changes'][name] = 'Removed'
    ret['comment'] = 'Package was successfully removed.'
    return ret
def uptodate(name,
             bin_env=None,
             user=None,
             cwd=None,
             use_vt=False):
    '''
    .. versionadded:: 2015.5.0

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference
    user
        The user under which to run pip
    bin_env
        the pip executable or virtualenenv to use
    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update.'}

    # Ask pip which installed packages have newer versions available.
    try:
        outdated = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd)
    except Exception as e:
        ret['comment'] = six.text_type(e)
        return ret

    # Nothing outdated means nothing to do.
    if not outdated:
        ret['comment'] = 'System is already up-to-date.'
        ret['result'] = True
        return ret

    # Dry run: report the pending upgrade without performing it.
    if __opts__['test']:
        ret['comment'] = 'System update will be performed'
        ret['result'] = None
        return ret

    upgrade_result = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt)
    if upgrade_result.get('result') is False:
        # Propagate the failure details from the execution module verbatim.
        ret.update(upgrade_result)
    elif upgrade_result:
        ret['changes'] = upgrade_result
        ret['comment'] = 'Upgrade successful.'
        ret['result'] = True
    else:
        ret['comment'] = 'Upgrade failed.'
    return ret
def mod_aggregate(low, chunks, running):
    '''
    The mod_aggregate function which looks up all packages in the available
    low chunks and merges them into a single pkgs ref in the present low data
    '''
    # Aggregation only applies to these state functions.
    agg_enabled = [
        'installed',
        'removed',
    ]
    if low.get('fun') not in agg_enabled:
        return low

    collected = []
    pkg_type = None
    for chunk in chunks:
        tag = __utils__['state.gen_tag'](chunk)
        if tag in running:
            # This pkg state already ran, skip aggregation
            continue
        if chunk.get('state') != 'pip':
            continue
        if '__agg__' in chunk:
            continue
        # Only merge chunks calling the same function as ``low``.
        if chunk.get('fun') != low.get('fun'):
            continue
        # 'pkgs' is the only aggregation target for pip states.
        if pkg_type is None:
            pkg_type = 'pkgs'
        if pkg_type != 'pkgs':
            continue
        # Pull out the pkg names!
        if 'pkgs' in chunk:
            collected.extend(chunk['pkgs'])
            chunk['__agg__'] = True
        elif 'name' in chunk:
            pinned = chunk.pop('version', None)
            if pinned is not None:
                collected.append({chunk['name']: pinned})
            else:
                collected.append(chunk['name'])
            chunk['__agg__'] = True

    if pkg_type is not None and collected:
        if pkg_type in low:
            low[pkg_type].extend(collected)
        else:
            low[pkg_type] = collected
    return low
|
saltstack/salt
|
salt/states/pip_state.py
|
_pep440_version_cmp
|
python
|
def _pep440_version_cmp(pkg1, pkg2, ignore_epoch=False):
    '''
    Compares two version strings using pkg_resources.parse_version.

    Return -1 if version1 < version2, 0 if version1 == version2,
    and 1 if version1 > version2. Return None if there was a problem
    making the comparison.
    '''
    # Optionally strip a PEP 440 epoch prefix ("N!") before comparing.
    normalize = lambda x: six.text_type(x).split('!', 1)[-1] \
        if ignore_epoch else six.text_type(x)
    pkg1 = normalize(pkg1)
    pkg2 = normalize(pkg2)

    try:
        # Parse each version exactly once instead of re-parsing for every
        # comparison operator (the previous code performed up to six
        # parse_version calls for a single comparison).
        ver1 = pkg_resources.parse_version(pkg1)
        ver2 = pkg_resources.parse_version(pkg2)
        if ver1 < ver2:
            return -1
        if ver1 == ver2:
            return 0
        if ver1 > ver2:
            return 1
    except Exception as exc:
        log.exception(exc)
    # Reached when parsing/comparison raised; preserved fall-through.
    return None
|
Compares two version strings using pkg_resources.parse_version.
Return -1 if version1 < version2, 0 if version1 == version2,
and 1 if version1 > version2. Return None if there was a problem
making the comparison.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L276-L297
| null |
# -*- coding: utf-8 -*-
'''
Installation of Python Packages Using pip
=========================================
These states manage system installed python packages. Note that pip must be
installed for these states to be available, so pip states should include a
requisite to a pkg.installed state for the package which provides pip
(``python-pip`` in most cases). Example:
.. code-block:: yaml
python-pip:
pkg.installed
virtualenvwrapper:
pip.installed:
- require:
- pkg: python-pip
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
# Import salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import pip
HAS_PIP = True
except ImportError:
HAS_PIP = False
if HAS_PIP is True:
try:
from pip.req import InstallRequirement
_from_line = InstallRequirement.from_line
except ImportError:
# pip 10.0.0 move req module under pip._internal
try:
try:
from pip._internal.req import InstallRequirement
_from_line = InstallRequirement.from_line
except AttributeError:
from pip._internal.req.constructors import install_req_from_line as _from_line
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
try:
from pip.exceptions import InstallationError
except ImportError:
InstallationError = ValueError
# pylint: enable=import-error
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pip'
def __virtual__():
    '''
    Only load if the pip module is available in __salt__
    '''
    # pkg_resources is a hard requirement for version handling in this state.
    if not HAS_PKG_RESOURCES:
        return False, 'The pkg_resources python library is not installed'
    # Expose the state module only when the pip execution module loaded.
    return __virtualname__ if 'pip.list' in __salt__ else False
def _fulfills_version_spec(version, version_spec):
'''
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
'''
for oper, spec in version_spec:
if oper is None:
continue
if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec, cmp_func=_pep440_version_cmp):
return False
return True
def _check_pkg_version_format(pkg):
    '''
    Takes a package name and version specification (if any) and checks it using
    the pip library.

    Returns a dict with keys:
        result: True when pip could parse ``pkg``, False otherwise
        comment: error text when parsing failed, else None
        prefix: the parsed project name ('' for URL-style requirements)
        version_spec: list of (operator, version) tuples from ``pkg``
    '''
    ret = {'result': False, 'comment': None,
           'prefix': None, 'version_spec': None}
    if not HAS_PIP:
        ret['comment'] = (
            'An importable Python 2 pip module is required but could not be '
            'found on your system. This usually means that the system\'s pip '
            'package is not installed properly.'
        )
        return ret
    from_vcs = False
    try:
        # Get the requirement object from the pip library
        try:
            # With pip < 1.2, the __version__ attribute does not exist and
            # vcs+URL urls are not properly parsed.
            # The next line is meant to trigger an AttributeError and
            # handle lower pip versions
            # NOTE(review): the module-level import dance deletes ``pip``
            # after importing from it, so this lookup presumably raises
            # NameError (not AttributeError) on modern installs — confirm.
            log.debug('Installed pip version: %s', pip.__version__)
            install_req = _from_line(pkg)
        except AttributeError:
            # Old pip: strip the "vcs+" scheme prefix manually so the
            # remainder can be parsed as an ordinary requirement line.
            log.debug('Installed pip version is lower than 1.2')
            supported_vcs = ('git', 'svn', 'hg', 'bzr')
            if pkg.startswith(supported_vcs):
                for vcs in supported_vcs:
                    if pkg.startswith(vcs):
                        from_vcs = True
                        install_req = _from_line(
                            pkg.split('{0}+'.format(vcs))[-1]
                        )
                        break
            else:
                install_req = _from_line(pkg)
    except (ValueError, InstallationError) as exc:
        ret['result'] = False
        if not from_vcs and '=' in pkg and '==' not in pkg:
            # Most common user error: a single '=' instead of '=='.
            ret['comment'] = (
                'Invalid version specification in package {0}. \'=\' is '
                'not supported, use \'==\' instead.'.format(pkg)
            )
            return ret
        ret['comment'] = (
            'pip raised an exception while parsing \'{0}\': {1}'.format(
                pkg, exc
            )
        )
        return ret
    if install_req.req is None:
        # This is most likely an url and there's no way to know what will
        # be installed before actually installing it.
        ret['result'] = True
        ret['prefix'] = ''
        ret['version_spec'] = []
    else:
        ret['result'] = True
        try:
            # pip < 10 exposes project_name/specs on the inner requirement.
            ret['prefix'] = install_req.req.project_name
            ret['version_spec'] = install_req.req.specs
        except Exception:
            # Newer pip/packaging objects: normalise the name ourselves and
            # read the specifier set instead.
            ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
            if hasattr(install_req, "specifier"):
                specifier = install_req.specifier
            else:
                specifier = install_req.req.specifier
            ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]
    return ret
def _check_if_installed(prefix,
                        state_pkg_name,
                        version_spec,
                        ignore_installed,
                        force_reinstall,
                        upgrade,
                        user,
                        cwd,
                        bin_env,
                        env_vars,
                        index_url,
                        extra_index_url,
                        pip_list=False,
                        **kwargs):
    '''
    Takes a package name and version specification (if any) and checks it is
    installed

    Keyword arguments include:
        pip_list: optional dict of installed pip packages, and their versions,
            to search through to check if the package is installed. If not
            provided, one will be generated in this function by querying the
            system.

    Returns:
        result: None means the command failed to run
        result: True means the package is installed
        result: False means the package is not installed
    '''
    ret = {'result': False, 'comment': None}
    # If we are not passed a pip list, get one:
    # Package-name lookups below are case-insensitive via this wrapper.
    pip_list = salt.utils.data.CaseInsensitiveDict(
        pip_list or __salt__['pip.list'](prefix, bin_env=bin_env,
                                         user=user, cwd=cwd,
                                         env_vars=env_vars, **kwargs)
    )
    # If the package was already installed, check
    # the ignore_installed and force_reinstall flags
    if ignore_installed is False and prefix in pip_list:
        if force_reinstall is False and not upgrade:
            # Check desired version (if any) against currently-installed.
            # No version spec at all also counts as "already installed".
            if (
                    any(version_spec) and
                    _fulfills_version_spec(pip_list[prefix], version_spec)
            ) or (not any(version_spec)):
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
        if force_reinstall is False and upgrade:
            # Check desired version (if any) against currently-installed
            # Heuristic: include pre-releases in the candidate list only when
            # the spec text itself mentions them ('a'/'b'/'rc' substrings).
            include_alpha = False
            include_beta = False
            include_rc = False
            if any(version_spec):
                for spec in version_spec:
                    if 'a' in spec[1]:
                        include_alpha = True
                    if 'b' in spec[1]:
                        include_beta = True
                    if 'rc' in spec[1]:
                        include_rc = True
            available_versions = __salt__['pip.list_all_versions'](
                prefix, bin_env=bin_env, include_alpha=include_alpha,
                include_beta=include_beta, include_rc=include_rc, user=user,
                cwd=cwd, index_url=index_url, extra_index_url=extra_index_url)
            desired_version = ''
            # Pick the newest available version that satisfies the spec;
            # assumes available_versions is sorted oldest-to-newest — TODO
            # confirm against pip.list_all_versions.
            if any(version_spec):
                for version in reversed(available_versions):
                    if _fulfills_version_spec(version, version_spec):
                        desired_version = version
                        break
            else:
                desired_version = available_versions[-1]
            if not desired_version:
                # Upgrade requested but no candidate satisfies the spec;
                # treat the installed package as good enough.
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed and\nthe available upgrade '
                                  'doesn\'t fulfills the version '
                                  'requirements'.format(prefix))
                return ret
            if _pep440_version_cmp(pip_list[prefix], desired_version) == 0:
                # Installed version already matches the best candidate.
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
    # Fall-through: package absent, or a (re)install is required.
    return ret
def installed(name,
pkgs=None,
pip_bin=None,
requirements=None,
bin_env=None,
use_wheel=False,
no_use_wheel=False,
log=None,
proxy=None,
timeout=None,
repo=None,
editable=None,
find_links=None,
index_url=None,
extra_index_url=None,
no_index=False,
mirrors=None,
build=None,
target=None,
download=None,
download_cache=None,
source=None,
upgrade=False,
force_reinstall=False,
ignore_installed=False,
exists_action=None,
no_deps=False,
no_install=False,
no_download=False,
install_options=None,
global_options=None,
user=None,
cwd=None,
pre_releases=False,
cert=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
process_dependency_links=False,
env_vars=None,
use_vt=False,
trusted_host=None,
no_cache_dir=False,
cache_dir=None,
no_binary=None,
extra_args=None,
**kwargs):
'''
Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_cache_dir:
Disable the cache.
cwd
Current working directory to run pip from
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
extra_args
pip keyword and positional arguments not yet implemented in salt
.. code-block:: yaml
pandas:
pip.installed:
- name: pandas
- extra_args:
- --latest-pip-kwarg: param
- --latest-pip-arg
.. warning::
If unsupported options are passed here that are not supported in a
minion's version of pip, a `No such option error` will be thrown.
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
'''
if pip_bin and not bin_env:
bin_env = pip_bin
# If pkgs is present, ignore name
if pkgs:
if not isinstance(pkgs, list):
return {'name': name,
'result': False,
'changes': {},
'comment': 'pkgs argument must be formatted as a list'}
else:
pkgs = [name]
# Assumption: If `pkg` is not an `string`, it's a `collections.OrderedDict`
# prepro = lambda pkg: pkg if type(pkg) == str else \
# ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';')))
# pkgs = ','.join([prepro(pkg) for pkg in pkgs])
prepro = lambda pkg: pkg if isinstance(pkg, six.string_types) else \
' '.join((six.iteritems(pkg)[0][0], six.iteritems(pkg)[0][1]))
pkgs = [prepro(pkg) for pkg in pkgs]
ret = {'name': ';'.join(pkgs), 'result': None,
'comment': '', 'changes': {}}
try:
cur_version = __salt__['pip.version'](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Get the packages parsed name and version from the pip library.
# This only is done when there is no requirements or editable parameter.
pkgs_details = []
if pkgs and not (requirements or editable):
comments = []
for pkg in iter(pkgs):
out = _check_pkg_version_format(pkg)
if out['result'] is False:
ret['result'] = False
comments.append(out['comment'])
elif out['result'] is True:
pkgs_details.append((out['prefix'], pkg, out['version_spec']))
if ret['result'] is False:
ret['comment'] = '\n'.join(comments)
return ret
# If a requirements file is specified, only install the contents of the
# requirements file. Similarly, using the --editable flag with pip should
# also ignore the "name" and "pkgs" parameters.
target_pkgs = []
already_installed_comments = []
if requirements or editable:
comments = []
# Append comments if this is a dry run.
if __opts__['test']:
ret['result'] = None
if requirements:
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append('Requirements file \'{0}\' will be '
'processed.'.format(requirements))
if editable:
comments.append(
'Package will be installed in editable mode (i.e. '
'setuptools "develop mode") from {0}.'.format(editable)
)
ret['comment'] = ' '.join(comments)
return ret
# No requirements case.
# Check pre-existence of the requested packages.
else:
# Attempt to pre-cache a the current pip list
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
# If we fail, then just send False, and we'll try again in the next function call
except Exception as exc:
log.exception(exc)
pip_list = False
for prefix, state_pkg_name, version_spec in pkgs_details:
if prefix:
state_pkg_name = state_pkg_name
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, env_vars,
index_url, extra_index_url, pip_list,
**kwargs)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
ret['result'] = False
ret['comment'] = out['comment']
return ret
else:
out = {'result': False, 'comment': None}
result = out['result']
# The package is not present. Add it to the pkgs to install.
if result is False:
# Replace commas (used for version ranges) with semicolons
# (which are not supported) in name so it does not treat
# them as multiple packages.
target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))
# Append comments if this is a dry run.
if __opts__['test']:
msg = 'Python package {0} is set to be installed'
ret['result'] = None
ret['comment'] = msg.format(state_pkg_name)
return ret
# The package is already present and will not be reinstalled.
elif result is True:
# Append comment stating its presence
already_installed_comments.append(out['comment'])
# The command pip.list failed. Abort.
elif result is None:
ret['result'] = None
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
# Call to install the package. Actual installation takes place here
pip_install_call = __salt__['pip.install'](
pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
log=log,
proxy=proxy,
timeout=timeout,
editable=editable,
find_links=find_links,
index_url=index_url,
extra_index_url=extra_index_url,
no_index=no_index,
mirrors=mirrors,
build=build,
target=target,
download=download,
download_cache=download_cache,
source=source,
upgrade=upgrade,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
exists_action=exists_action,
no_deps=no_deps,
no_install=no_install,
no_download=no_download,
install_options=install_options,
global_options=global_options,
user=user,
cwd=cwd,
pre_releases=pre_releases,
cert=cert,
allow_all_external=allow_all_external,
allow_external=allow_external,
allow_unverified=allow_unverified,
process_dependency_links=process_dependency_links,
saltenv=__env__,
env_vars=env_vars,
use_vt=use_vt,
trusted_host=trusted_host,
no_cache_dir=no_cache_dir,
extra_args=extra_args,
**kwargs
)
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
comments = []
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',
]
for line in pip_install_call.get('stdout', '').split('\n'):
if not any(
[
line.strip().startswith(x)
for x in PIP_REQUIREMENTS_NOCHANGE
]
):
ret['changes']['requirements'] = True
if ret['changes'].get('requirements'):
comments.append('Successfully processed requirements file '
'{0}.'.format(requirements))
else:
comments.append('Requirements were already installed.')
if editable:
comments.append('Package successfully installed from VCS '
'checkout {0}.'.format(editable))
ret['changes']['editable'] = True
ret['comment'] = ' '.join(comments)
else:
# Check that the packages set to be installed were installed.
# Create comments reporting success and failures
pkg_404_comms = []
already_installed_packages = set()
for line in pip_install_call.get('stdout', '').split('\n'):
# Output for already installed packages:
# 'Requirement already up-to-date: jinja2 in /usr/local/lib/python2.7/dist-packages\nCleaning up...'
if line.startswith('Requirement already up-to-date: '):
package = line.split(':', 1)[1].split()[0]
already_installed_packages.add(package.lower())
for prefix, state_name in target_pkgs:
# Case for packages that are not an URL
if prefix:
pipsearch = salt.utils.data.CaseInsensitiveDict(
__salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd,
env_vars=env_vars,
**kwargs)
)
# If we didn't find the package in the system after
# installing it report it
if not pipsearch:
pkg_404_comms.append(
'There was no error installing package \'{0}\' '
'although it does not show when calling '
'\'pip.freeze\'.'.format(pkg)
)
else:
if prefix in pipsearch \
and prefix.lower() not in already_installed_packages:
ver = pipsearch[prefix]
ret['changes']['{0}=={1}'.format(prefix, ver)] = 'Installed'
# Case for packages that are an URL
else:
ret['changes']['{0}==???'.format(state_name)] = 'Installed'
# Set comments
aicomms = '\n'.join(already_installed_comments)
succ_comm = 'All packages were successfully installed'\
if not pkg_404_comms else '\n'.join(pkg_404_comms)
ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm
return ret
elif pip_install_call:
ret['result'] = False
if 'stdout' in pip_install_call:
error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
pip_install_call['stderr'])
else:
error = 'Error: {0}'.format(pip_install_call['comment'])
if requirements or editable:
comments = []
if requirements:
comments.append('Unable to process requirements file '
'"{0}".'.format(requirements))
if editable:
comments.append('Unable to install from VCS checkout'
'{0}.'.format(editable))
comments.append(error)
ret['comment'] = ' '.join(comments)
else:
pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
aicomms = '\n'.join(already_installed_comments)
error_comm = ('Failed to install packages: {0}. '
'{1}'.format(pkgs_str, error))
ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
else:
ret['result'] = False
ret['comment'] = 'Could not install package'
return ret
def removed(name,
            requirements=None,
            bin_env=None,
            log=None,
            proxy=None,
            timeout=None,
            user=None,
            cwd=None,
            use_vt=False):
    '''
    Ensure a python package is absent.

    name
        The name of the package to uninstall
    user
        The user under which to run pip
    bin_env : None
        The pip executable or virtualenv to use
    use_vt
        Use VT terminal emulation (see output while uninstalling)
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # Not being able to list installed packages at all is a hard failure.
    try:
        installed_pkgs = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret['result'] = False
        ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
        return ret

    # Nothing to do when the package is not present.
    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package is not installed.'
        return ret

    # Dry run: report intent without acting.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Package {0} is set to be removed'.format(name)
        return ret

    uninstall_ok = __salt__['pip.uninstall'](pkgs=name,
                                             requirements=requirements,
                                             bin_env=bin_env,
                                             log=log,
                                             proxy=proxy,
                                             timeout=timeout,
                                             user=user,
                                             cwd=cwd,
                                             use_vt=use_vt)
    if uninstall_ok:
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package was successfully removed.'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not remove package.'
    return ret
def uptodate(name,
             bin_env=None,
             user=None,
             cwd=None,
             use_vt=False):
    '''
    .. versionadded:: 2015.5.0

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference
    user
        The user under which to run pip
    bin_env
        The pip executable or virtualenv to use
    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    # Pessimistic default; overwritten on every successful branch below.
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update.'}

    try:
        outdated = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd)
    except Exception as exc:  # broad by design: surface any listing failure
        ret['comment'] = six.text_type(exc)
        return ret

    if not outdated:
        ret['result'] = True
        ret['comment'] = 'System is already up-to-date.'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'System update will be performed'
        return ret

    upgrade_info = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt)
    if upgrade_info.get('result') is False:
        # The execution module reported failure; pass its payload through.
        ret.update(upgrade_info)
    elif upgrade_info:
        ret['changes'] = upgrade_info
        ret['comment'] = 'Upgrade successful.'
        ret['result'] = True
    else:
        ret['comment'] = 'Upgrade failed.'
    return ret
def mod_aggregate(low, chunks, running):
    '''
    The mod_aggregate function which looks up all packages in the available
    low chunks and merges them into a single pkgs ref in the present low data
    '''
    # Aggregation is only meaningful for these state functions.
    if low.get('fun') not in ('installed', 'removed'):
        return low

    collected = []
    pkg_type = None
    for chunk in chunks:
        # Already executed chunks must not be folded in again.
        if __utils__['state.gen_tag'](chunk) in running:
            continue
        if chunk.get('state') != 'pip':
            continue
        # Skip chunks already claimed by a previous aggregation pass.
        if '__agg__' in chunk:
            continue
        # Only merge chunks calling the same function as ``low``.
        if chunk.get('fun') != low.get('fun'):
            continue
        if pkg_type is None:
            pkg_type = 'pkgs'
        if pkg_type == 'pkgs':
            # Pull out the pkg names!
            if 'pkgs' in chunk:
                collected.extend(chunk['pkgs'])
                chunk['__agg__'] = True
            elif 'name' in chunk:
                version = chunk.pop('version', None)
                if version is None:
                    collected.append(chunk['name'])
                else:
                    collected.append({chunk['name']: version})
                chunk['__agg__'] = True

    if pkg_type is not None and collected:
        low.setdefault(pkg_type, []).extend(collected)
    return low
|
saltstack/salt
|
salt/states/pip_state.py
|
installed
|
python
|
def installed(name,
              pkgs=None,
              pip_bin=None,
              requirements=None,
              bin_env=None,
              use_wheel=False,
              no_use_wheel=False,
              log=None,
              proxy=None,
              timeout=None,
              repo=None,
              editable=None,
              find_links=None,
              index_url=None,
              extra_index_url=None,
              no_index=False,
              mirrors=None,
              build=None,
              target=None,
              download=None,
              download_cache=None,
              source=None,
              upgrade=False,
              force_reinstall=False,
              ignore_installed=False,
              exists_action=None,
              no_deps=False,
              no_install=False,
              no_download=False,
              install_options=None,
              global_options=None,
              user=None,
              cwd=None,
              pre_releases=False,
              cert=None,
              allow_all_external=False,
              allow_external=None,
              allow_unverified=None,
              process_dependency_links=False,
              env_vars=None,
              use_vt=False,
              trusted_host=None,
              no_cache_dir=False,
              cache_dir=None,
              no_binary=None,
              extra_args=None,
              **kwargs):
    '''
    Make sure the package is installed

    name
        The name of the python package to install. You can also specify version
        numbers here using the standard operators ``==, >=, <=``. If
        ``requirements`` is given, this parameter will be ignored.

        Example:

        .. code-block:: yaml

            django:
              pip.installed:
                - name: django >= 1.6, <= 1.7
                - require:
                  - pkg: python-pip

        This will install the latest Django version greater than 1.6 but less
        than 1.7.

    requirements
        Path to a pip requirements file. If the path begins with salt://
        the file will be transferred from the master file server.

    user
        The user under which to run pip

    use_wheel : False
        Prefer wheel archives (requires pip>=1.4)

    no_use_wheel : False
        Force to not use wheel archives (requires pip>=1.4)

    no_binary
        Force to not use binary packages (requires pip >= 7.0.0)
        Accepts either :all: to disable all binary packages, :none: to empty
        the set, or a list of one or more packages

        Example:

        .. code-block:: yaml

            django:
              pip.installed:
                - no_binary: ':all:'

            flask:
              pip.installed:
                - no_binary:
                  - itsdangerous
                  - click

    log
        Log file where a complete (maximum verbosity) record will be kept

    proxy
        Specify a proxy in the form ``user:passwd@proxy.server:port``. The
        ``user:password@`` part is optional and required only if you are
        behind an authenticated proxy. If you provide
        ``user@proxy.server:port`` then you will be prompted for a password.

    timeout
        Set the socket timeout (default 15 seconds)

    editable
        install something editable (i.e.
        git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)

    find_links
        URL to look for packages at

    index_url
        Base URL of Python Package Index

    extra_index_url
        Extra URLs of package indexes to use in addition to ``index_url``

    no_index
        Ignore package index

    mirrors
        Specific mirror URL(s) to query (automatically adds --use-mirrors)

    build
        Unpack packages into ``build`` dir

    target
        Install packages into ``target`` dir

    download
        Download packages into ``download`` instead of installing them

    download_cache
        Cache downloaded packages in ``download_cache`` dir

    source
        Check out ``editable`` packages into ``source`` dir

    upgrade
        Upgrade all packages to the newest available version

    force_reinstall
        When upgrading, reinstall all packages even if they are already
        up-to-date.

    ignore_installed
        Ignore the installed packages (reinstalling instead)

    exists_action
        Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
        (b)ackup

    no_deps
        Ignore package dependencies

    no_install
        Download and unpack all packages, but don't actually install them

    no_cache_dir
        Disable the cache.

    cwd
        Current working directory to run pip from

    pre_releases
        Include pre-releases in the available versions

    cert
        Provide a path to an alternate CA bundle

    allow_all_external
        Allow the installation of all externally hosted files

    allow_external
        Allow the installation of externally hosted files (comma separated
        list)

    allow_unverified
        Allow the installation of insecure and unverifiable files (comma
        separated list)

    process_dependency_links
        Enable the processing of dependency links

    bin_env : None
        Absolute path to a virtual environment directory or absolute path to
        a pip executable. For example ``/foo/.virtualenvs/bar`` or
        ``/foo/.virtualenvs/bar/bin/pip``.

    env_vars
        Add or modify environment variables. Useful for tweaking build steps,
        such as specifying INCLUDE or LIBRARY paths in Makefiles, build
        scripts or compiler calls. This must be in the form of a dictionary
        or a mapping.

        Example:

        .. code-block:: yaml

            django:
              pip.installed:
                - name: django_app
                - env_vars:
                    CUSTOM_PATH: /opt/django_app
                    VERBOSE: True

    use_vt
        Use VT terminal emulation (see output while installing)

    trusted_host
        Mark this host as trusted, even though it does not have valid or any
        HTTPS.

    install_options
        Extra arguments to be supplied to the setup.py install command.
        If you are using an option with a directory path, be sure to use
        absolute path.

    global_options
        Extra global options to be supplied to the setup.py call before the
        install command.

        .. versionadded:: 2014.1.3

    extra_args
        pip keyword and positional arguments not yet implemented in salt

        .. code-block:: yaml

            pandas:
              pip.installed:
                - name: pandas
                - extra_args:
                  - --latest-pip-kwarg: param
                  - --latest-pip-arg

        .. warning::

            If unsupported options are passed here that are not supported in a
            minion's version of pip, a `No such option error` will be thrown.

    .. admonition:: Attention

        The following arguments are deprecated, do not use.

    pip_bin : None
        Deprecated, use ``bin_env``

    .. versionchanged:: 0.17.0
        ``use_wheel`` option added.
    '''
    if pip_bin and not bin_env:
        bin_env = pip_bin

    # If pkgs is present, ignore name
    if pkgs:
        if not isinstance(pkgs, list):
            return {'name': name,
                    'result': False,
                    'changes': {},
                    'comment': 'pkgs argument must be formatted as a list'}
    else:
        pkgs = [name]

    # Each pkgs entry is either a plain string ('django >= 1.6') or a
    # single-key OrderedDict ({'django': '>= 1.6'}) coming from the SLS data.
    # Normalize mappings into 'name spec' strings.
    # BUGFIX: the old lambda subscripted six.iteritems(pkg)[0], but
    # iteritems() returns an iterator which is not subscriptable, so any
    # dict-style entry raised a TypeError.
    def _normalize_pkg(pkg):
        if isinstance(pkg, six.string_types):
            return pkg
        pkg_name, version_spec_ = next(iter(six.iteritems(pkg)))
        return ' '.join((pkg_name, version_spec_))

    pkgs = [_normalize_pkg(pkg) for pkg in pkgs]

    ret = {'name': ';'.join(pkgs), 'result': None,
           'comment': '', 'changes': {}}

    try:
        cur_version = __salt__['pip.version'](bin_env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = None
        ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
        return ret

    # Check that the pip binary supports the 'use_wheel' option
    if use_wheel:
        min_version = '1.4'
        max_version = '9.0.3'
        too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
        too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
        if too_low or too_high:
            ret['result'] = False
            ret['comment'] = ('The \'use_wheel\' option is only supported in '
                              'pip between {0} and {1}. The version of pip detected '
                              'was {2}.').format(min_version, max_version, cur_version)
            return ret

    # Check that the pip binary supports the 'no_use_wheel' option
    if no_use_wheel:
        min_version = '1.4'
        max_version = '9.0.3'
        too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
        too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
        if too_low or too_high:
            ret['result'] = False
            ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
                              'pip between {0} and {1}. The version of pip detected '
                              'was {2}.').format(min_version, max_version, cur_version)
            return ret

    # Check that the pip binary supports the 'no_binary' option
    if no_binary:
        min_version = '7.0.0'
        too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
        if too_low:
            ret['result'] = False
            ret['comment'] = ('The \'no_binary\' option is only supported in '
                              'pip {0} and newer. The version of pip detected '
                              'was {1}.').format(min_version, cur_version)
            return ret

    # Get the packages parsed name and version from the pip library.
    # This only is done when there is no requirements or editable parameter.
    pkgs_details = []
    if pkgs and not (requirements or editable):
        comments = []
        for pkg in iter(pkgs):
            out = _check_pkg_version_format(pkg)
            if out['result'] is False:
                ret['result'] = False
                comments.append(out['comment'])
            elif out['result'] is True:
                pkgs_details.append((out['prefix'], pkg, out['version_spec']))

        if ret['result'] is False:
            ret['comment'] = '\n'.join(comments)
            return ret

    # If a requirements file is specified, only install the contents of the
    # requirements file. Similarly, using the --editable flag with pip should
    # also ignore the "name" and "pkgs" parameters.
    target_pkgs = []
    already_installed_comments = []
    if requirements or editable:
        comments = []
        # Append comments if this is a dry run.
        if __opts__['test']:
            ret['result'] = None
            if requirements:
                # TODO: Check requirements file against currently-installed
                # packages to provide more accurate state output.
                comments.append('Requirements file \'{0}\' will be '
                                'processed.'.format(requirements))
            if editable:
                comments.append(
                    'Package will be installed in editable mode (i.e. '
                    'setuptools "develop mode") from {0}.'.format(editable)
                )
            ret['comment'] = ' '.join(comments)
            return ret

    # No requirements case.
    # Check pre-existence of the requested packages.
    else:
        # Attempt to pre-cache the current pip list
        try:
            pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
        # If we fail, then just send False, and we'll try again in the next function call
        except Exception as exc:
            log.exception(exc)
            pip_list = False

        for prefix, state_pkg_name, version_spec in pkgs_details:
            # prefix is falsy for URL/VCS-style requirements, which cannot be
            # checked against the installed-package list.
            if prefix:
                out = _check_if_installed(prefix, state_pkg_name, version_spec,
                                          ignore_installed, force_reinstall,
                                          upgrade, user, cwd, bin_env, env_vars,
                                          index_url, extra_index_url, pip_list,
                                          **kwargs)
                # If _check_if_installed result is None, something went wrong with
                # the command running. This way we keep stateful output.
                if out['result'] is None:
                    ret['result'] = False
                    ret['comment'] = out['comment']
                    return ret
            else:
                out = {'result': False, 'comment': None}

            result = out['result']

            # The package is not present. Add it to the pkgs to install.
            if result is False:
                # Replace commas (used for version ranges) with semicolons
                # (which are not supported) in name so it does not treat
                # them as multiple packages.
                target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))

                # Append comments if this is a dry run.
                if __opts__['test']:
                    msg = 'Python package {0} is set to be installed'
                    ret['result'] = None
                    ret['comment'] = msg.format(state_pkg_name)
                    return ret

            # The package is already present and will not be reinstalled.
            elif result is True:
                # Append comment stating its presence
                already_installed_comments.append(out['comment'])

            # The command pip.list failed. Abort.
            elif result is None:
                ret['result'] = None
                ret['comment'] = out['comment']
                return ret

        # No packages to install.
        if not target_pkgs:
            ret['result'] = True
            aicomms = '\n'.join(already_installed_comments)
            last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
            ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
            return ret

    # Construct the string that will get passed to the install call
    pkgs_str = ','.join([state_name for _, state_name in target_pkgs])

    # Call to install the package. Actual installation takes place here
    pip_install_call = __salt__['pip.install'](
        pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
        requirements=requirements,
        bin_env=bin_env,
        use_wheel=use_wheel,
        no_use_wheel=no_use_wheel,
        no_binary=no_binary,
        log=log,
        proxy=proxy,
        timeout=timeout,
        editable=editable,
        find_links=find_links,
        index_url=index_url,
        extra_index_url=extra_index_url,
        no_index=no_index,
        mirrors=mirrors,
        build=build,
        target=target,
        download=download,
        download_cache=download_cache,
        source=source,
        upgrade=upgrade,
        force_reinstall=force_reinstall,
        ignore_installed=ignore_installed,
        exists_action=exists_action,
        no_deps=no_deps,
        no_install=no_install,
        no_download=no_download,
        install_options=install_options,
        global_options=global_options,
        user=user,
        cwd=cwd,
        pre_releases=pre_releases,
        cert=cert,
        allow_all_external=allow_all_external,
        allow_external=allow_external,
        allow_unverified=allow_unverified,
        process_dependency_links=process_dependency_links,
        saltenv=__env__,
        env_vars=env_vars,
        use_vt=use_vt,
        trusted_host=trusted_host,
        no_cache_dir=no_cache_dir,
        extra_args=extra_args,
        **kwargs
    )

    if pip_install_call and pip_install_call.get('retcode', 1) == 0:
        ret['result'] = True

        if requirements or editable:
            comments = []
            if requirements:
                PIP_REQUIREMENTS_NOCHANGE = [
                    'Requirement already satisfied',
                    'Requirement already up-to-date',
                    'Requirement not upgraded',
                    'Collecting',
                    'Cloning',
                    'Cleaning up...',
                ]
                for line in pip_install_call.get('stdout', '').split('\n'):
                    # BUGFIX: skip blank lines; ''.startswith(...) is False
                    # for every marker, so empty output used to always be
                    # flagged as a change.
                    if line.strip() and not any(
                        [
                            line.strip().startswith(x)
                            for x in PIP_REQUIREMENTS_NOCHANGE
                        ]
                    ):
                        ret['changes']['requirements'] = True
                if ret['changes'].get('requirements'):
                    comments.append('Successfully processed requirements file '
                                    '{0}.'.format(requirements))
                else:
                    comments.append('Requirements were already installed.')

            if editable:
                comments.append('Package successfully installed from VCS '
                                'checkout {0}.'.format(editable))
                ret['changes']['editable'] = True
            ret['comment'] = ' '.join(comments)
        else:
            # Check that the packages set to be installed were installed.
            # Create comments reporting success and failures
            pkg_404_comms = []

            already_installed_packages = set()
            for line in pip_install_call.get('stdout', '').split('\n'):
                # Output for already installed packages:
                # 'Requirement already up-to-date: jinja2 in /usr/local/lib/python2.7/dist-packages\nCleaning up...'
                if line.startswith('Requirement already up-to-date: '):
                    package = line.split(':', 1)[1].split()[0]
                    already_installed_packages.add(package.lower())

            for prefix, state_name in target_pkgs:

                # Case for packages that are not an URL
                if prefix:
                    pipsearch = salt.utils.data.CaseInsensitiveDict(
                        __salt__['pip.list'](prefix, bin_env,
                                             user=user, cwd=cwd,
                                             env_vars=env_vars,
                                             **kwargs)
                    )

                    # If we didn't find the package in the system after
                    # installing it report it
                    if not pipsearch:
                        # BUGFIX: report the package being verified; the old
                        # code interpolated the stale loop variable 'pkg'
                        # left over from the normalization loop above.
                        pkg_404_comms.append(
                            'There was no error installing package \'{0}\' '
                            'although it does not show when calling '
                            '\'pip.freeze\'.'.format(prefix)
                        )
                    else:
                        if prefix in pipsearch \
                                and prefix.lower() not in already_installed_packages:
                            ver = pipsearch[prefix]
                            ret['changes']['{0}=={1}'.format(prefix, ver)] = 'Installed'
                # Case for packages that are an URL
                else:
                    ret['changes']['{0}==???'.format(state_name)] = 'Installed'

            # Set comments
            aicomms = '\n'.join(already_installed_comments)
            succ_comm = 'All packages were successfully installed'\
                if not pkg_404_comms else '\n'.join(pkg_404_comms)
            ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm

        return ret

    elif pip_install_call:
        ret['result'] = False
        if 'stdout' in pip_install_call:
            error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
                                            pip_install_call['stderr'])
        else:
            error = 'Error: {0}'.format(pip_install_call['comment'])

        if requirements or editable:
            comments = []
            if requirements:
                comments.append('Unable to process requirements file '
                                '"{0}".'.format(requirements))
            if editable:
                comments.append('Unable to install from VCS checkout'
                                '{0}.'.format(editable))
            comments.append(error)
            ret['comment'] = ' '.join(comments)
        else:
            pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
            aicomms = '\n'.join(already_installed_comments)
            error_comm = ('Failed to install packages: {0}. '
                          '{1}'.format(pkgs_str, error))
            ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install package'

    return ret
|
Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_cache_dir:
Disable the cache.
cwd
Current working directory to run pip from
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
extra_args
pip keyword and positional arguments not yet implemented in salt
.. code-block:: yaml
pandas:
pip.installed:
- name: pandas
- extra_args:
- --latest-pip-kwarg: param
- --latest-pip-arg
.. warning::
If unsupported options are passed here that are not supported in a
minion's version of pip, a `No such option error` will be thrown.
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L300-L961
|
[
"def compare(ver1='', oper='==', ver2='', cmp_func=None, ignore_epoch=False):\n '''\n Compares two version numbers. Accepts a custom function to perform the\n cmp-style version comparison, otherwise uses version_cmp().\n '''\n cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,),\n '>=': (0, 1), '>': (1,)}\n if oper not in ('!=',) and oper not in cmp_map:\n log.error('Invalid operator \\'%s\\' for version comparison', oper)\n return False\n\n if cmp_func is None:\n cmp_func = version_cmp\n\n cmp_result = cmp_func(ver1, ver2, ignore_epoch=ignore_epoch)\n if cmp_result is None:\n return False\n\n # Check if integer/long\n if not isinstance(cmp_result, numbers.Integral):\n log.error('The version comparison function did not return an '\n 'integer/long.')\n return False\n\n if oper == '!=':\n return cmp_result not in cmp_map['==']\n else:\n # Gracefully handle cmp_result not in (-1, 0, 1).\n if cmp_result < -1:\n cmp_result = -1\n elif cmp_result > 1:\n cmp_result = 1\n\n return cmp_result in cmp_map[oper]\n",
"def _check_pkg_version_format(pkg):\n '''\n Takes a package name and version specification (if any) and checks it using\n the pip library.\n '''\n\n ret = {'result': False, 'comment': None,\n 'prefix': None, 'version_spec': None}\n\n if not HAS_PIP:\n ret['comment'] = (\n 'An importable Python 2 pip module is required but could not be '\n 'found on your system. This usually means that the system\\'s pip '\n 'package is not installed properly.'\n )\n\n return ret\n\n from_vcs = False\n try:\n # Get the requirement object from the pip library\n try:\n # With pip < 1.2, the __version__ attribute does not exist and\n # vcs+URL urls are not properly parsed.\n # The next line is meant to trigger an AttributeError and\n # handle lower pip versions\n log.debug('Installed pip version: %s', pip.__version__)\n install_req = _from_line(pkg)\n except AttributeError:\n log.debug('Installed pip version is lower than 1.2')\n supported_vcs = ('git', 'svn', 'hg', 'bzr')\n if pkg.startswith(supported_vcs):\n for vcs in supported_vcs:\n if pkg.startswith(vcs):\n from_vcs = True\n install_req = _from_line(\n pkg.split('{0}+'.format(vcs))[-1]\n )\n break\n else:\n install_req = _from_line(pkg)\n except (ValueError, InstallationError) as exc:\n ret['result'] = False\n if not from_vcs and '=' in pkg and '==' not in pkg:\n ret['comment'] = (\n 'Invalid version specification in package {0}. 
\\'=\\' is '\n 'not supported, use \\'==\\' instead.'.format(pkg)\n )\n return ret\n ret['comment'] = (\n 'pip raised an exception while parsing \\'{0}\\': {1}'.format(\n pkg, exc\n )\n )\n return ret\n\n if install_req.req is None:\n # This is most likely an url and there's no way to know what will\n # be installed before actually installing it.\n ret['result'] = True\n ret['prefix'] = ''\n ret['version_spec'] = []\n else:\n ret['result'] = True\n try:\n ret['prefix'] = install_req.req.project_name\n ret['version_spec'] = install_req.req.specs\n except Exception:\n ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)\n if hasattr(install_req, \"specifier\"):\n specifier = install_req.specifier\n else:\n specifier = install_req.req.specifier\n ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]\n\n return ret\n",
"def _check_if_installed(prefix,\n state_pkg_name,\n version_spec,\n ignore_installed,\n force_reinstall,\n upgrade,\n user,\n cwd,\n bin_env,\n env_vars,\n index_url,\n extra_index_url,\n pip_list=False,\n **kwargs):\n '''\n Takes a package name and version specification (if any) and checks it is\n installed\n\n Keyword arguments include:\n pip_list: optional dict of installed pip packages, and their versions,\n to search through to check if the package is installed. If not\n provided, one will be generated in this function by querying the\n system.\n\n Returns:\n result: None means the command failed to run\n result: True means the package is installed\n result: False means the package is not installed\n '''\n ret = {'result': False, 'comment': None}\n\n # If we are not passed a pip list, get one:\n pip_list = salt.utils.data.CaseInsensitiveDict(\n pip_list or __salt__['pip.list'](prefix, bin_env=bin_env,\n user=user, cwd=cwd,\n env_vars=env_vars, **kwargs)\n )\n\n # If the package was already installed, check\n # the ignore_installed and force_reinstall flags\n if ignore_installed is False and prefix in pip_list:\n if force_reinstall is False and not upgrade:\n # Check desired version (if any) against currently-installed\n if (\n any(version_spec) and\n _fulfills_version_spec(pip_list[prefix], version_spec)\n ) or (not any(version_spec)):\n ret['result'] = True\n ret['comment'] = ('Python package {0} was already '\n 'installed'.format(state_pkg_name))\n return ret\n if force_reinstall is False and upgrade:\n # Check desired version (if any) against currently-installed\n include_alpha = False\n include_beta = False\n include_rc = False\n if any(version_spec):\n for spec in version_spec:\n if 'a' in spec[1]:\n include_alpha = True\n if 'b' in spec[1]:\n include_beta = True\n if 'rc' in spec[1]:\n include_rc = True\n available_versions = __salt__['pip.list_all_versions'](\n prefix, bin_env=bin_env, include_alpha=include_alpha,\n include_beta=include_beta, 
include_rc=include_rc, user=user,\n cwd=cwd, index_url=index_url, extra_index_url=extra_index_url)\n desired_version = ''\n if any(version_spec):\n for version in reversed(available_versions):\n if _fulfills_version_spec(version, version_spec):\n desired_version = version\n break\n else:\n desired_version = available_versions[-1]\n if not desired_version:\n ret['result'] = True\n ret['comment'] = ('Python package {0} was already '\n 'installed and\\nthe available upgrade '\n 'doesn\\'t fulfills the version '\n 'requirements'.format(prefix))\n return ret\n if _pep440_version_cmp(pip_list[prefix], desired_version) == 0:\n ret['result'] = True\n ret['comment'] = ('Python package {0} was already '\n 'installed'.format(state_pkg_name))\n return ret\n\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Installation of Python Packages Using pip
=========================================
These states manage system installed python packages. Note that pip must be
installed for these states to be available, so pip states should include a
requisite to a pkg.installed state for the package which provides pip
(``python-pip`` in most cases). Example:
.. code-block:: yaml
python-pip:
pkg.installed
virtualenvwrapper:
pip.installed:
- require:
- pkg: python-pip
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
# Import salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import pip
HAS_PIP = True
except ImportError:
HAS_PIP = False
if HAS_PIP is True:
try:
from pip.req import InstallRequirement
_from_line = InstallRequirement.from_line
except ImportError:
# pip 10.0.0 move req module under pip._internal
try:
try:
from pip._internal.req import InstallRequirement
_from_line = InstallRequirement.from_line
except AttributeError:
from pip._internal.req.constructors import install_req_from_line as _from_line
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
try:
from pip.exceptions import InstallationError
except ImportError:
InstallationError = ValueError
# pylint: enable=import-error
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pip'
def __virtual__():
    '''
    Decide whether this state module should load.

    Requires the ``pkg_resources`` library to be importable and the
    ``pip.list`` execution function to be present in ``__salt__``.
    '''
    # pkg_resources is needed for the PEP 440 version comparisons below.
    if HAS_PKG_RESOURCES is False:
        return False, 'The pkg_resources python library is not installed'
    # Only load when the pip execution module is available.
    return __virtualname__ if 'pip.list' in __salt__ else False
def _fulfills_version_spec(version, version_spec):
    '''
    Return True if ``version`` satisfies every (operator, version) pair in
    ``version_spec``. Pairs whose operator is None are ignored; an empty
    spec is trivially satisfied.
    '''
    return all(
        salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec,
                                    cmp_func=_pep440_version_cmp)
        for oper, spec in version_spec
        if oper is not None
    )
def _check_pkg_version_format(pkg):
    '''
    Takes a package name and version specification (if any) and checks it using
    the pip library.

    Returns a dict with keys:
        result       -- True when the requirement parsed cleanly
        comment      -- error message when result is False
        prefix       -- project-name portion of the requirement ('' for URLs)
        version_spec -- list of (operator, version) tuples
    '''
    ret = {'result': False, 'comment': None,
           'prefix': None, 'version_spec': None}

    if not HAS_PIP:
        ret['comment'] = (
            'An importable Python 2 pip module is required but could not be '
            'found on your system. This usually means that the system\'s pip '
            'package is not installed properly.'
        )

        return ret

    from_vcs = False
    try:
        # Get the requirement object from the pip library
        try:
            # With pip < 1.2, the __version__ attribute does not exist and
            # vcs+URL urls are not properly parsed.
            # The next line is meant to trigger an AttributeError and
            # handle lower pip versions
            log.debug('Installed pip version: %s', pip.__version__)
            install_req = _from_line(pkg)
        except AttributeError:
            log.debug('Installed pip version is lower than 1.2')
            # Old pip cannot parse "vcs+URL" requirements; strip the VCS
            # scheme prefix by hand before handing the rest to pip.
            supported_vcs = ('git', 'svn', 'hg', 'bzr')
            if pkg.startswith(supported_vcs):
                for vcs in supported_vcs:
                    if pkg.startswith(vcs):
                        from_vcs = True
                        install_req = _from_line(
                            pkg.split('{0}+'.format(vcs))[-1]
                        )
                        break
            else:
                install_req = _from_line(pkg)
    except (ValueError, InstallationError) as exc:
        ret['result'] = False
        # A single '=' (instead of '==') is the most common user mistake;
        # surface a targeted hint for it.
        if not from_vcs and '=' in pkg and '==' not in pkg:
            ret['comment'] = (
                'Invalid version specification in package {0}. \'=\' is '
                'not supported, use \'==\' instead.'.format(pkg)
            )
            return ret
        ret['comment'] = (
            'pip raised an exception while parsing \'{0}\': {1}'.format(
                pkg, exc
            )
        )
        return ret

    if install_req.req is None:
        # This is most likely an url and there's no way to know what will
        # be installed before actually installing it.
        ret['result'] = True
        ret['prefix'] = ''
        ret['version_spec'] = []
    else:
        ret['result'] = True
        try:
            # pip < 8: requirement objects expose project_name/specs directly.
            ret['prefix'] = install_req.req.project_name
            ret['version_spec'] = install_req.req.specs
        except Exception:
            # Newer pip: derive the name, and translate the specifier set
            # into the legacy (operator, version) tuple format.
            ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
            if hasattr(install_req, "specifier"):
                specifier = install_req.specifier
            else:
                specifier = install_req.req.specifier
            ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]

    return ret
def _check_if_installed(prefix,
                        state_pkg_name,
                        version_spec,
                        ignore_installed,
                        force_reinstall,
                        upgrade,
                        user,
                        cwd,
                        bin_env,
                        env_vars,
                        index_url,
                        extra_index_url,
                        pip_list=False,
                        **kwargs):
    '''
    Takes a package name and version specification (if any) and checks it is
    installed

    Keyword arguments include:
        pip_list: optional dict of installed pip packages, and their versions,
            to search through to check if the package is installed. If not
            provided, one will be generated in this function by querying the
            system.

    Returns:
        result: None means the command failed to run
        result: True means the package is installed
        result: False means the package is not installed
    '''
    ret = {'result': False, 'comment': None}

    # If we are not passed a pip list, get one:
    pip_list = salt.utils.data.CaseInsensitiveDict(
        pip_list or __salt__['pip.list'](prefix, bin_env=bin_env,
                                         user=user, cwd=cwd,
                                         env_vars=env_vars, **kwargs)
    )

    # If the package was already installed, check
    # the ignore_installed and force_reinstall flags
    if ignore_installed is False and prefix in pip_list:
        if force_reinstall is False and not upgrade:
            # Check desired version (if any) against currently-installed
            if (
                    any(version_spec) and
                    _fulfills_version_spec(pip_list[prefix], version_spec)
            ) or (not any(version_spec)):
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
        if force_reinstall is False and upgrade:
            # Check desired version (if any) against currently-installed.
            # Pre-release markers in the spec decide whether alpha/beta/rc
            # candidates should be considered as upgrade targets.
            include_alpha = False
            include_beta = False
            include_rc = False
            if any(version_spec):
                for spec in version_spec:
                    if 'a' in spec[1]:
                        include_alpha = True
                    if 'b' in spec[1]:
                        include_beta = True
                    if 'rc' in spec[1]:
                        include_rc = True
            available_versions = __salt__['pip.list_all_versions'](
                prefix, bin_env=bin_env, include_alpha=include_alpha,
                include_beta=include_beta, include_rc=include_rc, user=user,
                cwd=cwd, index_url=index_url, extra_index_url=extra_index_url)
            desired_version = ''
            if any(version_spec):
                # Pick the newest available version satisfying the spec.
                for version in reversed(available_versions):
                    if _fulfills_version_spec(version, version_spec):
                        desired_version = version
                        break
            else:
                desired_version = available_versions[-1]
            if not desired_version:
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed and\nthe available upgrade '
                                  'doesn\'t fulfills the version '
                                  'requirements'.format(prefix))
                return ret
            if _pep440_version_cmp(pip_list[prefix], desired_version) == 0:
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret

    return ret
def _pep440_version_cmp(pkg1, pkg2, ignore_epoch=False):
    '''
    Compares two version strings using pkg_resources.parse_version.
    Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2, and 1 if pkg1 > pkg2.
    Return None if there was a problem making the comparison (e.g. a value
    that pkg_resources cannot handle).
    '''
    # PEP 8: use a real function instead of a lambda bound to a name.
    def normalize(x):
        # Optionally strip the PEP 440 epoch ("N!") prefix before comparing.
        return six.text_type(x).split('!', 1)[-1] \
            if ignore_epoch else six.text_type(x)

    pkg1 = normalize(pkg1)
    pkg2 = normalize(pkg2)
    try:
        # Parse each version once (the original re-parsed per comparison)
        # and rely on the rich comparisons of the parsed objects.
        parsed1 = pkg_resources.parse_version(pkg1)
        parsed2 = pkg_resources.parse_version(pkg2)
        if parsed1 < parsed2:
            return -1
        if parsed1 > parsed2:
            return 1
        return 0
    except Exception as exc:
        log.exception(exc)
    return None
def removed(name,
            requirements=None,
            bin_env=None,
            log=None,
            proxy=None,
            timeout=None,
            user=None,
            cwd=None,
            use_vt=False):
    '''
    Make sure that a package is not installed.

    name
        The name of the package to uninstall
    user
        The user under which to run pip
    bin_env : None
        the pip executable or virtualenv to use
    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    # Listing installed packages can itself fail (missing/broken pip).
    try:
        installed = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret['result'] = False
        ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
        return ret

    # Nothing to do when the package is absent.
    if name not in installed:
        ret['result'] = True
        ret['comment'] = 'Package is not installed.'
        return ret

    # Dry run: report the pending removal without acting.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Package {0} is set to be removed'.format(name)
        return ret

    uninstalled = __salt__['pip.uninstall'](pkgs=name,
                                            requirements=requirements,
                                            bin_env=bin_env,
                                            log=log,
                                            proxy=proxy,
                                            timeout=timeout,
                                            user=user,
                                            cwd=cwd,
                                            use_vt=use_vt)
    if uninstalled:
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package was successfully removed.'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not remove package.'
    return ret
def uptodate(name,
             bin_env=None,
             user=None,
             cwd=None,
             use_vt=False):
    '''
    .. versionadded:: 2015.5.0

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference
    user
        The user under which to run pip
    bin_env
        the pip executable or virtualenv to use
    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update.'}

    try:
        pending = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd)
    except Exception as e:
        ret['comment'] = six.text_type(e)
        return ret

    # No upgradable packages means we are already done.
    if not pending:
        ret['comment'] = 'System is already up-to-date.'
        ret['result'] = True
        return ret

    # Dry run: announce the pending upgrade.
    if __opts__['test']:
        ret['comment'] = 'System update will be performed'
        ret['result'] = None
        return ret

    upgraded = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt)
    if upgraded.get('result') is False:
        # pip.upgrade reported a failure; propagate its result/comment.
        ret.update(upgraded)
    elif upgraded:
        ret['changes'] = upgraded
        ret['comment'] = 'Upgrade successful.'
        ret['result'] = True
    else:
        ret['comment'] = 'Upgrade failed.'
    return ret
def mod_aggregate(low, chunks, running):
    '''
    The mod_aggregate function which looks up all packages in the available
    low chunks and merges them into a single pkgs ref in the present low data
    '''
    # Aggregation is only meaningful for these state functions.
    aggregatable = (
        'installed',
        'removed',
    )
    if low.get('fun') not in aggregatable:
        return low

    pkgs = []
    pkg_type = None
    for chunk in chunks:
        tag = __utils__['state.gen_tag'](chunk)
        if tag in running:
            # Already ran the pkg state, skip aggregation
            continue
        if chunk.get('state') != 'pip':
            continue
        if '__agg__' in chunk:
            # Chunk was already folded into an earlier aggregate.
            continue
        # Only aggregate chunks calling the same function.
        if chunk.get('fun') != low.get('fun'):
            continue
        # Check first if 'sources' was passed so we don't aggregate pkgs
        # and sources together.
        if pkg_type is None:
            pkg_type = 'pkgs'
        if pkg_type == 'pkgs':
            # Pull out the pkg names!
            if 'pkgs' in chunk:
                pkgs.extend(chunk['pkgs'])
                chunk['__agg__'] = True
            elif 'name' in chunk:
                version = chunk.pop('version', None)
                if version is not None:
                    pkgs.append({chunk['name']: version})
                else:
                    pkgs.append(chunk['name'])
                chunk['__agg__'] = True

    if pkg_type is not None and pkgs:
        if pkg_type in low:
            low[pkg_type].extend(pkgs)
        else:
            low[pkg_type] = pkgs
    return low
|
saltstack/salt
|
salt/states/pip_state.py
|
removed
|
python
|
def removed(name,
requirements=None,
bin_env=None,
log=None,
proxy=None,
timeout=None,
user=None,
cwd=None,
use_vt=False):
'''
Make sure that a package is not installed.
name
The name of the package to uninstall
user
The user under which to run pip
bin_env : None
the pip executable or virtualenenv to use
use_vt
Use VT terminal emulation (see output while installing)
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
except (CommandExecutionError, CommandNotFoundError) as err:
ret['result'] = False
ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
return ret
if name not in pip_list:
ret['result'] = True
ret['comment'] = 'Package is not installed.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Package {0} is set to be removed'.format(name)
return ret
if __salt__['pip.uninstall'](pkgs=name,
requirements=requirements,
bin_env=bin_env,
log=log,
proxy=proxy,
timeout=timeout,
user=user,
cwd=cwd,
use_vt=use_vt):
ret['result'] = True
ret['changes'][name] = 'Removed'
ret['comment'] = 'Package was successfully removed.'
else:
ret['result'] = False
ret['comment'] = 'Could not remove package.'
return ret
|
Make sure that a package is not installed.
name
The name of the package to uninstall
user
The user under which to run pip
bin_env : None
the pip executable or virtualenv to use
use_vt
Use VT terminal emulation (see output while installing)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L964-L1019
| null |
# -*- coding: utf-8 -*-
'''
Installation of Python Packages Using pip
=========================================
These states manage system installed python packages. Note that pip must be
installed for these states to be available, so pip states should include a
requisite to a pkg.installed state for the package which provides pip
(``python-pip`` in most cases). Example:
.. code-block:: yaml
python-pip:
pkg.installed
virtualenvwrapper:
pip.installed:
- require:
- pkg: python-pip
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
# Import salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import pip
HAS_PIP = True
except ImportError:
HAS_PIP = False
if HAS_PIP is True:
try:
from pip.req import InstallRequirement
_from_line = InstallRequirement.from_line
except ImportError:
# pip 10.0.0 move req module under pip._internal
try:
try:
from pip._internal.req import InstallRequirement
_from_line = InstallRequirement.from_line
except AttributeError:
from pip._internal.req.constructors import install_req_from_line as _from_line
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
try:
from pip.exceptions import InstallationError
except ImportError:
InstallationError = ValueError
# pylint: enable=import-error
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pip'
def __virtual__():
    '''
    Load this state module only when pkg_resources is importable and the
    ``pip.list`` execution function is available in ``__salt__``.
    '''
    if not HAS_PKG_RESOURCES:
        # pkg_resources is required for the version comparisons used here.
        return False, 'The pkg_resources python library is not installed'
    if 'pip.list' not in __salt__:
        return False
    return __virtualname__
def _fulfills_version_spec(version, version_spec):
    '''
    Check ``version`` against each (operator, version) constraint in
    ``version_spec`` and return True only if every constraint holds.
    Constraints whose operator is None are skipped.
    '''
    for constraint_op, constraint_ver in version_spec:
        if constraint_op is None:
            continue
        satisfied = salt.utils.versions.compare(
            ver1=version,
            oper=constraint_op,
            ver2=constraint_ver,
            cmp_func=_pep440_version_cmp)
        if not satisfied:
            return False
    return True
def _check_pkg_version_format(pkg):
    '''
    Takes a package name and version specification (if any) and checks it using
    the pip library.

    Returns a dict with keys:
        result       -- True when the requirement parsed cleanly
        comment      -- error message when result is False
        prefix       -- project-name portion of the requirement ('' for URLs)
        version_spec -- list of (operator, version) tuples
    '''
    ret = {'result': False, 'comment': None,
           'prefix': None, 'version_spec': None}

    if not HAS_PIP:
        ret['comment'] = (
            'An importable Python 2 pip module is required but could not be '
            'found on your system. This usually means that the system\'s pip '
            'package is not installed properly.'
        )

        return ret

    from_vcs = False
    try:
        # Get the requirement object from the pip library
        try:
            # With pip < 1.2, the __version__ attribute does not exist and
            # vcs+URL urls are not properly parsed.
            # The next line is meant to trigger an AttributeError and
            # handle lower pip versions
            log.debug('Installed pip version: %s', pip.__version__)
            install_req = _from_line(pkg)
        except AttributeError:
            log.debug('Installed pip version is lower than 1.2')
            # Old pip cannot parse "vcs+URL" requirements; strip the VCS
            # scheme prefix by hand before handing the rest to pip.
            supported_vcs = ('git', 'svn', 'hg', 'bzr')
            if pkg.startswith(supported_vcs):
                for vcs in supported_vcs:
                    if pkg.startswith(vcs):
                        from_vcs = True
                        install_req = _from_line(
                            pkg.split('{0}+'.format(vcs))[-1]
                        )
                        break
            else:
                install_req = _from_line(pkg)
    except (ValueError, InstallationError) as exc:
        ret['result'] = False
        # A single '=' (instead of '==') is the most common user mistake;
        # surface a targeted hint for it.
        if not from_vcs and '=' in pkg and '==' not in pkg:
            ret['comment'] = (
                'Invalid version specification in package {0}. \'=\' is '
                'not supported, use \'==\' instead.'.format(pkg)
            )
            return ret
        ret['comment'] = (
            'pip raised an exception while parsing \'{0}\': {1}'.format(
                pkg, exc
            )
        )
        return ret

    if install_req.req is None:
        # This is most likely an url and there's no way to know what will
        # be installed before actually installing it.
        ret['result'] = True
        ret['prefix'] = ''
        ret['version_spec'] = []
    else:
        ret['result'] = True
        try:
            # pip < 8: requirement objects expose project_name/specs directly.
            ret['prefix'] = install_req.req.project_name
            ret['version_spec'] = install_req.req.specs
        except Exception:
            # Newer pip: derive the name, and translate the specifier set
            # into the legacy (operator, version) tuple format.
            ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
            if hasattr(install_req, "specifier"):
                specifier = install_req.specifier
            else:
                specifier = install_req.req.specifier
            ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]

    return ret
def _check_if_installed(prefix,
                        state_pkg_name,
                        version_spec,
                        ignore_installed,
                        force_reinstall,
                        upgrade,
                        user,
                        cwd,
                        bin_env,
                        env_vars,
                        index_url,
                        extra_index_url,
                        pip_list=False,
                        **kwargs):
    '''
    Takes a package name and version specification (if any) and checks it is
    installed

    Keyword arguments include:
        pip_list: optional dict of installed pip packages, and their versions,
            to search through to check if the package is installed. If not
            provided, one will be generated in this function by querying the
            system.

    Returns:
        result: None means the command failed to run
        result: True means the package is installed
        result: False means the package is not installed
    '''
    ret = {'result': False, 'comment': None}

    # If we are not passed a pip list, get one:
    # (lookups below are case-insensitive, matching pip's name handling)
    pip_list = salt.utils.data.CaseInsensitiveDict(
        pip_list or __salt__['pip.list'](prefix, bin_env=bin_env,
                                         user=user, cwd=cwd,
                                         env_vars=env_vars, **kwargs)
    )

    # If the package was already installed, check
    # the ignore_installed and force_reinstall flags
    if ignore_installed is False and prefix in pip_list:
        if force_reinstall is False and not upgrade:
            # Check desired version (if any) against currently-installed
            if (
                    any(version_spec) and
                    _fulfills_version_spec(pip_list[prefix], version_spec)
            ) or (not any(version_spec)):
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
        if force_reinstall is False and upgrade:
            # Check desired version (if any) against currently-installed.
            # NOTE(review): these are crude substring checks on the version
            # text of each spec (e.g. '1.0a1' enables alpha candidates); a
            # spec like '1.0-beta' would also flip include_beta — confirm
            # that is acceptable.
            include_alpha = False
            include_beta = False
            include_rc = False
            if any(version_spec):
                for spec in version_spec:
                    if 'a' in spec[1]:
                        include_alpha = True
                    if 'b' in spec[1]:
                        include_beta = True
                    if 'rc' in spec[1]:
                        include_rc = True
            # Query the index for every candidate version of the package.
            available_versions = __salt__['pip.list_all_versions'](
                prefix, bin_env=bin_env, include_alpha=include_alpha,
                include_beta=include_beta, include_rc=include_rc, user=user,
                cwd=cwd, index_url=index_url, extra_index_url=extra_index_url)
            desired_version = ''
            if any(version_spec):
                # Walk newest-to-oldest; first match is the best candidate.
                for version in reversed(available_versions):
                    if _fulfills_version_spec(version, version_spec):
                        desired_version = version
                        break
            else:
                # No constraints: the newest available version wins.
                desired_version = available_versions[-1]
            if not desired_version:
                ret['result'] = True
                # NOTE(review): grammar in this user-facing message
                # ("doesn't fulfills") is preserved as-is; it is runtime
                # output and not changed here.
                ret['comment'] = ('Python package {0} was already '
                                  'installed and\nthe available upgrade '
                                  'doesn\'t fulfills the version '
                                  'requirements'.format(prefix))
                return ret
            # Already at the best candidate version: nothing to upgrade.
            if _pep440_version_cmp(pip_list[prefix], desired_version) == 0:
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret

    return ret
def _pep440_version_cmp(pkg1, pkg2, ignore_epoch=False):
    '''
    Compares two version strings using pkg_resources.parse_version.

    Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2,
    and 1 if pkg1 > pkg2. Return None if there was a problem
    making the comparison.

    ignore_epoch
        When True, strip a PEP 440 epoch prefix (``N!``) from both
        versions before comparing.
    '''
    def normalize(version):
        # Coerce to text; optionally drop the epoch ("1!2.0" -> "2.0").
        version = six.text_type(version)
        return version.split('!', 1)[-1] if ignore_epoch else version

    try:
        # Parse each version once instead of re-parsing it for every
        # comparison below (the original parsed up to six times).
        ver1 = pkg_resources.parse_version(normalize(pkg1))
        ver2 = pkg_resources.parse_version(normalize(pkg2))
        if ver1 < ver2:
            return -1
        if ver1 == ver2:
            return 0
        if ver1 > ver2:
            return 1
    except Exception as exc:
        # Unparseable input: log and signal "no comparison possible".
        log.exception(exc)
    return None
def installed(name,
              pkgs=None,
              pip_bin=None,
              requirements=None,
              bin_env=None,
              use_wheel=False,
              no_use_wheel=False,
              log=None,
              proxy=None,
              timeout=None,
              repo=None,
              editable=None,
              find_links=None,
              index_url=None,
              extra_index_url=None,
              no_index=False,
              mirrors=None,
              build=None,
              target=None,
              download=None,
              download_cache=None,
              source=None,
              upgrade=False,
              force_reinstall=False,
              ignore_installed=False,
              exists_action=None,
              no_deps=False,
              no_install=False,
              no_download=False,
              install_options=None,
              global_options=None,
              user=None,
              cwd=None,
              pre_releases=False,
              cert=None,
              allow_all_external=False,
              allow_external=None,
              allow_unverified=None,
              process_dependency_links=False,
              env_vars=None,
              use_vt=False,
              trusted_host=None,
              no_cache_dir=False,
              cache_dir=None,
              no_binary=None,
              extra_args=None,
              **kwargs):
    '''
    Make sure the package is installed

    name
        The name of the python package to install. You can also specify version
        numbers here using the standard operators ``==, >=, <=``. If
        ``requirements`` is given, this parameter will be ignored.

        Example:

        .. code-block:: yaml

            django:
              pip.installed:
                - name: django >= 1.6, <= 1.7
                - require:
                  - pkg: python-pip

        This will install the latest Django version greater than 1.6 but less
        than 1.7.

    requirements
        Path to a pip requirements file. If the path begins with salt://
        the file will be transferred from the master file server.

    user
        The user under which to run pip

    use_wheel : False
        Prefer wheel archives (requires pip>=1.4)

    no_use_wheel : False
        Force to not use wheel archives (requires pip>=1.4)

    no_binary
        Force to not use binary packages (requires pip >= 7.0.0)
        Accepts either :all: to disable all binary packages, :none: to empty the set,
        or a list of one or more packages

        Example:

        .. code-block:: yaml

            django:
              pip.installed:
                - no_binary: ':all:'

            flask:
              pip.installed:
                - no_binary:
                  - itsdangerous
                  - click

    log
        Log file where a complete (maximum verbosity) record will be kept

    proxy
        Specify a proxy in the form
        user:passwd@proxy.server:port. Note that the
        user:password@ is optional and required only if you
        are behind an authenticated proxy. If you provide
        user@proxy.server:port then you will be prompted for a
        password.

    timeout
        Set the socket timeout (default 15 seconds)

    editable
        install something editable (i.e.
        git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)

    find_links
        URL to look for packages at

    index_url
        Base URL of Python Package Index

    extra_index_url
        Extra URLs of package indexes to use in addition to ``index_url``

    no_index
        Ignore package index

    mirrors
        Specific mirror URL(s) to query (automatically adds --use-mirrors)

    build
        Unpack packages into ``build`` dir

    target
        Install packages into ``target`` dir

    download
        Download packages into ``download`` instead of installing them

    download_cache
        Cache downloaded packages in ``download_cache`` dir

    source
        Check out ``editable`` packages into ``source`` dir

    upgrade
        Upgrade all packages to the newest available version

    force_reinstall
        When upgrading, reinstall all packages even if they are already
        up-to-date.

    ignore_installed
        Ignore the installed packages (reinstalling instead)

    exists_action
        Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
        (b)ackup

    no_deps
        Ignore package dependencies

    no_install
        Download and unpack all packages, but don't actually install them

    no_cache_dir:
        Disable the cache.

    cwd
        Current working directory to run pip from

    pre_releases
        Include pre-releases in the available versions

    cert
        Provide a path to an alternate CA bundle

    allow_all_external
        Allow the installation of all externally hosted files

    allow_external
        Allow the installation of externally hosted files (comma separated list)

    allow_unverified
        Allow the installation of insecure and unverifiable files (comma separated list)

    process_dependency_links
        Enable the processing of dependency links

    bin_env : None
        Absolute path to a virtual environment directory or absolute path to
        a pip executable. The example below assumes a virtual environment
        has been created at ``/foo/.virtualenvs/bar``.

    env_vars
        Add or modify environment variables. Useful for tweaking build steps,
        such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
        compiler calls. This must be in the form of a dictionary or a mapping.

        Example:

        .. code-block:: yaml

            django:
              pip.installed:
                - name: django_app
                - env_vars:
                    CUSTOM_PATH: /opt/django_app
                    VERBOSE: True

    use_vt
        Use VT terminal emulation (see output while installing)

    trusted_host
        Mark this host as trusted, even though it does not have valid or any
        HTTPS.

        Example:

        .. code-block:: yaml

            django:
              pip.installed:
                - name: django >= 1.6, <= 1.7
                - bin_env: /foo/.virtualenvs/bar
                - require:
                  - pkg: python-pip

        Or

        Example:

        .. code-block:: yaml

            django:
              pip.installed:
                - name: django >= 1.6, <= 1.7
                - bin_env: /foo/.virtualenvs/bar/bin/pip
                - require:
                  - pkg: python-pip

    .. admonition:: Attention

        The following arguments are deprecated, do not use.

    pip_bin : None
        Deprecated, use ``bin_env``

    .. versionchanged:: 0.17.0
        ``use_wheel`` option added.

    install_options
        Extra arguments to be supplied to the setup.py install command.
        If you are using an option with a directory path, be sure to use
        absolute path.

        Example:

        .. code-block:: yaml

            django:
              pip.installed:
                - name: django
                - install_options:
                  - --prefix=/blah
                - require:
                  - pkg: python-pip

    global_options
        Extra global options to be supplied to the setup.py call before the
        install command.

        .. versionadded:: 2014.1.3

    .. admonition:: Attention

        As of Salt 0.17.0 the pip state **needs** an importable pip module.
        This usually means having the system's pip package installed or running
        Salt from an active `virtualenv`_.

        The reason for this requirement is because ``pip`` already does a
        pretty good job parsing its own requirements. It makes no sense for
        Salt to do ``pip`` requirements parsing and validation before passing
        them to the ``pip`` library. It's functionality duplication and it's
        more error prone.

    .. admonition:: Attention

        Please set ``reload_modules: True`` to have the salt minion
        import this module after installation.

    Example:

    .. code-block:: yaml

        pyopenssl:
          pip.installed:
            - name: pyOpenSSL
            - reload_modules: True
            - exists_action: i

    extra_args
        pip keyword and positional arguments not yet implemented in salt

        .. code-block:: yaml

            pandas:
              pip.installed:
                - name: pandas
                - extra_args:
                  - --latest-pip-kwarg: param
                  - --latest-pip-arg

        .. warning::

            If unsupported options are passed here that are not supported in a
            minion's version of pip, a `No such option error` will be thrown.

    .. _`virtualenv`: http://www.virtualenv.org/en/latest/
    '''
    # NOTE(review): 'repo' and 'cache_dir' are accepted for API compatibility
    # but are never forwarded to the pip.install call below — confirm intended.
    if pip_bin and not bin_env:
        bin_env = pip_bin

    # If pkgs is present, ignore name
    if pkgs:
        if not isinstance(pkgs, list):
            return {'name': name,
                    'result': False,
                    'changes': {},
                    'comment': 'pkgs argument must be formatted as a list'}
    else:
        pkgs = [name]

    # Assumption: If `pkg` is not an `string`, it's a `collections.OrderedDict`
    # prepro = lambda pkg: pkg if type(pkg) == str else \
    #     ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';')))
    # pkgs = ','.join([prepro(pkg) for pkg in pkgs])
    # NOTE(review): six.iteritems() returns an iterator, which is not
    # subscriptable; the dict branch below would raise TypeError if a dict
    # entry is ever passed in pkgs — looks like a latent bug, confirm.
    prepro = lambda pkg: pkg if isinstance(pkg, six.string_types) else \
        ' '.join((six.iteritems(pkg)[0][0], six.iteritems(pkg)[0][1]))
    pkgs = [prepro(pkg) for pkg in pkgs]

    # Stateful return skeleton; 'name' aggregates all requested packages.
    ret = {'name': ';'.join(pkgs), 'result': None,
           'comment': '', 'changes': {}}

    try:
        cur_version = __salt__['pip.version'](bin_env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = None
        ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
        return ret
    # Check that the pip binary supports the 'use_wheel' option
    if use_wheel:
        min_version = '1.4'
        max_version = '9.0.3'
        too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
        too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
        if too_low or too_high:
            ret['result'] = False
            ret['comment'] = ('The \'use_wheel\' option is only supported in '
                              'pip between {0} and {1}. The version of pip detected '
                              'was {2}.').format(min_version, max_version, cur_version)
            return ret

    # Check that the pip binary supports the 'no_use_wheel' option
    if no_use_wheel:
        min_version = '1.4'
        max_version = '9.0.3'
        too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
        too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
        if too_low or too_high:
            ret['result'] = False
            ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
                              'pip between {0} and {1}. The version of pip detected '
                              'was {2}.').format(min_version, max_version, cur_version)
            return ret

    # Check that the pip binary supports the 'no_binary' option
    if no_binary:
        min_version = '7.0.0'
        too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
        if too_low:
            ret['result'] = False
            ret['comment'] = ('The \'no_binary\' option is only supported in '
                              'pip {0} and newer. The version of pip detected '
                              'was {1}.').format(min_version, cur_version)
            return ret

    # Get the packages parsed name and version from the pip library.
    # This only is done when there is no requirements or editable parameter.
    pkgs_details = []
    if pkgs and not (requirements or editable):
        comments = []
        for pkg in iter(pkgs):
            out = _check_pkg_version_format(pkg)
            if out['result'] is False:
                ret['result'] = False
                comments.append(out['comment'])
            elif out['result'] is True:
                pkgs_details.append((out['prefix'], pkg, out['version_spec']))

        if ret['result'] is False:
            ret['comment'] = '\n'.join(comments)
            return ret

    # If a requirements file is specified, only install the contents of the
    # requirements file. Similarly, using the --editable flag with pip should
    # also ignore the "name" and "pkgs" parameters.
    target_pkgs = []
    already_installed_comments = []
    if requirements or editable:
        comments = []
        # Append comments if this is a dry run.
        if __opts__['test']:
            ret['result'] = None
            if requirements:
                # TODO: Check requirements file against currently-installed
                # packages to provide more accurate state output.
                comments.append('Requirements file \'{0}\' will be '
                                'processed.'.format(requirements))
            if editable:
                comments.append(
                    'Package will be installed in editable mode (i.e. '
                    'setuptools "develop mode") from {0}.'.format(editable)
                )
            ret['comment'] = ' '.join(comments)
            return ret

    # No requirements case.
    # Check pre-existence of the requested packages.
    else:
        # Attempt to pre-cache a the current pip list
        try:
            pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
        # If we fail, then just send False, and we'll try again in the next function call
        except Exception as exc:
            log.exception(exc)
            pip_list = False

        for prefix, state_pkg_name, version_spec in pkgs_details:

            if prefix:
                # No-op self-assignments preserved from the original source.
                state_pkg_name = state_pkg_name
                version_spec = version_spec
                out = _check_if_installed(prefix, state_pkg_name, version_spec,
                                          ignore_installed, force_reinstall,
                                          upgrade, user, cwd, bin_env, env_vars,
                                          index_url, extra_index_url, pip_list,
                                          **kwargs)
                # If _check_if_installed result is None, something went wrong with
                # the command running. This way we keep stateful output.
                if out['result'] is None:
                    ret['result'] = False
                    ret['comment'] = out['comment']
                    return ret
            else:
                # URL-style requirement: can't know up front what it installs.
                out = {'result': False, 'comment': None}

            result = out['result']

            # The package is not present. Add it to the pkgs to install.
            if result is False:
                # Replace commas (used for version ranges) with semicolons
                # (which are not supported) in name so it does not treat
                # them as multiple packages.
                target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))

                # Append comments if this is a dry run.
                if __opts__['test']:
                    msg = 'Python package {0} is set to be installed'
                    ret['result'] = None
                    ret['comment'] = msg.format(state_pkg_name)
                    return ret

            # The package is already present and will not be reinstalled.
            elif result is True:
                # Append comment stating its presence
                already_installed_comments.append(out['comment'])

            # The command pip.list failed. Abort.
            elif result is None:
                ret['result'] = None
                ret['comment'] = out['comment']
                return ret

        # No packages to install.
        if not target_pkgs:
            ret['result'] = True
            aicomms = '\n'.join(already_installed_comments)
            last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
            ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
            return ret

    # Construct the string that will get passed to the install call
    pkgs_str = ','.join([state_name for _, state_name in target_pkgs])

    # Call to install the package. Actual installation takes place here
    pip_install_call = __salt__['pip.install'](
        pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
        requirements=requirements,
        bin_env=bin_env,
        use_wheel=use_wheel,
        no_use_wheel=no_use_wheel,
        no_binary=no_binary,
        log=log,
        proxy=proxy,
        timeout=timeout,
        editable=editable,
        find_links=find_links,
        index_url=index_url,
        extra_index_url=extra_index_url,
        no_index=no_index,
        mirrors=mirrors,
        build=build,
        target=target,
        download=download,
        download_cache=download_cache,
        source=source,
        upgrade=upgrade,
        force_reinstall=force_reinstall,
        ignore_installed=ignore_installed,
        exists_action=exists_action,
        no_deps=no_deps,
        no_install=no_install,
        no_download=no_download,
        install_options=install_options,
        global_options=global_options,
        user=user,
        cwd=cwd,
        pre_releases=pre_releases,
        cert=cert,
        allow_all_external=allow_all_external,
        allow_external=allow_external,
        allow_unverified=allow_unverified,
        process_dependency_links=process_dependency_links,
        saltenv=__env__,
        env_vars=env_vars,
        use_vt=use_vt,
        trusted_host=trusted_host,
        no_cache_dir=no_cache_dir,
        extra_args=extra_args,
        **kwargs
    )

    if pip_install_call and pip_install_call.get('retcode', 1) == 0:
        ret['result'] = True

        if requirements or editable:
            comments = []
            if requirements:
                # Any stdout line NOT matching these prefixes is treated as
                # evidence that something actually changed.
                PIP_REQUIREMENTS_NOCHANGE = [
                    'Requirement already satisfied',
                    'Requirement already up-to-date',
                    'Requirement not upgraded',
                    'Collecting',
                    'Cloning',
                    'Cleaning up...',
                ]
                for line in pip_install_call.get('stdout', '').split('\n'):
                    if not any(
                        [
                            line.strip().startswith(x)
                            for x in PIP_REQUIREMENTS_NOCHANGE
                        ]
                    ):
                        ret['changes']['requirements'] = True
                if ret['changes'].get('requirements'):
                    comments.append('Successfully processed requirements file '
                                    '{0}.'.format(requirements))
                else:
                    comments.append('Requirements were already installed.')

            if editable:
                comments.append('Package successfully installed from VCS '
                                'checkout {0}.'.format(editable))
                ret['changes']['editable'] = True
            ret['comment'] = ' '.join(comments)
        else:
            # Check that the packages set to be installed were installed.
            # Create comments reporting success and failures
            pkg_404_comms = []

            already_installed_packages = set()
            for line in pip_install_call.get('stdout', '').split('\n'):
                # Output for already installed packages:
                # 'Requirement already up-to-date: jinja2 in /usr/local/lib/python2.7/dist-packages\nCleaning up...'
                if line.startswith('Requirement already up-to-date: '):
                    package = line.split(':', 1)[1].split()[0]
                    already_installed_packages.add(package.lower())

            for prefix, state_name in target_pkgs:

                # Case for packages that are not an URL
                if prefix:
                    pipsearch = salt.utils.data.CaseInsensitiveDict(
                        __salt__['pip.list'](prefix, bin_env,
                                             user=user, cwd=cwd,
                                             env_vars=env_vars,
                                             **kwargs)
                    )

                    # If we didn't find the package in the system after
                    # installing it report it
                    if not pipsearch:
                        # NOTE(review): 'pkg' here is the leftover loop
                        # variable from the earlier parsing loop, not
                        # 'state_name' — looks like a latent bug; confirm.
                        pkg_404_comms.append(
                            'There was no error installing package \'{0}\' '
                            'although it does not show when calling '
                            '\'pip.freeze\'.'.format(pkg)
                        )
                    else:
                        if prefix in pipsearch \
                                and prefix.lower() not in already_installed_packages:
                            ver = pipsearch[prefix]
                            ret['changes']['{0}=={1}'.format(prefix, ver)] = 'Installed'
                # Case for packages that are an URL
                else:
                    ret['changes']['{0}==???'.format(state_name)] = 'Installed'

            # Set comments
            aicomms = '\n'.join(already_installed_comments)
            succ_comm = 'All packages were successfully installed'\
                if not pkg_404_comms else '\n'.join(pkg_404_comms)
            ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm

        return ret
    elif pip_install_call:
        ret['result'] = False
        if 'stdout' in pip_install_call:
            error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
                                            pip_install_call['stderr'])
        else:
            error = 'Error: {0}'.format(pip_install_call['comment'])

        if requirements or editable:
            comments = []
            if requirements:
                comments.append('Unable to process requirements file '
                                '"{0}".'.format(requirements))
            if editable:
                comments.append('Unable to install from VCS checkout'
                                '{0}.'.format(editable))
            comments.append(error)
            ret['comment'] = ' '.join(comments)
        else:
            pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
            aicomms = '\n'.join(already_installed_comments)
            error_comm = ('Failed to install packages: {0}. '
                          '{1}'.format(pkgs_str, error))
            ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install package'

    return ret
def uptodate(name,
             bin_env=None,
             user=None,
             cwd=None,
             use_vt=False):
    '''
    .. versionadded:: 2015.5.0

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference
    user
        The user under which to run pip
    bin_env
        the pip executable or virtualenv to use
    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    # Pessimistic default; flipped below once the real outcome is known.
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update.'}

    try:
        outdated = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd)
    except Exception as exc:
        ret['comment'] = six.text_type(exc)
        return ret

    if not outdated:
        # Nothing to upgrade: success with no changes.
        ret['result'] = True
        ret['comment'] = 'System is already up-to-date.'
        return ret

    if __opts__['test']:
        # Dry run: report intent only.
        ret['result'] = None
        ret['comment'] = 'System update will be performed'
        return ret

    upgrade_info = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt)

    if upgrade_info.get('result') is False:
        # pip.upgrade reported an explicit failure; propagate its output.
        ret.update(upgrade_info)
    elif upgrade_info:
        ret['result'] = True
        ret['changes'] = upgrade_info
        ret['comment'] = 'Upgrade successful.'
    else:
        ret['comment'] = 'Upgrade failed.'
    return ret
def mod_aggregate(low, chunks, running):
    '''
    The mod_aggregate function which looks up all packages in the available
    low chunks and merges them into a single pkgs ref in the present low data
    '''
    # Only these state functions support aggregation.
    agg_enabled = [
        'installed',
        'removed',
    ]
    if low.get('fun') not in agg_enabled:
        return low

    pkgs = []
    pkg_type = None
    for chunk in chunks:
        tag = __utils__['state.gen_tag'](chunk)
        if tag in running:
            # Already ran the pkg state, skip aggregation
            continue
        if chunk.get('state') != 'pip':
            continue
        if '__agg__' in chunk:
            continue
        # Check for the same function
        if chunk.get('fun') != low.get('fun'):
            continue
        # Check first if 'sources' was passed so we don't aggregate pkgs
        # and sources together.
        if pkg_type is None:
            pkg_type = 'pkgs'
        if pkg_type == 'pkgs':
            # Pull out the pkg names!
            if 'pkgs' in chunk:
                pkgs.extend(chunk['pkgs'])
                chunk['__agg__'] = True
            elif 'name' in chunk:
                version = chunk.pop('version', None)
                entry = {chunk['name']: version} if version is not None else chunk['name']
                pkgs.append(entry)
                chunk['__agg__'] = True

    if pkg_type is not None and pkgs:
        if pkg_type in low:
            low[pkg_type].extend(pkgs)
        else:
            low[pkg_type] = pkgs
    return low
|
saltstack/salt
|
salt/states/pip_state.py
|
uptodate
|
python
|
def uptodate(name,
             bin_env=None,
             user=None,
             cwd=None,
             use_vt=False):
    '''
    .. versionadded:: 2015.5.0

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference
    user
        The user under which to run pip
    bin_env
        the pip executable or virtualenv to use
    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    # Pessimistic default return; updated below once the outcome is known.
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update.'}

    try:
        packages = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd)
    except Exception as e:
        ret['comment'] = six.text_type(e)
        return ret

    if not packages:
        # Nothing outdated: success, no changes.
        ret['comment'] = 'System is already up-to-date.'
        ret['result'] = True
        return ret
    elif __opts__['test']:
        # Dry run: report intent without changing anything.
        ret['comment'] = 'System update will be performed'
        ret['result'] = None
        return ret

    updated = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt)

    if updated.get('result') is False:
        # pip.upgrade reported an explicit failure; propagate its output.
        ret.update(updated)
    elif updated:
        ret['changes'] = updated
        ret['comment'] = 'Upgrade successful.'
        ret['result'] = True
    else:
        ret['comment'] = 'Upgrade failed.'
    return ret
|
.. versionadded:: 2015.5.0
Verify that the system is completely up to date.
name
The name has no functional value and is only used as a tracking
reference
user
The user under which to run pip
bin_env
the pip executable or virtualenv to use
use_vt
Use VT terminal emulation (see output while installing)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L1022-L1073
| null |
# -*- coding: utf-8 -*-
'''
Installation of Python Packages Using pip
=========================================
These states manage system installed python packages. Note that pip must be
installed for these states to be available, so pip states should include a
requisite to a pkg.installed state for the package which provides pip
(``python-pip`` in most cases). Example:
.. code-block:: yaml
python-pip:
pkg.installed
virtualenvwrapper:
pip.installed:
- require:
- pkg: python-pip
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
# Import salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import pip
HAS_PIP = True
except ImportError:
HAS_PIP = False
if HAS_PIP is True:
try:
from pip.req import InstallRequirement
_from_line = InstallRequirement.from_line
except ImportError:
# pip 10.0.0 move req module under pip._internal
try:
try:
from pip._internal.req import InstallRequirement
_from_line = InstallRequirement.from_line
except AttributeError:
from pip._internal.req.constructors import install_req_from_line as _from_line
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
try:
from pip.exceptions import InstallationError
except ImportError:
InstallationError = ValueError
# pylint: enable=import-error
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pip'
def __virtual__():
    '''
    Only load if the pip module is available in __salt__
    '''
    if not HAS_PKG_RESOURCES:
        return False, 'The pkg_resources python library is not installed'
    if 'pip.list' not in __salt__:
        return False
    return __virtualname__
def _fulfills_version_spec(version, version_spec):
'''
Check version number against version specification info and return a
boolean value based on whether or not the version number meets the
specified version.
'''
for oper, spec in version_spec:
if oper is None:
continue
if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec, cmp_func=_pep440_version_cmp):
return False
return True
def _check_pkg_version_format(pkg):
    '''
    Takes a package name and version specification (if any) and checks it using
    the pip library.

    Returns a dict with keys:
        result: False when the requirement could not be parsed, True otherwise
        comment: error text when result is False
        prefix: parsed project name ('' for URL requirements)
        version_spec: list of (operator, version) tuples
    '''
    ret = {'result': False, 'comment': None,
           'prefix': None, 'version_spec': None}
    if not HAS_PIP:
        ret['comment'] = (
            'An importable Python 2 pip module is required but could not be '
            'found on your system. This usually means that the system\'s pip '
            'package is not installed properly.'
        )
        return ret

    from_vcs = False
    try:
        # Get the requirement object from the pip library
        try:
            # With pip < 1.2, the __version__ attribute does not exist and
            # vcs+URL urls are not properly parsed.
            # The next line is meant to trigger an AttributeError and
            # handle lower pip versions
            log.debug('Installed pip version: %s', pip.__version__)
            install_req = _from_line(pkg)
        except AttributeError:
            log.debug('Installed pip version is lower than 1.2')
            supported_vcs = ('git', 'svn', 'hg', 'bzr')
            if pkg.startswith(supported_vcs):
                for vcs in supported_vcs:
                    if pkg.startswith(vcs):
                        from_vcs = True
                        # Strip the "<vcs>+" scheme prefix before parsing.
                        install_req = _from_line(
                            pkg.split('{0}+'.format(vcs))[-1]
                        )
                        break
            else:
                install_req = _from_line(pkg)
    except (ValueError, InstallationError) as exc:
        ret['result'] = False
        if not from_vcs and '=' in pkg and '==' not in pkg:
            # Most common user error: single '=' instead of '=='.
            ret['comment'] = (
                'Invalid version specification in package {0}. \'=\' is '
                'not supported, use \'==\' instead.'.format(pkg)
            )
            return ret
        ret['comment'] = (
            'pip raised an exception while parsing \'{0}\': {1}'.format(
                pkg, exc
            )
        )
        return ret

    if install_req.req is None:
        # This is most likely an url and there's no way to know what will
        # be installed before actually installing it.
        ret['result'] = True
        ret['prefix'] = ''
        ret['version_spec'] = []
    else:
        ret['result'] = True
        try:
            # pip < 8 exposes a pkg_resources.Requirement here.
            ret['prefix'] = install_req.req.project_name
            ret['version_spec'] = install_req.req.specs
        except Exception:
            # Newer pip vendors packaging.Requirement, which has no
            # project_name/specs; derive equivalent values from it.
            ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
            if hasattr(install_req, "specifier"):
                specifier = install_req.specifier
            else:
                specifier = install_req.req.specifier
            ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]

    return ret
def _check_if_installed(prefix,
                        state_pkg_name,
                        version_spec,
                        ignore_installed,
                        force_reinstall,
                        upgrade,
                        user,
                        cwd,
                        bin_env,
                        env_vars,
                        index_url,
                        extra_index_url,
                        pip_list=False,
                        **kwargs):
    '''
    Takes a package name and version specification (if any) and checks it is
    installed

    Keyword arguments include:
        pip_list: optional dict of installed pip packages, and their versions,
            to search through to check if the package is installed. If not
            provided, one will be generated in this function by querying the
            system.

    Returns:
        result: None means the command failed to run
        result: True means the package is installed
        result: False means the package is not installed
    '''
    ret = {'result': False, 'comment': None}

    # If we are not passed a pip list, get one:
    # (lookups below are case-insensitive, matching pip's name handling)
    pip_list = salt.utils.data.CaseInsensitiveDict(
        pip_list or __salt__['pip.list'](prefix, bin_env=bin_env,
                                         user=user, cwd=cwd,
                                         env_vars=env_vars, **kwargs)
    )

    # If the package was already installed, check
    # the ignore_installed and force_reinstall flags
    if ignore_installed is False and prefix in pip_list:
        if force_reinstall is False and not upgrade:
            # Check desired version (if any) against currently-installed
            if (
                    any(version_spec) and
                    _fulfills_version_spec(pip_list[prefix], version_spec)
            ) or (not any(version_spec)):
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
        if force_reinstall is False and upgrade:
            # Check desired version (if any) against currently-installed.
            # NOTE(review): these are crude substring checks on each spec's
            # version text (e.g. '1.0a1' enables alpha candidates) — confirm
            # that false positives such as '1.0-beta' are acceptable.
            include_alpha = False
            include_beta = False
            include_rc = False
            if any(version_spec):
                for spec in version_spec:
                    if 'a' in spec[1]:
                        include_alpha = True
                    if 'b' in spec[1]:
                        include_beta = True
                    if 'rc' in spec[1]:
                        include_rc = True
            # Query the index for every candidate version of the package.
            available_versions = __salt__['pip.list_all_versions'](
                prefix, bin_env=bin_env, include_alpha=include_alpha,
                include_beta=include_beta, include_rc=include_rc, user=user,
                cwd=cwd, index_url=index_url, extra_index_url=extra_index_url)
            desired_version = ''
            if any(version_spec):
                # Walk newest-to-oldest; first match is the best candidate.
                for version in reversed(available_versions):
                    if _fulfills_version_spec(version, version_spec):
                        desired_version = version
                        break
            else:
                # No constraints: newest available version wins.
                desired_version = available_versions[-1]
            if not desired_version:
                ret['result'] = True
                # NOTE(review): the grammar of this runtime message
                # ("doesn't fulfills") is preserved as-is.
                ret['comment'] = ('Python package {0} was already '
                                  'installed and\nthe available upgrade '
                                  'doesn\'t fulfills the version '
                                  'requirements'.format(prefix))
                return ret
            # Already at the best candidate version: nothing to upgrade.
            if _pep440_version_cmp(pip_list[prefix], desired_version) == 0:
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret

    return ret
def _pep440_version_cmp(pkg1, pkg2, ignore_epoch=False):
    '''
    Compares two version strings using pkg_resources.parse_version.

    Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2,
    and 1 if pkg1 > pkg2. Return None if there was a problem
    making the comparison.

    ignore_epoch
        When True, strip a PEP 440 epoch prefix (``N!``) from both
        versions before comparing.
    '''
    def normalize(version):
        # Coerce to text; optionally drop the epoch ("1!2.0" -> "2.0").
        version = six.text_type(version)
        return version.split('!', 1)[-1] if ignore_epoch else version

    try:
        # Parse each version once instead of re-parsing it for every
        # comparison below (the original parsed up to six times).
        ver1 = pkg_resources.parse_version(normalize(pkg1))
        ver2 = pkg_resources.parse_version(normalize(pkg2))
        if ver1 < ver2:
            return -1
        if ver1 == ver2:
            return 0
        if ver1 > ver2:
            return 1
    except Exception as exc:
        # Unparseable input: log and signal "no comparison possible".
        log.exception(exc)
    return None
def installed(name,
pkgs=None,
pip_bin=None,
requirements=None,
bin_env=None,
use_wheel=False,
no_use_wheel=False,
log=None,
proxy=None,
timeout=None,
repo=None,
editable=None,
find_links=None,
index_url=None,
extra_index_url=None,
no_index=False,
mirrors=None,
build=None,
target=None,
download=None,
download_cache=None,
source=None,
upgrade=False,
force_reinstall=False,
ignore_installed=False,
exists_action=None,
no_deps=False,
no_install=False,
no_download=False,
install_options=None,
global_options=None,
user=None,
cwd=None,
pre_releases=False,
cert=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
process_dependency_links=False,
env_vars=None,
use_vt=False,
trusted_host=None,
no_cache_dir=False,
cache_dir=None,
no_binary=None,
extra_args=None,
**kwargs):
'''
Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_cache_dir:
Disable the cache.
cwd
Current working directory to run pip from
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
extra_args
pip keyword and positional arguments not yet implemented in salt
.. code-block:: yaml
pandas:
pip.installed:
- name: pandas
- extra_args:
- --latest-pip-kwarg: param
- --latest-pip-arg
.. warning::
If unsupported options are passed here that are not supported in a
minion's version of pip, a `No such option error` will be thrown.
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
'''
if pip_bin and not bin_env:
bin_env = pip_bin
# If pkgs is present, ignore name
if pkgs:
if not isinstance(pkgs, list):
return {'name': name,
'result': False,
'changes': {},
'comment': 'pkgs argument must be formatted as a list'}
else:
pkgs = [name]
# Assumption: If `pkg` is not an `string`, it's a `collections.OrderedDict`
# prepro = lambda pkg: pkg if type(pkg) == str else \
# ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';')))
# pkgs = ','.join([prepro(pkg) for pkg in pkgs])
prepro = lambda pkg: pkg if isinstance(pkg, six.string_types) else \
' '.join((six.iteritems(pkg)[0][0], six.iteritems(pkg)[0][1]))
pkgs = [prepro(pkg) for pkg in pkgs]
ret = {'name': ';'.join(pkgs), 'result': None,
'comment': '', 'changes': {}}
try:
cur_version = __salt__['pip.version'](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Get the packages parsed name and version from the pip library.
# This only is done when there is no requirements or editable parameter.
pkgs_details = []
if pkgs and not (requirements or editable):
comments = []
for pkg in iter(pkgs):
out = _check_pkg_version_format(pkg)
if out['result'] is False:
ret['result'] = False
comments.append(out['comment'])
elif out['result'] is True:
pkgs_details.append((out['prefix'], pkg, out['version_spec']))
if ret['result'] is False:
ret['comment'] = '\n'.join(comments)
return ret
# If a requirements file is specified, only install the contents of the
# requirements file. Similarly, using the --editable flag with pip should
# also ignore the "name" and "pkgs" parameters.
target_pkgs = []
already_installed_comments = []
if requirements or editable:
comments = []
# Append comments if this is a dry run.
if __opts__['test']:
ret['result'] = None
if requirements:
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append('Requirements file \'{0}\' will be '
'processed.'.format(requirements))
if editable:
comments.append(
'Package will be installed in editable mode (i.e. '
'setuptools "develop mode") from {0}.'.format(editable)
)
ret['comment'] = ' '.join(comments)
return ret
# No requirements case.
# Check pre-existence of the requested packages.
else:
# Attempt to pre-cache a the current pip list
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
# If we fail, then just send False, and we'll try again in the next function call
except Exception as exc:
log.exception(exc)
pip_list = False
for prefix, state_pkg_name, version_spec in pkgs_details:
if prefix:
state_pkg_name = state_pkg_name
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, env_vars,
index_url, extra_index_url, pip_list,
**kwargs)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
ret['result'] = False
ret['comment'] = out['comment']
return ret
else:
out = {'result': False, 'comment': None}
result = out['result']
# The package is not present. Add it to the pkgs to install.
if result is False:
# Replace commas (used for version ranges) with semicolons
# (which are not supported) in name so it does not treat
# them as multiple packages.
target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))
# Append comments if this is a dry run.
if __opts__['test']:
msg = 'Python package {0} is set to be installed'
ret['result'] = None
ret['comment'] = msg.format(state_pkg_name)
return ret
# The package is already present and will not be reinstalled.
elif result is True:
# Append comment stating its presence
already_installed_comments.append(out['comment'])
# The command pip.list failed. Abort.
elif result is None:
ret['result'] = None
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
# Call to install the package. Actual installation takes place here
pip_install_call = __salt__['pip.install'](
pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
log=log,
proxy=proxy,
timeout=timeout,
editable=editable,
find_links=find_links,
index_url=index_url,
extra_index_url=extra_index_url,
no_index=no_index,
mirrors=mirrors,
build=build,
target=target,
download=download,
download_cache=download_cache,
source=source,
upgrade=upgrade,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
exists_action=exists_action,
no_deps=no_deps,
no_install=no_install,
no_download=no_download,
install_options=install_options,
global_options=global_options,
user=user,
cwd=cwd,
pre_releases=pre_releases,
cert=cert,
allow_all_external=allow_all_external,
allow_external=allow_external,
allow_unverified=allow_unverified,
process_dependency_links=process_dependency_links,
saltenv=__env__,
env_vars=env_vars,
use_vt=use_vt,
trusted_host=trusted_host,
no_cache_dir=no_cache_dir,
extra_args=extra_args,
**kwargs
)
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
comments = []
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',
]
for line in pip_install_call.get('stdout', '').split('\n'):
if not any(
[
line.strip().startswith(x)
for x in PIP_REQUIREMENTS_NOCHANGE
]
):
ret['changes']['requirements'] = True
if ret['changes'].get('requirements'):
comments.append('Successfully processed requirements file '
'{0}.'.format(requirements))
else:
comments.append('Requirements were already installed.')
if editable:
comments.append('Package successfully installed from VCS '
'checkout {0}.'.format(editable))
ret['changes']['editable'] = True
ret['comment'] = ' '.join(comments)
else:
# Check that the packages set to be installed were installed.
# Create comments reporting success and failures
pkg_404_comms = []
already_installed_packages = set()
for line in pip_install_call.get('stdout', '').split('\n'):
# Output for already installed packages:
# 'Requirement already up-to-date: jinja2 in /usr/local/lib/python2.7/dist-packages\nCleaning up...'
if line.startswith('Requirement already up-to-date: '):
package = line.split(':', 1)[1].split()[0]
already_installed_packages.add(package.lower())
for prefix, state_name in target_pkgs:
# Case for packages that are not an URL
if prefix:
pipsearch = salt.utils.data.CaseInsensitiveDict(
__salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd,
env_vars=env_vars,
**kwargs)
)
# If we didn't find the package in the system after
# installing it report it
if not pipsearch:
pkg_404_comms.append(
'There was no error installing package \'{0}\' '
'although it does not show when calling '
'\'pip.freeze\'.'.format(pkg)
)
else:
if prefix in pipsearch \
and prefix.lower() not in already_installed_packages:
ver = pipsearch[prefix]
ret['changes']['{0}=={1}'.format(prefix, ver)] = 'Installed'
# Case for packages that are an URL
else:
ret['changes']['{0}==???'.format(state_name)] = 'Installed'
# Set comments
aicomms = '\n'.join(already_installed_comments)
succ_comm = 'All packages were successfully installed'\
if not pkg_404_comms else '\n'.join(pkg_404_comms)
ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm
return ret
elif pip_install_call:
ret['result'] = False
if 'stdout' in pip_install_call:
error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
pip_install_call['stderr'])
else:
error = 'Error: {0}'.format(pip_install_call['comment'])
if requirements or editable:
comments = []
if requirements:
comments.append('Unable to process requirements file '
'"{0}".'.format(requirements))
if editable:
comments.append('Unable to install from VCS checkout'
'{0}.'.format(editable))
comments.append(error)
ret['comment'] = ' '.join(comments)
else:
pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
aicomms = '\n'.join(already_installed_comments)
error_comm = ('Failed to install packages: {0}. '
'{1}'.format(pkgs_str, error))
ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
else:
ret['result'] = False
ret['comment'] = 'Could not install package'
return ret
def removed(name,
            requirements=None,
            bin_env=None,
            log=None,
            proxy=None,
            timeout=None,
            user=None,
            cwd=None,
            use_vt=False):
    '''
    Make sure that a package is not installed.

    name
        The name of the package to uninstall
    user
        The user under which to run pip
    bin_env : None
        the pip executable or virtualenv to use
    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    # Query the currently installed packages; if pip itself is unusable,
    # bail out with an explanatory error comment.
    try:
        installed_pkgs = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
    except (CommandExecutionError, CommandNotFoundError) as exc:
        ret['result'] = False
        ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, exc)
        return ret

    # Nothing to do when the package is already absent.
    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package is not installed.'
        return ret

    # Dry run: report the pending removal without touching anything.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Package {0} is set to be removed'.format(name)
        return ret

    uninstalled = __salt__['pip.uninstall'](pkgs=name,
                                            requirements=requirements,
                                            bin_env=bin_env,
                                            log=log,
                                            proxy=proxy,
                                            timeout=timeout,
                                            user=user,
                                            cwd=cwd,
                                            use_vt=use_vt)
    if uninstalled:
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package was successfully removed.'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not remove package.'
    return ret
def mod_aggregate(low, chunks, running):
    '''
    Look up all pip packages in the available low chunks and merge them
    into a single ``pkgs`` ref in the present low data.
    '''
    # Only these state functions support aggregation.
    if low.get('fun') not in ('installed', 'removed'):
        return low
    collected = []
    for chunk in chunks:
        # Skip chunks whose state already ran in this session.
        if __utils__['state.gen_tag'](chunk) in running:
            continue
        if chunk.get('state') != 'pip':
            continue
        # Skip chunks already folded into an earlier aggregate.
        if '__agg__' in chunk:
            continue
        # Only aggregate chunks that call the same state function.
        if chunk.get('fun') != low.get('fun'):
            continue
        # Pull out the pkg names!
        if 'pkgs' in chunk:
            collected.extend(chunk['pkgs'])
            chunk['__agg__'] = True
        elif 'name' in chunk:
            version = chunk.pop('version', None)
            if version is None:
                collected.append(chunk['name'])
            else:
                collected.append({chunk['name']: version})
            chunk['__agg__'] = True
    if collected:
        if 'pkgs' in low:
            low['pkgs'].extend(collected)
        else:
            low['pkgs'] = collected
    return low
|
saltstack/salt
|
salt/states/pip_state.py
|
mod_aggregate
|
python
|
def mod_aggregate(low, chunks, running):
'''
The mod_aggregate function which looks up all packages in the available
low chunks and merges them into a single pkgs ref in the present low data
'''
pkgs = []
pkg_type = None
agg_enabled = [
'installed',
'removed',
]
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = __utils__['state.gen_tag'](chunk)
if tag in running:
# Already ran the pkg state, skip aggregation
continue
if chunk.get('state') == 'pip':
if '__agg__' in chunk:
continue
# Check for the same function
if chunk.get('fun') != low.get('fun'):
continue
# Check first if 'sources' was passed so we don't aggregate pkgs
# and sources together.
if pkg_type is None:
pkg_type = 'pkgs'
if pkg_type == 'pkgs':
# Pull out the pkg names!
if 'pkgs' in chunk:
pkgs.extend(chunk['pkgs'])
chunk['__agg__'] = True
elif 'name' in chunk:
version = chunk.pop('version', None)
if version is not None:
pkgs.append({chunk['name']: version})
else:
pkgs.append(chunk['name'])
chunk['__agg__'] = True
if pkg_type is not None and pkgs:
if pkg_type in low:
low[pkg_type].extend(pkgs)
else:
low[pkg_type] = pkgs
return low
|
The mod_aggregate function which looks up all packages in the available
low chunks and merges them into a single pkgs ref in the present low data
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L1076-L1121
| null |
# -*- coding: utf-8 -*-
'''
Installation of Python Packages Using pip
=========================================
These states manage system installed python packages. Note that pip must be
installed for these states to be available, so pip states should include a
requisite to a pkg.installed state for the package which provides pip
(``python-pip`` in most cases). Example:
.. code-block:: yaml
python-pip:
pkg.installed
virtualenvwrapper:
pip.installed:
- require:
- pkg: python-pip
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
# Import salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import pip
HAS_PIP = True
except ImportError:
HAS_PIP = False
if HAS_PIP is True:
try:
from pip.req import InstallRequirement
_from_line = InstallRequirement.from_line
except ImportError:
# pip 10.0.0 move req module under pip._internal
try:
try:
from pip._internal.req import InstallRequirement
_from_line = InstallRequirement.from_line
except AttributeError:
from pip._internal.req.constructors import install_req_from_line as _from_line
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
try:
from pip.exceptions import InstallationError
except ImportError:
InstallationError = ValueError
# pylint: enable=import-error
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pip'
def __virtual__():
    '''
    Only load if the pip module is available in __salt__
    '''
    # pkg_resources is a hard requirement for the version comparisons
    # performed by this state module.
    if not HAS_PKG_RESOURCES:
        return False, 'The pkg_resources python library is not installed'
    # Expose this state module only when the pip execution module loaded.
    return __virtualname__ if 'pip.list' in __salt__ else False
def _fulfills_version_spec(version, version_spec):
    '''
    Return True when ``version`` satisfies every (operator, version)
    constraint in ``version_spec``, comparing with PEP 440 semantics.
    '''
    # Entries without an operator carry no constraint and are ignored;
    # all() short-circuits on the first failing constraint, matching the
    # early-return behavior of an explicit loop.
    return all(
        salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec,
                                    cmp_func=_pep440_version_cmp)
        for oper, spec in version_spec
        if oper is not None
    )
def _check_pkg_version_format(pkg):
    '''
    Takes a package name and version specification (if any) and checks it
    using the pip library.

    Returns a dict with the keys:
        result:       True if pip could parse ``pkg``, False otherwise
        comment:      error text when parsing failed, else None
        prefix:       parsed project name ('' when ``pkg`` is a bare URL)
        version_spec: list of (operator, version) tuples parsed from ``pkg``
    '''
    ret = {'result': False, 'comment': None,
           'prefix': None, 'version_spec': None}
    if not HAS_PIP:
        # Without an importable pip module we cannot parse requirements at
        # all, so fail early with an explanatory comment.
        ret['comment'] = (
            'An importable Python 2 pip module is required but could not be '
            'found on your system. This usually means that the system\'s pip '
            'package is not installed properly.'
        )
        return ret
    from_vcs = False
    try:
        # Get the requirement object from the pip library
        try:
            # With pip < 1.2, the __version__ attribute does not exist and
            # vcs+URL urls are not properly parsed.
            # The next line is meant to trigger an AttributeError and
            # handle lower pip versions
            log.debug('Installed pip version: %s', pip.__version__)
            install_req = _from_line(pkg)
        except AttributeError:
            log.debug('Installed pip version is lower than 1.2')
            supported_vcs = ('git', 'svn', 'hg', 'bzr')
            if pkg.startswith(supported_vcs):
                # Strip the '<vcs>+' scheme prefix ourselves before handing
                # the URL to pip, since old pip cannot parse it.
                for vcs in supported_vcs:
                    if pkg.startswith(vcs):
                        from_vcs = True
                        install_req = _from_line(
                            pkg.split('{0}+'.format(vcs))[-1]
                        )
                        break
            else:
                install_req = _from_line(pkg)
    except (ValueError, InstallationError) as exc:
        ret['result'] = False
        if not from_vcs and '=' in pkg and '==' not in pkg:
            # A single '=' is a common typo for '==': give a targeted hint.
            ret['comment'] = (
                'Invalid version specification in package {0}. \'=\' is '
                'not supported, use \'==\' instead.'.format(pkg)
            )
            return ret
        ret['comment'] = (
            'pip raised an exception while parsing \'{0}\': {1}'.format(
                pkg, exc
            )
        )
        return ret
    if install_req.req is None:
        # This is most likely an url and there's no way to know what will
        # be installed before actually installing it.
        ret['result'] = True
        ret['prefix'] = ''
        ret['version_spec'] = []
    else:
        ret['result'] = True
        try:
            # Older requirement objects expose project_name/specs directly.
            ret['prefix'] = install_req.req.project_name
            ret['version_spec'] = install_req.req.specs
        except Exception:
            # Newer pip (req module moved under pip._internal — see the
            # import shims at the top of this file) uses packaging's
            # specifier API; derive an equivalent name and
            # (operator, version) list from it.
            ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
            if hasattr(install_req, "specifier"):
                specifier = install_req.specifier
            else:
                specifier = install_req.req.specifier
            ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]
    return ret
def _check_if_installed(prefix,
                        state_pkg_name,
                        version_spec,
                        ignore_installed,
                        force_reinstall,
                        upgrade,
                        user,
                        cwd,
                        bin_env,
                        env_vars,
                        index_url,
                        extra_index_url,
                        pip_list=False,
                        **kwargs):
    '''
    Takes a package name and version specification (if any) and checks
    whether it is installed.

    prefix
        Parsed project name of the package (as produced by
        ``_check_pkg_version_format``), used for case-insensitive lookup.
    state_pkg_name
        The package name exactly as given in the state, used in comments.
    version_spec
        List of (operator, version) tuples the installed package must
        satisfy.
    pip_list
        Optional dict of installed pip packages, and their versions,
        to search through to check if the package is installed. If not
        provided, one will be generated in this function by querying the
        system.

    Returns a dict with keys ``result`` and ``comment``:
        result: None means the command failed to run
        result: True means the package is installed
        result: False means the package is not installed
    '''
    ret = {'result': False, 'comment': None}
    # If we are not passed a pip list, get one:
    pip_list = salt.utils.data.CaseInsensitiveDict(
        pip_list or __salt__['pip.list'](prefix, bin_env=bin_env,
                                         user=user, cwd=cwd,
                                         env_vars=env_vars, **kwargs)
    )
    # If the package was already installed, check
    # the ignore_installed and force_reinstall flags
    if ignore_installed is False and prefix in pip_list:
        if force_reinstall is False and not upgrade:
            # Check desired version (if any) against currently-installed
            if (
                any(version_spec) and
                _fulfills_version_spec(pip_list[prefix], version_spec)
            ) or (not any(version_spec)):
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
        if force_reinstall is False and upgrade:
            # Check desired version (if any) against currently-installed.
            # Pre-release versions are only offered when the version spec
            # itself mentions an alpha/beta/rc segment.
            # NOTE(review): these are substring tests on the spec's version
            # string, so e.g. any 'a' anywhere in the spec enables alphas —
            # confirm this over-inclusion is intended.
            include_alpha = False
            include_beta = False
            include_rc = False
            if any(version_spec):
                for spec in version_spec:
                    if 'a' in spec[1]:
                        include_alpha = True
                    if 'b' in spec[1]:
                        include_beta = True
                    if 'rc' in spec[1]:
                        include_rc = True
            available_versions = __salt__['pip.list_all_versions'](
                prefix, bin_env=bin_env, include_alpha=include_alpha,
                include_beta=include_beta, include_rc=include_rc, user=user,
                cwd=cwd, index_url=index_url, extra_index_url=extra_index_url)
            desired_version = ''
            if any(version_spec):
                # Pick the first satisfying version scanning from the end;
                # assumes pip.list_all_versions returns versions sorted
                # oldest-to-newest — TODO confirm against the pip module.
                for version in reversed(available_versions):
                    if _fulfills_version_spec(version, version_spec):
                        desired_version = version
                        break
            else:
                # No spec: an upgrade targets the last listed version.
                desired_version = available_versions[-1]
            if not desired_version:
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed and\nthe available upgrade '
                                  'doesn\'t fulfills the version '
                                  'requirements'.format(prefix))
                return ret
            if _pep440_version_cmp(pip_list[prefix], desired_version) == 0:
                # Already at exactly the version an upgrade would install.
                ret['result'] = True
                ret['comment'] = ('Python package {0} was already '
                                  'installed'.format(state_pkg_name))
                return ret
    return ret
def _pep440_version_cmp(pkg1, pkg2, ignore_epoch=False):
    '''
    Compares two version strings using pkg_resources.parse_version.

    Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2, and 1 if pkg1 > pkg2.
    Return None if there was a problem making the comparison (e.g. a
    version string pkg_resources cannot parse).

    pkg1 / pkg2
        The version strings to compare.
    ignore_epoch : False
        If True, strip a PEP 440 epoch prefix (e.g. ``1!2.0`` -> ``2.0``)
        from both versions before comparing.
    '''
    normalize = lambda x: six.text_type(x).split('!', 1)[-1] \
        if ignore_epoch else six.text_type(x)
    pkg1 = normalize(pkg1)
    pkg2 = normalize(pkg2)
    try:
        # Parse each version exactly once instead of re-parsing both
        # strings for every comparison operator (the original parsed each
        # up to three times).
        ver1 = pkg_resources.parse_version(pkg1)
        ver2 = pkg_resources.parse_version(pkg2)
        if ver1 < ver2:
            return -1
        if ver1 == ver2:
            return 0
        if ver1 > ver2:
            return 1
    except Exception as exc:
        log.exception(exc)
    return None
def installed(name,
pkgs=None,
pip_bin=None,
requirements=None,
bin_env=None,
use_wheel=False,
no_use_wheel=False,
log=None,
proxy=None,
timeout=None,
repo=None,
editable=None,
find_links=None,
index_url=None,
extra_index_url=None,
no_index=False,
mirrors=None,
build=None,
target=None,
download=None,
download_cache=None,
source=None,
upgrade=False,
force_reinstall=False,
ignore_installed=False,
exists_action=None,
no_deps=False,
no_install=False,
no_download=False,
install_options=None,
global_options=None,
user=None,
cwd=None,
pre_releases=False,
cert=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
process_dependency_links=False,
env_vars=None,
use_vt=False,
trusted_host=None,
no_cache_dir=False,
cache_dir=None,
no_binary=None,
extra_args=None,
**kwargs):
'''
Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_cache_dir:
Disable the cache.
cwd
Current working directory to run pip from
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
extra_args
pip keyword and positional arguments not yet implemented in salt
.. code-block:: yaml
pandas:
pip.installed:
- name: pandas
- extra_args:
- --latest-pip-kwarg: param
- --latest-pip-arg
.. warning::
If unsupported options are passed here that are not supported in a
minion's version of pip, a `No such option error` will be thrown.
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
'''
if pip_bin and not bin_env:
bin_env = pip_bin
# If pkgs is present, ignore name
if pkgs:
if not isinstance(pkgs, list):
return {'name': name,
'result': False,
'changes': {},
'comment': 'pkgs argument must be formatted as a list'}
else:
pkgs = [name]
# Assumption: If `pkg` is not an `string`, it's a `collections.OrderedDict`
# prepro = lambda pkg: pkg if type(pkg) == str else \
# ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';')))
# pkgs = ','.join([prepro(pkg) for pkg in pkgs])
prepro = lambda pkg: pkg if isinstance(pkg, six.string_types) else \
' '.join((six.iteritems(pkg)[0][0], six.iteritems(pkg)[0][1]))
pkgs = [prepro(pkg) for pkg in pkgs]
ret = {'name': ';'.join(pkgs), 'result': None,
'comment': '', 'changes': {}}
try:
cur_version = __salt__['pip.version'](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Get the packages parsed name and version from the pip library.
# This only is done when there is no requirements or editable parameter.
pkgs_details = []
if pkgs and not (requirements or editable):
comments = []
for pkg in iter(pkgs):
out = _check_pkg_version_format(pkg)
if out['result'] is False:
ret['result'] = False
comments.append(out['comment'])
elif out['result'] is True:
pkgs_details.append((out['prefix'], pkg, out['version_spec']))
if ret['result'] is False:
ret['comment'] = '\n'.join(comments)
return ret
# If a requirements file is specified, only install the contents of the
# requirements file. Similarly, using the --editable flag with pip should
# also ignore the "name" and "pkgs" parameters.
target_pkgs = []
already_installed_comments = []
if requirements or editable:
comments = []
# Append comments if this is a dry run.
if __opts__['test']:
ret['result'] = None
if requirements:
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append('Requirements file \'{0}\' will be '
'processed.'.format(requirements))
if editable:
comments.append(
'Package will be installed in editable mode (i.e. '
'setuptools "develop mode") from {0}.'.format(editable)
)
ret['comment'] = ' '.join(comments)
return ret
# No requirements case.
# Check pre-existence of the requested packages.
else:
# Attempt to pre-cache a the current pip list
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
# If we fail, then just send False, and we'll try again in the next function call
except Exception as exc:
log.exception(exc)
pip_list = False
for prefix, state_pkg_name, version_spec in pkgs_details:
if prefix:
state_pkg_name = state_pkg_name
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, env_vars,
index_url, extra_index_url, pip_list,
**kwargs)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
ret['result'] = False
ret['comment'] = out['comment']
return ret
else:
out = {'result': False, 'comment': None}
result = out['result']
# The package is not present. Add it to the pkgs to install.
if result is False:
# Replace commas (used for version ranges) with semicolons
# (which are not supported) in name so it does not treat
# them as multiple packages.
target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))
# Append comments if this is a dry run.
if __opts__['test']:
msg = 'Python package {0} is set to be installed'
ret['result'] = None
ret['comment'] = msg.format(state_pkg_name)
return ret
# The package is already present and will not be reinstalled.
elif result is True:
# Append comment stating its presence
already_installed_comments.append(out['comment'])
# The command pip.list failed. Abort.
elif result is None:
ret['result'] = None
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
# Call to install the package. Actual installation takes place here
pip_install_call = __salt__['pip.install'](
pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
log=log,
proxy=proxy,
timeout=timeout,
editable=editable,
find_links=find_links,
index_url=index_url,
extra_index_url=extra_index_url,
no_index=no_index,
mirrors=mirrors,
build=build,
target=target,
download=download,
download_cache=download_cache,
source=source,
upgrade=upgrade,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
exists_action=exists_action,
no_deps=no_deps,
no_install=no_install,
no_download=no_download,
install_options=install_options,
global_options=global_options,
user=user,
cwd=cwd,
pre_releases=pre_releases,
cert=cert,
allow_all_external=allow_all_external,
allow_external=allow_external,
allow_unverified=allow_unverified,
process_dependency_links=process_dependency_links,
saltenv=__env__,
env_vars=env_vars,
use_vt=use_vt,
trusted_host=trusted_host,
no_cache_dir=no_cache_dir,
extra_args=extra_args,
**kwargs
)
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
comments = []
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',
]
for line in pip_install_call.get('stdout', '').split('\n'):
if not any(
[
line.strip().startswith(x)
for x in PIP_REQUIREMENTS_NOCHANGE
]
):
ret['changes']['requirements'] = True
if ret['changes'].get('requirements'):
comments.append('Successfully processed requirements file '
'{0}.'.format(requirements))
else:
comments.append('Requirements were already installed.')
if editable:
comments.append('Package successfully installed from VCS '
'checkout {0}.'.format(editable))
ret['changes']['editable'] = True
ret['comment'] = ' '.join(comments)
else:
# Check that the packages set to be installed were installed.
# Create comments reporting success and failures
pkg_404_comms = []
already_installed_packages = set()
for line in pip_install_call.get('stdout', '').split('\n'):
# Output for already installed packages:
# 'Requirement already up-to-date: jinja2 in /usr/local/lib/python2.7/dist-packages\nCleaning up...'
if line.startswith('Requirement already up-to-date: '):
package = line.split(':', 1)[1].split()[0]
already_installed_packages.add(package.lower())
for prefix, state_name in target_pkgs:
# Case for packages that are not an URL
if prefix:
pipsearch = salt.utils.data.CaseInsensitiveDict(
__salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd,
env_vars=env_vars,
**kwargs)
)
# If we didn't find the package in the system after
# installing it report it
if not pipsearch:
pkg_404_comms.append(
'There was no error installing package \'{0}\' '
'although it does not show when calling '
'\'pip.freeze\'.'.format(pkg)
)
else:
if prefix in pipsearch \
and prefix.lower() not in already_installed_packages:
ver = pipsearch[prefix]
ret['changes']['{0}=={1}'.format(prefix, ver)] = 'Installed'
# Case for packages that are an URL
else:
ret['changes']['{0}==???'.format(state_name)] = 'Installed'
# Set comments
aicomms = '\n'.join(already_installed_comments)
succ_comm = 'All packages were successfully installed'\
if not pkg_404_comms else '\n'.join(pkg_404_comms)
ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm
return ret
elif pip_install_call:
ret['result'] = False
if 'stdout' in pip_install_call:
error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
pip_install_call['stderr'])
else:
error = 'Error: {0}'.format(pip_install_call['comment'])
if requirements or editable:
comments = []
if requirements:
comments.append('Unable to process requirements file '
'"{0}".'.format(requirements))
if editable:
comments.append('Unable to install from VCS checkout'
'{0}.'.format(editable))
comments.append(error)
ret['comment'] = ' '.join(comments)
else:
pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
aicomms = '\n'.join(already_installed_comments)
error_comm = ('Failed to install packages: {0}. '
'{1}'.format(pkgs_str, error))
ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
else:
ret['result'] = False
ret['comment'] = 'Could not install package'
return ret
def removed(name,
            requirements=None,
            bin_env=None,
            log=None,
            proxy=None,
            timeout=None,
            user=None,
            cwd=None,
            use_vt=False):
    '''
    Make sure that a package is not installed.

    name
        The name of the package to uninstall

    user
        The user under which to run pip

    bin_env : None
        the pip executable or virtualenenv to use

    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    # A failure to enumerate installed packages is a hard error.
    try:
        installed = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret['result'] = False
        ret['comment'] = 'Error uninstalling \'{0}\': {1}'.format(name, err)
        return ret

    # Already gone: nothing to do.
    if name not in installed:
        ret['result'] = True
        ret['comment'] = 'Package is not installed.'
        return ret

    # Dry run: only report the pending removal.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Package {0} is set to be removed'.format(name)
        return ret

    uninstalled = __salt__['pip.uninstall'](pkgs=name,
                                            requirements=requirements,
                                            bin_env=bin_env,
                                            log=log,
                                            proxy=proxy,
                                            timeout=timeout,
                                            user=user,
                                            cwd=cwd,
                                            use_vt=use_vt)
    if uninstalled:
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package was successfully removed.'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not remove package.'
    return ret
def uptodate(name,
             bin_env=None,
             user=None,
             cwd=None,
             use_vt=False):
    '''
    .. versionadded:: 2015.5.0

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference

    user
        The user under which to run pip

    bin_env
        the pip executable or virtualenenv to use

    use_vt
        Use VT terminal emulation (see output while installing)
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update.'}

    # Surface any failure to enumerate upgradable packages as the comment.
    try:
        outdated = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd)
    except Exception as e:
        ret['comment'] = six.text_type(e)
        return ret

    if not outdated:
        ret['result'] = True
        ret['comment'] = 'System is already up-to-date.'
        return ret
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'System update will be performed'
        return ret

    upgrade_info = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt)
    if upgrade_info.get('result') is False:
        # pip.upgrade reported an explicit failure; pass its payload through.
        ret.update(upgrade_info)
    elif upgrade_info:
        ret['result'] = True
        ret['changes'] = upgrade_info
        ret['comment'] = 'Upgrade successful.'
    else:
        ret['comment'] = 'Upgrade failed.'
    return ret
|
saltstack/salt
|
salt/states/cloud.py
|
present
|
python
|
def present(name, cloud_provider, onlyif=None, unless=None, opts=None, **kwargs):
    '''
    Spin up a single instance on a cloud provider, using salt-cloud. This state
    does not take a profile argument; rather, it takes the arguments that would
    normally be configured as part of the state.

    Note that while this function does take any configuration argument that
    would normally be used to create an instance, it will not verify the state
    of any of those arguments on an existing instance. Stateful properties of
    an instance should be configured using their own individual state (i.e.,
    cloud.tagged, cloud.untagged, etc).

    name
        The name of the instance to create

    cloud_provider
        The name of the cloud provider to use

    onlyif
        Run the state only if this shell command succeeds (or, for a
        non-string value, only if it is truthy)

    unless
        Do not run the state if this shell command succeeds (or, for a
        non-string value, if it is truthy)

    opts
        Any extra opts that need to be used
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    retcode = __salt__['cmd.retcode']

    # onlyif gate: strings are run as shell commands (non-zero exit skips the
    # state); any other value is simply evaluated for truthiness.
    if onlyif is not None:
        if isinstance(onlyif, six.string_types):
            if retcode(onlyif, python_shell=True) != 0:
                return _valid(name, comment='onlyif condition is false')
        elif not onlyif:
            return _valid(name, comment='onlyif condition is false')

    # unless gate: the inverse of onlyif.
    if unless is not None:
        if isinstance(unless, six.string_types):
            if retcode(unless, python_shell=True) == 0:
                return _valid(name, comment='unless condition is true')
        elif unless:
            return _valid(name, comment='unless condition is true')

    # provider=None not cloud_provider because
    # need to ensure ALL providers don't have the instance
    if __salt__['cloud.has_instance'](name=name, provider=None):
        ret['result'] = True
        ret['comment'] = 'Already present instance {0}'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Instance {0} needs to be created'.format(name)
        return ret

    info = __salt__['cloud.create'](cloud_provider, name, opts=opts, **kwargs)
    if info and 'Error' not in info:
        ret['changes'] = info
        ret['result'] = True
        ret['comment'] = ('Created instance {0} using provider {1} '
                          'and the following options: {2}').format(
            name,
            cloud_provider,
            pprint.pformat(kwargs)
        )
    elif info and 'Error' in info:
        # BUG FIX: these failure branches previously formatted the message
        # with an undefined name (``profile``), raising NameError instead of
        # reporting the actual failure. Use ``cloud_provider``, which is what
        # this state actually operates on.
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0} '
                          'using provider {1}: {2}').format(
            name,
            cloud_provider,
            info['Error'],
        )
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          ' using provider {1},'
                          ' please check your configuration').format(
            name, cloud_provider)
    return ret
|
Spin up a single instance on a cloud provider, using salt-cloud. This state
does not take a profile argument; rather, it takes the arguments that would
normally be configured as part of the state.
Note that while this function does take any configuration argument that
would normally be used to create an instance, it will not verify the state
of any of those arguments on an existing instance. Stateful properties of
an instance should be configured using their own individual state (i.e.,
cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
cloud_provider
The name of the cloud provider to use
onlyif
Run the state only if this command or condition succeeds
unless
Do not run the state if this command or condition succeeds
opts
Any extra opts that need to be used
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cloud.py#L67-L150
|
[
"def _valid(name, comment='', changes=None):\n if not changes:\n changes = {}\n return {'name': name,\n 'result': True,\n 'changes': changes,\n 'comment': comment}\n"
] |
# -*- coding: utf-8 -*-
'''
Using states instead of maps to deploy clouds
=============================================
.. versionadded:: 2014.1.0
Use this minion to spin up a cloud instance:
.. code-block:: yaml
my-ec2-instance:
cloud.profile:
my-ec2-config
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
# Import 3rd-party libs
from salt.ext import six
# Import Salt Libs
import salt.utils.cloud as suc
def __virtual__():
    '''
    Load this state module only when the ``cloud`` execution module is
    available in ``__salt__``.
    '''
    if 'cloud.profile' in __salt__:
        return True
    return False
def _check_name(name):
    '''
    Validate *name* against the allowed character set and return a
    state-style result dict; ``result`` is False when the name is invalid.
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    invalid = suc.check_name(name, 'a-zA-Z0-9._-')
    if invalid:
        ret['result'] = False
        ret['comment'] = 'Invalid characters in name.'
    else:
        ret['result'] = True
    return ret
def _valid(name, comment='', changes=None):
if not changes:
changes = {}
return {'name': name,
'result': True,
'changes': changes,
'comment': comment}
def _get_instance(names):
    '''
    Run the ``show_instance`` cloud action for *names*, preserving
    ``__opts__['test']`` across the call (the loader resets it to False).
    '''
    saved_test = __opts__.get('test', False)
    result = __salt__['cloud.action'](fun='show_instance', names=names)
    __opts__['test'] = saved_test
    return result
def absent(name, onlyif=None, unless=None):
    '''
    Ensure that no instances with the specified names exist.

    CAUTION: This is a destructive state, which will search all
    configured cloud providers for the named instance,
    and destroy it.

    name
        The name of the instance to destroy

    onlyif
        Run the state only if this shell command succeeds (or, for a
        non-string value, only if it is truthy)

    unless
        Do not run the state if this shell command succeeds (or, for a
        non-string value, if it is truthy)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    retcode = __salt__['cmd.retcode']

    # onlyif/unless gating: strings are run as shell commands, any other
    # value is evaluated for truthiness.
    if onlyif is not None:
        if isinstance(onlyif, six.string_types):
            if retcode(onlyif, python_shell=True) != 0:
                return _valid(name, comment='onlyif condition is false')
        elif not onlyif:
            return _valid(name, comment='onlyif condition is false')
    if unless is not None:
        if isinstance(unless, six.string_types):
            if retcode(unless, python_shell=True) == 0:
                return _valid(name, comment='unless condition is true')
        elif unless:
            return _valid(name, comment='unless condition is true')

    if not __salt__['cloud.has_instance'](name=name, provider=None):
        ret['result'] = True
        ret['comment'] = 'Already absent instance {0}'.format(name)
        return ret
    if __opts__['test']:
        ret['comment'] = 'Instance {0} needs to be destroyed'.format(name)
        return ret

    info = __salt__['cloud.destroy'](name)
    if info and 'Error' not in info:
        ret['changes'] = info
        ret['result'] = True
        ret['comment'] = 'Destroyed instance {0}'.format(name)
    elif info and 'Error' in info:
        # BUG FIX: guard on ``info`` — the previous ``elif 'Error' in info``
        # raised TypeError when cloud.destroy returned None.
        ret['result'] = False
        ret['comment'] = ('Failed to destroy instance {0}: {1}').format(
            name,
            info['Error'],
        )
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to destroy instance {0}'.format(name)
    return ret
def profile(name, profile, onlyif=None, unless=None, opts=None, **kwargs):
    '''
    Create a single instance on a cloud provider, using a salt-cloud profile.

    Note that while profiles used this function do take any configuration
    argument that would normally be used to create an instance using a profile,
    this state will not verify the state of any of those arguments on an
    existing instance. Stateful properties of an instance should be configured
    using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).

    name
        The name of the instance to create

    profile
        The name of the cloud profile to use

    onlyif
        Run the state only if this shell command succeeds (or, for a
        non-string value, only if it is truthy)

    unless
        Do not run the state if this shell command succeeds (or, for a
        non-string value, if it is truthy)

    kwargs
        Any profile override or addition

    opts
        Any extra opts that need to be used
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    retcode = __salt__['cmd.retcode']
    # onlyif gate: strings run as shell commands (non-zero exit skips the
    # state), any other value is evaluated for truthiness.
    if onlyif is not None:
        if not isinstance(onlyif, six.string_types):
            if not onlyif:
                return _valid(name, comment='onlyif condition is false')
        elif isinstance(onlyif, six.string_types):
            if retcode(onlyif, python_shell=True) != 0:
                return _valid(name, comment='onlyif condition is false')
    # unless gate: inverse of onlyif.
    if unless is not None:
        if not isinstance(unless, six.string_types):
            if unless:
                return _valid(name, comment='unless condition is true')
        elif isinstance(unless, six.string_types):
            if retcode(unless, python_shell=True) == 0:
                return _valid(name, comment='unless condition is true')
    # Treat the instance as present only when show_instance returned data
    # with no 'Not Actioned' marker keys.
    instance = _get_instance([name])
    if instance and not any('Not Actioned' in key for key in instance):
        ret['result'] = True
        ret['comment'] = 'Already present instance {0}'.format(name)
        return ret
    if __opts__['test']:
        ret['comment'] = 'Instance {0} needs to be created'.format(name)
        return ret
    info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs, opts=opts)
    # get either {Error: ''} or {namestring: {Error: ''}}
    # which is what we can get from providers returns
    main_error = info.get('Error', '')
    name_error = ''
    if isinstance(info, dict):
        subinfo = info.get(name, {})
        if isinstance(subinfo, dict):
            # per-instance error nested under the instance name
            name_error = subinfo.get('Error', None)
    error = main_error or name_error
    if info and not error:
        node_info = info.get(name)
        ret['result'] = True
        default_msg = 'Created instance {0} using profile {1}'.format(
            name, profile,)
        # some providers support changes
        if 'changes' in node_info:
            ret['changes'] = node_info['changes']
            ret['comment'] = node_info.get('comment', default_msg)
        else:
            ret['changes'] = info
            ret['comment'] = default_msg
    elif error:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          ' using profile {1}: {2}').format(
            name,
            profile,
            '{0}\n{1}\n'.format(main_error, name_error).strip(),
        )
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          'using profile {1}').format(
            name,
            profile,
        )
    return ret
def volume_present(name, provider=None, **kwargs):
    '''
    Ensure that the named block volume exists, creating it if necessary.
    '''
    ret = _check_name(name)
    if not ret['result']:
        return ret

    existing = __salt__['cloud.volume_list'](provider=provider)
    if name in existing:
        ret['result'] = True
        ret['comment'] = 'Volume exists: {0}'.format(name)
        return ret
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Volume {0} will be created.'.format(name)
        return ret

    created = __salt__['cloud.volume_create'](
        names=name,
        provider=provider,
        **kwargs
    )
    if created:
        ret['result'] = True
        ret['comment'] = 'Volume {0} was created'.format(name)
        ret['changes'] = {'old': None, 'new': created}
    else:
        ret['result'] = False
        ret['comment'] = 'Volume {0} failed to create.'.format(name)
    return ret
def volume_absent(name, provider=None, **kwargs):
    '''
    Ensure that the named block volume does not exist, deleting it if
    necessary.
    '''
    ret = _check_name(name)
    if not ret['result']:
        return ret

    existing = __salt__['cloud.volume_list'](provider=provider)
    if name not in existing:
        ret['result'] = True
        ret['comment'] = 'Volume is absent.'
        return ret
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Volume {0} will be deleted.'.format(name)
        return ret

    deleted = __salt__['cloud.volume_delete'](
        names=name,
        provider=provider,
        **kwargs
    )
    if deleted:
        ret['result'] = True
        ret['comment'] = 'Volume {0} was deleted'.format(name)
        ret['changes'] = {'old': existing[name], 'new': deleted}
    else:
        ret['result'] = False
        ret['comment'] = 'Volume {0} failed to delete.'.format(name)
    return ret
def volume_attached(name, server_name, provider=None, **kwargs):
    '''
    Ensure that the named block volume is attached to the named server,
    attaching it if necessary.

    name
        The name of the volume

    server_name
        The name of the server the volume should be attached to
    '''
    # Validate both resource names before touching the provider.
    ret = _check_name(name)
    if not ret['result']:
        return ret
    ret = _check_name(server_name)
    if not ret['result']:
        return ret

    volumes = __salt__['cloud.volume_list'](provider=provider)
    instance = __salt__['cloud.action'](
        fun='show_instance',
        names=server_name
    )

    if name in volumes and volumes[name]['attachments']:
        ret['comment'] = (
            'Volume {name} is already attached: {attachments}'.format(
                **volumes[name]
            )
        )
        ret['result'] = True
        return ret
    elif name not in volumes:
        ret['comment'] = 'Volume {0} does not exist'.format(name)
        ret['result'] = False
        return ret
    elif not instance:
        ret['comment'] = 'Server {0} does not exist'.format(server_name)
        ret['result'] = False
        return ret
    elif __opts__['test']:
        # BUG FIX: message previously read 'will be will be attached.'
        ret['comment'] = 'Volume {0} will be attached.'.format(name)
        ret['result'] = None
        return ret

    response = __salt__['cloud.volume_attach'](
        provider=provider,
        names=name,
        server_name=server_name,
        **kwargs
    )
    if response:
        ret['result'] = True
        # BUG FIX: message previously read 'was created' after an attach.
        ret['comment'] = 'Volume {0} was attached'.format(name)
        ret['changes'] = {'old': volumes[name], 'new': response}
    else:
        ret['result'] = False
        ret['comment'] = 'Volume {0} failed to attach.'.format(name)
    return ret
def volume_detached(name, server_name=None, provider=None, **kwargs):
    '''
    Ensure that the named block volume is not attached, detaching it if
    necessary.

    Returns True if the server or volume do not exist.

    name
        The name of the volume

    server_name
        The name of the server the volume should be detached from
        (optional)
    '''
    ret = _check_name(name)
    if not ret['result']:
        return ret
    if server_name is not None:
        ret = _check_name(server_name)
        if not ret['result']:
            return ret

    volumes = __salt__['cloud.volume_list'](provider=provider)
    if server_name:
        # BUG FIX: previously looked up the *volume* name (``names=[name]``)
        # here instead of the server, so the server-existence check below
        # never inspected the right resource.
        instance = __salt__['cloud.action'](fun='show_instance',
                                            names=[server_name])
    else:
        instance = None

    if name in volumes and not volumes[name]['attachments']:
        ret['comment'] = (
            'Volume {name} is not currently attached to anything.'
        ).format(**volumes[name])
        ret['result'] = True
        return ret
    elif name not in volumes:
        ret['comment'] = 'Volume {0} does not exist'.format(name)
        ret['result'] = True
        return ret
    elif not instance and server_name is not None:
        ret['comment'] = 'Server {0} does not exist'.format(server_name)
        ret['result'] = True
        return ret
    elif __opts__['test']:
        # BUG FIX: message previously read 'will be will be detached.'
        ret['comment'] = 'Volume {0} will be detached.'.format(name)
        ret['result'] = None
        return ret

    response = __salt__['cloud.volume_detach'](
        provider=provider,
        names=name,
        server_name=server_name,
        **kwargs
    )
    if response:
        ret['result'] = True
        # BUG FIX: message previously read 'was created' after a detach.
        ret['comment'] = 'Volume {0} was detached'.format(name)
        ret['changes'] = {'old': volumes[name], 'new': response}
    else:
        ret['result'] = False
        ret['comment'] = 'Volume {0} failed to detach.'.format(name)
    return ret
|
saltstack/salt
|
salt/states/cloud.py
|
absent
|
python
|
def absent(name, onlyif=None, unless=None):
'''
Ensure that no instances with the specified names exist.
CAUTION: This is a destructive state, which will search all
configured cloud providers for the named instance,
and destroy it.
name
The name of the instance to destroy
onlyif
Do run the state only if is unless succeed
unless
Do not run the state at least unless succeed
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
if not __salt__['cloud.has_instance'](name=name, provider=None):
ret['result'] = True
ret['comment'] = 'Already absent instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be destroyed'.format(name)
return ret
info = __salt__['cloud.destroy'](name)
if info and 'Error' not in info:
ret['changes'] = info
ret['result'] = True
ret['comment'] = 'Destroyed instance {0}'.format(name)
elif 'Error' in info:
ret['result'] = False
ret['comment'] = ('Failed to destroy instance {0}: {1}').format(
name,
info['Error'],
)
else:
ret['result'] = False
ret['comment'] = 'Failed to destroy instance {0}'.format(name)
return ret
|
Ensure that no instances with the specified names exist.
CAUTION: This is a destructive state, which will search all
configured cloud providers for the named instance,
and destroy it.
name
The name of the instance to destroy
onlyif
Run the state only if this command or condition succeeds
unless
Do not run the state if this command or condition succeeds
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cloud.py#L153-L215
|
[
"def _valid(name, comment='', changes=None):\n if not changes:\n changes = {}\n return {'name': name,\n 'result': True,\n 'changes': changes,\n 'comment': comment}\n"
] |
# -*- coding: utf-8 -*-
'''
Using states instead of maps to deploy clouds
=============================================
.. versionadded:: 2014.1.0
Use this minion to spin up a cloud instance:
.. code-block:: yaml
my-ec2-instance:
cloud.profile:
my-ec2-config
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
# Import 3rd-party libs
from salt.ext import six
# Import Salt Libs
import salt.utils.cloud as suc
def __virtual__():
    '''
    Load this state module only when the ``cloud`` execution module is
    available in ``__salt__``.
    '''
    if 'cloud.profile' in __salt__:
        return True
    return False
def _check_name(name):
    '''
    Validate *name* against the allowed character set and return a
    state-style result dict; ``result`` is False when the name is invalid.
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    invalid = suc.check_name(name, 'a-zA-Z0-9._-')
    if invalid:
        ret['result'] = False
        ret['comment'] = 'Invalid characters in name.'
    else:
        ret['result'] = True
    return ret
def _valid(name, comment='', changes=None):
if not changes:
changes = {}
return {'name': name,
'result': True,
'changes': changes,
'comment': comment}
def _get_instance(names):
    '''
    Run the ``show_instance`` cloud action for *names*, preserving
    ``__opts__['test']`` across the call (the loader resets it to False).
    '''
    saved_test = __opts__.get('test', False)
    result = __salt__['cloud.action'](fun='show_instance', names=names)
    __opts__['test'] = saved_test
    return result
def present(name, cloud_provider, onlyif=None, unless=None, opts=None, **kwargs):
    '''
    Spin up a single instance on a cloud provider, using salt-cloud. This state
    does not take a profile argument; rather, it takes the arguments that would
    normally be configured as part of the state.

    Note that while this function does take any configuration argument that
    would normally be used to create an instance, it will not verify the state
    of any of those arguments on an existing instance. Stateful properties of
    an instance should be configured using their own individual state (i.e.,
    cloud.tagged, cloud.untagged, etc).

    name
        The name of the instance to create

    cloud_provider
        The name of the cloud provider to use

    onlyif
        Run the state only if this shell command succeeds (or, for a
        non-string value, only if it is truthy)

    unless
        Do not run the state if this shell command succeeds (or, for a
        non-string value, if it is truthy)

    opts
        Any extra opts that need to be used
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    retcode = __salt__['cmd.retcode']

    # onlyif gate: strings are run as shell commands (non-zero exit skips the
    # state); any other value is simply evaluated for truthiness.
    if onlyif is not None:
        if isinstance(onlyif, six.string_types):
            if retcode(onlyif, python_shell=True) != 0:
                return _valid(name, comment='onlyif condition is false')
        elif not onlyif:
            return _valid(name, comment='onlyif condition is false')

    # unless gate: the inverse of onlyif.
    if unless is not None:
        if isinstance(unless, six.string_types):
            if retcode(unless, python_shell=True) == 0:
                return _valid(name, comment='unless condition is true')
        elif unless:
            return _valid(name, comment='unless condition is true')

    # provider=None not cloud_provider because
    # need to ensure ALL providers don't have the instance
    if __salt__['cloud.has_instance'](name=name, provider=None):
        ret['result'] = True
        ret['comment'] = 'Already present instance {0}'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Instance {0} needs to be created'.format(name)
        return ret

    info = __salt__['cloud.create'](cloud_provider, name, opts=opts, **kwargs)
    if info and 'Error' not in info:
        ret['changes'] = info
        ret['result'] = True
        ret['comment'] = ('Created instance {0} using provider {1} '
                          'and the following options: {2}').format(
            name,
            cloud_provider,
            pprint.pformat(kwargs)
        )
    elif info and 'Error' in info:
        # BUG FIX: these failure branches previously formatted the message
        # with an undefined name (``profile``), raising NameError instead of
        # reporting the actual failure. Use ``cloud_provider``, which is what
        # this state actually operates on.
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0} '
                          'using provider {1}: {2}').format(
            name,
            cloud_provider,
            info['Error'],
        )
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          ' using provider {1},'
                          ' please check your configuration').format(
            name, cloud_provider)
    return ret
def profile(name, profile, onlyif=None, unless=None, opts=None, **kwargs):
'''
Create a single instance on a cloud provider, using a salt-cloud profile.
Note that while profiles used this function do take any configuration
argument that would normally be used to create an instance using a profile,
this state will not verify the state of any of those arguments on an
existing instance. Stateful properties of an instance should be configured
using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
profile
The name of the cloud profile to use
onlyif
Do run the state only if is unless succeed
unless
Do not run the state at least unless succeed
kwargs
Any profile override or addition
opts
Any extra opts that need to be used
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
instance = _get_instance([name])
if instance and not any('Not Actioned' in key for key in instance):
ret['result'] = True
ret['comment'] = 'Already present instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs, opts=opts)
# get either {Error: ''} or {namestring: {Error: ''}}
# which is what we can get from providers returns
main_error = info.get('Error', '')
name_error = ''
if isinstance(info, dict):
subinfo = info.get(name, {})
if isinstance(subinfo, dict):
name_error = subinfo.get('Error', None)
error = main_error or name_error
if info and not error:
node_info = info.get(name)
ret['result'] = True
default_msg = 'Created instance {0} using profile {1}'.format(
name, profile,)
# some providers support changes
if 'changes' in node_info:
ret['changes'] = node_info['changes']
ret['comment'] = node_info.get('comment', default_msg)
else:
ret['changes'] = info
ret['comment'] = default_msg
elif error:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1}: {2}').format(
name,
profile,
'{0}\n{1}\n'.format(main_error, name_error).strip(),
)
else:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
'using profile {1}').format(
name,
profile,
)
return ret
def volume_present(name, provider=None, **kwargs):
'''
Check that a block volume exists.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name in volumes:
ret['comment'] = 'Volume exists: {0}'.format(name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be created.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_create'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': None, 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to create.'.format(name)
return ret
def volume_absent(name, provider=None, **kwargs):
'''
Check that a block volume exists.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name not in volumes:
ret['comment'] = 'Volume is absent.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be deleted.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_delete'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was deleted'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to delete.'.format(name)
return ret
def volume_attached(name, server_name, provider=None, **kwargs):
'''
Check if a block volume is attached.
'''
ret = _check_name(name)
if not ret['result']:
return ret
ret = _check_name(server_name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
instance = __salt__['cloud.action'](
fun='show_instance',
names=server_name
)
if name in volumes and volumes[name]['attachments']:
volume = volumes[name]
ret['comment'] = (
'Volume {name} is already attached: {attachments}'.format(
**volumes[name]
)
)
ret['result'] = True
return ret
elif name not in volumes:
ret['comment'] = 'Volume {0} does not exist'.format(name)
ret['result'] = False
return ret
elif not instance:
ret['comment'] = 'Server {0} does not exist'.format(server_name)
ret['result'] = False
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be will be attached.'.format(
name
)
ret['result'] = None
return ret
response = __salt__['cloud.volume_attach'](
provider=provider,
names=name,
server_name=server_name,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to attach.'.format(name)
return ret
def volume_detached(name, server_name=None, provider=None, **kwargs):
'''
Check if a block volume is attached.
Returns True if server or Volume do not exist.
'''
ret = _check_name(name)
if not ret['result']:
return ret
if server_name is not None:
ret = _check_name(server_name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if server_name:
instance = __salt__['cloud.action'](fun='show_instance', names=[name])
else:
instance = None
if name in volumes and not volumes[name]['attachments']:
volume = volumes[name]
ret['comment'] = (
'Volume {name} is not currently attached to anything.'
).format(**volumes[name])
ret['result'] = True
return ret
elif name not in volumes:
ret['comment'] = 'Volume {0} does not exist'.format(name)
ret['result'] = True
return ret
elif not instance and server_name is not None:
ret['comment'] = 'Server {0} does not exist'.format(server_name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be will be detached.'.format(
name
)
ret['result'] = None
return ret
response = __salt__['cloud.volume_detach'](
provider=provider,
names=name,
server_name=server_name,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to detach.'.format(name)
return ret
|
saltstack/salt
|
salt/states/cloud.py
|
profile
|
python
|
def profile(name, profile, onlyif=None, unless=None, opts=None, **kwargs):
'''
Create a single instance on a cloud provider, using a salt-cloud profile.
Note that while profiles used this function do take any configuration
argument that would normally be used to create an instance using a profile,
this state will not verify the state of any of those arguments on an
existing instance. Stateful properties of an instance should be configured
using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
profile
The name of the cloud profile to use
onlyif
Do run the state only if is unless succeed
unless
Do not run the state at least unless succeed
kwargs
Any profile override or addition
opts
Any extra opts that need to be used
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
instance = _get_instance([name])
if instance and not any('Not Actioned' in key for key in instance):
ret['result'] = True
ret['comment'] = 'Already present instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs, opts=opts)
# get either {Error: ''} or {namestring: {Error: ''}}
# which is what we can get from providers returns
main_error = info.get('Error', '')
name_error = ''
if isinstance(info, dict):
subinfo = info.get(name, {})
if isinstance(subinfo, dict):
name_error = subinfo.get('Error', None)
error = main_error or name_error
if info and not error:
node_info = info.get(name)
ret['result'] = True
default_msg = 'Created instance {0} using profile {1}'.format(
name, profile,)
# some providers support changes
if 'changes' in node_info:
ret['changes'] = node_info['changes']
ret['comment'] = node_info.get('comment', default_msg)
else:
ret['changes'] = info
ret['comment'] = default_msg
elif error:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1}: {2}').format(
name,
profile,
'{0}\n{1}\n'.format(main_error, name_error).strip(),
)
else:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
'using profile {1}').format(
name,
profile,
)
return ret
|
Create a single instance on a cloud provider, using a salt-cloud profile.
Note that while profiles used this function do take any configuration
argument that would normally be used to create an instance using a profile,
this state will not verify the state of any of those arguments on an
existing instance. Stateful properties of an instance should be configured
using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
profile
The name of the cloud profile to use
onlyif
Do run the state only if is unless succeed
unless
Do not run the state at least unless succeed
kwargs
Any profile override or addition
opts
Any extra opts that need to be used
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cloud.py#L218-L313
|
[
"def _valid(name, comment='', changes=None):\n if not changes:\n changes = {}\n return {'name': name,\n 'result': True,\n 'changes': changes,\n 'comment': comment}\n",
"def _get_instance(names):\n # for some reason loader overwrites __opts__['test'] with default\n # value of False, thus store and then load it again after action\n test = __opts__.get('test', False)\n instance = __salt__['cloud.action'](fun='show_instance', names=names)\n __opts__['test'] = test\n return instance\n"
] |
# -*- coding: utf-8 -*-
'''
Using states instead of maps to deploy clouds
=============================================
.. versionadded:: 2014.1.0
Use this minion to spin up a cloud instance:
.. code-block:: yaml
my-ec2-instance:
cloud.profile:
my-ec2-config
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
# Import 3rd-party libs
from salt.ext import six
# Import Salt Libs
import salt.utils.cloud as suc
def __virtual__():
'''
Only load if the cloud module is available in __salt__
'''
return 'cloud.profile' in __salt__
def _check_name(name):
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if suc.check_name(name, 'a-zA-Z0-9._-'):
ret['comment'] = 'Invalid characters in name.'
ret['result'] = False
return ret
else:
ret['result'] = True
return ret
def _valid(name, comment='', changes=None):
if not changes:
changes = {}
return {'name': name,
'result': True,
'changes': changes,
'comment': comment}
def _get_instance(names):
# for some reason loader overwrites __opts__['test'] with default
# value of False, thus store and then load it again after action
test = __opts__.get('test', False)
instance = __salt__['cloud.action'](fun='show_instance', names=names)
__opts__['test'] = test
return instance
def present(name, cloud_provider, onlyif=None, unless=None, opts=None, **kwargs):
'''
Spin up a single instance on a cloud provider, using salt-cloud. This state
does not take a profile argument; rather, it takes the arguments that would
normally be configured as part of the state.
Note that while this function does take any configuration argument that
would normally be used to create an instance, it will not verify the state
of any of those arguments on an existing instance. Stateful properties of
an instance should be configured using their own individual state (i.e.,
cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
cloud_provider
The name of the cloud provider to use
onlyif
Do run the state only if is unless succeed
unless
Do not run the state at least unless succeed
opts
Any extra opts that need to be used
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
# provider=None not cloud_provider because
# need to ensure ALL providers don't have the instance
if __salt__['cloud.has_instance'](name=name, provider=None):
ret['result'] = True
ret['comment'] = 'Already present instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.create'](cloud_provider, name, opts=opts, **kwargs)
if info and 'Error' not in info:
ret['changes'] = info
ret['result'] = True
ret['comment'] = ('Created instance {0} using provider {1} '
'and the following options: {2}').format(
name,
cloud_provider,
pprint.pformat(kwargs)
)
elif info and 'Error' in info:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
'using profile {1}: {2}').format(
name,
profile,
info['Error'],
)
else:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1},'
' please check your configuration').format(name,
profile)
return ret
def absent(name, onlyif=None, unless=None):
'''
Ensure that no instances with the specified names exist.
CAUTION: This is a destructive state, which will search all
configured cloud providers for the named instance,
and destroy it.
name
The name of the instance to destroy
onlyif
Do run the state only if is unless succeed
unless
Do not run the state at least unless succeed
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
if not __salt__['cloud.has_instance'](name=name, provider=None):
ret['result'] = True
ret['comment'] = 'Already absent instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be destroyed'.format(name)
return ret
info = __salt__['cloud.destroy'](name)
if info and 'Error' not in info:
ret['changes'] = info
ret['result'] = True
ret['comment'] = 'Destroyed instance {0}'.format(name)
elif 'Error' in info:
ret['result'] = False
ret['comment'] = ('Failed to destroy instance {0}: {1}').format(
name,
info['Error'],
)
else:
ret['result'] = False
ret['comment'] = 'Failed to destroy instance {0}'.format(name)
return ret
def volume_present(name, provider=None, **kwargs):
'''
Check that a block volume exists.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name in volumes:
ret['comment'] = 'Volume exists: {0}'.format(name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be created.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_create'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': None, 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to create.'.format(name)
return ret
def volume_absent(name, provider=None, **kwargs):
'''
Check that a block volume exists.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name not in volumes:
ret['comment'] = 'Volume is absent.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be deleted.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_delete'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was deleted'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to delete.'.format(name)
return ret
def volume_attached(name, server_name, provider=None, **kwargs):
'''
Check if a block volume is attached.
'''
ret = _check_name(name)
if not ret['result']:
return ret
ret = _check_name(server_name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
instance = __salt__['cloud.action'](
fun='show_instance',
names=server_name
)
if name in volumes and volumes[name]['attachments']:
volume = volumes[name]
ret['comment'] = (
'Volume {name} is already attached: {attachments}'.format(
**volumes[name]
)
)
ret['result'] = True
return ret
elif name not in volumes:
ret['comment'] = 'Volume {0} does not exist'.format(name)
ret['result'] = False
return ret
elif not instance:
ret['comment'] = 'Server {0} does not exist'.format(server_name)
ret['result'] = False
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be will be attached.'.format(
name
)
ret['result'] = None
return ret
response = __salt__['cloud.volume_attach'](
provider=provider,
names=name,
server_name=server_name,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to attach.'.format(name)
return ret
def volume_detached(name, server_name=None, provider=None, **kwargs):
'''
Check if a block volume is attached.
Returns True if server or Volume do not exist.
'''
ret = _check_name(name)
if not ret['result']:
return ret
if server_name is not None:
ret = _check_name(server_name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if server_name:
instance = __salt__['cloud.action'](fun='show_instance', names=[name])
else:
instance = None
if name in volumes and not volumes[name]['attachments']:
volume = volumes[name]
ret['comment'] = (
'Volume {name} is not currently attached to anything.'
).format(**volumes[name])
ret['result'] = True
return ret
elif name not in volumes:
ret['comment'] = 'Volume {0} does not exist'.format(name)
ret['result'] = True
return ret
elif not instance and server_name is not None:
ret['comment'] = 'Server {0} does not exist'.format(server_name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be will be detached.'.format(
name
)
ret['result'] = None
return ret
response = __salt__['cloud.volume_detach'](
provider=provider,
names=name,
server_name=server_name,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to detach.'.format(name)
return ret
|
saltstack/salt
|
salt/states/cloud.py
|
volume_present
|
python
|
def volume_present(name, provider=None, **kwargs):
'''
Check that a block volume exists.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name in volumes:
ret['comment'] = 'Volume exists: {0}'.format(name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be created.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_create'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': None, 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to create.'.format(name)
return ret
|
Check that a block volume exists.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cloud.py#L316-L347
|
[
"def _check_name(name):\n ret = {'name': name,\n 'changes': {},\n 'result': None,\n 'comment': ''}\n if suc.check_name(name, 'a-zA-Z0-9._-'):\n ret['comment'] = 'Invalid characters in name.'\n ret['result'] = False\n return ret\n else:\n ret['result'] = True\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Using states instead of maps to deploy clouds
=============================================
.. versionadded:: 2014.1.0
Use this minion to spin up a cloud instance:
.. code-block:: yaml
my-ec2-instance:
cloud.profile:
my-ec2-config
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
# Import 3rd-party libs
from salt.ext import six
# Import Salt Libs
import salt.utils.cloud as suc
def __virtual__():
'''
Only load if the cloud module is available in __salt__
'''
return 'cloud.profile' in __salt__
def _check_name(name):
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if suc.check_name(name, 'a-zA-Z0-9._-'):
ret['comment'] = 'Invalid characters in name.'
ret['result'] = False
return ret
else:
ret['result'] = True
return ret
def _valid(name, comment='', changes=None):
if not changes:
changes = {}
return {'name': name,
'result': True,
'changes': changes,
'comment': comment}
def _get_instance(names):
# for some reason loader overwrites __opts__['test'] with default
# value of False, thus store and then load it again after action
test = __opts__.get('test', False)
instance = __salt__['cloud.action'](fun='show_instance', names=names)
__opts__['test'] = test
return instance
def present(name, cloud_provider, onlyif=None, unless=None, opts=None, **kwargs):
'''
Spin up a single instance on a cloud provider, using salt-cloud. This state
does not take a profile argument; rather, it takes the arguments that would
normally be configured as part of the state.
Note that while this function does take any configuration argument that
would normally be used to create an instance, it will not verify the state
of any of those arguments on an existing instance. Stateful properties of
an instance should be configured using their own individual state (i.e.,
cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
cloud_provider
The name of the cloud provider to use
onlyif
Do run the state only if is unless succeed
unless
Do not run the state at least unless succeed
opts
Any extra opts that need to be used
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
# provider=None not cloud_provider because
# need to ensure ALL providers don't have the instance
if __salt__['cloud.has_instance'](name=name, provider=None):
ret['result'] = True
ret['comment'] = 'Already present instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.create'](cloud_provider, name, opts=opts, **kwargs)
if info and 'Error' not in info:
ret['changes'] = info
ret['result'] = True
ret['comment'] = ('Created instance {0} using provider {1} '
'and the following options: {2}').format(
name,
cloud_provider,
pprint.pformat(kwargs)
)
elif info and 'Error' in info:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
'using profile {1}: {2}').format(
name,
profile,
info['Error'],
)
else:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1},'
' please check your configuration').format(name,
profile)
return ret
def absent(name, onlyif=None, unless=None):
'''
Ensure that no instances with the specified names exist.
CAUTION: This is a destructive state, which will search all
configured cloud providers for the named instance,
and destroy it.
name
The name of the instance to destroy
onlyif
Do run the state only if is unless succeed
unless
Do not run the state at least unless succeed
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
if not __salt__['cloud.has_instance'](name=name, provider=None):
ret['result'] = True
ret['comment'] = 'Already absent instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be destroyed'.format(name)
return ret
info = __salt__['cloud.destroy'](name)
if info and 'Error' not in info:
ret['changes'] = info
ret['result'] = True
ret['comment'] = 'Destroyed instance {0}'.format(name)
elif 'Error' in info:
ret['result'] = False
ret['comment'] = ('Failed to destroy instance {0}: {1}').format(
name,
info['Error'],
)
else:
ret['result'] = False
ret['comment'] = 'Failed to destroy instance {0}'.format(name)
return ret
def profile(name, profile, onlyif=None, unless=None, opts=None, **kwargs):
'''
Create a single instance on a cloud provider, using a salt-cloud profile.
Note that while profiles used this function do take any configuration
argument that would normally be used to create an instance using a profile,
this state will not verify the state of any of those arguments on an
existing instance. Stateful properties of an instance should be configured
using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
profile
The name of the cloud profile to use
onlyif
Do run the state only if is unless succeed
unless
Do not run the state at least unless succeed
kwargs
Any profile override or addition
opts
Any extra opts that need to be used
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
instance = _get_instance([name])
if instance and not any('Not Actioned' in key for key in instance):
ret['result'] = True
ret['comment'] = 'Already present instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs, opts=opts)
# get either {Error: ''} or {namestring: {Error: ''}}
# which is what we can get from providers returns
main_error = info.get('Error', '')
name_error = ''
if isinstance(info, dict):
subinfo = info.get(name, {})
if isinstance(subinfo, dict):
name_error = subinfo.get('Error', None)
error = main_error or name_error
if info and not error:
node_info = info.get(name)
ret['result'] = True
default_msg = 'Created instance {0} using profile {1}'.format(
name, profile,)
# some providers support changes
if 'changes' in node_info:
ret['changes'] = node_info['changes']
ret['comment'] = node_info.get('comment', default_msg)
else:
ret['changes'] = info
ret['comment'] = default_msg
elif error:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1}: {2}').format(
name,
profile,
'{0}\n{1}\n'.format(main_error, name_error).strip(),
)
else:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
'using profile {1}').format(
name,
profile,
)
return ret
def volume_absent(name, provider=None, **kwargs):
'''
Check that a block volume exists.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name not in volumes:
ret['comment'] = 'Volume is absent.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be deleted.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_delete'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was deleted'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to delete.'.format(name)
return ret
def volume_attached(name, server_name, provider=None, **kwargs):
'''
Check if a block volume is attached.
'''
ret = _check_name(name)
if not ret['result']:
return ret
ret = _check_name(server_name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
instance = __salt__['cloud.action'](
fun='show_instance',
names=server_name
)
if name in volumes and volumes[name]['attachments']:
volume = volumes[name]
ret['comment'] = (
'Volume {name} is already attached: {attachments}'.format(
**volumes[name]
)
)
ret['result'] = True
return ret
elif name not in volumes:
ret['comment'] = 'Volume {0} does not exist'.format(name)
ret['result'] = False
return ret
elif not instance:
ret['comment'] = 'Server {0} does not exist'.format(server_name)
ret['result'] = False
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be will be attached.'.format(
name
)
ret['result'] = None
return ret
response = __salt__['cloud.volume_attach'](
provider=provider,
names=name,
server_name=server_name,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to attach.'.format(name)
return ret
def volume_detached(name, server_name=None, provider=None, **kwargs):
    '''
    Ensure that a block volume is detached.

    Returns True if the server or the volume does not exist.

    name
        The name of the volume to detach.

    server_name
        The server the volume is expected to be attached to, if any.

    provider
        The cloud provider to use.
    '''
    ret = _check_name(name)
    if not ret['result']:
        return ret
    if server_name is not None:
        ret = _check_name(server_name)
        if not ret['result']:
            return ret
    volumes = __salt__['cloud.volume_list'](provider=provider)
    if server_name:
        # was: names=[name] -- queried show_instance with the *volume* name,
        # so the "Server does not exist" check below could never be accurate.
        instance = __salt__['cloud.action'](fun='show_instance', names=[server_name])
    else:
        instance = None
    if name in volumes and not volumes[name]['attachments']:
        # Already detached: nothing to do.
        ret['comment'] = (
            'Volume {name} is not currently attached to anything.'
        ).format(**volumes[name])
        ret['result'] = True
        return ret
    elif name not in volumes:
        ret['comment'] = 'Volume {0} does not exist'.format(name)
        ret['result'] = True
        return ret
    elif not instance and server_name is not None:
        ret['comment'] = 'Server {0} does not exist'.format(server_name)
        ret['result'] = True
        return ret
    elif __opts__['test']:
        # was: 'will be will be detached.' (doubled words)
        ret['comment'] = 'Volume {0} will be detached.'.format(
            name
        )
        ret['result'] = None
        return ret
    response = __salt__['cloud.volume_detach'](
        provider=provider,
        names=name,
        server_name=server_name,
        **kwargs
    )
    if response:
        ret['result'] = True
        # was: 'was created' -- copy/paste from volume_present.
        ret['comment'] = 'Volume {0} was detached'.format(name)
        ret['changes'] = {'old': volumes[name], 'new': response}
    else:
        ret['result'] = False
        ret['comment'] = 'Volume {0} failed to detach.'.format(name)
    return ret
|
saltstack/salt
|
salt/states/cloud.py
|
volume_attached
|
python
|
def volume_attached(name, server_name, provider=None, **kwargs):
    '''
    Ensure a block volume is attached to the named server.
    '''
    # Validate both the volume and the server names up front.
    ret = _check_name(name)
    if not ret['result']:
        return ret
    ret = _check_name(server_name)
    if not ret['result']:
        return ret
    volumes = __salt__['cloud.volume_list'](provider=provider)
    instance = __salt__['cloud.action'](
        fun='show_instance',
        names=server_name
    )
    if name in volumes and volumes[name]['attachments']:
        # Already attached: report and succeed without changes.
        volume = volumes[name]
        ret['comment'] = (
            'Volume {name} is already attached: {attachments}'.format(
                **volumes[name]
            )
        )
        ret['result'] = True
        return ret
    elif name not in volumes:
        ret['comment'] = 'Volume {0} does not exist'.format(name)
        ret['result'] = False
        return ret
    elif not instance:
        ret['comment'] = 'Server {0} does not exist'.format(server_name)
        ret['result'] = False
        return ret
    elif __opts__['test']:
        # NOTE(review): message reads 'will be will be attached.' -- typo.
        ret['comment'] = 'Volume {0} will be will be attached.'.format(
            name
        )
        ret['result'] = None
        return ret
    response = __salt__['cloud.volume_attach'](
        provider=provider,
        names=name,
        server_name=server_name,
        **kwargs
    )
    if response:
        ret['result'] = True
        # NOTE(review): 'was created' looks like a copy/paste from
        # volume_present; this state attaches -- confirm and fix upstream.
        ret['comment'] = 'Volume {0} was created'.format(name)
        ret['changes'] = {'old': volumes[name], 'new': response}
    else:
        ret['result'] = False
        ret['comment'] = 'Volume {0} failed to attach.'.format(name)
    return ret
|
Check if a block volume is attached.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cloud.py#L384-L439
|
[
"def _check_name(name):\n ret = {'name': name,\n 'changes': {},\n 'result': None,\n 'comment': ''}\n if suc.check_name(name, 'a-zA-Z0-9._-'):\n ret['comment'] = 'Invalid characters in name.'\n ret['result'] = False\n return ret\n else:\n ret['result'] = True\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Using states instead of maps to deploy clouds
=============================================
.. versionadded:: 2014.1.0
Use this minion to spin up a cloud instance:
.. code-block:: yaml
my-ec2-instance:
cloud.profile:
my-ec2-config
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
# Import 3rd-party libs
from salt.ext import six
# Import Salt Libs
import salt.utils.cloud as suc
def __virtual__():
    '''
    Load this state module only when the cloud execution module is loaded.
    '''
    if 'cloud.profile' in __salt__:
        return True
    return False
def _check_name(name):
    '''
    Build a state return dict, failing when *name* contains characters
    outside the allowed set (letters, digits, ., _ and -).
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # suc.check_name returns truthy when a disallowed character is present.
    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['result'] = False
        ret['comment'] = 'Invalid characters in name.'
    else:
        ret['result'] = True
    return ret
def _valid(name, comment='', changes=None):
if not changes:
changes = {}
return {'name': name,
'result': True,
'changes': changes,
'comment': comment}
def _get_instance(names):
    '''
    Run show_instance for *names*, preserving __opts__['test'] across the call.
    '''
    # The loader clobbers __opts__['test'] with its default (False), so
    # remember the current value and restore it once the action returns.
    saved_test = __opts__.get('test', False)
    instance = __salt__['cloud.action'](fun='show_instance', names=names)
    __opts__['test'] = saved_test
    return instance
def present(name, cloud_provider, onlyif=None, unless=None, opts=None, **kwargs):
    '''
    Spin up a single instance on a cloud provider, using salt-cloud. This state
    does not take a profile argument; rather, it takes the arguments that would
    normally be configured as part of the state.

    Note that while this function does take any configuration argument that
    would normally be used to create an instance, it will not verify the state
    of any of those arguments on an existing instance. Stateful properties of
    an instance should be configured using their own individual state (i.e.,
    cloud.tagged, cloud.untagged, etc).

    name
        The name of the instance to create

    cloud_provider
        The name of the cloud provider to use

    onlyif
        Do run the state only if is unless succeed

    unless
        Do not run the state at least unless succeed

    opts
        Any extra opts that need to be used
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    retcode = __salt__['cmd.retcode']
    # onlyif/unless may be booleans or shell commands; shell commands are
    # evaluated by exit code.
    if onlyif is not None:
        if not isinstance(onlyif, six.string_types):
            if not onlyif:
                return _valid(name, comment='onlyif condition is false')
        elif isinstance(onlyif, six.string_types):
            if retcode(onlyif, python_shell=True) != 0:
                return _valid(name, comment='onlyif condition is false')
    if unless is not None:
        if not isinstance(unless, six.string_types):
            if unless:
                return _valid(name, comment='unless condition is true')
        elif isinstance(unless, six.string_types):
            if retcode(unless, python_shell=True) == 0:
                return _valid(name, comment='unless condition is true')
    # provider=None not cloud_provider because
    # need to ensure ALL providers don't have the instance
    if __salt__['cloud.has_instance'](name=name, provider=None):
        ret['result'] = True
        ret['comment'] = 'Already present instance {0}'.format(name)
        return ret
    if __opts__['test']:
        ret['comment'] = 'Instance {0} needs to be created'.format(name)
        return ret
    info = __salt__['cloud.create'](cloud_provider, name, opts=opts, **kwargs)
    if info and 'Error' not in info:
        ret['changes'] = info
        ret['result'] = True
        ret['comment'] = ('Created instance {0} using provider {1} '
                          'and the following options: {2}').format(
            name,
            cloud_provider,
            pprint.pformat(kwargs)
        )
    elif info and 'Error' in info:
        ret['result'] = False
        # was: referenced undefined name 'profile' here, raising a NameError
        # whenever instance creation failed; this state has no profile arg.
        ret['comment'] = ('Failed to create instance {0} '
                          'using provider {1}: {2}').format(
            name,
            cloud_provider,
            info['Error'],
        )
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          ' using provider {1},'
                          ' please check your configuration').format(name,
                                                                     cloud_provider)
    return ret
def absent(name, onlyif=None, unless=None):
    '''
    Ensure that no instances with the specified names exist.

    CAUTION: This is a destructive state, which will search all
    configured cloud providers for the named instance,
    and destroy it.

    name
        The name of the instance to destroy

    onlyif
        Do run the state only if is unless succeed

    unless
        Do not run the state at least unless succeed
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    retcode = __salt__['cmd.retcode']
    # onlyif/unless may be booleans or shell commands; shell commands are
    # evaluated by exit code.
    if onlyif is not None:
        if not isinstance(onlyif, six.string_types):
            if not onlyif:
                return _valid(name, comment='onlyif condition is false')
        elif isinstance(onlyif, six.string_types):
            if retcode(onlyif, python_shell=True) != 0:
                return _valid(name, comment='onlyif condition is false')
    if unless is not None:
        if not isinstance(unless, six.string_types):
            if unless:
                return _valid(name, comment='unless condition is true')
        elif isinstance(unless, six.string_types):
            if retcode(unless, python_shell=True) == 0:
                return _valid(name, comment='unless condition is true')
    if not __salt__['cloud.has_instance'](name=name, provider=None):
        ret['result'] = True
        ret['comment'] = 'Already absent instance {0}'.format(name)
        return ret
    if __opts__['test']:
        ret['comment'] = 'Instance {0} needs to be destroyed'.format(name)
        return ret
    info = __salt__['cloud.destroy'](name)
    if info and 'Error' not in info:
        ret['changes'] = info
        ret['result'] = True
        ret['comment'] = 'Destroyed instance {0}'.format(name)
    elif info and 'Error' in info:
        # was: `elif 'Error' in info:` -- raised TypeError when the driver
        # returned None/False instead of a dict.
        ret['result'] = False
        ret['comment'] = ('Failed to destroy instance {0}: {1}').format(
            name,
            info['Error'],
        )
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to destroy instance {0}'.format(name)
    return ret
def profile(name, profile, onlyif=None, unless=None, opts=None, **kwargs):
    '''
    Create a single instance on a cloud provider, using a salt-cloud profile.

    Note that while profiles used this function do take any configuration
    argument that would normally be used to create an instance using a profile,
    this state will not verify the state of any of those arguments on an
    existing instance. Stateful properties of an instance should be configured
    using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).

    name
        The name of the instance to create

    profile
        The name of the cloud profile to use

    onlyif
        Do run the state only if is unless succeed

    unless
        Do not run the state at least unless succeed

    kwargs
        Any profile override or addition

    opts
        Any extra opts that need to be used
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    retcode = __salt__['cmd.retcode']
    # onlyif/unless may be booleans or shell commands; shell commands are
    # evaluated by exit code.
    if onlyif is not None:
        if not isinstance(onlyif, six.string_types):
            if not onlyif:
                return _valid(name, comment='onlyif condition is false')
        elif isinstance(onlyif, six.string_types):
            if retcode(onlyif, python_shell=True) != 0:
                return _valid(name, comment='onlyif condition is false')
    if unless is not None:
        if not isinstance(unless, six.string_types):
            if unless:
                return _valid(name, comment='unless condition is true')
        elif isinstance(unless, six.string_types):
            if retcode(unless, python_shell=True) == 0:
                return _valid(name, comment='unless condition is true')
    instance = _get_instance([name])
    if instance and not any('Not Actioned' in key for key in instance):
        ret['result'] = True
        ret['comment'] = 'Already present instance {0}'.format(name)
        return ret
    if __opts__['test']:
        ret['comment'] = 'Instance {0} needs to be created'.format(name)
        return ret
    info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs, opts=opts)
    # get either {Error: ''} or {namestring: {Error: ''}}
    # which is what we can get from providers returns
    # NOTE(review): info.get() here assumes a dict even though the
    # isinstance check just below implies it may not be one -- confirm
    # the driver return types before relying on this.
    main_error = info.get('Error', '')
    name_error = ''
    if isinstance(info, dict):
        subinfo = info.get(name, {})
        if isinstance(subinfo, dict):
            name_error = subinfo.get('Error', None)
    error = main_error or name_error
    if info and not error:
        node_info = info.get(name)
        ret['result'] = True
        default_msg = 'Created instance {0} using profile {1}'.format(
            name, profile,)
        # some providers support changes
        if 'changes' in node_info:
            ret['changes'] = node_info['changes']
            ret['comment'] = node_info.get('comment', default_msg)
        else:
            ret['changes'] = info
            ret['comment'] = default_msg
    elif error:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          ' using profile {1}: {2}').format(
            name,
            profile,
            '{0}\n{1}\n'.format(main_error, name_error).strip(),
        )
    else:
        ret['result'] = False
        # NOTE(review): adjacent string literals concatenate without a
        # space here ('...{0}using profile...') -- message typo.
        ret['comment'] = ('Failed to create instance {0}'
                          'using profile {1}').format(
            name,
            profile,
        )
    return ret
def volume_present(name, provider=None, **kwargs):
    '''
    Ensure that the named block volume exists, creating it if needed.
    '''
    ret = _check_name(name)
    if not ret['result']:
        return ret
    existing = __salt__['cloud.volume_list'](provider=provider)
    if name in existing:
        ret['result'] = True
        ret['comment'] = 'Volume exists: {0}'.format(name)
        return ret
    if __opts__['test']:
        # Dry run: report the pending creation without performing it.
        ret['result'] = None
        ret['comment'] = 'Volume {0} will be created.'.format(name)
        return ret
    created = __salt__['cloud.volume_create'](
        names=name,
        provider=provider,
        **kwargs
    )
    if created:
        ret['result'] = True
        ret['comment'] = 'Volume {0} was created'.format(name)
        ret['changes'] = {'old': None, 'new': created}
    else:
        ret['result'] = False
        ret['comment'] = 'Volume {0} failed to create.'.format(name)
    return ret
def volume_absent(name, provider=None, **kwargs):
    '''
    Ensure that the named block volume does not exist, deleting it if needed.
    '''
    # Bail out early on an invalid volume name.
    ret = _check_name(name)
    if not ret['result']:
        return ret
    volumes = __salt__['cloud.volume_list'](provider=provider)
    if name not in volumes:
        ret['comment'] = 'Volume is absent.'
        ret['result'] = True
        return ret
    elif __opts__['test']:
        # Dry run: report the pending deletion without performing it.
        ret['comment'] = 'Volume {0} will be deleted.'.format(name)
        ret['result'] = None
        return ret
    response = __salt__['cloud.volume_delete'](
        names=name,
        provider=provider,
        **kwargs
    )
    if response:
        ret['result'] = True
        ret['comment'] = 'Volume {0} was deleted'.format(name)
        # Keep the pre-deletion volume data in changes for auditability.
        ret['changes'] = {'old': volumes[name], 'new': response}
    else:
        ret['result'] = False
        ret['comment'] = 'Volume {0} failed to delete.'.format(name)
    return ret
def volume_detached(name, server_name=None, provider=None, **kwargs):
    '''
    Ensure a block volume is detached.

    Returns True if server or Volume do not exist.
    '''
    ret = _check_name(name)
    if not ret['result']:
        return ret
    if server_name is not None:
        ret = _check_name(server_name)
        if not ret['result']:
            return ret
    volumes = __salt__['cloud.volume_list'](provider=provider)
    if server_name:
        # NOTE(review): this queries show_instance with the *volume* name,
        # not server_name -- looks like a bug; confirm against the driver.
        instance = __salt__['cloud.action'](fun='show_instance', names=[name])
    else:
        instance = None
    if name in volumes and not volumes[name]['attachments']:
        # Already detached: report and succeed without changes.
        volume = volumes[name]
        ret['comment'] = (
            'Volume {name} is not currently attached to anything.'
        ).format(**volumes[name])
        ret['result'] = True
        return ret
    elif name not in volumes:
        ret['comment'] = 'Volume {0} does not exist'.format(name)
        ret['result'] = True
        return ret
    elif not instance and server_name is not None:
        ret['comment'] = 'Server {0} does not exist'.format(server_name)
        ret['result'] = True
        return ret
    elif __opts__['test']:
        # NOTE(review): message reads 'will be will be detached.' -- typo.
        ret['comment'] = 'Volume {0} will be will be detached.'.format(
            name
        )
        ret['result'] = None
        return ret
    response = __salt__['cloud.volume_detach'](
        provider=provider,
        names=name,
        server_name=server_name,
        **kwargs
    )
    if response:
        ret['result'] = True
        # NOTE(review): 'was created' looks like a copy/paste from
        # volume_present; should read 'was detached'.
        ret['comment'] = 'Volume {0} was created'.format(name)
        ret['changes'] = {'old': volumes[name], 'new': response}
    else:
        ret['result'] = False
        ret['comment'] = 'Volume {0} failed to detach.'.format(name)
    return ret
|
saltstack/salt
|
salt/states/schedule.py
|
present
|
python
|
def present(name,
            **kwargs):
    '''
    Ensure a job is present in the schedule

    name
        The unique name that is given to the scheduled job.

    seconds
        The scheduled job will be executed after the specified
        number of seconds have passed.

    minutes
        The scheduled job will be executed after the specified
        number of minutes have passed.

    hours
        The scheduled job will be executed after the specified
        number of hours have passed.

    days
        The scheduled job will be executed after the specified
        number of days have passed.

    when
        This will schedule the job at the specified time(s).
        The when parameter must be a single value or a dictionary
        with the date string(s) using the dateutil format.
        Requires python-dateutil.

    cron
        This will schedule the job at the specified time(s)
        using the crontab format.
        Requires python-croniter.

    run_on_start
        Whether the job will run when Salt minion start.  Value should be
        a boolean.

    function
        The function that should be executed by the scheduled job.

    job_args
        The arguments that will be used by the scheduled job.

    job_kwargs
        The keyword arguments that will be used by the scheduled job.

    maxrunning
        Ensure that there are no more than N copies of a particular job running.

    jid_include
        Include the job into the job cache.

    splay
        The amount of time in seconds to splay a scheduled job.
        Can be specified as a single value in seconds or as a dictionary
        range with 'start' and 'end' values.

    range
        This will schedule the command within the range specified.
        The range parameter must be a dictionary with the date strings
        using the dateutil format. Requires python-dateutil.

    once
        This will schedule a job to run once on the specified date.

    once_fmt
        The default date format is ISO 8601 but can be overridden by
        also specifying the ``once_fmt`` option.

    enabled
        Whether the job should be enabled or disabled.  Value should be a boolean.

    return_job
        Whether to return information to the Salt master upon job completion.

    metadata
        Using the metadata parameter special values can be associated with
        a scheduled job.  These values are not used in the execution of the job,
        but can be used to search for specific jobs later if combined with the
        return_job parameter.  The metadata parameter must be specified as a
        dictionary, othewise it will be ignored.

    returner
        The returner to use to return the results of the scheduled job.

    return_config
        The alternative configuration to use for returner configuration options.

    return_kwargs
        Any individual returner configuration items to override.  Should be passed
        as a dictionary.

    persist
        Whether the job should persist between minion restarts, defaults to True.

    skip_during_range
        This will ensure that the scheduled command does not run within the
        range specified.  The range parameter must be a dictionary with the
        date strings using the dateutil format. Requires python-dateutil.

    run_after_skip_range
        Whether the job should run immediately after the skip_during_range time
        period ends.
    '''
    # Comments are accumulated in a list and joined into one string at the end.
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}
    current_schedule = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name in current_schedule:
        # Job already exists: build the desired item and compare it against
        # the current one so we only modify when something actually differs.
        new_item = __salt__['schedule.build_schedule_item'](name, **kwargs)
        # See if the new_item is valid
        if isinstance(new_item, dict):
            if 'result' in new_item and not new_item['result']:
                ret['result'] = new_item['result']
                ret['comment'] = new_item['comment']
                return ret
        # The schedule.list gives us an item that is guaranteed to have an
        # 'enabled' argument. Before comparing, add 'enabled' if it's not
        # available (assume True, like schedule.list does)
        if 'enabled' not in new_item:
            new_item['enabled'] = True
        if new_item == current_schedule[name]:
            ret['comment'].append('Job {0} in correct state'.format(name))
        else:
            if 'test' in __opts__ and __opts__['test']:
                # Dry run: schedule.modify reports what it would change.
                kwargs['test'] = True
                result = __salt__['schedule.modify'](name, **kwargs)
                ret['comment'].append(result['comment'])
                ret['changes'] = result['changes']
            else:
                result = __salt__['schedule.modify'](name, **kwargs)
                if not result['result']:
                    ret['result'] = result['result']
                    ret['comment'] = result['comment']
                    return ret
                else:
                    ret['comment'].append('Modifying job {0} in schedule'.format(name))
                    ret['changes'] = result['changes']
    else:
        # Job does not exist yet: add it (or report the pending add in test mode).
        if 'test' in __opts__ and __opts__['test']:
            kwargs['test'] = True
            result = __salt__['schedule.add'](name, **kwargs)
            ret['comment'].append(result['comment'])
        else:
            result = __salt__['schedule.add'](name, **kwargs)
            if not result['result']:
                ret['result'] = result['result']
                ret['comment'] = result['comment']
                return ret
            else:
                ret['comment'].append('Adding new job {0} to schedule'.format(name))
    ret['comment'] = '\n'.join(ret['comment'])
    return ret
|
Ensure a job is present in the schedule
name
The unique name that is given to the scheduled job.
seconds
The scheduled job will be executed after the specified
number of seconds have passed.
minutes
The scheduled job will be executed after the specified
number of minutes have passed.
hours
The scheduled job will be executed after the specified
number of hours have passed.
days
The scheduled job will be executed after the specified
number of days have passed.
when
This will schedule the job at the specified time(s).
The when parameter must be a single value or a dictionary
with the date string(s) using the dateutil format.
Requires python-dateutil.
cron
This will schedule the job at the specified time(s)
using the crontab format.
Requires python-croniter.
run_on_start
Whether the job will run when Salt minion start. Value should be
a boolean.
function
The function that should be executed by the scheduled job.
job_args
The arguments that will be used by the scheduled job.
job_kwargs
The keyword arguments that will be used by the scheduled job.
maxrunning
Ensure that there are no more than N copies of a particular job running.
jid_include
Include the job into the job cache.
splay
The amount of time in seconds to splay a scheduled job.
Can be specified as a single value in seconds or as a dictionary
range with 'start' and 'end' values.
range
This will schedule the command within the range specified.
The range parameter must be a dictionary with the date strings
using the dateutil format. Requires python-dateutil.
once
This will schedule a job to run once on the specified date.
once_fmt
The default date format is ISO 8601 but can be overridden by
also specifying the ``once_fmt`` option.
enabled
Whether the job should be enabled or disabled. Value should be a boolean.
return_job
Whether to return information to the Salt master upon job completion.
metadata
Using the metadata parameter special values can be associated with
a scheduled job. These values are not used in the execution of the job,
but can be used to search for specific jobs later if combined with the
return_job parameter. The metadata parameter must be specified as a
dictionary, othewise it will be ignored.
returner
The returner to use to return the results of the scheduled job.
return_config
The alternative configuration to use for returner configuration options.
return_kwargs
Any individual returner configuration items to override. Should be passed
as a dictionary.
persist
Whether the job should persist between minion restarts, defaults to True.
skip_during_range
This will ensure that the scheduled command does not run within the
range specified. The range parameter must be a dictionary with the
date strings using the dateutil format. Requires python-dateutil.
run_after_skip_range
Whether the job should run immediately after the skip_during_range time
period ends.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/schedule.py#L103-L266
| null |
# -*- coding: utf-8 -*-
'''
Management of the Salt scheduler
==============================================
.. code-block:: yaml
job3:
schedule.present:
- function: test.ping
- seconds: 3600
- splay: 10
This will schedule the command: test.ping every 3600 seconds
(every hour) splaying the time between 0 and 10 seconds
job2:
schedule.present:
- function: test.ping
- seconds: 15
- splay:
start: 10
end: 20
This will schedule the command: test.ping every 15 seconds
splaying the time between 10 and 20 seconds
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Requires that
python-dateutil is installed on the minion.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- cron: '*/5 * * * *'
Scheduled jobs can also be specified using the format used by cron. This will
schedule the command: state.sls httpd test=True to run every 5 minutes. Requires
that python-croniter is installed on the minion.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
- returner: xmpp
- return_config: xmpp_state_run
- return_kwargs:
recipient: user@domain.com
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Using the xmpp returner
to return the results of the scheduled job, with the alternative configuration
options found in the xmpp_state_run section.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- hours: 1
- skip_during_range:
- start: 2pm
- end: 3pm
- run_after_skip_range: True
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Requires that
python-dateutil is installed on the minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
def absent(name, **kwargs):
    '''
    Ensure a job is absent from the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}
    jobs = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name not in jobs:
        ret['comment'].append('Job {0} not present in schedule'.format(name))
    elif 'test' in __opts__ and __opts__['test']:
        # Dry run: let schedule.delete describe what it would do.
        kwargs['test'] = True
        outcome = __salt__['schedule.delete'](name, **kwargs)
        ret['comment'].append(outcome['comment'])
    else:
        outcome = __salt__['schedule.delete'](name, **kwargs)
        if not outcome['result']:
            ret['result'] = outcome['result']
            ret['comment'] = outcome['comment']
            return ret
        ret['comment'].append('Removed job {0} from schedule'.format(name))
    ret['comment'] = '\n'.join(ret['comment'])
    return ret
def enabled(name, **kwargs):
    '''
    Ensure a job is enabled in the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}
    jobs = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name not in jobs:
        ret['comment'].append('Job {0} not present in schedule'.format(name))
    elif 'test' in __opts__ and __opts__['test']:
        # Dry run: let schedule.enable_job describe what it would do.
        kwargs['test'] = True
        outcome = __salt__['schedule.enable_job'](name, **kwargs)
        ret['comment'].append(outcome['comment'])
    else:
        outcome = __salt__['schedule.enable_job'](name, **kwargs)
        if not outcome['result']:
            ret['result'] = outcome['result']
            ret['comment'] = outcome['comment']
            return ret
        ret['comment'].append('Enabled job {0} from schedule'.format(name))
    ret['comment'] = '\n'.join(ret['comment'])
    return ret
def disabled(name, **kwargs):
    '''
    Ensure a job is disabled in the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}
    jobs = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name not in jobs:
        ret['comment'].append('Job {0} not present in schedule'.format(name))
    elif 'test' in __opts__ and __opts__['test']:
        # Dry run: let schedule.disable_job describe what it would do.
        kwargs['test'] = True
        outcome = __salt__['schedule.disable_job'](name, **kwargs)
        ret['comment'].append(outcome['comment'])
    else:
        outcome = __salt__['schedule.disable_job'](name, **kwargs)
        if not outcome['result']:
            ret['result'] = outcome['result']
            ret['comment'] = outcome['comment']
            return ret
        ret['comment'].append('Disabled job {0} from schedule'.format(name))
    ret['comment'] = '\n'.join(ret['comment'])
    return ret
|
saltstack/salt
|
salt/states/schedule.py
|
disabled
|
python
|
def disabled(name, **kwargs):
    '''
    Ensure a job is disabled in the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    # Comments accumulate in a list and are joined into one string at the end.
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}
    current_schedule = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name in current_schedule:
        if 'test' in __opts__ and __opts__['test']:
            # Dry run: let schedule.disable_job describe what it would do.
            kwargs['test'] = True
            result = __salt__['schedule.disable_job'](name, **kwargs)
            ret['comment'].append(result['comment'])
        else:
            result = __salt__['schedule.disable_job'](name, **kwargs)
            if not result['result']:
                ret['result'] = result['result']
                ret['comment'] = result['comment']
                return ret
            else:
                ret['comment'].append('Disabled job {0} from schedule'.format(name))
    else:
        ret['comment'].append('Job {0} not present in schedule'.format(name))
    ret['comment'] = '\n'.join(ret['comment'])
    return ret
|
Ensure a job is disabled in the schedule
name
The unique name that is given to the scheduled job.
persist
Whether the job should persist between minion restarts, defaults to True.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/schedule.py#L344-L379
| null |
# -*- coding: utf-8 -*-
'''
Management of the Salt scheduler
==============================================
.. code-block:: yaml
job3:
schedule.present:
- function: test.ping
- seconds: 3600
- splay: 10
This will schedule the command: test.ping every 3600 seconds
(every hour) splaying the time between 0 and 10 seconds
job2:
schedule.present:
- function: test.ping
- seconds: 15
- splay:
start: 10
end: 20
This will schedule the command: test.ping every 15 seconds
splaying the time between 10 and 20 seconds
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Requires that
python-dateutil is installed on the minion.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- cron: '*/5 * * * *'
Scheduled jobs can also be specified using the format used by cron. This will
schedule the command: state.sls httpd test=True to run every 5 minutes. Requires
that python-croniter is installed on the minion.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
- returner: xmpp
- return_config: xmpp_state_run
- return_kwargs:
recipient: user@domain.com
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Using the xmpp returner
to return the results of the scheduled job, with the alternative configuration
options found in the xmpp_state_run section.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- hours: 1
- skip_during_range:
- start: 2pm
- end: 3pm
- run_after_skip_range: True
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Requires that
python-dateutil is installed on the minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
def present(name,
            **kwargs):
    '''
    Ensure a job is present in the schedule

    name
        The unique name that is given to the scheduled job.

    seconds
        The scheduled job will be executed after the specified
        number of seconds have passed.

    minutes
        The scheduled job will be executed after the specified
        number of minutes have passed.

    hours
        The scheduled job will be executed after the specified
        number of hours have passed.

    days
        The scheduled job will be executed after the specified
        number of days have passed.

    when
        This will schedule the job at the specified time(s).
        The when parameter must be a single value or a dictionary
        with the date string(s) using the dateutil format.
        Requires python-dateutil.

    cron
        This will schedule the job at the specified time(s)
        using the crontab format.
        Requires python-croniter.

    run_on_start
        Whether the job will run when the Salt minion starts. Value should be
        a boolean.

    function
        The function that should be executed by the scheduled job.

    job_args
        The arguments that will be used by the scheduled job.

    job_kwargs
        The keyword arguments that will be used by the scheduled job.

    maxrunning
        Ensure that there are no more than N copies of a particular job running.

    jid_include
        Include the job into the job cache.

    splay
        The amount of time in seconds to splay a scheduled job.
        Can be specified as a single value in seconds or as a dictionary
        range with 'start' and 'end' values.

    range
        This will schedule the command within the range specified.
        The range parameter must be a dictionary with the date strings
        using the dateutil format. Requires python-dateutil.

    once
        This will schedule a job to run once on the specified date.

    once_fmt
        The default date format is ISO 8601 but can be overridden by
        also specifying the ``once_fmt`` option.

    enabled
        Whether the job should be enabled or disabled. Value should be a boolean.

    return_job
        Whether to return information to the Salt master upon job completion.

    metadata
        Using the metadata parameter special values can be associated with
        a scheduled job. These values are not used in the execution of the job,
        but can be used to search for specific jobs later if combined with the
        return_job parameter. The metadata parameter must be specified as a
        dictionary, otherwise it will be ignored.

    returner
        The returner to use to return the results of the scheduled job.

    return_config
        The alternative configuration to use for returner configuration options.

    return_kwargs
        Any individual returner configuration items to override. Should be passed
        as a dictionary.

    persist
        Whether the job should persist between minion restarts, defaults to True.

    skip_during_range
        This will ensure that the scheduled command does not run within the
        range specified. The range parameter must be a dictionary with the
        date strings using the dateutil format. Requires python-dateutil.

    run_after_skip_range
        Whether the job should run immediately after the skip_during_range time
        period ends.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    # List every configured job, including disabled ones, so an existing job
    # is detected regardless of its enabled state.
    current_schedule = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name in current_schedule:
        # Build the prospective schedule item from the requested kwargs so it
        # can be compared against what is currently configured.
        new_item = __salt__['schedule.build_schedule_item'](name, **kwargs)
        # See if the new_item is valid; build_schedule_item signals failure by
        # returning a dict with result=False and an explanatory comment.
        if isinstance(new_item, dict):
            if 'result' in new_item and not new_item['result']:
                ret['result'] = new_item['result']
                ret['comment'] = new_item['comment']
                return ret

        # The schedule.list gives us an item that is guaranteed to have an
        # 'enabled' argument. Before comparing, add 'enabled' if it's not
        # available (assume True, like schedule.list does)
        if 'enabled' not in new_item:
            new_item['enabled'] = True

        if new_item == current_schedule[name]:
            # No difference between the requested and the configured job.
            ret['comment'].append('Job {0} in correct state'.format(name))
        else:
            if 'test' in __opts__ and __opts__['test']:
                # Test mode: forward test=True to schedule.modify, which
                # presumably performs a dry run — confirm against the
                # schedule execution module.
                kwargs['test'] = True
                result = __salt__['schedule.modify'](name, **kwargs)
                ret['comment'].append(result['comment'])
                ret['changes'] = result['changes']
            else:
                result = __salt__['schedule.modify'](name, **kwargs)
                if not result['result']:
                    # On failure, replace the comment list with the error
                    # string and return immediately.
                    ret['result'] = result['result']
                    ret['comment'] = result['comment']
                    return ret
                else:
                    ret['comment'].append('Modifying job {0} in schedule'.format(name))
                    ret['changes'] = result['changes']
    else:
        if 'test' in __opts__ and __opts__['test']:
            # Test mode: forward test=True to schedule.add (dry run).
            kwargs['test'] = True
            result = __salt__['schedule.add'](name, **kwargs)
            ret['comment'].append(result['comment'])
        else:
            result = __salt__['schedule.add'](name, **kwargs)
            if not result['result']:
                ret['result'] = result['result']
                ret['comment'] = result['comment']
                return ret
            else:
                ret['comment'].append('Adding new job {0} to schedule'.format(name))

    # Collapse the accumulated comment list into a single newline-separated
    # string for state output.
    ret['comment'] = '\n'.join(ret['comment'])
    return ret
def absent(name, **kwargs):
    '''
    Ensure a job is absent from the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    # Inspect the full schedule (disabled jobs included) to see whether the
    # job exists at all.
    schedule_items = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name not in schedule_items:
        # Nothing to delete; the desired state is already met.
        ret['comment'].append('Job {0} not present in schedule'.format(name))
        ret['comment'] = '\n'.join(ret['comment'])
        return ret

    dry_run = 'test' in __opts__ and __opts__['test']
    if dry_run:
        kwargs['test'] = True
    outcome = __salt__['schedule.delete'](name, **kwargs)
    if dry_run:
        # In test mode just relay what the schedule module reports.
        ret['comment'].append(outcome['comment'])
    elif not outcome['result']:
        # Deletion failed: surface the error string and bail out early.
        ret['result'] = outcome['result']
        ret['comment'] = outcome['comment']
        return ret
    else:
        ret['comment'].append('Removed job {0} from schedule'.format(name))

    ret['comment'] = '\n'.join(ret['comment'])
    return ret
def enabled(name, **kwargs):
    '''
    Ensure a job is enabled in the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    # Look the job up in the complete schedule, including disabled entries —
    # a disabled job must still be found so it can be enabled.
    schedule_items = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name not in schedule_items:
        ret['comment'].append('Job {0} not present in schedule'.format(name))
        ret['comment'] = '\n'.join(ret['comment'])
        return ret

    dry_run = 'test' in __opts__ and __opts__['test']
    if dry_run:
        kwargs['test'] = True
    outcome = __salt__['schedule.enable_job'](name, **kwargs)
    if dry_run:
        # In test mode just relay what the schedule module reports.
        ret['comment'].append(outcome['comment'])
    elif not outcome['result']:
        # Enabling failed: surface the error string and bail out early.
        ret['result'] = outcome['result']
        ret['comment'] = outcome['comment']
        return ret
    else:
        ret['comment'].append('Enabled job {0} from schedule'.format(name))

    ret['comment'] = '\n'.join(ret['comment'])
    return ret
|
saltstack/salt
|
salt/modules/arista_pyeapi.py
|
_prepare_connection
|
python
|
def _prepare_connection(**kwargs):
'''
Prepare the connection with the remote network device, and clean up the key
value pairs, removing the args used for the connection init.
'''
pyeapi_kwargs = __salt__['config.get']('pyeapi', {})
pyeapi_kwargs.update(kwargs) # merge the CLI args with the opts/pillar
init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](pyeapi_kwargs, PYEAPI_INIT_KWARGS)
if 'transport' not in init_kwargs:
init_kwargs['transport'] = 'https'
conn = pyeapi.client.connect(**init_kwargs)
node = pyeapi.client.Node(conn, enablepwd=init_kwargs.get('enablepwd'))
return node, fun_kwargs
|
Prepare the connection with the remote network device, and clean up the key
value pairs, removing the args used for the connection init.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/arista_pyeapi.py#L158-L170
| null |
# -*- coding: utf-8 -*-
'''
Arista pyeapi
=============
.. versionadded:: 2019.2.0
Execution module to interface the connection with Arista switches, connecting to
the remote network device using the
`pyeapi <http://pyeapi.readthedocs.io/en/master/index.html>`_ library. It is
flexible enough to execute the commands both when running under an Arista Proxy
Minion, as well as running under a Regular Minion by specifying the connection
arguments, i.e., ``device_type``, ``host``, ``username``, ``password`` etc.
:codeauthor: Mircea Ulinic <ping@mirceaulinic.net>
:maturity: new
:depends: pyeapi
:platform: unix
.. note::
To understand how to correctly enable the eAPI on your switch, please check
https://eos.arista.com/arista-eapi-101/.
Dependencies
------------
The ``pyeapi`` Execution module requires the Python Client for eAPI (pyeapi) to
be installed: ``pip install pyeapi``.
Usage
-----
This module can equally be used via the :mod:`pyeapi <salt.proxy.arista_pyeapi>`
Proxy module or directly from an arbitrary (Proxy) Minion that is running on a
machine having access to the network device API, and the ``pyeapi`` library is
installed.
When running outside of the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`
(i.e., from another Proxy Minion type, or regular Minion), the pyeapi connection
arguments can be either specified from the CLI when executing the command, or
in a configuration block under the ``pyeapi`` key in the configuration opts
(i.e., (Proxy) Minion configuration file), or Pillar. The module supports these
simultaneously. These fields are the exact same supported by the ``pyeapi``
Proxy Module:
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
host: ``localhost``
The IP address or DNS host name of the connection device.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
password
The password to pass to the device to authenticate the eAPI connection.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
enablepwd
The enable mode password if required by the destination node.
Example (when not running in a ``pyeapi`` Proxy Minion):
.. code-block:: yaml
pyeapi:
username: test
password: test
In case the ``username`` and ``password`` are the same on any device you are
targeting, the block above (besides other parameters specific to your
environment you might need) should suffice to be able to execute commands from
outside a ``pyeapi`` Proxy, e.g.:
.. code-block:: bash
salt '*' pyeapi.send_commands 'show version' 'show interfaces'
salt '*' pyeapi.config 'ntp server 1.2.3.4'
.. note::
Remember that the above applies only when not running in a ``pyeapi`` Proxy
Minion. If you want to use the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`,
please follow the documentation notes for a proper setup.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import difflib
import logging
# Import Salt libs
from salt.ext import six
from salt.exceptions import CommandExecutionError
try:
from salt.utils.args import clean_kwargs
except ImportError:
from salt.utils import clean_kwargs
# Import third party libs
try:
import pyeapi
HAS_PYEAPI = True
except ImportError:
HAS_PYEAPI = False
# -----------------------------------------------------------------------------
# execution module properties
# -----------------------------------------------------------------------------
__proxyenabled__ = ['*']
# Any Proxy Minion should be able to execute these
__virtualname__ = 'pyeapi'
# The Execution Module will be identified as ``pyeapi``
# -----------------------------------------------------------------------------
# globals
# -----------------------------------------------------------------------------
log = logging.getLogger(__name__)
PYEAPI_INIT_KWARGS = [
'transport',
'host',
'username',
'password',
'enablepwd',
'port',
'timeout',
'return_node'
]
# -----------------------------------------------------------------------------
# propery functions
# -----------------------------------------------------------------------------
def __virtual__():
    '''
    Only load this execution module when the pyeapi library is importable.
    '''
    if HAS_PYEAPI:
        return __virtualname__
    return False, 'The pyeapi execution module requires pyeapi library to be installed: ``pip install pyeapi``'
# -----------------------------------------------------------------------------
# helper functions
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# callable functions
# -----------------------------------------------------------------------------
def get_connection(**kwargs):
    '''
    Return the connection object to the pyeapi Node.

    .. warning::

        This function returns an unserializable object, hence it is not meant
        to be used on the CLI. This should mainly be used when invoked from
        other modules for the low level connection with the network device.

    kwargs
        Key-value dictionary with the authentication details.

    USAGE Example:

    .. code-block:: python

        conn = __salt__['pyeapi.get_connection'](host='router1.example.com',
                                                 username='example',
                                                 password='example')
        show_ver = conn.run_commands(['show version', 'show interfaces'])
    '''
    conn_kwargs = clean_kwargs(**kwargs)
    # When running under a pyeapi Proxy Minion, reuse the connection the
    # proxy module already holds instead of opening a new one.
    if 'pyeapi.conn' in __proxy__:
        return __proxy__['pyeapi.conn']()
    node, _ = _prepare_connection(**conn_kwargs)
    return node
def call(method, *args, **kwargs):
    '''
    Invoke an arbitrary pyeapi method.

    method
        The name of the pyeapi method to invoke.

    args
        A list of arguments to send to the method invoked.

    kwargs
        Key-value dictionary to send to the method invoked. The eAPI
        connection arguments (``transport``, ``host``, ``username``,
        ``password``, ``port``, ``enablepwd``) may also be passed here;
        none of them needs to be specified when running in a
        :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.call run_commands "['show version']"
    '''
    call_kwargs = clean_kwargs(**kwargs)
    # Delegate to the proxy module when running under a pyeapi Proxy Minion,
    # which maintains a persistent connection to the device.
    if 'pyeapi.call' in __proxy__:
        return __proxy__['pyeapi.call'](method, *args, **call_kwargs)
    # Otherwise open an ad-hoc connection; _prepare_connection strips the
    # connection-init arguments and returns the remaining function kwargs.
    node, fun_kwargs = _prepare_connection(**call_kwargs)
    return getattr(node, method)(*args, **fun_kwargs)
def run_commands(*commands, **kwargs):
    '''
    Sends the commands over the transport to the device.

    This function sends the commands to the device using the nodes
    transport. This is a lower layer function that shouldn't normally
    need to be used, preferring instead to use ``config()`` or ``enable()``.

    encoding: ``json``
        The requested encoding of the command output; with ``text`` the raw
        output strings are returned instead of structured data.

    send_enable: ``True``
        Whether to prefix the commands with the enable sequence.

    The eAPI connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) may also be passed as keyword
    arguments; none of them needs to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.run_commands 'show version'
        salt '*' pyeapi.run_commands 'show version' encoding=text
        salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
    '''
    fmt = kwargs.pop('encoding', 'json')
    with_enable = kwargs.pop('send_enable', True)
    response = call('run_commands',
                    commands,
                    encoding=fmt,
                    send_enable=with_enable,
                    **kwargs)
    if fmt != 'text':
        return response
    # With text encoding each reply is wrapped in a dict; unwrap to the raw
    # output string of every command.
    return [entry['output'] for entry in response]
def config(commands=None,
           config_file=None,
           template_engine='jinja',
           context=None,
           defaults=None,
           saltenv='base',
           **kwargs):
    '''
    Configures the node with the specified commands.

    This method is used to send configuration commands to the node. It
    will take either a string or a list and prepend the necessary commands
    to put the session into config mode.

    Returns the diff after the configuration commands are loaded.

    config_file
        The source file with the configuration commands to be sent to the
        device. The file can also be a template that can be rendered using
        the template engine of choice. This can be specified using the
        absolute path to the file, or using one of the following URL schemes:

        - ``salt://``, to fetch the file from the Salt fileserver.
        - ``http://`` or ``https://``
        - ``ftp://``
        - ``s3://``
        - ``swift://``

    commands
        The commands to send to the node in config mode. If the commands
        argument is a string it will be cast to a list. Ignored when
        ``config_file`` is specified.

    template_engine: ``jinja``
        The template engine to use when rendering the source file. Default:
        ``jinja``. To simply fetch the file without attempting to render, set
        this argument to ``None``.

    context
        Variables to add to the template context.

    defaults
        Default values of the ``context`` dict.

    saltenv: ``base``
        The Salt fileserver environment used to resolve ``salt://`` URLs.

    The eAPI connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) may also be passed as keyword
    arguments; none of them needs to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.config commands="['ntp server 1.2.3.4', 'ntp server 5.6.7.8']"
        salt '*' pyeapi.config config_file=salt://config.txt
        salt '*' pyeapi.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
    '''
    # Fail early with a clear error instead of an UnboundLocalError further
    # down when neither input source is provided.
    if not config_file and not commands:
        raise CommandExecutionError('Either ``commands`` or ``config_file`` must be specified')
    # Snapshot the configuration before loading anything, so the diff below
    # reflects only what this call changed.
    initial_config = get_config(as_string=True, **kwargs)
    if config_file:
        file_str = __salt__['cp.get_file_str'](config_file, saltenv=saltenv)
        if file_str is False:
            raise CommandExecutionError('Source file {} not found'.format(config_file))
        log.debug('Fetched from %s', config_file)
        log.debug(file_str)
    else:
        if isinstance(commands, (six.string_types, six.text_type)):
            commands = [commands]
        # unify all the commands in a single file, to render them in a go
        file_str = '\n'.join(commands)
    if template_engine:
        file_str = __salt__['file.apply_template_on_contents'](file_str,
                                                               template_engine,
                                                               context,
                                                               defaults,
                                                               saltenv)
        log.debug('Rendered:')
        log.debug(file_str)
    # whatever the source of the commands would be, split them line by line,
    # removing empty lines
    commands = [line for line in file_str.splitlines() if line.strip()]
    # Push the commands to the device. The return value of ``config`` is not
    # used; the diff computed below describes the outcome.
    call('config', commands, **kwargs)
    current_config = get_config(as_string=True, **kwargs)
    # NOTE(review): the first 4 lines of each config are skipped before
    # diffing — presumably a volatile banner/timestamp header; confirm.
    diff = difflib.unified_diff(initial_config.splitlines(1)[4:], current_config.splitlines(1)[4:])
    return ''.join([x.replace('\r', '') for x in diff])
def get_config(config='running-config',
               params=None,
               as_string=False,
               **kwargs):
    '''
    Retrieves the config from the device.

    This method will retrieve the config from the node as either a string
    or a list object. The config to retrieve can be specified as either
    the startup-config or the running-config.

    config: ``running-config``
        Specifies to return either the nodes ``startup-config``
        or ``running-config``. The default value is the ``running-config``.

    params
        A string of keywords to append to the command for retrieving the config.

    as_string: ``False``
        Flag that determines the response. If ``True``, then the configuration
        is returned as a raw string. If ``False``, then the configuration is
        returned as a list. The default value is ``False``.

    The eAPI connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) may also be passed as keyword
    arguments; none of them needs to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.get_config
        salt '*' pyeapi.get_config params='section snmp-server'
        salt '*' pyeapi.get_config config='startup-config'
    '''
    # Thin wrapper around pyeapi's ``get_config`` Node method; assemble the
    # arguments explicitly before delegating.
    fetch_args = {
        'config': config,
        'params': params,
        'as_string': as_string,
    }
    fetch_args.update(kwargs)
    return call('get_config', **fetch_args)
def section(regex, config='running-config', **kwargs):
    '''
    Return a section of the config.

    regex
        A valid regular expression used to select sections of configuration to
        return.

    config: ``running-config``
        The configuration to return. Valid values for config are
        ``running-config`` or ``startup-config``. The default value is
        ``running-config``.

    The following arguments configure the eAPI connection; none of them needs
    to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion:

    transport: ``https``
        Specifies the type of connection transport to use. Valid values for the
        connection are ``socket``, ``http_local``, ``http``, and ``https``.

    host: ``localhost``
        The IP address or DNS host name of the connection device.

    username: ``admin``
        The username to pass to the device to authenticate the eAPI connection.

    password
        The password to pass to the device to authenticate the eAPI connection.

    port
        The TCP port of the endpoint for the eAPI connection. If this keyword is
        not specified, the default value is automatically determined by the
        transport type (``80`` for ``http``, or ``443`` for ``https``).

    enablepwd
        The enable mode password if required by the destination node.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.section '^interface' config='running-config'
    '''
    # Thin wrapper: the actual section filtering is performed by pyeapi's
    # ``section`` method on the Node object.
    return call('section', regex, config=config, **kwargs)
|
saltstack/salt
|
salt/modules/arista_pyeapi.py
|
get_connection
|
python
|
def get_connection(**kwargs):
'''
Return the connection object to the pyeapi Node.
.. warning::
This function returns an unserializable object, hence it is not meant
to be used on the CLI. This should mainly be used when invoked from
other modules for the low level connection with the network device.
kwargs
Key-value dictionary with the authentication details.
USAGE Example:
.. code-block:: python
conn = __salt__['pyeapi.get_connection'](host='router1.example.com',
username='example',
password='example')
show_ver = conn.run_commands(['show version', 'show interfaces'])
'''
kwargs = clean_kwargs(**kwargs)
if 'pyeapi.conn' in __proxy__:
return __proxy__['pyeapi.conn']()
conn, kwargs = _prepare_connection(**kwargs)
return conn
|
Return the connection object to the pyeapi Node.
.. warning::
This function returns an unserializable object, hence it is not meant
to be used on the CLI. This should mainly be used when invoked from
other modules for the low level connection with the network device.
kwargs
Key-value dictionary with the authentication details.
USAGE Example:
.. code-block:: python
conn = __salt__['pyeapi.get_connection'](host='router1.example.com',
username='example',
password='example')
show_ver = conn.run_commands(['show version', 'show interfaces'])
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/arista_pyeapi.py#L177-L203
|
[
"def clean_kwargs(**kwargs):\n '''\n Return a dict without any of the __pub* keys (or any other keys starting\n with a dunder) from the kwargs dict passed into the execution module\n functions. These keys are useful for tracking what was used to invoke\n the function call, but they may not be desirable to have if passing the\n kwargs forward wholesale.\n\n Usage example:\n\n .. code-block:: python\n\n kwargs = __utils__['args.clean_kwargs'](**kwargs)\n '''\n ret = {}\n for key, val in six.iteritems(kwargs):\n if not key.startswith('__'):\n ret[key] = val\n return ret\n",
"def _prepare_connection(**kwargs):\n '''\n Prepare the connection with the remote network device, and clean up the key\n value pairs, removing the args used for the connection init.\n '''\n pyeapi_kwargs = __salt__['config.get']('pyeapi', {})\n pyeapi_kwargs.update(kwargs) # merge the CLI args with the opts/pillar\n init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](pyeapi_kwargs, PYEAPI_INIT_KWARGS)\n if 'transport' not in init_kwargs:\n init_kwargs['transport'] = 'https'\n conn = pyeapi.client.connect(**init_kwargs)\n node = pyeapi.client.Node(conn, enablepwd=init_kwargs.get('enablepwd'))\n return node, fun_kwargs\n"
] |
# -*- coding: utf-8 -*-
'''
Arista pyeapi
=============
.. versionadded:: 2019.2.0
Execution module to interface the connection with Arista switches, connecting to
the remote network device using the
`pyeapi <http://pyeapi.readthedocs.io/en/master/index.html>`_ library. It is
flexible enough to execute the commands both when running under an Arista Proxy
Minion, as well as running under a Regular Minion by specifying the connection
arguments, i.e., ``device_type``, ``host``, ``username``, ``password`` etc.
:codeauthor: Mircea Ulinic <ping@mirceaulinic.net>
:maturity: new
:depends: pyeapi
:platform: unix
.. note::
To understand how to correctly enable the eAPI on your switch, please check
https://eos.arista.com/arista-eapi-101/.
Dependencies
------------
The ``pyeapi`` Execution module requires the Python Client for eAPI (pyeapi) to
be installed: ``pip install pyeapi``.
Usage
-----
This module can equally be used via the :mod:`pyeapi <salt.proxy.arista_pyeapi>`
Proxy module or directly from an arbitrary (Proxy) Minion that is running on a
machine having access to the network device API, and the ``pyeapi`` library is
installed.
When running outside of the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`
(i.e., from another Proxy Minion type, or regular Minion), the pyeapi connection
arguments can be either specified from the CLI when executing the command, or
in a configuration block under the ``pyeapi`` key in the configuration opts
(i.e., (Proxy) Minion configuration file), or Pillar. The module supports these
simultaneously. These fields are the exact same supported by the ``pyeapi``
Proxy Module:
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
host: ``localhost``
The IP address or DNS host name of the connection device.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
password
The password to pass to the device to authenticate the eAPI connection.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
enablepwd
The enable mode password if required by the destination node.
Example (when not running in a ``pyeapi`` Proxy Minion):
.. code-block:: yaml
pyeapi:
username: test
password: test
In case the ``username`` and ``password`` are the same on any device you are
targeting, the block above (besides other parameters specific to your
environment you might need) should suffice to be able to execute commands from
outside a ``pyeapi`` Proxy, e.g.:
.. code-block:: bash
salt '*' pyeapi.send_commands 'show version' 'show interfaces'
salt '*' pyeapi.config 'ntp server 1.2.3.4'
.. note::
Remember that the above applies only when not running in a ``pyeapi`` Proxy
Minion. If you want to use the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`,
please follow the documentation notes for a proper setup.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import difflib
import logging
# Import Salt libs
from salt.ext import six
from salt.exceptions import CommandExecutionError
try:
from salt.utils.args import clean_kwargs
except ImportError:
from salt.utils import clean_kwargs
# Import third party libs
try:
import pyeapi
HAS_PYEAPI = True
except ImportError:
HAS_PYEAPI = False
# -----------------------------------------------------------------------------
# execution module properties
# -----------------------------------------------------------------------------
__proxyenabled__ = ['*']
# Any Proxy Minion should be able to execute these
__virtualname__ = 'pyeapi'
# The Execution Module will be identified as ``pyeapi``
# -----------------------------------------------------------------------------
# globals
# -----------------------------------------------------------------------------
log = logging.getLogger(__name__)
PYEAPI_INIT_KWARGS = [
'transport',
'host',
'username',
'password',
'enablepwd',
'port',
'timeout',
'return_node'
]
# -----------------------------------------------------------------------------
# propery functions
# -----------------------------------------------------------------------------
def __virtual__():
    '''
    Execution module available only if pyeapi is installed.
    '''
    # HAS_PYEAPI is set at import time, depending on whether the third-party
    # ``pyeapi`` package could be imported.
    if not HAS_PYEAPI:
        return False, 'The pyeapi execution module requires pyeapi library to be installed: ``pip install pyeapi``'
    return __virtualname__
# -----------------------------------------------------------------------------
# helper functions
# -----------------------------------------------------------------------------
def _prepare_connection(**kwargs):
    '''
    Build a connected ``pyeapi`` Node from the merged opts/pillar
    configuration and the explicit keyword arguments.

    Returns a ``(node, fun_kwargs)`` tuple: the Node, plus the leftover
    keyword arguments that are not connection-init parameters.
    '''
    # Explicit CLI/function kwargs take precedence over the ``pyeapi``
    # configuration block from opts or pillar.
    merged_kwargs = __salt__['config.get']('pyeapi', {})
    merged_kwargs.update(kwargs)
    init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](merged_kwargs, PYEAPI_INIT_KWARGS)
    init_kwargs.setdefault('transport', 'https')
    conn = pyeapi.client.connect(**init_kwargs)
    return pyeapi.client.Node(conn, enablepwd=init_kwargs.get('enablepwd')), fun_kwargs
# -----------------------------------------------------------------------------
# callable functions
# -----------------------------------------------------------------------------
def call(method, *args, **kwargs):
    '''
    Invoke an arbitrary pyeapi method.

    method
        The name of the pyeapi method to invoke.

    args
        A list of positional arguments to send to the method invoked.

    kwargs
        Key-value dictionary to send to the method invoked.

        The connection arguments (``transport``, ``host``, ``username``,
        ``password``, ``port``, ``enablepwd``, ``timeout``) may also be
        passed here; they do not need to be specified when running in a
        :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.call run_commands "['show version']"
    '''
    kwargs = clean_kwargs(**kwargs)
    # Prefer the long-lived connection maintained by the pyeapi Proxy
    # Minion when one is available.
    if 'pyeapi.call' in __proxy__:
        return __proxy__['pyeapi.call'](method, *args, **kwargs)
    node, fun_kwargs = _prepare_connection(**kwargs)
    return getattr(node, method)(*args, **fun_kwargs)
def run_commands(*commands, **kwargs):
    '''
    Sends the commands over the transport to the device.

    This function sends the commands to the device using the nodes
    transport. This is a lower layer function that shouldn't normally
    need to be used, preferring instead to use ``config()`` or ``enable()``.

    encoding: ``json``
        The encoding of the reply. When set to ``text``, only the raw
        ``output`` field of each command result is returned (a list of
        strings); otherwise the structured results are returned as-is.

    send_enable: ``True``
        Whether to prefix the command list with ``enable``.

    The connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``, ``timeout``) may also be passed
    as kwargs; they do not need to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.run_commands 'show version'
        salt '*' pyeapi.run_commands 'show version' encoding=text
        salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
    '''
    encoding = kwargs.pop('encoding', 'json')
    send_enable = kwargs.pop('send_enable', True)
    output = call('run_commands',
                  commands,
                  encoding=encoding,
                  send_enable=send_enable,
                  **kwargs)
    if encoding == 'text':
        # With text encoding each entry wraps the raw CLI output in an
        # ``output`` field; unwrap it for the caller.
        return [res['output'] for res in output]
    return output
def config(commands=None,
           config_file=None,
           template_engine='jinja',
           context=None,
           defaults=None,
           saltenv='base',
           **kwargs):
    '''
    Configures the node with the specified commands.

    This method is used to send configuration commands to the node. It
    will take either a string or a list and prepend the necessary commands
    to put the session into config mode.

    Returns the diff after the configuration commands are loaded.

    config_file
        The source file with the configuration commands to be sent to the
        device. The file can also be a template that can be rendered using
        the template engine of choice. This can be specified using the
        absolute path to the file, or using one of the following URL
        schemes: ``salt://``, ``http://`` / ``https://``, ``ftp://``,
        ``s3://``, ``swift://``.

    commands
        The commands to send to the node in config mode. If the commands
        argument is a string it will be cast to a list.

        .. note::
            This argument is ignored when ``config_file`` is specified.

    template_engine: ``jinja``
        The template engine to use when rendering the source file. To
        simply fetch the file without attempting to render, set this
        argument to ``None``.

    context
        Variables to add to the template context.

    defaults
        Default values of the ``context`` dict.

    saltenv: ``base``
        Salt fileserver environment from which ``config_file`` is fetched.

    The connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``, ``timeout``) may also be passed
    as kwargs; they do not need to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    :raises CommandExecutionError: when neither ``commands`` nor
        ``config_file`` is provided, or when the source file cannot be
        fetched.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.config commands="['ntp server 1.2.3.4', 'ntp server 5.6.7.8']"
        salt '*' pyeapi.config config_file=salt://config.txt
        salt '*' pyeapi.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
    '''
    initial_config = get_config(as_string=True, **kwargs)
    if config_file:
        file_str = __salt__['cp.get_file_str'](config_file, saltenv=saltenv)
        if file_str is False:
            raise CommandExecutionError('Source file {} not found'.format(config_file))
        log.debug('Fetched from %s', config_file)
        log.debug(file_str)
    elif commands:
        if isinstance(commands, (six.string_types, six.text_type)):
            commands = [commands]
        file_str = '\n'.join(commands)
        # unify all the commands in a single file, to render them in a go
    else:
        # Previously this fell through to a NameError on ``file_str``;
        # fail loudly with a meaningful message instead.
        raise CommandExecutionError(
            'Either ``commands`` or ``config_file`` must be specified')
    if template_engine:
        file_str = __salt__['file.apply_template_on_contents'](file_str,
                                                               template_engine,
                                                               context,
                                                               defaults,
                                                               saltenv)
        log.debug('Rendered:')
        log.debug(file_str)
    # Whatever the source of the commands would be, split them line by
    # line, removing empty lines, then push them in one config session.
    commands = [line for line in file_str.splitlines() if line.strip()]
    call('config', commands, **kwargs)
    current_config = get_config(as_string=True, **kwargs)
    # NOTE(review): the first 4 lines of each config dump are skipped;
    # presumably they are header lines that always differ — confirm
    # against actual EOS output before changing.
    diff = difflib.unified_diff(initial_config.splitlines(True)[4:],
                                current_config.splitlines(True)[4:])
    return ''.join([x.replace('\r', '') for x in diff])
def get_config(config='running-config',
               params=None,
               as_string=False,
               **kwargs):
    '''
    Retrieve either the ``running-config`` (default) or the
    ``startup-config`` from the device.

    config: ``running-config``
        Which configuration to return: ``running-config`` or
        ``startup-config``.

    params
        A string of keywords to append to the command for retrieving the
        config.

    as_string: ``False``
        When ``True`` the configuration is returned as a raw string;
        otherwise it is returned as a list.

    The connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``, ``timeout``) may also be passed
    as kwargs; they do not need to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.get_config
        salt '*' pyeapi.get_config params='section snmp-server'
        salt '*' pyeapi.get_config config='startup-config'
    '''
    # Thin wrapper: delegate straight to the pyeapi ``get_config`` method.
    return call('get_config', config=config, params=params,
                as_string=as_string, **kwargs)
def section(regex, config='running-config', **kwargs):
    '''
    Return the sections of the configuration matched by ``regex``.

    regex
        A valid regular expression used to select sections of configuration
        to return.

    config: ``running-config``
        The configuration to search. Valid values are ``running-config``
        or ``startup-config``.

    The connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``, ``timeout``) may also be passed
    as kwargs; they do not need to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.section 'interface Ethernet1'
    '''
    # Thin wrapper: delegate straight to the pyeapi ``section`` method.
    return call('section', regex, config=config, **kwargs)
|
saltstack/salt
|
salt/modules/arista_pyeapi.py
|
call
|
python
|
def call(method, *args, **kwargs):
'''
Invoke an arbitrary pyeapi method.
method
The name of the pyeapi method to invoke.
args
A list of arguments to send to the method invoked.
kwargs
Key-value dictionary to send to the method invoked.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.call run_commands "['show version']"
'''
kwargs = clean_kwargs(**kwargs)
if 'pyeapi.call' in __proxy__:
return __proxy__['pyeapi.call'](method, *args, **kwargs)
conn, kwargs = _prepare_connection(**kwargs)
ret = getattr(conn, method)(*args, **kwargs)
return ret
|
Invoke an arbitrary pyeapi method.
method
The name of the pyeapi method to invoke.
args
A list of arguments to send to the method invoked.
kwargs
Key-value dictionary to send to the method invoked.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.call run_commands "['show version']"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/arista_pyeapi.py#L206-L281
|
[
"def clean_kwargs(**kwargs):\n '''\n Return a dict without any of the __pub* keys (or any other keys starting\n with a dunder) from the kwargs dict passed into the execution module\n functions. These keys are useful for tracking what was used to invoke\n the function call, but they may not be desirable to have if passing the\n kwargs forward wholesale.\n\n Usage example:\n\n .. code-block:: python\n\n kwargs = __utils__['args.clean_kwargs'](**kwargs)\n '''\n ret = {}\n for key, val in six.iteritems(kwargs):\n if not key.startswith('__'):\n ret[key] = val\n return ret\n",
"def _prepare_connection(**kwargs):\n '''\n Prepare the connection with the remote network device, and clean up the key\n value pairs, removing the args used for the connection init.\n '''\n pyeapi_kwargs = __salt__['config.get']('pyeapi', {})\n pyeapi_kwargs.update(kwargs) # merge the CLI args with the opts/pillar\n init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](pyeapi_kwargs, PYEAPI_INIT_KWARGS)\n if 'transport' not in init_kwargs:\n init_kwargs['transport'] = 'https'\n conn = pyeapi.client.connect(**init_kwargs)\n node = pyeapi.client.Node(conn, enablepwd=init_kwargs.get('enablepwd'))\n return node, fun_kwargs\n"
] |
# -*- coding: utf-8 -*-
'''
Arista pyeapi
=============
.. versionadded:: 2019.2.0
Execution module to interface the connection with Arista switches, connecting to
the remote network device using the
`pyeapi <http://pyeapi.readthedocs.io/en/master/index.html>`_ library. It is
flexible enough to execute the commands both when running under an Arista Proxy
Minion, as well as running under a Regular Minion by specifying the connection
arguments, i.e., ``device_type``, ``host``, ``username``, ``password`` etc.
:codeauthor: Mircea Ulinic <ping@mirceaulinic.net>
:maturity: new
:depends: pyeapi
:platform: unix
.. note::
To understand how to correctly enable the eAPI on your switch, please check
https://eos.arista.com/arista-eapi-101/.
Dependencies
------------
The ``pyeapi`` Execution module requires the Python Client for eAPI (pyeapi) to
be installed: ``pip install pyeapi``.
Usage
-----
This module can equally be used via the :mod:`pyeapi <salt.proxy.arista_pyeapi>`
Proxy module or directly from an arbitrary (Proxy) Minion that is running on a
machine having access to the network device API, and the ``pyeapi`` library is
installed.
When running outside of the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`
(i.e., from another Proxy Minion type, or regular Minion), the pyeapi connection
arguments can be either specified from the CLI when executing the command, or
in a configuration block under the ``pyeapi`` key in the configuration opts
(i.e., (Proxy) Minion configuration file), or Pillar. The module supports these
simultaneously. These fields are the exact same supported by the ``pyeapi``
Proxy Module:
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
host: ``localhost``
The IP address or DNS host name of the connection device.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
password
The password to pass to the device to authenticate the eAPI connection.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
enablepwd
The enable mode password if required by the destination node.
Example (when not running in a ``pyeapi`` Proxy Minion):
.. code-block:: yaml
pyeapi:
username: test
password: test
In case the ``username`` and ``password`` are the same on any device you are
targeting, the block above (besides other parameters specific to your
environment you might need) should suffice to be able to execute commands from
outside a ``pyeapi`` Proxy, e.g.:
.. code-block:: bash
salt '*' pyeapi.send_commands 'show version' 'show interfaces'
salt '*' pyeapi.config 'ntp server 1.2.3.4'
.. note::
Remember that the above applies only when not running in a ``pyeapi`` Proxy
Minion. If you want to use the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`,
please follow the documentation notes for a proper setup.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import difflib
import logging
# Import Salt libs
from salt.ext import six
from salt.exceptions import CommandExecutionError
try:
from salt.utils.args import clean_kwargs
except ImportError:
from salt.utils import clean_kwargs
# Import third party libs
try:
import pyeapi
HAS_PYEAPI = True
except ImportError:
HAS_PYEAPI = False
# -----------------------------------------------------------------------------
# execution module properties
# -----------------------------------------------------------------------------
__proxyenabled__ = ['*']
# Any Proxy Minion should be able to execute these
__virtualname__ = 'pyeapi'
# The Execution Module will be identified as ``pyeapi``
# -----------------------------------------------------------------------------
# globals
# -----------------------------------------------------------------------------
log = logging.getLogger(__name__)
# Keyword arguments consumed by the connection setup in
# ``_prepare_connection`` (they match ``pyeapi.client.connect`` parameters).
# Any other keyword passed to the execution functions is forwarded to the
# invoked pyeapi method instead.
PYEAPI_INIT_KWARGS = [
    'transport',
    'host',
    'username',
    'password',
    'enablepwd',
    'port',
    'timeout',
    'return_node'
]
# -----------------------------------------------------------------------------
# propery functions
# -----------------------------------------------------------------------------
def __virtual__():
    '''
    Execution module available only if pyeapi is installed.

    Returns the virtual name ``pyeapi`` on success, or a
    ``(False, reason)`` tuple when the library cannot be imported.
    '''
    # HAS_PYEAPI is set at import time, depending on whether the
    # ``import pyeapi`` at the top of this module succeeded.
    if not HAS_PYEAPI:
        return False, 'The pyeapi execution module requires pyeapi library to be installed: ``pip install pyeapi``'
    return __virtualname__
# -----------------------------------------------------------------------------
# helper functions
# -----------------------------------------------------------------------------
def _prepare_connection(**kwargs):
    '''
    Prepare the connection with the remote network device, and clean up the key
    value pairs, removing the args used for the connection init.

    Returns a ``(node, fun_kwargs)`` tuple: the connected pyeapi Node, and
    the keyword arguments that are not connection-init parameters.
    '''
    pyeapi_kwargs = __salt__['config.get']('pyeapi', {})
    pyeapi_kwargs.update(kwargs) # merge the CLI args with the opts/pillar
    # Split the merged kwargs: init_kwargs are the recognised connection
    # parameters (PYEAPI_INIT_KWARGS); everything else is handed back to
    # the caller untouched as fun_kwargs.
    init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](pyeapi_kwargs, PYEAPI_INIT_KWARGS)
    if 'transport' not in init_kwargs:
        init_kwargs['transport'] = 'https'
    conn = pyeapi.client.connect(**init_kwargs)
    node = pyeapi.client.Node(conn, enablepwd=init_kwargs.get('enablepwd'))
    return node, fun_kwargs
# -----------------------------------------------------------------------------
# callable functions
# -----------------------------------------------------------------------------
def get_connection(**kwargs):
    '''
    Return the connection object to the pyeapi Node.

    .. warning::
        This function returns an unserializable object, hence it is not
        meant to be used on the CLI. This should mainly be used when
        invoked from other modules for the low level connection with the
        network device.

    kwargs
        Key-value dictionary with the authentication details.

    USAGE Example:

    .. code-block:: python

        conn = __salt__['pyeapi.get_connection'](host='router1.example.com',
                                                 username='example',
                                                 password='example')
        show_ver = conn.run_commands(['show version', 'show interfaces'])
    '''
    kwargs = clean_kwargs(**kwargs)
    # Reuse the connection cached by the pyeapi Proxy Minion when running
    # under one.
    if 'pyeapi.conn' in __proxy__:
        return __proxy__['pyeapi.conn']()
    node, _ = _prepare_connection(**kwargs)
    return node
def run_commands(*commands, **kwargs):
    '''
    Sends the commands over the transport to the device.

    This function sends the commands to the device using the nodes
    transport. This is a lower layer function that shouldn't normally
    need to be used, preferring instead to use ``config()`` or ``enable()``.

    encoding: ``json``
        The encoding of the reply. When set to ``text``, only the raw
        ``output`` field of each command result is returned (a list of
        strings); otherwise the structured results are returned as-is.

    send_enable: ``True``
        Whether to prefix the command list with ``enable``.

    The connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``, ``timeout``) may also be passed
    as kwargs; they do not need to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.run_commands 'show version'
        salt '*' pyeapi.run_commands 'show version' encoding=text
        salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
    '''
    encoding = kwargs.pop('encoding', 'json')
    send_enable = kwargs.pop('send_enable', True)
    output = call('run_commands',
                  commands,
                  encoding=encoding,
                  send_enable=send_enable,
                  **kwargs)
    if encoding == 'text':
        # With text encoding each entry wraps the raw CLI output in an
        # ``output`` field; unwrap it for the caller.
        return [res['output'] for res in output]
    return output
def config(commands=None,
           config_file=None,
           template_engine='jinja',
           context=None,
           defaults=None,
           saltenv='base',
           **kwargs):
    '''
    Configures the node with the specified commands.

    This method is used to send configuration commands to the node. It
    will take either a string or a list and prepend the necessary commands
    to put the session into config mode.

    Returns the diff after the configuration commands are loaded.

    config_file
        The source file with the configuration commands to be sent to the
        device. The file can also be a template that can be rendered using
        the template engine of choice. This can be specified using the
        absolute path to the file, or using one of the following URL
        schemes: ``salt://``, ``http://`` / ``https://``, ``ftp://``,
        ``s3://``, ``swift://``.

    commands
        The commands to send to the node in config mode. If the commands
        argument is a string it will be cast to a list.

        .. note::
            This argument is ignored when ``config_file`` is specified.

    template_engine: ``jinja``
        The template engine to use when rendering the source file. To
        simply fetch the file without attempting to render, set this
        argument to ``None``.

    context
        Variables to add to the template context.

    defaults
        Default values of the ``context`` dict.

    saltenv: ``base``
        Salt fileserver environment from which ``config_file`` is fetched.

    The connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``, ``timeout``) may also be passed
    as kwargs; they do not need to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    :raises CommandExecutionError: when neither ``commands`` nor
        ``config_file`` is provided, or when the source file cannot be
        fetched.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.config commands="['ntp server 1.2.3.4', 'ntp server 5.6.7.8']"
        salt '*' pyeapi.config config_file=salt://config.txt
        salt '*' pyeapi.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
    '''
    initial_config = get_config(as_string=True, **kwargs)
    if config_file:
        file_str = __salt__['cp.get_file_str'](config_file, saltenv=saltenv)
        if file_str is False:
            raise CommandExecutionError('Source file {} not found'.format(config_file))
        log.debug('Fetched from %s', config_file)
        log.debug(file_str)
    elif commands:
        if isinstance(commands, (six.string_types, six.text_type)):
            commands = [commands]
        file_str = '\n'.join(commands)
        # unify all the commands in a single file, to render them in a go
    else:
        # Previously this fell through to a NameError on ``file_str``;
        # fail loudly with a meaningful message instead.
        raise CommandExecutionError(
            'Either ``commands`` or ``config_file`` must be specified')
    if template_engine:
        file_str = __salt__['file.apply_template_on_contents'](file_str,
                                                               template_engine,
                                                               context,
                                                               defaults,
                                                               saltenv)
        log.debug('Rendered:')
        log.debug(file_str)
    # Whatever the source of the commands would be, split them line by
    # line, removing empty lines, then push them in one config session.
    commands = [line for line in file_str.splitlines() if line.strip()]
    call('config', commands, **kwargs)
    current_config = get_config(as_string=True, **kwargs)
    # NOTE(review): the first 4 lines of each config dump are skipped;
    # presumably they are header lines that always differ — confirm
    # against actual EOS output before changing.
    diff = difflib.unified_diff(initial_config.splitlines(True)[4:],
                                current_config.splitlines(True)[4:])
    return ''.join([x.replace('\r', '') for x in diff])
def get_config(config='running-config',
               params=None,
               as_string=False,
               **kwargs):
    '''
    Retrieve the configuration from the device.

    config: ``running-config``
        Which configuration to fetch: ``running-config`` (default) or
        ``startup-config``.

    params
        A string of keywords to append to the command used to retrieve the
        configuration.

    as_string: ``False``
        When ``True``, return the configuration as a raw string; otherwise
        return it as a list (the default).

    Connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) can be supplied as keyword
    arguments; none of them are required when running under a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.get_config
        salt '*' pyeapi.get_config params='section snmp-server'
        salt '*' pyeapi.get_config config='startup-config'
    '''
    # Delegate to the generic ``call`` helper, which handles both the
    # Proxy Minion case and the standalone connection case.
    return call(
        'get_config',
        config=config,
        params=params,
        as_string=as_string,
        **kwargs
    )
def section(regex, config='running-config', **kwargs):
    '''
    Return the sections of the configuration matching a regular expression.

    regex
        A valid regular expression used to select sections of configuration
        to return.

    config: ``running-config``
        The configuration to search: ``running-config`` (default) or
        ``startup-config``.

    Connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) can be supplied as keyword
    arguments; none of them are required when running under a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.section 'ntp'
    '''
    # ``call`` dispatches to pyeapi's ``section`` method on the node.
    matched = call('section', regex, config=config, **kwargs)
    return matched
|
saltstack/salt
|
salt/modules/arista_pyeapi.py
|
run_commands
|
python
|
def run_commands(*commands, **kwargs):
    '''
    Send the commands over the transport to the device.

    This is a lower-layer function that should not normally be needed;
    prefer ``config()`` or ``enable()`` instead.

    The ``encoding`` keyword (default ``json``) selects the output encoding.
    With ``encoding=text`` only the raw ``output`` field of each command
    result is returned. The ``send_enable`` keyword (default ``True``) is
    passed through to pyeapi's ``run_commands``.

    Connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) can be supplied as keyword
    arguments; none of them are required when running under a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.run_commands 'show version'
        salt '*' pyeapi.run_commands 'show version' encoding=text
        salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
    '''
    encoding = kwargs.pop('encoding', 'json')
    send_enable = kwargs.pop('send_enable', True)
    raw_output = call('run_commands',
                      commands,
                      encoding=encoding,
                      send_enable=send_enable,
                      **kwargs)
    if encoding != 'text':
        return raw_output
    # In text mode each result is a dict; flatten to just the CLI output.
    return [result['output'] for result in raw_output]
|
Sends the commands over the transport to the device.
This function sends the commands to the device using the nodes
transport. This is a lower layer function that shouldn't normally
need to be used, preferring instead to use ``config()`` or ``enable()``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.run_commands 'show version'
salt '*' pyeapi.run_commands 'show version' encoding=text
salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
Output example:
.. code-block:: text
veos1:
|_
----------
architecture:
i386
bootupTimestamp:
1527541728.53
hardwareRevision:
internalBuildId:
63d2e89a-220d-4b8a-a9b3-0524fa8f9c5f
internalVersion:
4.18.1F-4591672.4181F
isIntlVersion:
False
memFree:
501468
memTotal:
1893316
modelName:
vEOS
serialNumber:
systemMacAddress:
52:54:00:3f:e6:d0
version:
4.18.1F
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/arista_pyeapi.py#L284-L393
|
[
"def call(method, *args, **kwargs):\n '''\n Invoke an arbitrary pyeapi method.\n\n method\n The name of the pyeapi method to invoke.\n\n args\n A list of arguments to send to the method invoked.\n\n kwargs\n Key-value dictionary to send to the method invoked.\n\n transport: ``https``\n Specifies the type of connection transport to use. Valid values for the\n connection are ``socket``, ``http_local``, ``http``, and ``https``.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n host: ``localhost``\n The IP address or DNS host name of the connection device.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n username: ``admin``\n The username to pass to the device to authenticate the eAPI connection.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n password\n The password to pass to the device to authenticate the eAPI connection.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n port\n The TCP port of the endpoint for the eAPI connection. If this keyword is\n not specified, the default value is automatically determined by the\n transport type (``80`` for ``http``, or ``443`` for ``https``).\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n enablepwd\n The enable mode password if required by the destination node.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' pyeapi.call run_commands \"['show version']\"\n '''\n kwargs = clean_kwargs(**kwargs)\n if 'pyeapi.call' in __proxy__:\n return __proxy__['pyeapi.call'](method, *args, **kwargs)\n conn, kwargs = _prepare_connection(**kwargs)\n ret = getattr(conn, method)(*args, **kwargs)\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Arista pyeapi
=============
.. versionadded:: 2019.2.0
Execution module to interface the connection with Arista switches, connecting to
the remote network device using the
`pyeapi <http://pyeapi.readthedocs.io/en/master/index.html>`_ library. It is
flexible enough to execute the commands both when running under an Arista Proxy
Minion, as well as running under a Regular Minion by specifying the connection
arguments, i.e., ``device_type``, ``host``, ``username``, ``password`` etc.
:codeauthor: Mircea Ulinic <ping@mirceaulinic.net>
:maturity: new
:depends: pyeapi
:platform: unix
.. note::
To understand how to correctly enable the eAPI on your switch, please check
https://eos.arista.com/arista-eapi-101/.
Dependencies
------------
The ``pyeapi`` Execution module requires the Python Client for eAPI (pyeapi) to
be installed: ``pip install pyeapi``.
Usage
-----
This module can equally be used via the :mod:`pyeapi <salt.proxy.arista_pyeapi>`
Proxy module or directly from an arbitrary (Proxy) Minion that is running on a
machine having access to the network device API, and the ``pyeapi`` library is
installed.
When running outside of the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`
(i.e., from another Proxy Minion type, or regular Minion), the pyeapi connection
arguments can be either specified from the CLI when executing the command, or
in a configuration block under the ``pyeapi`` key in the configuration opts
(i.e., (Proxy) Minion configuration file), or Pillar. The module supports these
simultaneously. These fields are the exact same supported by the ``pyeapi``
Proxy Module:
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
host: ``localhost``
The IP address or DNS host name of the connection device.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
password
The password to pass to the device to authenticate the eAPI connection.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
enablepwd
The enable mode password if required by the destination node.
Example (when not running in a ``pyeapi`` Proxy Minion):
.. code-block:: yaml
pyeapi:
username: test
password: test
In case the ``username`` and ``password`` are the same on any device you are
targeting, the block above (besides other parameters specific to your
environment you might need) should suffice to be able to execute commands from
outside a ``pyeapi`` Proxy, e.g.:
.. code-block:: bash
salt '*' pyeapi.send_commands 'show version' 'show interfaces'
salt '*' pyeapi.config 'ntp server 1.2.3.4'
.. note::
Remember that the above applies only when not running in a ``pyeapi`` Proxy
Minion. If you want to use the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`,
please follow the documentation notes for a proper setup.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import difflib
import logging
# Import Salt libs
from salt.ext import six
from salt.exceptions import CommandExecutionError
try:
from salt.utils.args import clean_kwargs
except ImportError:
from salt.utils import clean_kwargs
# Import third party libs
try:
import pyeapi
HAS_PYEAPI = True
except ImportError:
HAS_PYEAPI = False
# -----------------------------------------------------------------------------
# execution module properties
# -----------------------------------------------------------------------------
__proxyenabled__ = ['*']
# Any Proxy Minion should be able to execute these
__virtualname__ = 'pyeapi'
# The Execution Module will be identified as ``pyeapi``
# -----------------------------------------------------------------------------
# globals
# -----------------------------------------------------------------------------
log = logging.getLogger(__name__)
# Keyword arguments consumed by the connection setup (pyeapi.client.connect /
# Node); _prepare_connection() splits these out of the merged opts/CLI kwargs
# so they are not forwarded to the invoked pyeapi method.
PYEAPI_INIT_KWARGS = [
    'transport',
    'host',
    'username',
    'password',
    'enablepwd',
    'port',
    'timeout',
    'return_node'
]
# -----------------------------------------------------------------------------
# propery functions
# -----------------------------------------------------------------------------
def __virtual__():
    '''
    Make this execution module available only when the ``pyeapi`` library
    is importable.
    '''
    if HAS_PYEAPI:
        return __virtualname__
    return False, 'The pyeapi execution module requires pyeapi library to be installed: ``pip install pyeapi``'
# -----------------------------------------------------------------------------
# helper functions
# -----------------------------------------------------------------------------
def _prepare_connection(**kwargs):
    '''
    Build the pyeapi Node for the remote network device and separate the
    connection-init keyword arguments from the remaining function kwargs.

    Returns a ``(node, fun_kwargs)`` tuple.
    '''
    # CLI/kwargs values take precedence over the ``pyeapi`` opts/pillar block.
    merged_kwargs = __salt__['config.get']('pyeapi', {})
    merged_kwargs.update(kwargs)
    init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](merged_kwargs, PYEAPI_INIT_KWARGS)
    # Default to the HTTPS eAPI transport when not explicitly configured.
    init_kwargs.setdefault('transport', 'https')
    connection = pyeapi.client.connect(**init_kwargs)
    node = pyeapi.client.Node(connection, enablepwd=init_kwargs.get('enablepwd'))
    return node, fun_kwargs
# -----------------------------------------------------------------------------
# callable functions
# -----------------------------------------------------------------------------
def get_connection(**kwargs):
    '''
    Return the connection object to the pyeapi Node.

    .. warning::

        The returned object is not serializable, hence this function is not
        meant to be used on the CLI. It is mainly intended for other modules
        that need the low level connection with the network device.

    kwargs
        Key-value dictionary with the authentication details.

    USAGE Example:

    .. code-block:: python

        conn = __salt__['pyeapi.get_connection'](host='router1.example.com',
                                                 username='example',
                                                 password='example')
        show_ver = conn.run_commands(['show version', 'show interfaces'])
    '''
    fun_kwargs = clean_kwargs(**kwargs)
    if 'pyeapi.conn' in __proxy__:
        # Running under the pyeapi Proxy Minion: reuse its connection.
        return __proxy__['pyeapi.conn']()
    node, _ = _prepare_connection(**fun_kwargs)
    return node
def call(method, *args, **kwargs):
    '''
    Invoke an arbitrary pyeapi method on the connected node.

    method
        The name of the pyeapi method to invoke.

    args
        A list of arguments to send to the method invoked.

    kwargs
        Key-value dictionary to send to the method invoked.

    Connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) can be supplied as keyword
    arguments; none of them are required when running under a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.call run_commands "['show version']"
    '''
    fun_kwargs = clean_kwargs(**kwargs)
    if 'pyeapi.call' in __proxy__:
        # Under the pyeapi Proxy Minion, delegate to its own call helper.
        return __proxy__['pyeapi.call'](method, *args, **fun_kwargs)
    node, fun_kwargs = _prepare_connection(**fun_kwargs)
    bound_method = getattr(node, method)
    return bound_method(*args, **fun_kwargs)
def config(commands=None,
           config_file=None,
           template_engine='jinja',
           context=None,
           defaults=None,
           saltenv='base',
           **kwargs):
    '''
    Configures the node with the specified commands.

    This method is used to send configuration commands to the node. It
    will take either a string or a list and prepend the necessary commands
    to put the session into config mode.

    Returns the diff after the configuration commands are loaded.

    config_file
        The source file with the configuration commands to be sent to the
        device. The file can also be a template rendered with the template
        engine of choice. Accepts an absolute path or a URL scheme such as
        ``salt://``, ``http(s)://``, ``ftp://``, ``s3://`` or ``swift://``.

    commands
        The commands to send to the node in config mode. A string is cast
        to a single-element list. Ignored when ``config_file`` is specified.

    template_engine: ``jinja``
        The template engine to use when rendering the source file. Set to
        ``None`` to fetch the file without rendering.

    context
        Variables to add to the template context.

    defaults
        Default values of the ``context`` dict.

    Connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) can be supplied as keyword
    arguments; none of them are required when running under a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    :raises CommandExecutionError: when ``config_file`` cannot be fetched,
        or when neither ``commands`` nor ``config_file`` is provided.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.config commands="['ntp server 1.2.3.4', 'ntp server 5.6.7.8']"
        salt '*' pyeapi.config config_file=salt://config.txt
        salt '*' pyeapi.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
    '''
    initial_config = get_config(as_string=True, **kwargs)
    if config_file:
        file_str = __salt__['cp.get_file_str'](config_file, saltenv=saltenv)
        if file_str is False:
            raise CommandExecutionError('Source file {} not found'.format(config_file))
        log.debug('Fetched from %s', config_file)
        log.debug(file_str)
    elif commands:
        if isinstance(commands, (six.string_types, six.text_type)):
            commands = [commands]
        # unify all the commands in a single file, to render them in a go
        file_str = '\n'.join(commands)
    else:
        # Fail loudly instead of raising a confusing NameError further down
        # when ``file_str`` would be referenced while unbound.
        raise CommandExecutionError(
            'Either ``commands`` or ``config_file`` must be specified')
    if template_engine:
        file_str = __salt__['file.apply_template_on_contents'](file_str,
                                                               template_engine,
                                                               context,
                                                               defaults,
                                                               saltenv)
        log.debug('Rendered:')
        log.debug(file_str)
    # whatever the source of the commands would be, split them line by line,
    # removing empty lines
    commands = [line for line in file_str.splitlines() if line.strip()]
    # push the commands to the device (result is not needed; the diff below
    # is computed from before/after snapshots of the configuration)
    call('config', commands, **kwargs)
    current_config = get_config(as_string=True, **kwargs)
    # keepends=True so unified_diff lines stay newline-terminated; the first
    # four lines are skipped so they don't show up as spurious diff noise
    # (presumably header/timestamp lines -- TODO confirm header length)
    diff = difflib.unified_diff(initial_config.splitlines(True)[4:],
                                current_config.splitlines(True)[4:])
    return ''.join([x.replace('\r', '') for x in diff])
def get_config(config='running-config',
               params=None,
               as_string=False,
               **kwargs):
    '''
    Retrieve the configuration from the device.

    config: ``running-config``
        Which configuration to fetch: ``running-config`` (default) or
        ``startup-config``.

    params
        A string of keywords to append to the command used to retrieve the
        configuration.

    as_string: ``False``
        When ``True``, return the configuration as a raw string; otherwise
        return it as a list (the default).

    Connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) can be supplied as keyword
    arguments; none of them are required when running under a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.get_config
        salt '*' pyeapi.get_config params='section snmp-server'
        salt '*' pyeapi.get_config config='startup-config'
    '''
    # Delegate to the generic ``call`` helper, which handles both the
    # Proxy Minion case and the standalone connection case.
    return call(
        'get_config',
        config=config,
        params=params,
        as_string=as_string,
        **kwargs
    )
def section(regex, config='running-config', **kwargs):
    '''
    Return the sections of the configuration matching a regular expression.

    regex
        A valid regular expression used to select sections of configuration
        to return.

    config: ``running-config``
        The configuration to search: ``running-config`` (default) or
        ``startup-config``.

    Connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) can be supplied as keyword
    arguments; none of them are required when running under a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.section 'ntp'
    '''
    # ``call`` dispatches to pyeapi's ``section`` method on the node.
    matched = call('section', regex, config=config, **kwargs)
    return matched
|
saltstack/salt
|
salt/modules/arista_pyeapi.py
|
config
|
python
|
def config(commands=None,
           config_file=None,
           template_engine='jinja',
           context=None,
           defaults=None,
           saltenv='base',
           **kwargs):
    '''
    Configures the node with the specified commands.

    This method is used to send configuration commands to the node. It
    will take either a string or a list and prepend the necessary commands
    to put the session into config mode.

    Returns the diff after the configuration commands are loaded.

    config_file
        The source file with the configuration commands to be sent to the
        device. The file can also be a template rendered with the template
        engine of choice. Accepts an absolute path or a URL scheme such as
        ``salt://``, ``http(s)://``, ``ftp://``, ``s3://`` or ``swift://``.

    commands
        The commands to send to the node in config mode. A string is cast
        to a single-element list. Ignored when ``config_file`` is specified.

    template_engine: ``jinja``
        The template engine to use when rendering the source file. Set to
        ``None`` to fetch the file without rendering.

    context
        Variables to add to the template context.

    defaults
        Default values of the ``context`` dict.

    Connection arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) can be supplied as keyword
    arguments; none of them are required when running under a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    :raises CommandExecutionError: when ``config_file`` cannot be fetched,
        or when neither ``commands`` nor ``config_file`` is provided.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.config commands="['ntp server 1.2.3.4', 'ntp server 5.6.7.8']"
        salt '*' pyeapi.config config_file=salt://config.txt
        salt '*' pyeapi.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
    '''
    initial_config = get_config(as_string=True, **kwargs)
    if config_file:
        file_str = __salt__['cp.get_file_str'](config_file, saltenv=saltenv)
        if file_str is False:
            raise CommandExecutionError('Source file {} not found'.format(config_file))
        log.debug('Fetched from %s', config_file)
        log.debug(file_str)
    elif commands:
        if isinstance(commands, (six.string_types, six.text_type)):
            commands = [commands]
        # unify all the commands in a single file, to render them in a go
        file_str = '\n'.join(commands)
    else:
        # Fail loudly instead of raising a confusing NameError further down
        # when ``file_str`` would be referenced while unbound.
        raise CommandExecutionError(
            'Either ``commands`` or ``config_file`` must be specified')
    if template_engine:
        file_str = __salt__['file.apply_template_on_contents'](file_str,
                                                               template_engine,
                                                               context,
                                                               defaults,
                                                               saltenv)
        log.debug('Rendered:')
        log.debug(file_str)
    # whatever the source of the commands would be, split them line by line,
    # removing empty lines
    commands = [line for line in file_str.splitlines() if line.strip()]
    # push the commands to the device (result is not needed; the diff below
    # is computed from before/after snapshots of the configuration)
    call('config', commands, **kwargs)
    current_config = get_config(as_string=True, **kwargs)
    # keepends=True so unified_diff lines stay newline-terminated; the first
    # four lines are skipped so they don't show up as spurious diff noise
    # (presumably header/timestamp lines -- TODO confirm header length)
    diff = difflib.unified_diff(initial_config.splitlines(True)[4:],
                                current_config.splitlines(True)[4:])
    return ''.join([x.replace('\r', '') for x in diff])
|
Configures the node with the specified commands.
This method is used to send configuration commands to the node. It
will take either a string or a list and prepend the necessary commands
to put the session into config mode.
Returns the diff after the configuration commands are loaded.
config_file
The source file with the configuration commands to be sent to the
device.
The file can also be a template that can be rendered using the template
engine of choice.
This can be specified using the absolute path to the file, or using one
of the following URL schemes:
- ``salt://``, to fetch the file from the Salt fileserver.
- ``http://`` or ``https://``
- ``ftp://``
- ``s3://``
- ``swift://``
commands
The commands to send to the node in config mode. If the commands
argument is a string it will be cast to a list.
The list of commands will also be prepended with the necessary commands
to put the session in config mode.
.. note::
This argument is ignored when ``config_file`` is specified.
template_engine: ``jinja``
The template engine to use when rendering the source file. Default:
``jinja``. To simply fetch the file without attempting to render, set
this argument to ``None``.
context
Variables to add to the template context.
defaults
Default values of the ``context`` dict.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.config commands="['ntp server 1.2.3.4', 'ntp server 5.6.7.8']"
salt '*' pyeapi.config config_file=salt://config.txt
salt '*' pyeapi.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/arista_pyeapi.py#L396-L534
|
[
"def call(method, *args, **kwargs):\n '''\n Invoke an arbitrary pyeapi method.\n\n method\n The name of the pyeapi method to invoke.\n\n args\n A list of arguments to send to the method invoked.\n\n kwargs\n Key-value dictionary to send to the method invoked.\n\n transport: ``https``\n Specifies the type of connection transport to use. Valid values for the\n connection are ``socket``, ``http_local``, ``http``, and ``https``.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n host: ``localhost``\n The IP address or DNS host name of the connection device.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n username: ``admin``\n The username to pass to the device to authenticate the eAPI connection.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n password\n The password to pass to the device to authenticate the eAPI connection.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n port\n The TCP port of the endpoint for the eAPI connection. If this keyword is\n not specified, the default value is automatically determined by the\n transport type (``80`` for ``http``, or ``443`` for ``https``).\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n enablepwd\n The enable mode password if required by the destination node.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' pyeapi.call run_commands \"['show version']\"\n '''\n kwargs = clean_kwargs(**kwargs)\n if 'pyeapi.call' in __proxy__:\n return __proxy__['pyeapi.call'](method, *args, **kwargs)\n conn, kwargs = _prepare_connection(**kwargs)\n ret = getattr(conn, method)(*args, **kwargs)\n return ret\n",
"def get_config(config='running-config',\n params=None,\n as_string=False,\n **kwargs):\n '''\n Retrieves the config from the device.\n\n This method will retrieve the config from the node as either a string\n or a list object. The config to retrieve can be specified as either\n the startup-config or the running-config.\n\n config: ``running-config``\n Specifies to return either the nodes ``startup-config``\n or ``running-config``. The default value is the ``running-config``.\n\n params\n A string of keywords to append to the command for retrieving the config.\n\n as_string: ``False``\n Flag that determines the response. If ``True``, then the configuration\n is returned as a raw string. If ``False``, then the configuration is\n returned as a list. The default value is ``False``.\n\n transport: ``https``\n Specifies the type of connection transport to use. Valid values for the\n connection are ``socket``, ``http_local``, ``http``, and ``https``.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n host: ``localhost``\n The IP address or DNS host name of the connection device.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n username: ``admin``\n The username to pass to the device to authenticate the eAPI connection.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n password\n The password to pass to the device to authenticate the eAPI connection.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n port\n The TCP port of the endpoint for the eAPI connection. 
If this keyword is\n not specified, the default value is automatically determined by the\n transport type (``80`` for ``http``, or ``443`` for ``https``).\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n enablepwd\n The enable mode password if required by the destination node.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pyeapi.get_config\n salt '*' pyeapi.get_config params='section snmp-server'\n salt '*' pyeapi.get_config config='startup-config'\n '''\n return call('get_config',\n config=config,\n params=params,\n as_string=as_string,\n **kwargs)\n"
] |
# -*- coding: utf-8 -*-
'''
Arista pyeapi
=============
.. versionadded:: 2019.2.0
Execution module to interface the connection with Arista switches, connecting to
the remote network device using the
`pyeapi <http://pyeapi.readthedocs.io/en/master/index.html>`_ library. It is
flexible enough to execute the commands both when running under an Arista Proxy
Minion, as well as running under a Regular Minion by specifying the connection
arguments, i.e., ``device_type``, ``host``, ``username``, ``password`` etc.
:codeauthor: Mircea Ulinic <ping@mirceaulinic.net>
:maturity: new
:depends: pyeapi
:platform: unix
.. note::
To understand how to correctly enable the eAPI on your switch, please check
https://eos.arista.com/arista-eapi-101/.
Dependencies
------------
The ``pyeapi`` Execution module requires the Python Client for eAPI (pyeapi) to
be installed: ``pip install pyeapi``.
Usage
-----
This module can equally be used via the :mod:`pyeapi <salt.proxy.arista_pyeapi>`
Proxy module or directly from an arbitrary (Proxy) Minion that is running on a
machine having access to the network device API, and the ``pyeapi`` library is
installed.
When running outside of the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`
(i.e., from another Proxy Minion type, or regular Minion), the pyeapi connection
arguments can be either specified from the CLI when executing the command, or
in a configuration block under the ``pyeapi`` key in the configuration opts
(i.e., (Proxy) Minion configuration file), or Pillar. The module supports these
simultaneously. These fields are the exact same supported by the ``pyeapi``
Proxy Module:
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
host: ``localhost``
The IP address or DNS host name of the connection device.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
password
The password to pass to the device to authenticate the eAPI connection.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
enablepwd
The enable mode password if required by the destination node.
Example (when not running in a ``pyeapi`` Proxy Minion):
.. code-block:: yaml
pyeapi:
username: test
password: test
In case the ``username`` and ``password`` are the same on any device you are
targeting, the block above (besides other parameters specific to your
environment you might need) should suffice to be able to execute commands from
outside a ``pyeapi`` Proxy, e.g.:
.. code-block:: bash
salt '*' pyeapi.send_commands 'show version' 'show interfaces'
salt '*' pyeapi.config 'ntp server 1.2.3.4'
.. note::
Remember that the above applies only when not running in a ``pyeapi`` Proxy
Minion. If you want to use the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`,
please follow the documentation notes for a proper setup.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import difflib
import logging
# Import Salt libs
from salt.ext import six
from salt.exceptions import CommandExecutionError
try:
from salt.utils.args import clean_kwargs
except ImportError:
from salt.utils import clean_kwargs
# Import third party libs
try:
import pyeapi
HAS_PYEAPI = True
except ImportError:
HAS_PYEAPI = False
# -----------------------------------------------------------------------------
# execution module properties
# -----------------------------------------------------------------------------
__proxyenabled__ = ['*']
# Any Proxy Minion should be able to execute these
__virtualname__ = 'pyeapi'
# The Execution Module will be identified as ``pyeapi``
# -----------------------------------------------------------------------------
# globals
# -----------------------------------------------------------------------------
log = logging.getLogger(__name__)
PYEAPI_INIT_KWARGS = [
'transport',
'host',
'username',
'password',
'enablepwd',
'port',
'timeout',
'return_node'
]
# -----------------------------------------------------------------------------
# propery functions
# -----------------------------------------------------------------------------
def __virtual__():
'''
Execution module available only if pyeapi is installed.
'''
if not HAS_PYEAPI:
return False, 'The pyeapi execution module requires pyeapi library to be installed: ``pip install pyeapi``'
return __virtualname__
# -----------------------------------------------------------------------------
# helper functions
# -----------------------------------------------------------------------------
def _prepare_connection(**kwargs):
'''
Prepare the connection with the remote network device, and clean up the key
value pairs, removing the args used for the connection init.
'''
pyeapi_kwargs = __salt__['config.get']('pyeapi', {})
pyeapi_kwargs.update(kwargs) # merge the CLI args with the opts/pillar
init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](pyeapi_kwargs, PYEAPI_INIT_KWARGS)
if 'transport' not in init_kwargs:
init_kwargs['transport'] = 'https'
conn = pyeapi.client.connect(**init_kwargs)
node = pyeapi.client.Node(conn, enablepwd=init_kwargs.get('enablepwd'))
return node, fun_kwargs
# -----------------------------------------------------------------------------
# callable functions
# -----------------------------------------------------------------------------
def get_connection(**kwargs):
'''
Return the connection object to the pyeapi Node.
.. warning::
This function returns an unserializable object, hence it is not meant
to be used on the CLI. This should mainly be used when invoked from
other modules for the low level connection with the network device.
kwargs
Key-value dictionary with the authentication details.
USAGE Example:
.. code-block:: python
conn = __salt__['pyeapi.get_connection'](host='router1.example.com',
username='example',
password='example')
show_ver = conn.run_commands(['show version', 'show interfaces'])
'''
kwargs = clean_kwargs(**kwargs)
if 'pyeapi.conn' in __proxy__:
return __proxy__['pyeapi.conn']()
conn, kwargs = _prepare_connection(**kwargs)
return conn
def call(method, *args, **kwargs):
'''
Invoke an arbitrary pyeapi method.
method
The name of the pyeapi method to invoke.
args
A list of arguments to send to the method invoked.
kwargs
Key-value dictionary to send to the method invoked.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.call run_commands "['show version']"
'''
kwargs = clean_kwargs(**kwargs)
if 'pyeapi.call' in __proxy__:
return __proxy__['pyeapi.call'](method, *args, **kwargs)
conn, kwargs = _prepare_connection(**kwargs)
ret = getattr(conn, method)(*args, **kwargs)
return ret
def run_commands(*commands, **kwargs):
'''
Sends the commands over the transport to the device.
This function sends the commands to the device using the nodes
transport. This is a lower layer function that shouldn't normally
need to be used, preferring instead to use ``config()`` or ``enable()``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.run_commands 'show version'
salt '*' pyeapi.run_commands 'show version' encoding=text
salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
Output example:
.. code-block:: text
veos1:
|_
----------
architecture:
i386
bootupTimestamp:
1527541728.53
hardwareRevision:
internalBuildId:
63d2e89a-220d-4b8a-a9b3-0524fa8f9c5f
internalVersion:
4.18.1F-4591672.4181F
isIntlVersion:
False
memFree:
501468
memTotal:
1893316
modelName:
vEOS
serialNumber:
systemMacAddress:
52:54:00:3f:e6:d0
version:
4.18.1F
'''
encoding = kwargs.pop('encoding', 'json')
send_enable = kwargs.pop('send_enable', True)
output = call('run_commands',
commands,
encoding=encoding,
send_enable=send_enable,
**kwargs)
if encoding == 'text':
ret = []
for res in output:
ret.append(res['output'])
return ret
return output
def get_config(config='running-config',
params=None,
as_string=False,
**kwargs):
'''
Retrieves the config from the device.
This method will retrieve the config from the node as either a string
or a list object. The config to retrieve can be specified as either
the startup-config or the running-config.
config: ``running-config``
Specifies to return either the nodes ``startup-config``
or ``running-config``. The default value is the ``running-config``.
params
A string of keywords to append to the command for retrieving the config.
as_string: ``False``
Flag that determines the response. If ``True``, then the configuration
is returned as a raw string. If ``False``, then the configuration is
returned as a list. The default value is ``False``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.get_config
salt '*' pyeapi.get_config params='section snmp-server'
salt '*' pyeapi.get_config config='startup-config'
'''
return call('get_config',
config=config,
params=params,
as_string=as_string,
**kwargs)
def section(regex, config='running-config', **kwargs):
'''
Return a section of the config.
regex
A valid regular expression used to select sections of configuration to
return.
config: ``running-config``
The configuration to return. Valid values for config are
``running-config`` or ``startup-config``. The default value is
``running-config``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*'
'''
return call('section', regex, config=config, **kwargs)
|
saltstack/salt
|
salt/modules/arista_pyeapi.py
|
get_config
|
python
|
def get_config(config='running-config',
params=None,
as_string=False,
**kwargs):
'''
Retrieves the config from the device.
This method will retrieve the config from the node as either a string
or a list object. The config to retrieve can be specified as either
the startup-config or the running-config.
config: ``running-config``
Specifies to return either the nodes ``startup-config``
or ``running-config``. The default value is the ``running-config``.
params
A string of keywords to append to the command for retrieving the config.
as_string: ``False``
Flag that determines the response. If ``True``, then the configuration
is returned as a raw string. If ``False``, then the configuration is
returned as a list. The default value is ``False``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.get_config
salt '*' pyeapi.get_config params='section snmp-server'
salt '*' pyeapi.get_config config='startup-config'
'''
return call('get_config',
config=config,
params=params,
as_string=as_string,
**kwargs)
|
Retrieves the config from the device.
This method will retrieve the config from the node as either a string
or a list object. The config to retrieve can be specified as either
the startup-config or the running-config.
config: ``running-config``
Specifies to return either the nodes ``startup-config``
or ``running-config``. The default value is the ``running-config``.
params
A string of keywords to append to the command for retrieving the config.
as_string: ``False``
Flag that determines the response. If ``True``, then the configuration
is returned as a raw string. If ``False``, then the configuration is
returned as a list. The default value is ``False``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.get_config
salt '*' pyeapi.get_config params='section snmp-server'
salt '*' pyeapi.get_config config='startup-config'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/arista_pyeapi.py#L537-L623
|
[
"def call(method, *args, **kwargs):\n '''\n Invoke an arbitrary pyeapi method.\n\n method\n The name of the pyeapi method to invoke.\n\n args\n A list of arguments to send to the method invoked.\n\n kwargs\n Key-value dictionary to send to the method invoked.\n\n transport: ``https``\n Specifies the type of connection transport to use. Valid values for the\n connection are ``socket``, ``http_local``, ``http``, and ``https``.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n host: ``localhost``\n The IP address or DNS host name of the connection device.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n username: ``admin``\n The username to pass to the device to authenticate the eAPI connection.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n password\n The password to pass to the device to authenticate the eAPI connection.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n port\n The TCP port of the endpoint for the eAPI connection. If this keyword is\n not specified, the default value is automatically determined by the\n transport type (``80`` for ``http``, or ``443`` for ``https``).\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n enablepwd\n The enable mode password if required by the destination node.\n\n .. note::\n\n This argument does not need to be specified when running in a\n :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' pyeapi.call run_commands \"['show version']\"\n '''\n kwargs = clean_kwargs(**kwargs)\n if 'pyeapi.call' in __proxy__:\n return __proxy__['pyeapi.call'](method, *args, **kwargs)\n conn, kwargs = _prepare_connection(**kwargs)\n ret = getattr(conn, method)(*args, **kwargs)\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Arista pyeapi
=============
.. versionadded:: 2019.2.0
Execution module to interface the connection with Arista switches, connecting to
the remote network device using the
`pyeapi <http://pyeapi.readthedocs.io/en/master/index.html>`_ library. It is
flexible enough to execute the commands both when running under an Arista Proxy
Minion, as well as running under a Regular Minion by specifying the connection
arguments, i.e., ``device_type``, ``host``, ``username``, ``password`` etc.
:codeauthor: Mircea Ulinic <ping@mirceaulinic.net>
:maturity: new
:depends: pyeapi
:platform: unix
.. note::
To understand how to correctly enable the eAPI on your switch, please check
https://eos.arista.com/arista-eapi-101/.
Dependencies
------------
The ``pyeapi`` Execution module requires the Python Client for eAPI (pyeapi) to
be installed: ``pip install pyeapi``.
Usage
-----
This module can equally be used via the :mod:`pyeapi <salt.proxy.arista_pyeapi>`
Proxy module or directly from an arbitrary (Proxy) Minion that is running on a
machine having access to the network device API, and the ``pyeapi`` library is
installed.
When running outside of the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`
(i.e., from another Proxy Minion type, or regular Minion), the pyeapi connection
arguments can be either specified from the CLI when executing the command, or
in a configuration block under the ``pyeapi`` key in the configuration opts
(i.e., (Proxy) Minion configuration file), or Pillar. The module supports these
simultaneously. These fields are the exact same supported by the ``pyeapi``
Proxy Module:
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
host: ``localhost``
The IP address or DNS host name of the connection device.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
password
The password to pass to the device to authenticate the eAPI connection.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
enablepwd
The enable mode password if required by the destination node.
Example (when not running in a ``pyeapi`` Proxy Minion):
.. code-block:: yaml
pyeapi:
username: test
password: test
In case the ``username`` and ``password`` are the same on any device you are
targeting, the block above (besides other parameters specific to your
environment you might need) should suffice to be able to execute commands from
outside a ``pyeapi`` Proxy, e.g.:
.. code-block:: bash
salt '*' pyeapi.send_commands 'show version' 'show interfaces'
salt '*' pyeapi.config 'ntp server 1.2.3.4'
.. note::
Remember that the above applies only when not running in a ``pyeapi`` Proxy
Minion. If you want to use the :mod:`pyeapi Proxy <salt.proxy.arista_pyeapi>`,
please follow the documentation notes for a proper setup.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import difflib
import logging
# Import Salt libs
from salt.ext import six
from salt.exceptions import CommandExecutionError
try:
from salt.utils.args import clean_kwargs
except ImportError:
from salt.utils import clean_kwargs
# Import third party libs
try:
import pyeapi
HAS_PYEAPI = True
except ImportError:
HAS_PYEAPI = False
# -----------------------------------------------------------------------------
# execution module properties
# -----------------------------------------------------------------------------
__proxyenabled__ = ['*']
# Any Proxy Minion should be able to execute these
__virtualname__ = 'pyeapi'
# The Execution Module will be identified as ``pyeapi``
# -----------------------------------------------------------------------------
# globals
# -----------------------------------------------------------------------------
log = logging.getLogger(__name__)
PYEAPI_INIT_KWARGS = [
'transport',
'host',
'username',
'password',
'enablepwd',
'port',
'timeout',
'return_node'
]
# -----------------------------------------------------------------------------
# propery functions
# -----------------------------------------------------------------------------
def __virtual__():
    '''
    Load this execution module only when the ``pyeapi`` library is
    importable on the (Proxy) Minion.
    '''
    if HAS_PYEAPI:
        return __virtualname__
    return False, 'The pyeapi execution module requires pyeapi library to be installed: ``pip install pyeapi``'
# -----------------------------------------------------------------------------
# helper functions
# -----------------------------------------------------------------------------
def _prepare_connection(**kwargs):
    '''
    Prepare the connection with the remote network device, and clean up the key
    value pairs, removing the args used for the connection init.
    '''
    # Connection details configured under the ``pyeapi`` key in opts/pillar
    # are the base; explicit kwargs take precedence over them.
    pyeapi_kwargs = __salt__['config.get']('pyeapi', {})
    pyeapi_kwargs.update(kwargs)  # merge the CLI args with the opts/pillar
    # Split the merged kwargs: ``init_kwargs`` (names in PYEAPI_INIT_KWARGS)
    # go to the pyeapi connection, the remainder is handed back to the caller.
    init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](pyeapi_kwargs, PYEAPI_INIT_KWARGS)
    if 'transport' not in init_kwargs:
        # default to the secure transport when none is configured
        init_kwargs['transport'] = 'https'
    conn = pyeapi.client.connect(**init_kwargs)
    node = pyeapi.client.Node(conn, enablepwd=init_kwargs.get('enablepwd'))
    return node, fun_kwargs
# -----------------------------------------------------------------------------
# callable functions
# -----------------------------------------------------------------------------
def get_connection(**kwargs):
    '''
    Return the connection object to the pyeapi Node.

    .. warning::
        This function returns an unserializable object, hence it is not meant
        to be used on the CLI. This should mainly be used when invoked from
        other modules for the low level connection with the network device.

    kwargs
        Key-value dictionary with the authentication details.

    USAGE Example:

    .. code-block:: python

        conn = __salt__['pyeapi.get_connection'](host='router1.example.com',
                                                 username='example',
                                                 password='example')
        show_ver = conn.run_commands(['show version', 'show interfaces'])
    '''
    kwargs = clean_kwargs(**kwargs)
    # Inside a pyeapi Proxy Minion, reuse the proxy's long-lived connection.
    if 'pyeapi.conn' in __proxy__:
        return __proxy__['pyeapi.conn']()
    # Otherwise build an ad-hoc connection from the supplied kwargs
    # (merged with opts/pillar by _prepare_connection).
    conn, kwargs = _prepare_connection(**kwargs)
    return conn
def call(method, *args, **kwargs):
    '''
    Invoke an arbitrary pyeapi method.

    method
        The name of the pyeapi method to invoke.

    args
        A list of arguments to send to the method invoked.

    kwargs
        Key-value dictionary to send to the method invoked.

        Connection details (``transport``, ``host``, ``username``,
        ``password``, ``port``, ``enablepwd``) may also be passed here;
        they do not need to be specified when running in a
        :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.call run_commands "['show version']"
    '''
    kwargs = clean_kwargs(**kwargs)
    # Inside a pyeapi Proxy Minion, delegate to the proxy's cached session.
    if 'pyeapi.call' in __proxy__:
        return __proxy__['pyeapi.call'](method, *args, **kwargs)
    # Otherwise establish an ad-hoc connection and invoke the method on it.
    node, kwargs = _prepare_connection(**kwargs)
    return getattr(node, method)(*args, **kwargs)
def run_commands(*commands, **kwargs):
    '''
    Sends the commands over the transport to the device.

    This function sends the commands to the device using the nodes
    transport. This is a lower layer function that shouldn't normally
    need to be used, preferring instead to use ``config()`` or ``enable()``.

    encoding: ``json``
        The requested encoding of the command output, passed through to
        pyeapi (``json`` or ``text``). In ``text`` mode, only the raw
        output strings are returned.

    send_enable: ``True``
        Passed through to pyeapi's ``run_commands``.

    Connection details (``transport``, ``host``, ``username``, ``password``,
    ``port``, ``enablepwd``) may be supplied when not running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.run_commands 'show version'
        salt '*' pyeapi.run_commands 'show version' encoding=text
        salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
    '''
    encoding = kwargs.pop('encoding', 'json')
    send_enable = kwargs.pop('send_enable', True)
    output = call('run_commands',
                  commands,
                  encoding=encoding,
                  send_enable=send_enable,
                  **kwargs)
    if encoding != 'text':
        return output
    # text encoding wraps each command result as {'output': '...'}; unwrap it
    return [entry['output'] for entry in output]
def config(commands=None,
           config_file=None,
           template_engine='jinja',
           context=None,
           defaults=None,
           saltenv='base',
           **kwargs):
    '''
    Configures the node with the specified commands.

    This method is used to send configuration commands to the node. It
    will take either a string or a list and prepend the necessary commands
    to put the session into config mode.

    Returns the diff after the configuration commands are loaded.

    config_file
        The source file with the configuration commands to be sent to the
        device. This can be an absolute path, or use one of the URL schemes
        ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://`` or
        ``swift://``. The file may be a template rendered with
        ``template_engine``.

    commands
        The commands to send to the node in config mode. If the commands
        argument is a string it will be cast to a list.

        .. note::
            This argument is ignored when ``config_file`` is specified.

    template_engine: ``jinja``
        The template engine to use when rendering the source file. Set to
        ``None`` to simply fetch the file without attempting to render.

    context
        Variables to add to the template context.

    defaults
        Default values of the ``context`` dict.

    saltenv: ``base``
        Salt fileserver environment from which ``config_file`` is retrieved.

    Connection details (``transport``, ``host``, ``username``, ``password``,
    ``port``, ``enablepwd``) may be supplied when not running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.config commands="['ntp server 1.2.3.4', 'ntp server 5.6.7.8']"
        salt '*' pyeapi.config config_file=salt://config.txt
        salt '*' pyeapi.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
    '''
    # Snapshot the config before applying anything so a diff can be built.
    initial_config = get_config(as_string=True, **kwargs)
    if config_file:
        file_str = __salt__['cp.get_file_str'](config_file, saltenv=saltenv)
        if file_str is False:
            raise CommandExecutionError('Source file {} not found'.format(config_file))
        log.debug('Fetched from %s', config_file)
        log.debug(file_str)
    elif commands:
        if isinstance(commands, (six.string_types, six.text_type)):
            commands = [commands]
        # unify all the commands in a single string, to render them in one go
        file_str = '\n'.join(commands)
    else:
        # previously this fell through to a NameError on ``file_str``
        raise CommandExecutionError(
            'Either ``commands`` or ``config_file`` must be specified')
    if template_engine:
        file_str = __salt__['file.apply_template_on_contents'](file_str,
                                                               template_engine,
                                                               context,
                                                               defaults,
                                                               saltenv)
        log.debug('Rendered:')
        log.debug(file_str)
    # whatever the source of the commands would be, split them line by line,
    # removing empty lines
    commands = [line for line in file_str.splitlines() if line.strip()]
    call('config', commands, **kwargs)
    current_config = get_config(as_string=True, **kwargs)
    # NOTE(review): the first 4 lines of each config dump are skipped before
    # diffing (presumably a header/timestamp banner) -- preserved as-is
    diff = difflib.unified_diff(initial_config.splitlines(1)[4:], current_config.splitlines(1)[4:])
    return ''.join([x.replace('\r', '') for x in diff])
def section(regex, config='running-config', **kwargs):
    '''
    Return a section of the config.

    regex
        A valid regular expression used to select sections of configuration to
        return.

    config: ``running-config``
        The configuration to return. Valid values for config are
        ``running-config`` or ``startup-config``. The default value is
        ``running-config``.

    Connection details (``transport``, ``host``, ``username``, ``password``,
    ``port``, ``enablepwd``) may be supplied when not running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.section '^interface' config=startup-config
    '''
    # Thin wrapper: the heavy lifting happens in pyeapi's ``section`` method.
    ret = call('section', regex, config=config, **kwargs)
    return ret
|
saltstack/salt
|
salt/modules/openscap.py
|
xccdf
|
python
|
def xccdf(params):
    '''
    Run ``oscap xccdf`` commands on minions.

    It uses cp.push_dir to upload the generated files to the salt master
    in the master's minion files cachedir
    (defaults to ``/var/cache/salt/master/minions/minion-id/files``)

    It needs ``file_recv`` set to ``True`` in the master configuration file.

    params
        The ``oscap xccdf`` sub-command line, e.g.
        ``"eval --profile Default /path/to/policy.xml"``. The last token is
        treated as the policy file.

    Returns a dict with ``success``, ``upload_dir`` (path of the pushed
    results directory, ``None`` on failure), ``error`` and ``returncode``.

    CLI Example:

    .. code-block:: bash

        salt '*' openscap.xccdf "eval --profile Default /usr/share/openscap/scap-yast2sec-xccdf.xml"
    '''
    params = shlex.split(params)
    policy = params[-1]

    success = True
    error = None
    upload_dir = None
    action = None
    returncode = None

    try:
        # First pass extracts the action; second pass validates the
        # action-specific arguments (e.g. --profile for eval).
        parser = _ArgumentParser()
        action = parser.parse_known_args(params)[0].action
        args, argv = _ArgumentParser(action=action).parse_known_args(args=params)
    except Exception as err:
        success = False
        error = six.text_type(err)

    if success:
        cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, policy)
        tempdir = tempfile.mkdtemp()
        try:
            proc = Popen(
                shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)
            # NOTE(review): ``error`` here is raw stderr bytes, not text --
            # preserved as-is for backwards compatibility
            (stdoutdata, error) = proc.communicate()
            # NOTE(review): only exit codes 0-2 are mapped; other codes
            # would raise KeyError -- confirm oscap's exit-code contract
            success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
            returncode = proc.returncode
            if success:
                # push results to the master; ``upload_dir`` doubles as the
                # path of the pushed directory
                __salt__['cp.push_dir'](tempdir)
                upload_dir = tempdir
        finally:
            # previously the tempdir leaked whenever oscap failed (it was
            # only removed on success); always clean up after the push
            shutil.rmtree(tempdir, ignore_errors=True)

    return dict(
        success=success,
        upload_dir=upload_dir,
        error=error,
        returncode=returncode)
|
Run ``oscap xccdf`` commands on minions.
It uses cp.push_dir to upload the generated files to the salt master
in the master's minion files cachedir
(defaults to ``/var/cache/salt/master/minions/minion-id/files``)
It needs ``file_recv`` set to ``True`` in the master configuration file.
CLI Example:
.. code-block:: bash
salt '*' openscap.xccdf "eval --profile Default /usr/share/openscap/scap-yast2sec-xccdf.xml"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openscap.py#L66-L115
| null |
# -*- coding: utf-8 -*-
'''
Module for OpenSCAP Management
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import tempfile
import shlex
import shutil
from subprocess import Popen, PIPE
# Import Salt libs
from salt.ext import six
ArgumentParser = object
try:
import argparse # pylint: disable=minimum-python-version
ArgumentParser = argparse.ArgumentParser
HAS_ARGPARSE = True
except ImportError: # python 2.6
HAS_ARGPARSE = False
_XCCDF_MAP = {
'eval': {
'parser_arguments': [
(('--profile',), {'required': True}),
],
'cmd_pattern': (
"oscap xccdf eval "
"--oval-results --results results.xml --report report.html "
"--profile {0} {1}"
)
}
}
def __virtual__():
    # Loadable only when argparse imported successfully; the second tuple
    # element is the reason reported when it did not (Python 2.6 lacks it).
    return HAS_ARGPARSE, 'argparse module is required.'
class _ArgumentParser(ArgumentParser):
    '''
    argparse wrapper for parsing ``oscap`` command lines.

    Registers the ``action`` positional plus the arguments known for the
    ``eval`` action, and raises on parse errors instead of calling
    ``sys.exit()`` like argparse does by default.
    '''

    def __init__(self, action=None, *args, **kwargs):
        # ``action`` is accepted for caller compatibility, but the parser
        # currently registers the ``eval`` arguments unconditionally.
        super(_ArgumentParser, self).__init__(*args, prog='oscap', **kwargs)
        self.add_argument('action', choices=['eval'])
        # (removed dead ``add_arg = None`` local that was never used)
        for params, kwparams in _XCCDF_MAP['eval']['parser_arguments']:
            self.add_argument(*params, **kwparams)

    def error(self, message, *args, **kwargs):
        # Surface the parse error to the caller instead of exiting.
        raise Exception(message)
_OSCAP_EXIT_CODES_MAP = {
0: True, # all rules pass
1: False, # there is an error during evaluation
2: True # there is at least one rule with either fail or unknown result
}
|
saltstack/salt
|
salt/modules/tuned.py
|
list_
|
python
|
def list_():
    '''
    List the profiles available

    CLI Example:

    .. code-block:: bash

        salt '*' tuned.list
    '''
    lines = __salt__['cmd.run']('tuned-adm list').splitlines()
    # Drop the leading "Available profiles:" header and the trailing
    # "Current active profile: ..." line; each remaining entry is either
    # " - <name> - <description>" (v2.7.1) or " - <name> " (v2.4.1),
    # so the profile name is the token right after the first "- ".
    return [line.split('- ')[1].strip() for line in lines[1:-1]]
|
List the profiles available
CLI Example:
.. code-block:: bash
salt '*' tuned.list
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/tuned.py#L36-L55
| null |
# -*- coding: utf-8 -*-
'''
Interface to Red Hat tuned-adm module
:maintainer: Syed Ali <alicsyed@gmail.com>
:maturity: new
:depends: tuned-adm
:platform: Linux
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import re
# Import Salt libs
import salt.utils.path
__func_alias__ = {
'list_': 'list',
}
__virtualname__ = 'tuned'
def __virtual__():
    '''
    Check to see if tuned-adm binary is installed on the system
    '''
    # Only load when the tuned-adm binary can be found on PATH.
    tuned_adm = salt.utils.path.which('tuned-adm')
    if not tuned_adm:
        return (False, 'The tuned execution module failed to load: the tuned-adm binary is not in the path.')
    return __virtualname__
def active():
    '''
    Return current active profile, or an empty string when no active
    profile can be parsed from the ``tuned-adm active`` output.

    CLI Example:

    .. code-block:: bash

        salt '*' tuned.active
    '''
    result = __salt__['cmd.run']('tuned-adm active')
    pattern = re.compile(r'''(?P<stmt>Current active profile:) (?P<profile>\w+.*)''')
    match = re.match(pattern, result)
    if not match:
        # e.g. no profile active or the tuned daemon is not running;
        # previously this crashed with AttributeError on ``match.group``
        return ''
    return '{0}'.format(match.group('profile'))
def off():
    '''
    Turn off all profiles

    CLI Example:

    .. code-block:: bash

        salt '*' tuned.off
    '''
    # tuned-adm exits non-zero on failure; success is exit code 0
    retcode = __salt__['cmd.retcode']('tuned-adm off')
    return int(retcode) == 0
def profile(profile_name):
    '''
    Activate specified profile

    CLI Example:

    .. code-block:: bash

        salt '*' tuned.profile virtual-guest
    '''
    # Activate the profile; tuned-adm exits non-zero on failure.
    succeeded = int(__salt__['cmd.retcode'](
        'tuned-adm profile {0}'.format(profile_name))) == 0
    # Echo back the profile name on success, False otherwise.
    return '{0}'.format(profile_name) if succeeded else False
|
saltstack/salt
|
salt/modules/tuned.py
|
active
|
python
|
def active():
'''
Return current active profile
CLI Example:
.. code-block:: bash
salt '*' tuned.active
'''
# turn off all profiles
result = __salt__['cmd.run']('tuned-adm active')
pattern = re.compile(r'''(?P<stmt>Current active profile:) (?P<profile>\w+.*)''')
match = re.match(pattern, result)
return '{0}'.format(match.group('profile'))
|
Return current active profile
CLI Example:
.. code-block:: bash
salt '*' tuned.active
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/tuned.py#L58-L73
| null |
# -*- coding: utf-8 -*-
'''
Interface to Red Hat tuned-adm module
:maintainer: Syed Ali <alicsyed@gmail.com>
:maturity: new
:depends: tuned-adm
:platform: Linux
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import re
# Import Salt libs
import salt.utils.path
__func_alias__ = {
'list_': 'list',
}
__virtualname__ = 'tuned'
def __virtual__():
'''
Check to see if tuned-adm binary is installed on the system
'''
tuned_adm = salt.utils.path.which('tuned-adm')
if not tuned_adm:
return (False, 'The tuned execution module failed to load: the tuned-adm binary is not in the path.')
return __virtualname__
def list_():
'''
List the profiles available
CLI Example:
.. code-block:: bash
salt '*' tuned.list
'''
result = __salt__['cmd.run']('tuned-adm list').splitlines()
# Remove "Available profiles:"
result.pop(0)
# Remove "Current active profile:.*"
result.pop()
# Output can be : " - <profile name> - <description>" (v2.7.1)
# or " - <profile name> " (v2.4.1)
result = [i.split('- ')[1].strip() for i in result]
return result
def off():
'''
Turn off all profiles
CLI Example:
.. code-block:: bash
salt '*' tuned.off
'''
# turn off all profiles
result = __salt__['cmd.retcode']('tuned-adm off')
if int(result) != 0:
return False
return True
def profile(profile_name):
'''
Activate specified profile
CLI Example:
.. code-block:: bash
salt '*' tuned.profile virtual-guest
'''
# run tuned-adm with the profile specified
result = __salt__['cmd.retcode']('tuned-adm profile {0}'.format(profile_name))
if int(result) != 0:
return False
return '{0}'.format(profile_name)
|
saltstack/salt
|
salt/modules/tuned.py
|
profile
|
python
|
def profile(profile_name):
'''
Activate specified profile
CLI Example:
.. code-block:: bash
salt '*' tuned.profile virtual-guest
'''
# run tuned-adm with the profile specified
result = __salt__['cmd.retcode']('tuned-adm profile {0}'.format(profile_name))
if int(result) != 0:
return False
return '{0}'.format(profile_name)
|
Activate specified profile
CLI Example:
.. code-block:: bash
salt '*' tuned.profile virtual-guest
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/tuned.py#L94-L109
| null |
# -*- coding: utf-8 -*-
'''
Interface to Red Hat tuned-adm module
:maintainer: Syed Ali <alicsyed@gmail.com>
:maturity: new
:depends: tuned-adm
:platform: Linux
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import re
# Import Salt libs
import salt.utils.path
__func_alias__ = {
'list_': 'list',
}
__virtualname__ = 'tuned'
def __virtual__():
'''
Check to see if tuned-adm binary is installed on the system
'''
tuned_adm = salt.utils.path.which('tuned-adm')
if not tuned_adm:
return (False, 'The tuned execution module failed to load: the tuned-adm binary is not in the path.')
return __virtualname__
def list_():
'''
List the profiles available
CLI Example:
.. code-block:: bash
salt '*' tuned.list
'''
result = __salt__['cmd.run']('tuned-adm list').splitlines()
# Remove "Available profiles:"
result.pop(0)
# Remove "Current active profile:.*"
result.pop()
# Output can be : " - <profile name> - <description>" (v2.7.1)
# or " - <profile name> " (v2.4.1)
result = [i.split('- ')[1].strip() for i in result]
return result
def active():
'''
Return current active profile
CLI Example:
.. code-block:: bash
salt '*' tuned.active
'''
# turn off all profiles
result = __salt__['cmd.run']('tuned-adm active')
pattern = re.compile(r'''(?P<stmt>Current active profile:) (?P<profile>\w+.*)''')
match = re.match(pattern, result)
return '{0}'.format(match.group('profile'))
def off():
'''
Turn off all profiles
CLI Example:
.. code-block:: bash
salt '*' tuned.off
'''
# turn off all profiles
result = __salt__['cmd.retcode']('tuned-adm off')
if int(result) != 0:
return False
return True
|
saltstack/salt
|
salt/returners/slack_returner.py
|
_post_message
|
python
|
def _post_message(channel,
                  message,
                  username,
                  as_user,
                  api_key=None):
    '''
    Send a message to a Slack room.

    :param channel: The room name.
    :param message: The message to send to the Slack room.
    :param username: Specify who the message is from.
    :param as_user: Sets the profile picture which have been added through Slack itself.
    :param api_key: The Slack api key, if not specified in the configuration.
    :return: Boolean if message was sent successfully.
    '''
    payload = {
        'channel': channel,
        'username': username,
        'as_user': as_user,
        # wrap in triple backticks: pre-formatted, fixed-width text
        'text': '```' + message + '```',
    }

    # Slack wants the body on POST to be urlencoded.
    result = salt.utils.slack.query(function='message',
                                    api_key=api_key,
                                    method='POST',
                                    header_dict={'Content-Type': 'application/x-www-form-urlencoded'},
                                    data=_urlencode(payload))

    log.debug('Slack message post result: %s', result)
    return bool(result)
|
Send a message to a Slack room.
:param channel: The room name.
:param message: The message to send to the Slack room.
:param username: Specify who the message is from.
:param as_user: Sets the profile picture which have been added through Slack itself.
:param api_key: The Slack api key, if not specified in the configuration.
:param api_version: The Slack api version, if not specified in the configuration.
:return: Boolean if message was sent successfully.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/slack_returner.py#L145-L178
| null |
# -*- coding: utf-8 -*-
'''
Return salt data via slack
.. versionadded:: 2015.5.0
The following fields can be set in the minion conf file:
.. code-block:: yaml
slack.channel (required)
slack.api_key (required)
slack.username (required)
slack.as_user (required to see the profile picture of your bot)
slack.profile (optional)
slack.changes(optional, only show changes and failed states)
slack.only_show_failed(optional, only show failed states)
slack.yaml_format(optional, format the json in yaml format)
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
slack.channel
slack.api_key
slack.username
slack.as_user
Slack settings may also be configured as:
.. code-block:: yaml
slack:
channel: RoomName
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
username: user
as_user: true
alternative.slack:
room_id: RoomName
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from_name: user@email.com
slack_profile:
slack.api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
slack.from_name: user@email.com
slack:
profile: slack_profile
channel: RoomName
alternative.slack:
profile: slack_profile
channel: RoomName
To use the Slack returner, append '--return slack' to the salt command.
.. code-block:: bash
salt '*' test.ping --return slack
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. code-block:: bash
salt '*' test.ping --return slack --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return slack --return_kwargs '{"channel": "#random"}'
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import pprint
import logging
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six.moves.http_client
from salt.ext.six.moves.urllib.parse import urlencode as _urlencode
# pylint: enable=import-error,no-name-in-module,redefined-builtin
# Import Salt Libs
import salt.returners
import salt.utils.slack
import salt.utils.yaml
log = logging.getLogger(__name__)
__virtualname__ = 'slack'
def _get_options(ret=None):
    '''
    Get the slack options from salt.
    '''

    defaults = {'channel': '#general'}

    # Mapping of config keys (under ``slack.``) to option names.
    attrs = {'slack_profile': 'profile',
             'channel': 'channel',
             'username': 'username',
             'as_user': 'as_user',
             'api_key': 'api_key',
             'changes': 'changes',
             'only_show_failed': 'only_show_failed',
             'yaml_format': 'yaml_format',
             }

    profile_attr = 'slack_profile'

    profile_attrs = {'from_jid': 'from_jid',
                     'api_key': 'api_key',
                     # NOTE(review): mapping 'api_version' to 'api_key' looks
                     # like a copy-paste typo -- confirm before changing
                     'api_version': 'api_key'
                     }

    _options = salt.returners.get_returner_options(__virtualname__,
                                                   ret,
                                                   attrs,
                                                   profile_attr=profile_attr,
                                                   profile_attrs=profile_attrs,
                                                   __salt__=__salt__,
                                                   __opts__=__opts__,
                                                   defaults=defaults)
    return _options
def __virtual__():
    '''
    Return virtual name of the module.

    :return: The virtual name of the module.
    '''
    # No external dependencies beyond Salt itself, so always load.
    return __virtualname__
def returner(ret):
'''
Send an slack message with the data
'''
_options = _get_options(ret)
channel = _options.get('channel')
username = _options.get('username')
as_user = _options.get('as_user')
api_key = _options.get('api_key')
changes = _options.get('changes')
only_show_failed = _options.get('only_show_failed')
yaml_format = _options.get('yaml_format')
if not channel:
log.error('slack.channel not defined in salt config')
return
if not username:
log.error('slack.username not defined in salt config')
return
if not as_user:
log.error('slack.as_user not defined in salt config')
return
if not api_key:
log.error('slack.api_key not defined in salt config')
return
if only_show_failed and changes:
log.error('cannot define both slack.changes and slack.only_show_failed in salt config')
return
returns = ret.get('return')
if changes is True:
returns = {(key, value) for key, value in returns.items() if value['result'] is not True or value['changes']}
if only_show_failed is True:
returns = {(key, value) for key, value in returns.items() if value['result'] is not True}
if yaml_format is True:
returns = salt.utils.yaml.safe_dump(returns)
else:
returns = pprint.pformat(returns)
message = ('id: {0}\r\n'
'function: {1}\r\n'
'function args: {2}\r\n'
'jid: {3}\r\n'
'return: {4}\r\n').format(
ret.get('id'),
ret.get('fun'),
ret.get('fun_args'),
ret.get('jid'),
returns)
slack = _post_message(channel,
message,
username,
as_user,
api_key)
return slack
|
saltstack/salt
|
salt/returners/slack_returner.py
|
returner
|
python
|
def returner(ret):
    '''
    Send an slack message with the data

    Validates the configured Slack options, optionally filters the state
    results (``changes`` / ``only_show_failed``), formats them as YAML or
    pretty-printed Python, and posts the message to the configured channel.
    '''
    _options = _get_options(ret)

    channel = _options.get('channel')
    username = _options.get('username')
    as_user = _options.get('as_user')
    api_key = _options.get('api_key')
    changes = _options.get('changes')
    only_show_failed = _options.get('only_show_failed')
    yaml_format = _options.get('yaml_format')

    # All four of these are required to talk to the Slack API.
    if not channel:
        log.error('slack.channel not defined in salt config')
        return

    if not username:
        log.error('slack.username not defined in salt config')
        return

    if not as_user:
        log.error('slack.as_user not defined in salt config')
        return

    if not api_key:
        log.error('slack.api_key not defined in salt config')
        return

    if only_show_failed and changes:
        log.error('cannot define both slack.changes and slack.only_show_failed in salt config')
        return

    returns = ret.get('return')

    # BUG FIX: these previously used set comprehensions `{(key, value) ...}`,
    # producing a set of tuples instead of a filtered dict.
    if changes is True:
        returns = {key: value for key, value in returns.items()
                   if value['result'] is not True or value['changes']}

    if only_show_failed is True:
        returns = {key: value for key, value in returns.items()
                   if value['result'] is not True}

    if yaml_format is True:
        returns = salt.utils.yaml.safe_dump(returns)
    else:
        returns = pprint.pformat(returns)

    message = ('id: {0}\r\n'
               'function: {1}\r\n'
               'function args: {2}\r\n'
               'jid: {3}\r\n'
               'return: {4}\r\n').format(
                   ret.get('id'),
                   ret.get('fun'),
                   ret.get('fun_args'),
                   ret.get('jid'),
                   returns)

    slack = _post_message(channel,
                          message,
                          username,
                          as_user,
                          api_key)
    return slack
|
Send an slack message with the data
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/slack_returner.py#L181-L244
|
[
"def safe_dump(data, stream=None, **kwargs):\n '''\n Use a custom dumper to ensure that defaultdict and OrderedDict are\n represented properly. Ensure that unicode strings are encoded unless\n explicitly told not to.\n '''\n if 'allow_unicode' not in kwargs:\n kwargs['allow_unicode'] = True\n return yaml.dump(data, stream, Dumper=SafeOrderedDumper, **kwargs)\n",
"def _get_options(ret=None):\n '''\n Get the slack options from salt.\n '''\n\n defaults = {'channel': '#general'}\n\n attrs = {'slack_profile': 'profile',\n 'channel': 'channel',\n 'username': 'username',\n 'as_user': 'as_user',\n 'api_key': 'api_key',\n 'changes': 'changes',\n 'only_show_failed': 'only_show_failed',\n 'yaml_format': 'yaml_format',\n }\n\n profile_attr = 'slack_profile'\n\n profile_attrs = {'from_jid': 'from_jid',\n 'api_key': 'api_key',\n 'api_version': 'api_key'\n }\n\n _options = salt.returners.get_returner_options(__virtualname__,\n ret,\n attrs,\n profile_attr=profile_attr,\n profile_attrs=profile_attrs,\n __salt__=__salt__,\n __opts__=__opts__,\n defaults=defaults)\n return _options\n",
"def _post_message(channel,\n message,\n username,\n as_user,\n api_key=None):\n '''\n Send a message to a Slack room.\n :param channel: The room name.\n :param message: The message to send to the Slack room.\n :param username: Specify who the message is from.\n :param as_user: Sets the profile picture which have been added through Slack itself.\n :param api_key: The Slack api key, if not specified in the configuration.\n :param api_version: The Slack api version, if not specified in the configuration.\n :return: Boolean if message was sent successfully.\n '''\n\n parameters = dict()\n parameters['channel'] = channel\n parameters['username'] = username\n parameters['as_user'] = as_user\n parameters['text'] = '```' + message + '```' # pre-formatted, fixed-width text\n\n # Slack wants the body on POST to be urlencoded.\n result = salt.utils.slack.query(function='message',\n api_key=api_key,\n method='POST',\n header_dict={'Content-Type': 'application/x-www-form-urlencoded'},\n data=_urlencode(parameters))\n\n log.debug('Slack message post result: %s', result)\n if result:\n return True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Return salt data via slack
.. versionadded:: 2015.5.0
The following fields can be set in the minion conf file:
.. code-block:: yaml
slack.channel (required)
slack.api_key (required)
slack.username (required)
slack.as_user (required to see the profile picture of your bot)
slack.profile (optional)
slack.changes(optional, only show changes and failed states)
slack.only_show_failed(optional, only show failed states)
slack.yaml_format(optional, format the json in yaml format)
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
slack.channel
slack.api_key
slack.username
slack.as_user
Slack settings may also be configured as:
.. code-block:: yaml
slack:
channel: RoomName
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
username: user
as_user: true
alternative.slack:
room_id: RoomName
api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from_name: user@email.com
slack_profile:
slack.api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
slack.from_name: user@email.com
slack:
profile: slack_profile
channel: RoomName
alternative.slack:
profile: slack_profile
channel: RoomName
To use the Slack returner, append '--return slack' to the salt command.
.. code-block:: bash
salt '*' test.ping --return slack
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. code-block:: bash
salt '*' test.ping --return slack --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return slack --return_kwargs '{"channel": "#random"}'
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import pprint
import logging
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six.moves.http_client
from salt.ext.six.moves.urllib.parse import urlencode as _urlencode
# pylint: enable=import-error,no-name-in-module,redefined-builtin
# Import Salt Libs
import salt.returners
import salt.utils.slack
import salt.utils.yaml
log = logging.getLogger(__name__)
__virtualname__ = 'slack'
def _get_options(ret=None):
    '''
    Resolve the Slack returner options from the Salt configuration,
    optionally indirected through a named returner profile.

    :param ret: The return payload (used by ``get_returner_options`` to
        honor per-job ``ret_config``/``ret_kwargs`` overrides).
    :return: A dict of resolved option values (``channel``, ``api_key``, ...).
    '''
    # Fall back to the #general channel if none is configured.
    defaults = {'channel': '#general'}

    # Option names looked up as 'slack.<name>' (or under the alternative
    # configuration prefix selected via --return_config).
    attrs = {'slack_profile': 'profile',
             'channel': 'channel',
             'username': 'username',
             'as_user': 'as_user',
             'api_key': 'api_key',
             'changes': 'changes',
             'only_show_failed': 'only_show_failed',
             'yaml_format': 'yaml_format',
             }

    profile_attr = 'slack_profile'

    # NOTE(review): 'from_jid' looks like a copy/paste from an XMPP-based
    # returner, and 'api_version' mapping to 'api_key' looks like a slip
    # ('api_version': 'api_version' was presumably intended) -- confirm
    # against salt.returners.get_returner_options before changing.
    profile_attrs = {'from_jid': 'from_jid',
                     'api_key': 'api_key',
                     'api_version': 'api_key'
                     }

    _options = salt.returners.get_returner_options(__virtualname__,
                                                   ret,
                                                   attrs,
                                                   profile_attr=profile_attr,
                                                   profile_attrs=profile_attrs,
                                                   __salt__=__salt__,
                                                   __opts__=__opts__,
                                                   defaults=defaults)
    return _options
def __virtual__():
    '''
    Expose this returner under its virtual name.

    :return: The virtual name of the module ('slack').
    '''
    virtual = __virtualname__
    return virtual
def _post_message(channel,
                  message,
                  username,
                  as_user,
                  api_key=None):
    '''
    Post a message to a Slack room through the Slack web API.

    :param channel: The room name.
    :param message: The message to send to the Slack room.
    :param username: Specify who the message is from.
    :param as_user: Sets the profile picture which have been added through Slack itself.
    :param api_key: The Slack api key, if not specified in the configuration.
    :return: True if the message was sent successfully, False otherwise.
    '''
    # Wrap the message in triple backticks so Slack renders it as
    # pre-formatted, fixed-width text.
    payload = {
        'channel': channel,
        'username': username,
        'as_user': as_user,
        'text': '```' + message + '```',
    }

    # Slack wants the body on POST to be urlencoded.
    result = salt.utils.slack.query(
        function='message',
        api_key=api_key,
        method='POST',
        header_dict={'Content-Type': 'application/x-www-form-urlencoded'},
        data=_urlencode(payload))

    log.debug('Slack message post result: %s', result)
    return bool(result)
|
saltstack/salt
|
salt/metaproxy/proxy.py
|
handle_decoded_payload
|
python
|
def handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.

    Runs the published job described by ``data`` in a new process (or a
    thread when multiprocessing is disabled), after de-duplicating JIDs
    and honoring the configured process-count ceiling.

    :param data: The decoded publish payload (carries at least ``fun``,
        ``jid`` and ``arg``).
    '''
    # Ensure payload is unicode. Disregard failure to decode binary blobs.
    if six.PY2:
        data = salt.utils.data.decode(data, keep=True)
    if 'user' in data:
        log.info(
            'User %s Executing command %s with jid %s',
            data['user'], data['fun'], data['jid']
        )
    else:
        log.info(
            'Executing command %s with jid %s',
            data['fun'], data['jid']
        )
    log.debug('Command details %s', data)

    # Don't duplicate jobs
    log.trace('Started JIDs: %s', self.jid_queue)
    if self.jid_queue is not None:
        if data['jid'] in self.jid_queue:
            # This JID was already seen; drop the duplicate publish.
            return
        else:
            self.jid_queue.append(data['jid'])
            # Keep the JID queue bounded: discard the oldest entry once
            # we pass the configured high-water mark.
            if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                self.jid_queue.pop(0)

    if isinstance(data['fun'], six.string_types):
        if data['fun'] == 'sys.reload_modules':
            # Reload execution/returner modules in-process and hand the
            # fresh function maps to the scheduler as well.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    # Throttle job spawning: while the number of running salt processes
    # is at or above the ceiling, sleep and re-check (0 disables this).
    process_count_max = self.opts.get('process_count_max')
    process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
    if process_count_max > 0:
        process_count = len(salt.utils.minion.running(self.opts))
        while process_count >= process_count_max:
            log.warning('Maximum number of processes (%s) reached while '
                        'executing jid %s, waiting %s seconds...',
                        process_count_max,
                        data['jid'],
                        process_count_max_sleep_secs)
            yield tornado.gen.sleep(process_count_max_sleep_secs)
            process_count = len(salt.utils.minion.running(self.opts))

    # We stash an instance references to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    if multiprocessing_enabled:
        if sys.platform.startswith('win'):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process = SignalHandlingMultiprocessingProcess(
                target=self._target, args=(instance, self.opts, data, self.connected)
            )
    else:
        process = threading.Thread(
            target=self._target,
            args=(instance, self.opts, data, self.connected),
            name=data['jid']
        )

    if multiprocessing_enabled:
        with default_signals(signal.SIGINT, signal.SIGTERM):
            # Reset current signals before starting the process in
            # order not to inherit the current signal handlers
            process.start()
    else:
        process.start()

    # TODO: remove the windows specific check?
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # we only want to join() immediately if we are daemonizing a process
        process.join()
    else:
        self.win_proc.append(process)
|
Override this method if you wish to handle the decoded data
differently.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/metaproxy/proxy.py#L693-L777
|
[
"def decode(data, encoding=None, errors='strict', keep=False,\n normalize=False, preserve_dict_class=False, preserve_tuples=False,\n to_str=False):\n '''\n Generic function which will decode whichever type is passed, if necessary.\n Optionally use to_str=True to ensure strings are str types and not unicode\n on Python 2.\n\n If `strict` is True, and `keep` is False, and we fail to decode, a\n UnicodeDecodeError will be raised. Passing `keep` as True allows for the\n original value to silently be returned in cases where decoding fails. This\n can be useful for cases where the data passed to this function is likely to\n contain binary blobs, such as in the case of cp.recv.\n\n If `normalize` is True, then unicodedata.normalize() will be used to\n normalize unicode strings down to a single code point per glyph. It is\n recommended not to normalize unless you know what you're doing. For\n instance, if `data` contains a dictionary, it is possible that normalizing\n will lead to data loss because the following two strings will normalize to\n the same value:\n\n - u'\\\\u044f\\\\u0438\\\\u0306\\\\u0446\\\\u0430.txt'\n - u'\\\\u044f\\\\u0439\\\\u0446\\\\u0430.txt'\n\n One good use case for normalization is in the test suite. For example, on\n some platforms such as Mac OS, os.listdir() will produce the first of the\n two strings above, in which \"й\" is represented as two code points (i.e. one\n for the base character, and one for the breve mark). 
Normalizing allows for\n a more reliable test case.\n '''\n _decode_func = salt.utils.stringutils.to_unicode \\\n if not to_str \\\n else salt.utils.stringutils.to_str\n if isinstance(data, Mapping):\n return decode_dict(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, list):\n return decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, tuple):\n return decode_tuple(data, encoding, errors, keep, normalize,\n preserve_dict_class, to_str) \\\n if preserve_tuples \\\n else decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n else:\n try:\n data = _decode_func(data, encoding, errors, normalize)\n except TypeError:\n # to_unicode raises a TypeError when input is not a\n # string/bytestring/bytearray. This is expected and simply means we\n # are going to leave the value as-is.\n pass\n except UnicodeDecodeError:\n if not keep:\n raise\n return data\n",
"def running(opts):\n '''\n Return the running jobs on this minion\n '''\n\n ret = []\n proc_dir = os.path.join(opts['cachedir'], 'proc')\n if not os.path.isdir(proc_dir):\n return ret\n for fn_ in os.listdir(proc_dir):\n path = os.path.join(proc_dir, fn_)\n try:\n data = _read_proc_file(path, opts)\n if data is not None:\n ret.append(data)\n except (IOError, OSError):\n # proc files may be removed at any time during this process by\n # the minion process that is executing the JID in question, so\n # we must ignore ENOENT during this process\n pass\n return ret\n"
] |
# -*- coding: utf-8 -*-
#
# Proxy minion metaproxy modules
#
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import os
import signal
import sys
import types
import logging
import threading
import traceback
# Import Salt Libs
# pylint: disable=3rd-party-module-not-gated
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.serializers.msgpack
import salt.minion
import salt.defaults.exitcodes
import salt.utils.dictupdate
from salt.utils.event import tagify
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltSystemExit,
)
from salt.ext import six
from salt.ext.six.moves import range
from salt.minion import ProxyMinion
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
def post_master_init(self, master):
    '''
    Finish proxy-minion initialization once the master connection is
    resolved: compile pillar, load the proxy module and wire up its
    dunder variables, initialize the proxied device, and install the
    default scheduled jobs (mine, master-alive, failback, keepalive).

    :param master: The master this minion connected to.
    :raises SaltSystemExit: If no ``proxy`` config block is found, or the
        proxymodule lacks ``init()``/``shutdown()``.
    '''
    log.debug("subclassed LazyLoaded _post_master_init")
    if self.connected:
        self.opts['master'] = master

        # Compile pillar asynchronously now that the master is known.
        self.opts['pillar'] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            saltenv=self.opts['saltenv'],
            pillarenv=self.opts.get('pillarenv'),
        ).compile_pillar()

    # A proxy minion cannot operate without a 'proxy' config block in
    # either pillar or opts.
    if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
        errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
            'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    if 'proxy' not in self.opts:
        self.opts['proxy'] = self.opts['pillar']['proxy']

    if self.opts.get('proxy_merge_pillar_in_opts'):
        # Override proxy opts with pillar data when the user required.
        self.opts = salt.utils.dictupdate.merge(self.opts,
                                                self.opts['pillar'],
                                                strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
                                                merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
    elif self.opts.get('proxy_mines_pillar'):
        # Even when not required, some details such as mine configuration
        # should be merged anyway whenever possible.
        if 'mine_interval' in self.opts['pillar']:
            self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
        if 'mine_functions' in self.opts['pillar']:
            general_proxy_mines = self.opts.get('mine_functions', [])
            specific_proxy_mines = self.opts['pillar']['mine_functions']
            try:
                self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
            except TypeError as terr:
                log.error(
                    'Unable to merge mine functions from the pillar in the '
                    'opts, for proxy %s', self.opts['id']
                )

    fq_proxyname = self.opts['proxy']['proxytype']

    # Need to load the modules so they get all the dunder variables
    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()

    # we can then sync any proxymodules down from the master
    # we do a sync_all here in case proxy code was installed by
    # SPM or was manually placed in /srv/salt/_modules etc.
    self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])

    # Pull in the utils
    self.utils = salt.loader.utils(self.opts)

    # Then load the proxy module
    self.proxy = salt.loader.proxy(self.opts, utils=self.utils)

    # And re-load the modules so the __proxy__ variable gets injected
    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
    self.functions.pack['__proxy__'] = self.proxy
    self.proxy.pack['__salt__'] = self.functions
    self.proxy.pack['__ret__'] = self.returners
    self.proxy.pack['__pillar__'] = self.opts['pillar']

    # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
    self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
    self.proxy.pack['__utils__'] = self.utils

    # Reload all modules so all dunder variables are injected
    self.proxy.reload_modules()

    # Start engines here instead of in the Minion superclass __init__
    # This is because we need to inject the __proxy__ variable but
    # it is not setup until now.
    self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                self.process_manager, proxy=self.proxy)

    if ('{0}.init'.format(fq_proxyname) not in self.proxy
            or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
        errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
            'Check your proxymodule. Salt-proxy aborted.'
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
    proxy_init_fn = self.proxy[fq_proxyname + '.init']
    proxy_init_fn(self.opts)

    # Grains may depend on the initialized proxy connection.
    self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)

    self.serial = salt.payload.Serial(self.opts)
    self.mod_opts = self._prep_mod_opts()
    self.matchers = salt.loader.matchers(self.opts)
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
    self.proc_dir = salt.minion.get_proc_dir(self.opts['cachedir'], uid=uid)

    if self.connected and self.opts['pillar']:
        # The pillar has changed due to the connection to the master.
        # Reload the functions so that they can use the new pillar data.
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        if hasattr(self, 'schedule'):
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    if not hasattr(self, 'schedule'):
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[salt.minion.master_event(type='alive')],
            proxy=self.proxy)

    # add default scheduling jobs to the minions scheduler
    if self.opts['mine_enabled'] and 'mine.update' in self.functions:
        self.schedule.add_job({
            '__mine_interval':
            {
                'function': 'mine.update',
                'minutes': self.opts['mine_interval'],
                'jid_include': True,
                'maxrunning': 2,
                'run_on_start': True,
                'return_job': self.opts.get('mine_return_job', False)
            }
        }, persist=True)
        log.info('Added mine.update to scheduler')
    else:
        self.schedule.delete_job('__mine_interval', persist=True)

    # add master_alive job if enabled
    if (self.opts['transport'] != 'tcp' and
            self.opts['master_alive_interval'] > 0):
        self.schedule.add_job({
            salt.minion.master_event(type='alive', master=self.opts['master']):
            {
                'function': 'status.master',
                'seconds': self.opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 1,
                'return_job': False,
                'kwargs': {'master': self.opts['master'],
                           'connected': True}
            }
        }, persist=True)
        # When failback is enabled and we are not on the primary master,
        # periodically ping the primary so we can fail back to it.
        if self.opts['master_failback'] and \
                'master_list' in self.opts and \
                self.opts['master'] != self.opts['master_list'][0]:
            self.schedule.add_job({
                salt.minion.master_event(type='failback'):
                {
                    'function': 'status.ping_master',
                    'seconds': self.opts['master_failback_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master_list'][0]}
                }
            }, persist=True)
        else:
            self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)
    else:
        self.schedule.delete_job(salt.minion.master_event(type='alive', master=self.opts['master']), persist=True)
        self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)

    # proxy keepalive
    proxy_alive_fn = fq_proxyname+'.alive'
    if (proxy_alive_fn in self.proxy
            and 'status.proxy_reconnect' in self.functions
            and self.opts.get('proxy_keep_alive', True)):
        # Schedule periodic reconnection checks unless 'proxy_keep_alive'
        # is explicitly set to False.
        self.schedule.add_job({
            '__proxy_keepalive':
            {
                'function': 'status.proxy_reconnect',
                'minutes': self.opts.get('proxy_keep_alive_interval', 1),  # by default, check once per minute
                'jid_include': True,
                'maxrunning': 1,
                'return_job': False,
                'kwargs': {
                    'proxy_name': fq_proxyname
                }
            }
        }, persist=True)
        self.schedule.enable_schedule()
    else:
        self.schedule.delete_job('__proxy_keepalive', persist=True)

    # Sync the grains here so the proxy can communicate them to the master
    self.functions['saltutil.sync_grains'](saltenv='base')
    self.grains_cache = self.opts['grains']
    self.ready = True
def target(cls, minion_instance, opts, data, connected):
    '''
    Process/thread target that executes one published job. Lazily
    (re)builds the minion instance, its module maps and the proxy module
    when none was handed over (e.g. on Windows, where instances cannot
    be pickled across processes), then dispatches to the single- or
    multi-function job handler.

    :param cls: The minion class used to construct a fresh instance.
    :param minion_instance: An existing minion instance, or a falsy value
        to build one from ``opts``.
    :param opts: The minion configuration dict.
    :param data: The decoded job payload.
    :param connected: Whether the minion currently has a master connection.
    '''
    if not minion_instance:
        minion_instance = cls(opts)
        minion_instance.connected = connected
        if not hasattr(minion_instance, 'functions'):
            # Need to load the modules so they get all the dunder variables
            functions, returners, function_errors, executors = (
                minion_instance._load_modules(grains=opts['grains'])
            )
            minion_instance.functions = functions
            minion_instance.returners = returners
            minion_instance.function_errors = function_errors
            minion_instance.executors = executors

            # Pull in the utils
            minion_instance.utils = salt.loader.utils(minion_instance.opts)

            # Then load the proxy module
            minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)

            # And re-load the modules so the __proxy__ variable gets injected
            functions, returners, function_errors, executors = (
                minion_instance._load_modules(grains=opts['grains'])
            )
            minion_instance.functions = functions
            minion_instance.returners = returners
            minion_instance.function_errors = function_errors
            minion_instance.executors = executors
            minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
            minion_instance.proxy.pack['__salt__'] = minion_instance.functions
            minion_instance.proxy.pack['__ret__'] = minion_instance.returners
            minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']

            # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
            minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
            minion_instance.proxy.pack['__utils__'] = minion_instance.utils

            # Reload all modules so all dunder variables are injected
            minion_instance.proxy.reload_modules()

            fq_proxyname = opts['proxy']['proxytype']

            minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()

            # Initialize the proxied device connection.
            proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
            proxy_init_fn(opts)
        if not hasattr(minion_instance, 'serial'):
            minion_instance.serial = salt.payload.Serial(opts)
        if not hasattr(minion_instance, 'proc_dir'):
            uid = salt.utils.user.get_uid(user=opts.get('user', None))
            minion_instance.proc_dir = (
                salt.minion.get_proc_dir(opts['cachedir'], uid=uid)
            )

    with tornado.stack_context.StackContext(minion_instance.ctx):
        # A list/tuple of functions indicates a compound (multi-function) job.
        if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
            ProxyMinion._thread_multi_return(minion_instance, opts, data)
        else:
            ProxyMinion._thread_return(minion_instance, opts, data)
def thread_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Runs a single function job: records the job in the proc dir,
    enforces blackout mode, resolves and runs the function through the
    configured executors, publishes the result to the master, and feeds
    any requested returners.

    :param cls: The minion class (used only for the process title).
    :param minion_instance: The (possibly freshly built) minion instance.
    :param opts: The minion configuration dict.
    :param data: The decoded job payload.
    '''
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

    if opts['multiprocessing'] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()

        salt.utils.process.daemonize_if(opts)

        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()

    salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))

    # Record this job in the proc dir so `saltutil.running` can see it.
    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job with PID %s', sdata['pid'])
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    ret = {'success': False}
    function_name = data['fun']
    # Executor resolution order: job payload, instance attribute, opts.
    executors = data.get('module_executors') or \
        getattr(minion_instance, 'module_executors', []) or \
        opts.get('module_executors', ['direct_call'])
    # NOTE(review): the membership test below looks like it is missing a
    # .format(executor) -- as written the literal string
    # '{0}.allow_missing_func' is checked, so the filter presumably never
    # matches. Confirm against upstream before changing.
    allow_missing_funcs = any([
        minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
        for executor in executors
        if '{0}.allow_missing_func' in minion_instance.executors
    ])
    if function_name in minion_instance.functions or allow_missing_funcs is True:
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            # use minion_blackout_whitelist from grains if it exists
            if minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            if function_name in minion_instance.functions:
                func = minion_instance.functions[function_name]
                args, kwargs = salt.minion.load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
            else:
                # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
                func = function_name
                args, kwargs = data['arg'], data
            minion_instance.functions.pack['__context__']['retcode'] = 0
            if isinstance(executors, six.string_types):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
                                          format(executors))
            if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                executors[-1] = 'sudo'  # replace the last one with sudo
            log.trace('Executors list %s', executors)  # pylint: disable=no-member

            # Run the executors in order; the first one that returns a
            # non-None value wins.
            for name in executors:
                fname = '{0}.execute'.format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError("Executor '{0}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                if return_data is not None:
                    break

            if isinstance(return_data, types.GeneratorType):
                # Stream generator output: fire each chunk to the master
                # as a progress event, and accumulate the full return.
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                    event_data = {'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret['return'] = iret
            else:
                ret['return'] = return_data

            retcode = minion_instance.functions.pack['__context__'].get(
                'retcode',
                salt.defaults.exitcodes.EX_OK
            )
            if retcode == salt.defaults.exitcodes.EX_OK:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(return_data.get(x, True)
                                      for x in ('result', 'success'))
                except Exception:
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = salt.defaults.exitcodes.EX_GENERIC

            ret['retcode'] = retcode
            ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
        except CommandNotFoundError as exc:
            msg = 'Command required for \'{0}\' not found'.format(
                function_name
            )
            log.debug(msg, exc_info=True)
            ret['return'] = '{0}: {1}'.format(msg, exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except CommandExecutionError as exc:
            log.error(
                'A command in \'%s\' had a problem: %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR: {0}'.format(exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except SaltInvocationError as exc:
            log.error(
                'Problem executing \'%s\': %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                function_name, exc
            )
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except TypeError as exc:
            msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                function_name, exc, func.__doc__ or ''
            )
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = msg
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except Exception:
            msg = 'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
            ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
    else:
        # Unknown function: return its docs (if any) or a helpful
        # "function missing" message, including loader errors.
        docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
            ret['return'] = docs
        else:
            ret['return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: \'{0}\''.format(
                    minion_instance.function_errors[mod_name]
                )
        ret['success'] = False
        ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        ret['out'] = 'nested'

    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'master_id' in data:
        ret['master_id'] = data['master_id']
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
        else:
            log.warning('The metadata parameter must be a dictionary. Ignoring.')
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )

    # Add default returners from minion config
    # Should have been converted to comma-delimited string already
    if isinstance(opts.get('return'), six.string_types):
        if data['ret']:
            data['ret'] = ','.join((data['ret'], opts['return']))
        else:
            data['ret'] = opts['return']

    log.debug('minion return: %s', ret)
    # TODO: make a list? Seems odd to split it this late :/
    if data['ret'] and isinstance(data['ret'], six.string_types):
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        if 'ret_kwargs' in data:
            ret['ret_kwargs'] = data['ret_kwargs']
        ret['id'] = opts['id']
        for returner in set(data['ret'].split(',')):
            try:
                returner_str = '{0}.returner'.format(returner)
                if returner_str in minion_instance.returners:
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(returner_str)
                    log.error(
                        'Returner %s could not be loaded: %s',
                        returner_str, returner_err
                    )
            except Exception as exc:
                log.exception(
                    'The return failed for job %s: %s', data['jid'], exc
                )
def thread_multi_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Runs a compound job (``data['fun']`` is a list of functions), one
    function at a time, accumulating per-function return/retcode/success
    either positionally (``multifunc_ordered``) or keyed by function
    name, then publishes the combined result and feeds any returners.

    :param cls: The minion class (used only for the process title).
    :param minion_instance: The (possibly freshly built) minion instance.
    :param opts: The minion configuration dict.
    :param data: The decoded job payload.
    '''
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

    if opts['multiprocessing'] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()

        salt.utils.process.daemonize_if(opts)

        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()

    salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

    # Record this job in the proc dir so `saltutil.running` can see it.
    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job with PID %s', sdata['pid'])
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))

    multifunc_ordered = opts.get('multifunc_ordered', False)
    num_funcs = len(data['fun'])
    # Ordered mode indexes results positionally; otherwise results are
    # keyed by function name.
    if multifunc_ordered:
        ret = {
            'return': [None] * num_funcs,
            'retcode': [None] * num_funcs,
            'success': [False] * num_funcs
        }
    else:
        ret = {
            'return': {},
            'retcode': {},
            'success': {}
        }

    for ind in range(0, num_funcs):
        if not multifunc_ordered:
            ret['success'][data['fun'][ind]] = False
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                    minion_blackout_violation = True
            elif minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            func = minion_instance.functions[data['fun'][ind]]

            args, kwargs = salt.minion.load_args_and_kwargs(
                func,
                data['arg'][ind],
                data)
            minion_instance.functions.pack['__context__']['retcode'] = 0
            key = ind if multifunc_ordered else data['fun'][ind]
            ret['return'][key] = func(*args, **kwargs)
            retcode = minion_instance.functions.pack['__context__'].get(
                'retcode',
                0
            )
            if retcode == 0:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(ret['return'][key].get(x, True)
                                      for x in ('result', 'success'))
                except Exception:
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = 1
            ret['retcode'][key] = retcode
            ret['success'][key] = retcode == 0
        except Exception as exc:
            # One function failing does not abort the others; its slot
            # carries the traceback instead of a return value.
            trb = traceback.format_exc()
            log.warning('The minion function caused an exception: %s', exc)
            if multifunc_ordered:
                ret['return'][ind] = trb
            else:
                ret['return'][data['fun'][ind]] = trb
    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'metadata' in data:
        ret['metadata'] = data['metadata']
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )
    if data['ret']:
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        if 'ret_kwargs' in data:
            ret['ret_kwargs'] = data['ret_kwargs']
        for returner in set(data['ret'].split(',')):
            ret['id'] = opts['id']
            try:
                minion_instance.returners['{0}.returner'.format(
                    returner
                )](ret)
            except Exception as exc:
                log.error(
                    'The return failed for job %s: %s',
                    data['jid'], exc
                )
def handle_payload(self, payload):
    '''
    Dispatch a single payload received from the master.

    Only 'aes'-encrypted (i.e. verified) payloads are acted upon: when
    the publication targets this minion it is handed to the decoded
    payload handler; otherwise it is optionally traced when zmq
    filtering is enabled.
    '''
    # If it's not AES, and thus has not been verified, we do nothing.
    # In the future, we could add support for some clearfuncs, but
    # the minion currently has no need.
    if payload is None or payload['enc'] != 'aes':
        return
    if self._target_load(payload['load']):
        self._handle_decoded_payload(payload['load'])
    elif self.opts['zmq_filtering']:
        # In the filtering enabled case, we'd like to know when minion sees something it shouldnt
        log.trace(
            'Broadcast message received not for this minion, Load: %s',
            payload['load']
        )
def target_load(self, load):
    # Return True when the published ``load`` is well-formed and targets
    # this minion; False otherwise.
    # Verify that the publication is valid: all mandatory job fields present.
    if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
            or 'arg' not in load:
        return False
    # Verify that the publication applies to this minion
    # It's important to note that the master does some pre-processing
    # to determine which minions to send a request to. So for example,
    # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
    # pre-processing on the master and this minion should not see the
    # publication if the master does not determine that it should.
    if 'tgt_type' in load:
        # Resolve the matcher module for the requested target type.
        match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
        if match_func is None:
            # Unknown target type: treat as not matching.
            return False
        if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
            # These matchers take a key/value delimiter (':' by default).
            delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
            if not match_func(load['tgt'], delimiter=delimiter):
                return False
        elif not match_func(load['tgt']):
            return False
    else:
        # No explicit target type: fall back to glob matching on the id.
        if not self.matchers['glob_match.match'](load['tgt']):
            return False
    return True
|
saltstack/salt
|
salt/modules/win_system.py
|
_convert_date_time_string
|
python
|
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
|
convert string to date time object
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L70-L76
| null |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Only works on Windows Systems with Win32 Modules
    '''
    # Load only on Windows hosts where the win32 libraries imported cleanly.
    if salt.utils.platform.is_windows():
        if HAS_WIN32NET_MODS:
            return __virtualname__
        return False, 'Module win_system: Missing win32 modules'
    return False, 'Module win_system: Requires Windows'
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _to_unicode(instr):
    '''
    Converts from current users character encoding to unicode.
    When instr has a value of None, the return value of the function
    will also be None.
    '''
    # Bytes are assumed to be UTF-8 encoded; already-decoded text (and
    # None) passes straight through.
    if instr is not None and not isinstance(instr, six.text_type):
        return six.text_type(instr, 'utf8')
    return instr
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system.
    Args:
        timeout (int):
            Number of seconds before halting the system. Default is 5 seconds.
        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.
            .. versionadded:: 2015.8.0
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.halt 5 True
    '''
    # Halting is simply a shutdown without a subsequent reboot.
    return shutdown(in_seconds=in_seconds, timeout=timeout)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows
    CLI Example:
    .. code-block:: bash
        salt '*' system.init 3
    '''
    # Runlevels are a sysV concept; the argument is accepted for interface
    # parity with the POSIX module and ignored.
    # TODO: Create a mapping of runlevels to  # pylint: disable=fixme
    # corresponding Windows actions
    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system.
    Args:
        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.
        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.
            .. versionadded:: 2015.8.0
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.poweroff 5
    '''
    # Powering off is a shutdown without the reboot flag.
    return shutdown(in_seconds=in_seconds, timeout=timeout)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False,  # pylint: disable=redefined-outer-name
           only_on_pending_reboot=False):
    '''
    Reboot a running system.
    Args:
        timeout (int):
            Minutes/seconds before rebooting, depending on ``in_seconds``.
            Default is 5 minutes.
        in_seconds (bool):
            ``True`` treats ``timeout`` as seconds rather than minutes.
            Default is ``False``.
            .. versionadded:: 2015.8.0
        wait_for_reboot (bool)
            ``True`` will sleep for timeout + 30 seconds after reboot has
            been initiated. Useful in a highstate when later states must
            only apply after the reboot. Default is ``False``.
            .. versionadded:: 2015.8.0
        only_on_pending_reboot (bool):
            ``True`` only reboots when the system reports a pending reboot,
            e.g. from a final housekeeping state (*order: last*). Default is
            ``False``.
    Returns:
        bool: ``True`` if successful (a reboot will occur), otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.reboot 5
        salt '*' system.reboot 5 True
    Invoking this function from a final housekeeping state:
    .. code-block:: yaml
        final_housekeeping:
           module.run:
              - name: system.reboot
              - only_on_pending_reboot: True
              - order: last
    '''
    success = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
                       only_on_pending_reboot=only_on_pending_reboot)
    if wait_for_reboot:
        # Block long enough for the reboot to actually begin (plus margin)
        # so a highstate does not race past this point.
        time.sleep(_convert_minutes_seconds(timeout, in_seconds) + 30)
    return success
def shutdown(message=None, timeout=5, force_close=True, reboot=False,  # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.
    Args:
        message (str):
            The message to display to the user before shutting down.
        timeout (int):
            How long (minutes by default) the shutdown dialog box is shown.
            While it is displayed the shutdown can be aborted with
            ``system.shutdown_abort``; a timeout of zero shuts down
            immediately and cannot be aborted. Default is 5 minutes.
        in_seconds (bool):
            ``True`` treats ``timeout`` as seconds rather than minutes.
            Default is ``False``.
            .. versionadded:: 2015.8.0
        force_close (bool):
            ``True`` force-closes all open applications; ``False`` prompts
            the user to close them. Default is ``True``.
        reboot (bool):
            ``True`` restarts the computer immediately after shutdown;
            ``False`` powers the system down. Default is ``False``.
        only_on_pending_reboot (bool):
            ``True`` proceeds only when the system reports a pending reboot.
            Default is ``False``.
    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur),
            otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        message = _to_unicode(message)
    seconds = _convert_minutes_seconds(timeout, in_seconds)
    if only_on_pending_reboot and not get_pending_reboot():
        return False
    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, seconds,
                                        force_close, reboot)
    except pywintypes.error as exc:
        # exc.args unpacks as (number, context, message).
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
def shutdown_hard():
    '''
    Shutdown a running system with no timeout or warning.
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.shutdown_hard
    '''
    # timeout=0 skips the dialog entirely; such a shutdown cannot be aborted.
    return shutdown(timeout=0)
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.shutdown_abort
    '''
    try:
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        # exc.args unpacks as (number, context, message).
        (number, context, message) = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
def lock():
    '''
    Lock the workstation.
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.lock
    '''
    # Direct user32 call; the Win32 BOOL result is returned as-is.
    return windll.user32.LockWorkStation()
def set_computer_name(name):
    '''
    Set the Windows computer name
    Args:
        name (str):
            The new name to give the computer. Requires a reboot to take effect.
    Returns:
        dict:
            Returns a dictionary containing the old and new names if successful.
            ``False`` if not.
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.set_computer_name 'DavesComputer'
    '''
    if six.PY2:
        name = _to_unicode(name)
    # SetComputerNameExW returns nonzero on success; the rename only takes
    # effect after a reboot, so report both current and pending names.
    if windll.kernel32.SetComputerNameExW(
            win32con.ComputerNamePhysicalDnsHostname, name):
        ret = {'Computer Name': {'Current': get_computer_name()}}
        pending = get_pending_computer_name()
        if pending not in (None, False):
            ret['Computer Name']['Pending'] = pending
        return ret
    return False
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed, and the
    change is pending a system reboot, this function will return the pending
    computer name. Otherwise, ``None`` will be returned. If there was an error
    retrieving the pending computer name, ``False`` will be returned, and an
    error message will be logged to the minion log.
    Returns:
        str:
            Returns the pending name if pending restart. Returns ``None`` if not
            pending restart.
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    # The name that will apply after the next reboot is stored in the Tcpip
    # parameters key.
    pending = __utils__['reg.read_value'](
        'HKLM',
        r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
        'NV Hostname')['vdata']
    if pending:
        # Equal to the current name means no rename is actually pending.
        return pending if pending != current else None
    # NOTE(review): an empty/missing registry value also reaches here and is
    # reported as False (the documented error return) — confirm intended.
    return False
def get_computer_name():
    '''
    Get the Windows computer name
    Returns:
        str: Returns the computer name if found. Otherwise returns ``False``.
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.get_computer_name
    '''
    # An empty result is normalized to False for CLI friendliness.
    name = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname)
    if name:
        return name
    return False
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description
    Args:
        desc (str):
            The computer description
    Returns:
        str: Description if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    if six.PY2:
        desc = _to_unicode(desc)
    # Make sure the system exists
    # Return an object containing current information array for the computer
    system_info = win32net.NetServerGetInfo(None, 101)
    # If desc is passed, decode it for unicode
    if desc is None:
        return False
    system_info['comment'] = desc
    # Apply new settings
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        # exc.args unpacks as (number, context, message).
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    # Read the description back so the caller sees what was persisted.
    return {'Computer Description': get_computer_desc()}
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description')  # pylint: disable=invalid-name
def get_system_info():
    '''
    Get system information.
    Returns:
        dict: Dictionary containing information about the system to include
        name, description, version, etc...
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.get_system_info
    '''
    def byte_calc(val):
        # Render a raw byte count using the largest suitable binary unit.
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)
    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}
    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}
    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}
    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}
    # Connect to WMI
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        # Operating-system level facts come from Win32_OperatingSystem.
        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}
        # Hardware/chassis level facts come from Win32_ComputerSystem.
        system = conn.Win32_ComputerSystem()[0]
        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]
        # NOTE(review): ChassisSKUNumber appears only on newer Windows
        # releases — confirm against the minimum supported OS.
        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })
        # Get processor information
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        # Manufacturer/clock speed are taken from the first socket only.
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        # Counts are summed across all physical processors (sockets).
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore
        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})
    # Convert WMI timestamps ('YYYYMMDDHHMMSS.ffffff-UUU') to readable form.
    ret['install_date'] = _convert_date_time_string(ret['install_date'])
    ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
def get_computer_desc():
    '''
    Get the Windows computer description
    Returns:
        str: Returns the computer description if found. Otherwise returns
        ``False``.
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.get_computer_desc
    '''
    # get_system_info() sources the description from Win32_OperatingSystem;
    # a missing description is normalized to False.
    desc = get_system_info()['description']
    if desc is None:
        return False
    return desc
get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description')  # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion
    .. versionadded:: 2016.3.0
    Returns:
        str: Returns the hostname of the windows minion
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the native ``hostname`` utility rather than querying WMI.
    cmd = 'hostname'
    ret = __salt__['cmd.run'](cmd=cmd)
    return ret
def set_hostname(hostname):
    '''
    Set the hostname of the windows minion, requires a restart before this will
    be updated.
    .. versionadded:: 2016.3.0
    Args:
        hostname (str): The hostname to set
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.set_hostname newhostname
    '''
    # Rename through WMI; the change only takes effect after a reboot.
    with salt.utils.winapi.Com():
        comp = wmi.WMI().Win32_ComputerSystem()[0]
        return comp.Rename(Name=hostname)
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.
    Args:
        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``
        username (str):
            Username of an account which is authorized to join computers to the
            specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``
        password (str):
            Password of the specified user
        account_ou (str):
            The DN of the OU below which the account for this computer should be
            created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``
        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``
        restart (bool):
            ``True`` will restart the computer after a successful join. Default
            is ``False``
            .. versionadded:: 2015.8.2/2015.5.7
    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        # Normalize byte-string inputs to unicode on Python 2.
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)
    status = get_domain_workgroup()
    if 'Domain' in status:
        # Already a domain member; nothing to do if it is the same domain.
        if status['Domain'] == domain:
            return 'Already joined to {0}'.format(domain)
    # Qualify a bare user name with the target domain.
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)
    if username and password is None:
        return 'Must specify a password if you pass a username'
    # remove any escape characters
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.split('\\')
        account_ou = ''.join(account_ou)
    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)
    if not err:
        ret = {'Domain': domain,
               'Restart': False}
        if restart:
            ret['Restart'] = reboot()
        return ret
    # Translate the NetJoin status code into a readable Windows message.
    raise CommandExecutionError(win32api.FormatMessage(err).rstrip())
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper function to join the domain.
    Args:
        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``
        username (str): Username of an account which is authorized to join
            computers to the specified domain. Need to be either fully qualified
            like ``user@domain.tld`` or simply ``user``
        password (str): Password of the specified user
        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``
        account_exists (bool): If set to ``True`` the computer will only join
            the domain if the account already exists. If set to ``False`` the
            computer account will be created if it does not exist, otherwise it
            will use the existing account. Default is False.
    Returns:
        int: The NetJoin status code (0 on success).
    '''
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name
    # Always join, even if already joined, and pick up a pending rename.
    join_options = (NETSETUP_JOIN_DOMAIN |
                    NETSETUP_DOMAIN_JOIN_IF_JOINED |
                    NETSETUP_JOIN_WITH_NEW_NAME)
    if not account_exists:
        join_options |= NETSETUP_ACCOUNT_CREATE
    with salt.utils.winapi.Com():
        comp = wmi.WMI().Win32_ComputerSystem()[0]
        # JoinDomainOrWorkgroup returns a one-element tuple such as (0,);
        # return the contained status code.
        return comp.JoinDomainOrWorkgroup(
            Name=domain, Password=password, UserName=username, AccountOU=account_ou,
            FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.
    Args:
        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this can
            be ``None``.
        password (str):
            The password of the specified user
        domain (str):
            The domain from which to unjoin the computer. Can be ``None``
        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``
            .. versionadded:: 2015.8.2/2015.5.7
        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``
        restart (bool):
            ``True`` will restart the computer after successful unjoin. Default
            is ``False``
            .. versionadded:: 2015.8.2/2015.5.7
    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.unjoin_domain restart=True
        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)
    status = get_domain_workgroup()
    if 'Workgroup' in status:
        # Already in a workgroup; nothing to do if it is the same one.
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)
    # Qualify a bare user name with the domain, if one was supplied.
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'
    if username and password is None:
        return 'Must specify a password if you pass a username'
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name
    unjoin_options = 0x0
    if disable:
        unjoin_options |= NETSETUP_ACCT_DELETE
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)
        # you have to do this because UnjoinDomainOrWorkgroup returns a
        # strangely formatted value that looks like (0,)
        if not err[0]:
            err = comp.JoinDomainOrWorkgroup(Name=workgroup)
            if not err[0]:
                ret = {'Workgroup': workgroup,
                       'Restart': False}
                if restart:
                    ret['Restart'] = reboot()
                return ret
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to join the computer to %s', workgroup)
            return False
        log.error(win32api.FormatMessage(err[0]).rstrip())
        # BUG FIX: when the machine is currently in a (different) workgroup,
        # ``status`` has no 'Domain' key and the original ``status['Domain']``
        # raised KeyError while reporting the failure. Fall back to the
        # ``domain`` argument instead of raising.
        log.error('Failed to unjoin computer from %s',
                  status.get('Domain', domain))
        return False
def get_domain_workgroup():
    '''
    Get the domain or workgroup the computer belongs to.
    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2
    Returns:
        str: The name of the domain or workgroup
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.get_domain_workgroup
    '''
    # Inspect the first Win32_ComputerSystem instance: a machine is either a
    # domain member or a workgroup member, never both.
    with salt.utils.winapi.Com():
        for computer in wmi.WMI().Win32_ComputerSystem():
            if computer.PartOfDomain:
                return {'Domain': computer.Domain}
            return {'Workgroup': computer.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Set the domain or workgroup the computer belongs to.
    .. versionadded:: 2019.2.0
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)
    # Initialize COM and join the (upper-cased) workgroup through WMI.
    with salt.utils.winapi.Com():
        comp = wmi.WMI().Win32_ComputerSystem()[0]
        res = comp.JoinDomainOrWorkgroup(Name=workgroup.upper())
    # res is a one-element tuple such as (0,); zero means success.
    return not res[0]
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the system time.
    Returns:
        str: Returns the system time in HH:MM:SS AM/PM format.
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.get_system_time
    '''
    now = win32api.GetLocalTime()
    hours = int(now[4])
    # Convert the 24-hour value to a 12-hour clock: 0 -> 12 AM, 12 -> 12 PM.
    meridian = 'PM' if hours >= 12 else 'AM'
    hours = hours % 12 or 12
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.
    Args:
        newtime (str):
            The time to set. Can be any of the following formats:
            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt 'minion-id' system.set_system_time 12:01
    '''
    # Accept both 12-hour (with AM/PM) and 24-hour forms.
    dt_obj = _try_parse_datetime(
        newtime, ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M'])
    if dt_obj is None:
        return False
    # Delegate the actual clock change to set_system_date_time().
    return set_system_date_time(hours=dt_obj.hour,
                                minutes=dt_obj.minute,
                                seconds=dt_obj.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for that
    element will be used. For example, if you don't pass the year, the current
    system year will be used. (Used by set_system_date and set_system_time)
    Args:
        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    Raises:
        CommandExecutionError: if the Win32 call to set the time fails.
    CLI Example:
    .. code-block:: bash
        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time so unspecified elements keep their value.
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    # Check for passed values. If not passed, use current values.
    # Note: GetLocalTime index 2 is the day-of-week and is skipped.
    if years is None:
        years = date_time[0]
    if months is None:
        months = date_time[1]
    if days is None:
        days = date_time[3]
    if hours is None:
        hours = date_time[4]
    if minutes is None:
        minutes = date_time[5]
    if seconds is None:
        seconds = date_time[6]
    try:
        # Mirror of the Win32 SYSTEMTIME structure expected by SetLocalTime.
        class SYSTEMTIME(ctypes.Structure):
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # BUG FIX: the original used ``succeeded is not 0`` — an identity
        # comparison on an int that only works by CPython's small-int cache
        # (and warns on Python 3.8+). Compare by value instead.
        if succeeded != 0:
            return True
        log.error('Failed to set local time')
        raise CommandExecutionError(
            win32api.FormatMessage(succeeded).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date
    Returns:
        str: Returns the system date
    CLI Example:
    .. code-block:: bash
        salt '*' system.get_system_date
    '''
    # GetLocalTime tuple layout: (year, month, weekday, day, ...).
    now = win32api.GetLocalTime()
    month, day, year = now[1], now[3], now[0]
    return '{0:02d}/{1:02d}/{2:04d}'.format(month, day, year)
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.
    Args:
        newdate (str):
            The date to set. Can be any of the following formats
            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.set_system_date '03-28-13'
    '''
    # Try the supported date layouts in order until one parses.
    dt_obj = _try_parse_datetime(newdate,
                                 ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                                  '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d'])
    if dt_obj is None:
        return False
    # Delegate the actual clock change to set_system_date_time().
    return set_system_date_time(years=dt_obj.year,
                                months=dt_obj.month,
                                days=dt_obj.day)
def start_time_service():
    '''
    Start the Windows time service
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.start_time_service
    '''
    # Delegate to the service module; 'w32time' is the Windows Time service.
    return __salt__['service.start']('w32time')
def stop_time_service():
    '''
    Stop the Windows time service
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.stop_time_service
    '''
    # Delegate to the service module; 'w32time' is the Windows Time service.
    return __salt__['service.stop']('w32time')
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.
    .. versionadded:: 2016.11.0
    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
        otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.get_pending_component_servicing
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'
    # So long as the registry key exists, a reboot is pending.
    if __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key exists: %s', key)
        return True
    log.debug('Key does not exist: %s', key)
    return False
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.
    .. versionadded:: 2016.11.0
    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
        ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'
    # A pending domain join leaves either of these subkeys behind; the
    # presence of one is enough to signal a required reboot.
    for sub_key in ('{0}\\AvoidSpnSet'.format(base_key),
                    '{0}\\JoinDomain'.format(base_key)):
        if __utils__['reg.key_exists']('HKLM', sub_key):
            log.debug('Key exists: %s', sub_key)
            return True
        log.debug('Key does not exist: %s', sub_key)
    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    vnames = ('PendingFileRenameOperations', 'PendingFileRenameOperations2')
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'

    # A reboot is pending if either value name exists with real value data.
    for vname in vnames:
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and reg_ret['vdata'] != '(value not set)':
            return True

    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    vname = 'CurrentRebootAttempts'
    key = r'SOFTWARE\Microsoft\ServerManager'

    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False

    log.debug('Found key: %s', key)
    # The value data can be '(value not set)' or otherwise non-numeric; no
    # reboot is pending in that case, so a failed int() cast means False.
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
def get_pending_update():
    '''
    Determine whether there are pending Windows updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'

    # The mere existence of this registry key indicates a pending reboot.
    if __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key exists: %s', key)
        return True

    log.debug('Key does not exist: %s', key)
    return False
# Volatile registry location used to remember "reboot required" state for the
# lifetime of the current boot session only (volatile keys are discarded at
# reboot). Used by set/get_reboot_required_witnessed below.
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    Record that an event indicating a required reboot was witnessed. A
    volatile value is written under
    *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*, so the
    flag automatically disappears at the next boot.

    Currently used whenever an install completes with exit code 3010.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    # volatile=True makes the key session-scoped: it is not persisted across
    # reboots, which is exactly the lifetime we want for this flag.
    return __utils__['reg.set_value'](hive='HKLM',
                                      key=MINION_VOLATILE_KEY,
                                      volatile=True,
                                      vname=REBOOT_REQUIRED_NAME,
                                      vdata=1,
                                      vtype='REG_DWORD')
def get_reboot_required_witnessed():
    '''
    Determine whether, at any time during the current boot session, the salt
    minion witnessed an event indicating that a reboot is required (e.g. an
    install that exited with code 3010).

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Reboot required`` registry flag is set to
            ``1``, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    entry = __utils__['reg.read_value'](hive='HKLM',
                                        key=MINION_VOLATILE_KEY,
                                        vname=REBOOT_REQUIRED_NAME)
    # The volatile flag is written as DWORD 1 by set_reboot_required_witnessed
    return entry['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending by consulting every known
    pending-reboot indicator.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Ordered from most to least likely so the common cases short-circuit
    # early; any() stops at the first truthy check.
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)
    return any(check() for check in checks)
|
saltstack/salt
|
salt/modules/win_system.py
|
_to_unicode
|
python
|
def _to_unicode(instr):
    '''
    Convert *instr* from the current user's character encoding to unicode.

    Returns ``None`` unchanged when *instr* is ``None``; values that are
    already unicode are passed through untouched.
    '''
    # Nothing to do for None or already-decoded text
    if instr is None or isinstance(instr, six.text_type):
        return instr
    return six.text_type(instr, 'utf8')
|
Converts from current users character encoding to unicode.
When instr has a value of None, the return value of the function
will also be None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L79-L88
| null |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Only load this module on Windows systems where the win32 modules
    (wmi, win32api, win32net, ...) imported at the top of the file are
    available.
    '''
    if not salt.utils.platform.is_windows():
        return False, 'Module win_system: Requires Windows'

    # HAS_WIN32NET_MODS is set by the try/except import block at module level
    if not HAS_WIN32NET_MODS:
        return False, 'Module win_system: Missing win32 modules'

    return __virtualname__
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system by delegating to ``system.shutdown``.

    Args:
        timeout (int):
            Number of seconds before halting the system. Default is 5 seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.halt 5 True
    '''
    # A halt is simply a shutdown without the reboot flag
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows.

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # TODO: Create a mapping of runlevels to  # pylint: disable=fixme
    # corresponding Windows actions
    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system by delegating to ``system.shutdown``.

    Args:
        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.poweroff 5
    '''
    # Power-off is a shutdown without the reboot flag
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False,  # pylint: disable=redefined-outer-name
           only_on_pending_reboot=False):
    '''
    Reboot a running system.

    Args:
        timeout (int):
            The number of minutes/seconds before rebooting the system. Use of
            minutes or seconds depends on the value of ``in_seconds``. Default
            is 5 minutes.

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        wait_for_reboot (bool):
            ``True`` will sleep for timeout + 30 seconds after reboot has been
            initiated. This is useful for use in a highstate when you have
            states that should only apply after the reboot. Default is
            ``False``.

            .. versionadded:: 2015.8.0

        only_on_pending_reboot (bool):
            If ``True``, the reboot will only proceed when the system reports
            a pending reboot — handy for a final housekeeping state executed
            with *order: last*. Default is ``False``.

    Returns:
        bool: ``True`` if successful (a reboot will occur), otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.reboot 5
        salt '*' system.reboot 5 True

    Invoking this function from a final housekeeping state:

    .. code-block:: yaml

        final_housekeeping:
           module.run:
              - name: system.reboot
              - only_on_pending_reboot: True
              - order: last
    '''
    result = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
                      only_on_pending_reboot=only_on_pending_reboot)

    if wait_for_reboot:
        # Block long enough for the reboot to actually begin, so a highstate
        # doesn't continue running states on the pre-reboot system.
        time.sleep(_convert_minutes_seconds(timeout, in_seconds) + 30)

    return result
def shutdown(message=None, timeout=5, force_close=True, reboot=False,  # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.

    Args:

        message (str):
            The message to display to the user before shutting down.

        timeout (int):
            The length of time that the shutdown dialog box should be
            displayed. While this dialog box is displayed, the shutdown can be
            aborted using the ``system.shutdown_abort`` function.

            If timeout is not zero, InitiateSystemShutdown displays a dialog
            box on the specified computer. The dialog box displays the name of
            the user who called the function, the message specified by the
            lpMessage parameter, and prompts the user to log off. The dialog
            box beeps when it is created and remains on top of other windows
            (system modal). The dialog box can be moved but not closed. A
            timer counts down the remaining time before the shutdown occurs.

            If timeout is zero, the computer shuts down immediately without
            displaying the dialog box and cannot be stopped by
            ``system.shutdown_abort``.

            Default is 5 minutes

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        force_close (bool):
            ``True`` will force close all open applications. ``False`` will
            display a dialog box instructing the user to close open
            applications. Default is ``True``.

        reboot (bool):
            ``True`` restarts the computer immediately after shutdown.
            ``False`` powers down the system. Default is ``False``.

        only_on_pending_reboot (bool):
            If ``True`` the shutdown will only proceed if there is a reboot
            pending. ``False`` will shutdown the system regardless. To
            optionally shutdown in a highstate, consider using the shutdown
            state instead of this module. Default is ``False``.

    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur),
            otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        # On Py2 the message may arrive as a byte string in the user's locale
        # encoding; normalize it to unicode first.
        message = _to_unicode(message)

    timeout = _convert_minutes_seconds(timeout, in_seconds)

    if only_on_pending_reboot and not get_pending_reboot():
        # Caller asked to shut down only when a reboot is pending; nothing to
        # do here.
        return False

    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
                                        force_close, reboot)
        return True
    except pywintypes.error as exc:
        # exc.args is (error number, failing API name, message text)
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def shutdown_hard():
    '''
    Shutdown a running system immediately, with no timeout or warning dialog.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown_hard
    '''
    # timeout=0 skips the warning dialog and cannot be aborted
    return shutdown(timeout=0)
def shutdown_abort():
    '''
    Abort a shutdown. Only possible while the countdown dialog box is being
    displayed to the user; once the shutdown has initiated it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        # exc.args is (error number, failing API name, message text)
        (number, context, message) = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
def lock():
    '''
    Lock the workstation.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.lock
    '''
    # Direct call into user32.dll; no parameters needed
    return windll.user32.LockWorkStation()
def set_computer_name(name):
    '''
    Set the Windows computer name. Requires a reboot to take effect.

    Args:
        name (str): The new name to give the computer.

    Returns:
        dict:
            A dictionary containing the current and (if any) pending names on
            success, ``False`` on failure.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_name 'DavesComputer'
    '''
    if six.PY2:
        name = _to_unicode(name)

    # SetComputerNameExW returns 0 on failure
    if not windll.kernel32.SetComputerNameExW(
            win32con.ComputerNamePhysicalDnsHostname, name):
        return False

    ret = {'Computer Name': {'Current': get_computer_name()}}
    pending = get_pending_computer_name()
    # get_pending_computer_name may return None (no pending change) or False
    # (lookup failed); only report a real pending name
    if pending not in (None, False):
        ret['Computer Name']['Pending'] = pending
    return ret
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed, and
    the change is pending a system reboot, this function will return the
    pending computer name. Otherwise, ``None`` will be returned. If there was
    an error retrieving the pending computer name, ``False`` will be returned,
    and an error message will be logged to the minion log.

    Returns:
        str:
            Returns the pending name if pending restart. Returns ``None`` if
            not pending restart.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    # 'NV Hostname' holds the hostname that takes effect at the next boot
    pending = __utils__['reg.read_value'](
        'HKLM',
        r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
        'NV Hostname')['vdata']
    if pending:
        # Only a name differing from the current one counts as "pending"
        return pending if pending != current else None
    # NOTE(review): returns False when 'NV Hostname' is empty/unreadable,
    # which differs from the docstring's "None when not pending" — confirm
    # callers depend on the distinction before unifying.
    return False
def get_computer_name():
    '''
    Get the Windows computer name.

    Returns:
        str: The computer name if found, otherwise ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_name
    '''
    # An empty result collapses to False
    name = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname)
    return name or False
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description.

    Args:
        desc (str): The computer description. ``None`` is a no-op that
            returns ``False``.

    Returns:
        dict: ``{'Computer Description': <new description>}`` on success,
            otherwise ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    if six.PY2:
        desc = _to_unicode(desc)

    # Fetch the current server info block (level 101) for this machine
    system_info = win32net.NetServerGetInfo(None, 101)

    if desc is None:
        return False

    system_info['comment'] = desc

    # Push the modified info block back
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        # exc.args is (error number, failing API name, message text)
        (err_num, err_ctx, err_msg) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', err_num)
        log.error('ctx: %s', err_ctx)
        log.error('msg: %s', err_msg)
        return False

    return {'Computer Description': get_computer_desc()}
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description') # pylint: disable=invalid-name
def get_system_info():
    '''
    Get system information.

    Returns:
        dict: Dictionary containing information about the system to include
            name, description, version, etc...

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_info
    '''
    def byte_calc(val):
        # Render a raw byte count as a human-readable string (B/KB/MB/GB/TB)
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)

    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}
    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}
    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}
    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}

    # Connect to WMI; the context manager handles COM init/uninit
    with salt.utils.winapi.Com():
        conn = wmi.WMI()

        # Operating-system level facts
        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}

        # Computer-system (hardware/domain) level facts; note `system` is
        # rebound to a different WMI class instance here
        system = conn.Win32_ComputerSystem()[0]

        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]

        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })

        # Get processor information; totals are accumulated over all physical
        # processors, manufacturer/clock-speed are taken from the first one
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore

        # BIOS facts
        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})

        # Reformat the raw WMI datetime strings into 'YYYY-MM-DD HH:MM:SS'
        ret['install_date'] = _convert_date_time_string(ret['install_date'])
        ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
def get_computer_desc():
    '''
    Get the Windows computer description.

    Returns:
        str: The computer description if found, otherwise ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_desc
    '''
    # Pull just the 'description' field out of the full system info
    desc = get_system_info()['description']
    return desc if desc is not None else False
get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description') # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion.

    .. versionadded:: 2016.3.0

    Returns:
        str: The hostname of the windows minion

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the built-in 'hostname' command
    return __salt__['cmd.run'](cmd='hostname')
def set_hostname(hostname):
    '''
    Set the hostname of the windows minion. Requires a restart before the
    change takes effect.

    .. versionadded:: 2016.3.0

    Args:
        hostname (str): The hostname to set

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_hostname newhostname
    '''
    with salt.utils.winapi.Com():
        # Rename the machine via the Win32_ComputerSystem WMI class
        comp = wmi.WMI().Win32_ComputerSystem()[0]
        return comp.Rename(Name=hostname)
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.

    Args:

        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str):
            Username of an account which is authorized to join computers to
            the specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``

        password (str):
            Password of the specified user

        account_ou (str):
            The DN of the OU below which the account for this computer should
            be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``

        restart (bool):
            ``True`` will restart the computer after a successful join.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the underlying JoinDomainOrWorkgroup call
            returns a nonzero Windows error code.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        # Normalize all text parameters to unicode on Py2
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)

    # Short-circuit if the machine is already a member of the target domain
    status = get_domain_workgroup()
    if 'Domain' in status:
        if status['Domain'] == domain:
            return 'Already joined to {0}'.format(domain)

    # Qualify a bare username with the target domain (user -> user@domain)
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # remove any escape characters from the OU distinguished name
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.split('\\')
        account_ou = ''.join(account_ou)

    # _join_domain returns 0 on success, a Windows error code otherwise
    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)

    if not err:
        ret = {'Domain': domain,
               'Restart': False}
        if restart:
            ret['Restart'] = reboot()
        return ret

    raise CommandExecutionError(win32api.FormatMessage(err).rstrip())
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper function to join the domain.

    Args:
        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str): Username of an account which is authorized to join
            computers to the specified domain. Need to be either fully
            qualified like ``user@domain.tld`` or simply ``user``

        password (str): Password of the specified user

        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool): If set to ``True`` the computer will only join
            the domain if the account already exists. If set to ``False`` the
            computer account will be created if it does not exist, otherwise
            it will use the existing account. Default is False.

    Returns:
        int: ``0`` on success, otherwise a Windows error code suitable for
            ``win32api.FormatMessage`` (see caller ``join_domain``).
    '''
    # NetJoinDomain option flags (see Win32 NetSetup API)
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name

    join_options = 0x0
    join_options |= NETSETUP_JOIN_DOMAIN
    join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
    join_options |= NETSETUP_JOIN_WITH_NEW_NAME
    if not account_exists:
        # Allow the join to create the computer account if it's missing
        join_options |= NETSETUP_ACCOUNT_CREATE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]

        # Return the results of the command as an error
        # JoinDomainOrWorkgroup returns a strangely formatted value that looks like
        # (0,) so return the first item
        return comp.JoinDomainOrWorkgroup(
            Name=domain, Password=password, UserName=username, AccountOU=account_ou,
            FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:

        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this
            can be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``

        restart (bool):
            ``True`` will restart the computer after successful unjoin.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        # Normalize all text parameters to unicode on Py2
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)

    # Short-circuit if already a member of the target workgroup
    status = get_domain_workgroup()
    if 'Workgroup' in status:
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)

    # Qualify a bare username with the domain (user -> user@domain)
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # NetUnjoinDomain option flag: also disable the AD computer account
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name

    unjoin_options = 0x0
    if disable:
        unjoin_options |= NETSETUP_ACCT_DELETE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)

        # you have to do this because UnjoinDomainOrWorkgroup returns a
        # strangely formatted value that looks like (0,)
        if not err[0]:
            # Unjoined successfully; now join the requested workgroup
            err = comp.JoinDomainOrWorkgroup(Name=workgroup)
            if not err[0]:
                ret = {'Workgroup': workgroup,
                       'Restart': False}
                if restart:
                    ret['Restart'] = reboot()
                return ret
            else:
                log.error(win32api.FormatMessage(err[0]).rstrip())
                log.error('Failed to join the computer to %s', workgroup)
                return False
        else:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to unjoin computer from %s', status['Domain'])
            return False
def get_domain_workgroup():
    '''
    Get the domain or workgroup the computer belongs to.

    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2

    Returns:
        dict: ``{'Domain': <name>}`` or ``{'Workgroup': <name>}``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_domain_workgroup
    '''
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        # A machine is either domain-joined or in a workgroup; report the
        # first (and only) Win32_ComputerSystem instance accordingly.
        for computer in conn.Win32_ComputerSystem():
            if computer.PartOfDomain:
                return {'Domain': computer.Domain}
            return {'Workgroup': computer.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Set the workgroup the computer belongs to.

    .. versionadded:: 2019.2.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)

    # Initialize COM and join the (upper-cased) workgroup via WMI
    with salt.utils.winapi.Com():
        comp = wmi.WMI().Win32_ComputerSystem()[0]
        res = comp.JoinDomainOrWorkgroup(Name=workgroup.upper())

    # JoinDomainOrWorkgroup returns a tuple like (0,); 0 means success
    return not res[0]
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the system time.

    Returns:
        str: The system time in HH:MM:SS AM/PM format.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    now = win32api.GetLocalTime()
    hours = int(now[4])
    # Convert 24-hour clock to 12-hour + meridian: 0 -> 12 AM, 12 -> 12 PM
    meridian = 'PM' if hours >= 12 else 'AM'
    hours = hours % 12 or 12
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.

    Args:
        newtime (str):
            The time to set. Can be any of the following formats:

            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_system_time 12:01
    '''
    # Accept 12-hour (with meridian) and 24-hour clock strings
    fmts = ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M']
    parsed = _try_parse_datetime(newtime, fmts)
    if parsed is None:
        return False

    # Delegate to set_system_date_time(), keeping the current date
    return set_system_date_time(hours=parsed.hour,
                                minutes=parsed.minute,
                                seconds=parsed.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for
    that element will be used. For example, if you don't pass the year, the
    current system year will be used. (Used by set_system_date and
    set_system_time)

    Args:
        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If SetLocalTime fails or an OSError occurs
            while applying the new time.

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time so unspecified elements can be preserved
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    # Check for passed values. If not passed, use current values.
    # GetLocalTime returns (year, month, day-of-week, day, hour, minute,
    # second, millisecond); index 2 is the day of the week, hence the skip.
    if years is None:
        years = date_time[0]
    if months is None:
        months = date_time[1]
    if days is None:
        days = date_time[3]
    if hours is None:
        hours = date_time[4]
    if minutes is None:
        minutes = date_time[5]
    if seconds is None:
        seconds = date_time[6]

    try:
        class SYSTEMTIME(ctypes.Structure):
            # Mirrors the Win32 SYSTEMTIME struct expected by SetLocalTime
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # BUGFIX: the original used ``if succeeded is not 0`` -- an identity
        # comparison with an int literal, which only works due to CPython's
        # small-int interning and raises a SyntaxWarning on Python 3.8+.
        # Compare the return value (nonzero means success) by value instead.
        if succeeded != 0:
            return True
        log.error('Failed to set local time')
        raise CommandExecutionError(
            win32api.FormatMessage(succeeded).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date

    Returns:
        str: Returns the system date

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_system_date
    '''
    # GetLocalTime -> (year, month, day-of-week, day, ...); render MM/DD/YYYY
    local = win32api.GetLocalTime()
    month, day, year = local[1], local[3], local[0]
    return '{0:02d}/{1:02d}/{2:04d}'.format(month, day, year)
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.

    Args:

        newdate (str):
            The date to set. Can be any of the following formats

            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date '03-28-13'
    '''
    # Try both dash- and slash-separated forms, 2- and 4-digit years
    accepted_formats = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                        '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']
    parsed = _try_parse_datetime(newdate, accepted_formats)
    if parsed is None:
        return False
    # Delegate the actual change; time-of-day fields keep current values
    return set_system_date_time(years=parsed.year,
                                months=parsed.month,
                                days=parsed.day)
def start_time_service():
    '''
    Start the Windows time service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.start_time_service
    '''
    # 'w32time' is the Windows Time service; delegate to the service module
    service_name = 'w32time'
    return __salt__['service.start'](service_name)
def stop_time_service():
    '''
    Stop the Windows time service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.stop_time_service
    '''
    # 'w32time' is the Windows Time service; delegate to the service module
    service_name = 'w32time'
    return __salt__['service.stop'](service_name)
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_component_servicing
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'
    # The mere existence of this registry key signals a pending reboot
    if __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key exists: %s', key)
        return True
    log.debug('Key does not exist: %s', key)
    return False
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'
    avoid_key = r'{0}\AvoidSpnSet'.format(base_key)
    join_key = r'{0}\JoinDomain'.format(base_key)
    # The presence of either subkey signals a pending reboot
    for candidate in (avoid_key, join_key):
        if __utils__['reg.key_exists']('HKLM', candidate):
            log.debug('Key exists: %s', candidate)
            return True
        log.debug('Key does not exist: %s', candidate)
    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'
    # A reboot is pending when either value name exists AND carries data
    for vname in ('PendingFileRenameOperations', 'PendingFileRenameOperations2'):
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)'):
            return True
    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    key = r'SOFTWARE\Microsoft\ServerManager'
    reg_ret = __utils__['reg.read_value']('HKLM', key, 'CurrentRebootAttempts')
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False
    log.debug('Found key: %s', key)
    # The value data can be '(value not set)', which is not a pending reboot;
    # treat anything that fails the int() cast the same way
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
def get_pending_update():
    '''
    Determine whether there are pending updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'
    # The mere existence of this registry key signals a pending reboot
    if __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key exists: %s', key)
        return True
    log.debug('Key does not exist: %s', key)
    return False
# Volatile registry key used by the salt-minion to persist flags only for
# the current boot session; Windows discards volatile keys on reboot.
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
# Value name under MINION_VOLATILE_KEY that marks a witnessed
# "reboot required" event (see set/get_reboot_required_witnessed).
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    Record that an event indicating a required reboot was witnessed. This
    relies on the salt-minion's ability to create the following volatile
    registry key in the *HKLM* hive:

       *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*

    Because the key is volatile it does not survive a reboot. Within the key,
    the name *'Reboot required'* is set to *1*. Currently used whenever an
    install completes with exit code 3010; may be extended in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    # volatile=True makes the key disappear at the next boot, which is
    # exactly the lifetime this flag needs
    return __utils__['reg.set_value'](hive='HKLM',
                                      key=MINION_VOLATILE_KEY,
                                      vname=REBOOT_REQUIRED_NAME,
                                      vdata=1,
                                      vtype='REG_DWORD',
                                      volatile=True)
def get_reboot_required_witnessed():
    '''
    Determine if, at any time during the current boot session, the salt
    minion witnessed an event indicating that a reboot is required.

    Returns ``True`` if an install completed with exit code 3010 during the
    current boot session; may be extended in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Reboot required`` registry flag is set to
        ``1``, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    reg_data = __utils__['reg.read_value'](hive='HKLM',
                                           key=MINION_VOLATILE_KEY,
                                           vname=REBOOT_REQUIRED_NAME)
    # The flag is written as REG_DWORD 1 by set_reboot_required_witnessed
    return reg_data['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Checks ordered from most to least likely, so the common cases
    # short-circuit early
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)
    return any(check() for check in checks)
|
saltstack/salt
|
salt/modules/win_system.py
|
reboot
|
python
|
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False, # pylint: disable=redefined-outer-name
only_on_pending_reboot=False):
'''
Reboot a running system.
Args:
timeout (int):
The number of minutes/seconds before rebooting the system. Use of
minutes or seconds depends on the value of ``in_seconds``. Default
is 5 minutes.
in_seconds (bool):
``True`` will cause the ``timeout`` parameter to be in seconds.
``False`` will be in minutes. Default is ``False``.
.. versionadded:: 2015.8.0
wait_for_reboot (bool)
``True`` will sleep for timeout + 30 seconds after reboot has been
initiated. This is useful for use in a highstate. For example, you
may have states that you want to apply only after the reboot.
Default is ``False``.
.. versionadded:: 2015.8.0
only_on_pending_reboot (bool):
If this is set to ``True``, then the reboot will only proceed
if the system reports a pending reboot. Setting this parameter to
``True`` could be useful when calling this function from a final
housekeeping state intended to be executed at the end of a state run
(using *order: last*). Default is ``False``.
Returns:
bool: ``True`` if successful (a reboot will occur), otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.reboot 5
salt '*' system.reboot 5 True
Invoking this function from a final housekeeping state:
.. code-block:: yaml
final_housekeeping:
module.run:
- name: system.reboot
- only_on_pending_reboot: True
- order: last
'''
ret = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
only_on_pending_reboot=only_on_pending_reboot)
if wait_for_reboot:
seconds = _convert_minutes_seconds(timeout, in_seconds)
time.sleep(seconds + 30)
return ret
|
Reboot a running system.
Args:
timeout (int):
The number of minutes/seconds before rebooting the system. Use of
minutes or seconds depends on the value of ``in_seconds``. Default
is 5 minutes.
in_seconds (bool):
``True`` will cause the ``timeout`` parameter to be in seconds.
``False`` will be in minutes. Default is ``False``.
.. versionadded:: 2015.8.0
wait_for_reboot (bool)
``True`` will sleep for timeout + 30 seconds after reboot has been
initiated. This is useful for use in a highstate. For example, you
may have states that you want to apply only after the reboot.
Default is ``False``.
.. versionadded:: 2015.8.0
only_on_pending_reboot (bool):
If this is set to ``True``, then the reboot will only proceed
if the system reports a pending reboot. Setting this parameter to
``True`` could be useful when calling this function from a final
housekeeping state intended to be executed at the end of a state run
(using *order: last*). Default is ``False``.
Returns:
bool: ``True`` if successful (a reboot will occur), otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.reboot 5
salt '*' system.reboot 5 True
Invoking this function from a final housekeeping state:
.. code-block:: yaml
final_housekeeping:
module.run:
- name: system.reboot
- only_on_pending_reboot: True
- order: last
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L165-L225
|
[
"def shutdown(message=None, timeout=5, force_close=True, reboot=False, # pylint: disable=redefined-outer-name\n in_seconds=False, only_on_pending_reboot=False):\n '''\n Shutdown a running system.\n\n Args:\n\n message (str):\n The message to display to the user before shutting down.\n\n timeout (int):\n The length of time (in seconds) that the shutdown dialog box should\n be displayed. While this dialog box is displayed, the shutdown can\n be aborted using the ``system.shutdown_abort`` function.\n\n If timeout is not zero, InitiateSystemShutdown displays a dialog box\n on the specified computer. The dialog box displays the name of the\n user who called the function, the message specified by the lpMessage\n parameter, and prompts the user to log off. The dialog box beeps\n when it is created and remains on top of other windows (system\n modal). The dialog box can be moved but not closed. A timer counts\n down the remaining time before the shutdown occurs.\n\n If timeout is zero, the computer shuts down immediately without\n displaying the dialog box and cannot be stopped by\n ``system.shutdown_abort``.\n\n Default is 5 minutes\n\n in_seconds (bool):\n ``True`` will cause the ``timeout`` parameter to be in seconds.\n ``False`` will be in minutes. Default is ``False``.\n\n .. versionadded:: 2015.8.0\n\n force_close (bool):\n ``True`` will force close all open applications. ``False`` will\n display a dialog box instructing the user to close open\n applications. Default is ``True``.\n\n reboot (bool):\n ``True`` restarts the computer immediately after shutdown. ``False``\n powers down the system. Default is ``False``.\n\n only_on_pending_reboot (bool): If this is set to True, then the shutdown\n will only proceed if the system reports a pending reboot. To\n optionally shutdown in a highstate, consider using the shutdown\n state instead of this module.\n\n only_on_pending_reboot (bool):\n If ``True`` the shutdown will only proceed if there is a reboot\n pending. 
``False`` will shutdown the system. Default is ``False``.\n\n Returns:\n bool:\n ``True`` if successful (a shutdown or reboot will occur), otherwise\n ``False``\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' system.shutdown \"System will shutdown in 5 minutes\"\n '''\n if six.PY2:\n message = _to_unicode(message)\n\n timeout = _convert_minutes_seconds(timeout, in_seconds)\n\n if only_on_pending_reboot and not get_pending_reboot():\n return False\n\n if message and not isinstance(message, six.string_types):\n message = message.decode('utf-8')\n try:\n win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,\n force_close, reboot)\n return True\n except pywintypes.error as exc:\n (number, context, message) = exc.args\n log.error('Failed to shutdown the system')\n log.error('nbr: %s', number)\n log.error('ctx: %s', context)\n log.error('msg: %s', message)\n return False\n",
"def _convert_minutes_seconds(timeout, in_seconds=False):\n '''\n convert timeout to seconds\n '''\n return timeout if in_seconds else timeout*60\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Load only on Windows systems that have the win32 modules available.
    '''
    if not salt.utils.platform.is_windows():
        return False, 'Module win_system: Requires Windows'
    if HAS_WIN32NET_MODS:
        return __virtualname__
    return False, 'Module win_system: Missing win32 modules'
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
def _to_unicode(instr):
    '''
    Decode ``instr`` from the current encoding (UTF-8) to unicode.
    ``None`` and already-unicode values are returned unchanged.
    '''
    if instr is None:
        return instr
    if isinstance(instr, six.text_type):
        return instr
    return six.text_type(instr, 'utf8')
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system.

    Args:

        timeout (int):
            Number of seconds before halting the system. Default is 5 seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.halt 5 True
    '''
    # A halt is just a non-reboot shutdown
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # TODO: map sysV runlevels to equivalent Windows actions  # pylint: disable=fixme
    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system.

    Args:

        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.poweroff 5
    '''
    # Powering off is a non-reboot shutdown
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def shutdown(message=None, timeout=5, force_close=True, reboot=False, # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.

    Args:

        message (str):
            The message to display to the user before shutting down.

        timeout (int):
            The length of time (in seconds) that the shutdown dialog box should
            be displayed. While this dialog box is displayed, the shutdown can
            be aborted using the ``system.shutdown_abort`` function.

            If timeout is not zero, InitiateSystemShutdown displays a dialog box
            on the specified computer. The dialog box displays the name of the
            user who called the function, the message specified by the lpMessage
            parameter, and prompts the user to log off. The dialog box beeps
            when it is created and remains on top of other windows (system
            modal). The dialog box can be moved but not closed. A timer counts
            down the remaining time before the shutdown occurs.

            If timeout is zero, the computer shuts down immediately without
            displaying the dialog box and cannot be stopped by
            ``system.shutdown_abort``.

            Default is 5 minutes

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        force_close (bool):
            ``True`` will force close all open applications. ``False`` will
            display a dialog box instructing the user to close open
            applications. Default is ``True``.

        reboot (bool):
            ``True`` restarts the computer immediately after shutdown. ``False``
            powers down the system. Default is ``False``.

        only_on_pending_reboot (bool):
            If ``True`` the shutdown will only proceed if the system reports
            a pending reboot; otherwise this function returns ``False``
            without shutting down. If ``False`` the shutdown proceeds
            unconditionally. Default is ``False``. To optionally shutdown in
            a highstate, consider using the shutdown state instead of this
            module.

    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur), otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        message = _to_unicode(message)

    # Normalize the timeout to seconds for the Win32 API
    timeout = _convert_minutes_seconds(timeout, in_seconds)

    # Honor the "only when a reboot is already pending" guard
    if only_on_pending_reboot and not get_pending_reboot():
        return False

    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        # '127.0.0.1' targets the local machine
        win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
                                        force_close, reboot)
        return True
    except pywintypes.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def shutdown_hard():
    '''
    Shutdown a running system with no timeout or warning.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown_hard
    '''
    # A zero timeout shuts down immediately and cannot be aborted
    return shutdown(timeout=0)
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        # '127.0.0.1' targets the local machine
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
def lock():
    '''
    Lock the workstation.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.lock
    '''
    # Direct Win32 call; nonzero return indicates success
    return windll.user32.LockWorkStation()
def set_computer_name(name):
    '''
    Set the Windows computer name

    Args:

        name (str):
            The new name to give the computer. Requires a reboot to take
            effect.

    Returns:
        dict:
            Returns a dictionary containing the old and new names if
            successful. ``False`` if not.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_name 'DavesComputer'
    '''
    if six.PY2:
        name = _to_unicode(name)

    renamed = windll.kernel32.SetComputerNameExW(
        win32con.ComputerNamePhysicalDnsHostname, name)
    if not renamed:
        return False

    result = {'Computer Name': {'Current': get_computer_name()}}
    # The rename only takes effect after a reboot; surface the pending name
    pending = get_pending_computer_name()
    if pending not in (None, False):
        result['Computer Name']['Pending'] = pending
    return result
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed, and
    the change is pending a system reboot, this function returns the pending
    computer name. Otherwise ``None`` is returned. If there was an error
    retrieving the pending computer name, ``False`` is returned and an error
    message is logged to the minion log.

    Returns:
        str:
            Returns the pending name if pending restart. Returns ``None`` if
            not pending restart.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    pending = __utils__['reg.read_value'](
        'HKLM',
        r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
        'NV Hostname')['vdata']
    if not pending:
        return False
    # A pending name equal to the current one means no rename is pending
    return None if pending == current else pending
def get_computer_name():
    '''
    Get the Windows computer name

    Returns:
        str: Returns the computer name if found. Otherwise returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_name
    '''
    name = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname)
    if name:
        return name
    return False
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description

    Args:

        desc (str):
            The computer description

    Returns:
        dict: ``{'Computer Description': <description>}`` if successful,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    # Nothing to do without a description; bail out BEFORE the network call
    # (previously NetServerGetInfo was queried even when desc was None)
    if desc is None:
        return False

    if six.PY2:
        desc = _to_unicode(desc)

    # Fetch the current server info block and overwrite its comment field
    system_info = win32net.NetServerGetInfo(None, 101)
    system_info['comment'] = desc

    # Apply new settings
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    return {'Computer Description': get_computer_desc()}
# Backwards-compatible alias so callers can use either
# ``system.set_computer_desc`` or ``system.set_computer_description``.
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description') # pylint: disable=invalid-name
def get_system_info():
    '''
    Get system information.

    Returns:
        dict: Dictionary containing information about the system to include
        name, description, version, etc...

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_info
    '''
    def byte_calc(val):
        # Render a raw byte count with the largest sensible binary unit
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)

    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}
    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}
    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}
    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}

    # Connect to WMI and harvest OS-level properties first
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}
        # Then computer-system-level properties
        system = conn.Win32_ComputerSystem()[0]
        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]
        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })
        # Get processor information; counts are summed over all sockets
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        # Manufacturer/clock speed are taken from the first processor only
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore

        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})
    # Reformat WMI timestamps ('YYYYMMDDHHMMSS....') into readable strings
    ret['install_date'] = _convert_date_time_string(ret['install_date'])
    ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
def get_computer_desc():
    '''
    Get the Windows computer description

    Returns:
        str: Returns the computer description if found. Otherwise returns
        ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_desc
    '''
    description = get_system_info()['description']
    if description is None:
        return False
    return description
# Backwards-compatible alias so callers can use either
# ``system.get_computer_desc`` or ``system.get_computer_description``.
get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description') # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion

    .. versionadded:: 2016.3.0

    Returns:
        str: Returns the hostname of the windows minion

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the native 'hostname' utility
    return __salt__['cmd.run'](cmd='hostname')
def set_hostname(hostname):
    '''
    Set the hostname of the windows minion, requires a restart before this
    will be updated.

    .. versionadded:: 2016.3.0

    Args:
        hostname (str): The hostname to set

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_hostname newhostname
    '''
    with salt.utils.winapi.Com():
        # Rename the machine through WMI; takes effect after a reboot
        computer = wmi.WMI().Win32_ComputerSystem()[0]
        return computer.Rename(Name=hostname)
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.

    Args:

        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str):
            Username of an account which is authorized to join computers to
            the specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``

        password (str):
            Password of the specified user

        account_ou (str):
            The DN of the OU below which the account for this computer should
            be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``

        restart (bool):
            ``True`` will restart the computer after a successful join.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)

    # Short-circuit when the machine already belongs to the requested domain
    current = get_domain_workgroup()
    if 'Domain' in current and current['Domain'] == domain:
        return 'Already joined to {0}'.format(domain)

    # Fully qualify a bare username with the target domain
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # Strip any backslash escape characters from the OU path
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.replace('\\', '')

    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)
    if err:
        # Nonzero means a Win32 error code; surface its message
        raise CommandExecutionError(win32api.FormatMessage(err).rstrip())

    ret = {'Domain': domain,
           'Restart': False}
    if restart:
        ret['Restart'] = reboot()
    return ret
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper that performs the actual WMI domain-join call for ``join_domain``.

    Args:
        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str): Username of an account which is authorized to join
            computers to the specified domain. Needs to be either fully
            qualified like ``user@domain.tld`` or simply ``user``

        password (str): Password of the specified user

        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool): If set to ``True`` the computer will only join
            the domain if the account already exists. If set to ``False`` the
            computer account will be created if it does not exist, otherwise it
            will use the existing account. Default is False.

    Returns:
        int: The result code of ``JoinDomainOrWorkgroup`` (0 indicates success)
    '''
    # Bit flags for the FJoinOptions parameter of
    # Win32_ComputerSystem.JoinDomainOrWorkgroup (NetJoinDomain constants)
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name

    join_options = 0x0
    join_options |= NETSETUP_JOIN_DOMAIN
    join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
    join_options |= NETSETUP_JOIN_WITH_NEW_NAME
    # Only request account creation when the caller did not insist on an
    # already-existing computer account
    if not account_exists:
        join_options |= NETSETUP_ACCOUNT_CREATE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]

        # Return the results of the command as an error
        # JoinDomainOrWorkgroup returns a strangely formatted value that looks like
        # (0,) so return the first item
        return comp.JoinDomainOrWorkgroup(
            Name=domain, Password=password, UserName=username, AccountOU=account_ou,
            FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:

        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this can
            be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``

        restart (bool):
            ``True`` will restart the computer after successful unjoin. Default
            is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)

    status = get_domain_workgroup()
    if 'Workgroup' in status:
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)

    # Build a fully qualified username if only a bare name was given
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # NetUnjoinDomain flag: also delete/disable the AD computer account
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name

    unjoin_options = 0x0
    if disable:
        unjoin_options |= NETSETUP_ACCT_DELETE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)

        # UnjoinDomainOrWorkgroup returns a strangely formatted value that
        # looks like (0,), so index the first element for the result code
        if err[0]:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            # BUGFIX: status['Domain'] raised KeyError when the machine was
            # not joined to a domain (status only contains 'Workgroup') --
            # use .get() with the caller-supplied domain as a fallback
            log.error('Failed to unjoin computer from %s',
                      status.get('Domain', domain))
            return False

        err = comp.JoinDomainOrWorkgroup(Name=workgroup)
        if err[0]:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to join the computer to %s', workgroup)
            return False

        ret = {'Workgroup': workgroup,
               'Restart': False}
        if restart:
            ret['Restart'] = reboot()
        return ret
def get_domain_workgroup():
    '''
    Get the domain or workgroup the computer belongs to.

    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2

    Returns:
        str: The name of the domain or workgroup

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_domain_workgroup
    '''
    with salt.utils.winapi.Com():
        # Only the first Win32_ComputerSystem instance matters; report its
        # domain when joined, otherwise its workgroup
        for machine in wmi.WMI().Win32_ComputerSystem():
            if machine.PartOfDomain:
                return {'Domain': machine.Domain}
            return {'Workgroup': machine.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Set the domain or workgroup the computer belongs to.

    .. versionadded:: 2019.2.0

    Args:
        workgroup (str): The name of the workgroup to join. It is uppercased
            before being applied.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)

    # Initialize COM
    with salt.utils.winapi.Com():
        # Grab the first Win32_ComputerSystem object from wmi
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]

        # Now we can join the new workgroup
        res = comp.JoinDomainOrWorkgroup(Name=workgroup.upper())

        # JoinDomainOrWorkgroup returns a tuple whose first element is the
        # result code; zero means success.  Simplified from the redundant
        # ``True if not res[0] else False``.
        return not res[0]
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the system time.

    Returns:
        str: Returns the system time in HH:MM:SS AM/PM format.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    now = win32api.GetLocalTime()
    hour24 = int(now[4])
    # Convert the 24-hour value to 12-hour clock: 0 -> 12 AM, 12 -> 12 PM
    meridian = 'PM' if hour24 >= 12 else 'AM'
    hour12 = hour24 % 12 or 12
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hour12, now[5], now[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.

    Args:

        newtime (str):
            The time to set. Can be any of the following formats:

            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_system_time 12:01
    '''
    # Accept both 12-hour (with meridian) and 24-hour notations
    parsed = _try_parse_datetime(
        newtime, ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M'])
    if parsed is None:
        return False

    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(hours=parsed.hour,
                                minutes=parsed.minute,
                                seconds=parsed.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for that
    element will be used. For example, if you don't pass the year, the current
    system year will be used. (Used by set_system_date and set_system_time)

    Args:

        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the underlying ``SetLocalTime`` call fails

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time so any element not passed can be preserved
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    # Check for passed values. If not passed, use current values.
    # Note: GetLocalTime index 2 is the day-of-week, so 'days' is index 3.
    if years is None:
        years = date_time[0]

    if months is None:
        months = date_time[1]

    if days is None:
        days = date_time[3]

    if hours is None:
        hours = date_time[4]

    if minutes is None:
        minutes = date_time[5]

    if seconds is None:
        seconds = date_time[6]

    try:
        class SYSTEMTIME(ctypes.Structure):
            # Mirrors the Win32 SYSTEMTIME structure layout
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # BUGFIX: was ``succeeded is not 0`` -- an identity comparison
        # against an int literal is unreliable (and a SyntaxWarning on
        # Python 3.8+). SetLocalTime returns non-zero on success.
        if succeeded != 0:
            return True
        else:
            log.error('Failed to set local time')
            raise CommandExecutionError(
                win32api.FormatMessage(succeeded).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date

    Returns:
        str: Returns the system date

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_system_date
    '''
    # GetLocalTime yields (year, month, day-of-week, day, ...)
    year, month, _, day = win32api.GetLocalTime()[:4]
    return '{0:02d}/{1:02d}/{2:04d}'.format(month, day, year)
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.

    Args:

        newdate (str):
            The date to set. Can be any of the following formats

            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date '03-28-13'
    '''
    # Try each supported notation until one parses
    parsed = _try_parse_datetime(newdate,
                                 ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                                  '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d'])
    if parsed is None:
        return False

    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(years=parsed.year,
                                months=parsed.month,
                                days=parsed.day)
def start_time_service():
    '''
    Start the Windows time service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.start_time_service
    '''
    # 'w32time' is the service name of the Windows Time service
    return __salt__['service.start']('w32time')
def stop_time_service():
    '''
    Stop the Windows time service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.stop_time_service
    '''
    # 'w32time' is the service name of the Windows Time service
    return __salt__['service.stop']('w32time')
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
            otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_component_servicing
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'

    # The mere presence of this registry key signals a pending reboot
    if not __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key does not exist: %s', key)
        return False

    log.debug('Key exists: %s', key)
    return True
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'
    avoid_key = r'{0}\AvoidSpnSet'.format(base_key)
    join_key = r'{0}\JoinDomain'.format(base_key)

    # The presence of either subkey indicates a pending reboot
    for reg_key in (avoid_key, join_key):
        if __utils__['reg.key_exists']('HKLM', reg_key):
            log.debug('Key exists: %s', reg_key)
            return True
        log.debug('Key does not exist: %s', reg_key)

    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    vnames = ('PendingFileRenameOperations', 'PendingFileRenameOperations2')
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'

    # A reboot is pending if either value name exists with actual data set
    for vname in vnames:
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)'):
            return True

    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    vname = 'CurrentRebootAttempts'
    key = r'SOFTWARE\Microsoft\ServerManager'

    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False

    log.debug('Found key: %s', key)
    # The value data can be '(value not set)', in which case no reboot is
    # actually pending -- treat any non-integer data as "not pending"
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
def get_pending_update():
    '''
    Determine whether there are pending updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'

    # The mere presence of this registry key signals a pending reboot
    if not __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key does not exist: %s', key)
        return False

    log.debug('Key exists: %s', key)
    return True
# Volatile registry key the minion uses to persist state only for the current
# boot session (written by set_reboot_required_witnessed)
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
# Value name under MINION_VOLATILE_KEY flagging that a reboot is required
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    This function is used to remember that an event indicating that a reboot is
    required was witnessed. This function relies on the salt-minion's ability to
    create the following volatile registry key in the *HKLM* hive:

       *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*

    Because this registry key is volatile, it will not persist beyond the
    current boot session. Also, in the scope of this key, the name *'Reboot
    required'* will be assigned the value of *1*.

    For the time being, this function is being used whenever an install
    completes with exit code 3010 and can be extended where appropriate in the
    future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    # volatile=True keeps the flag scoped to the current boot session; the
    # counterpart get_reboot_required_witnessed() checks for vdata == 1
    return __utils__['reg.set_value'](
        hive='HKLM',
        key=MINION_VOLATILE_KEY,
        volatile=True,
        vname=REBOOT_REQUIRED_NAME,
        vdata=1,
        vtype='REG_DWORD')
def get_reboot_required_witnessed():
    '''
    Determine if at any time during the current boot session the salt minion
    witnessed an event indicating that a reboot is required.

    This function will return ``True`` if an install completed with exit
    code 3010 during the current boot session and can be extended where
    appropriate in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Requires reboot`` registry flag is set to ``1``,
            otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    # The flag is written as REG_DWORD 1 by set_reboot_required_witnessed()
    return __utils__['reg.read_value'](
        hive='HKLM',
        key=MINION_VOLATILE_KEY,
        vname=REBOOT_REQUIRED_NAME)['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Ordered roughly from most to least likely source of a pending reboot;
    # any() short-circuits on the first positive check
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)

    return any(check() for check in checks)
|
saltstack/salt
|
salt/modules/win_system.py
|
shutdown
|
python
|
def shutdown(message=None, timeout=5, force_close=True, reboot=False,  # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.

    Args:

        message (str):
            The message to display to the user before shutting down.

        timeout (int):
            The length of time (in seconds) that the shutdown dialog box should
            be displayed. While this dialog box is displayed, the shutdown can
            be aborted using the ``system.shutdown_abort`` function.

            If timeout is not zero, InitiateSystemShutdown displays a dialog box
            on the specified computer. The dialog box displays the name of the
            user who called the function, the message specified by the lpMessage
            parameter, and prompts the user to log off. The dialog box beeps
            when it is created and remains on top of other windows (system
            modal). The dialog box can be moved but not closed. A timer counts
            down the remaining time before the shutdown occurs.

            If timeout is zero, the computer shuts down immediately without
            displaying the dialog box and cannot be stopped by
            ``system.shutdown_abort``.

            Default is 5 minutes

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        force_close (bool):
            ``True`` will force close all open applications. ``False`` will
            display a dialog box instructing the user to close open
            applications. Default is ``True``.

        reboot (bool):
            ``True`` restarts the computer immediately after shutdown. ``False``
            powers down the system. Default is ``False``.

        only_on_pending_reboot (bool):
            If ``True`` the shutdown will only proceed if there is a reboot
            pending; otherwise the system is shut down unconditionally. Default
            is ``False``. To optionally shutdown in a highstate, consider using
            the shutdown state instead of this module.

    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur), otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        message = _to_unicode(message)

    # Normalize the timeout to seconds for the Win32 API
    timeout = _convert_minutes_seconds(timeout, in_seconds)

    # Honor the conditional-shutdown flag before doing anything destructive
    if only_on_pending_reboot and not get_pending_reboot():
        return False

    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
                                        force_close, reboot)
        return True
    except pywintypes.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
|
Shutdown a running system.
Args:
message (str):
The message to display to the user before shutting down.
timeout (int):
The length of time (in seconds) that the shutdown dialog box should
be displayed. While this dialog box is displayed, the shutdown can
be aborted using the ``system.shutdown_abort`` function.
If timeout is not zero, InitiateSystemShutdown displays a dialog box
on the specified computer. The dialog box displays the name of the
user who called the function, the message specified by the lpMessage
parameter, and prompts the user to log off. The dialog box beeps
when it is created and remains on top of other windows (system
modal). The dialog box can be moved but not closed. A timer counts
down the remaining time before the shutdown occurs.
If timeout is zero, the computer shuts down immediately without
displaying the dialog box and cannot be stopped by
``system.shutdown_abort``.
Default is 5 minutes
in_seconds (bool):
``True`` will cause the ``timeout`` parameter to be in seconds.
``False`` will be in minutes. Default is ``False``.
.. versionadded:: 2015.8.0
force_close (bool):
``True`` will force close all open applications. ``False`` will
display a dialog box instructing the user to close open
applications. Default is ``True``.
reboot (bool):
``True`` restarts the computer immediately after shutdown. ``False``
powers down the system. Default is ``False``.
only_on_pending_reboot (bool):
If ``True`` the shutdown will only proceed if there is a reboot
pending; otherwise the system is shut down unconditionally. Default is
``False``. To optionally shutdown in a highstate, consider using the
shutdown state instead of this module.
Returns:
bool:
``True`` if successful (a shutdown or reboot will occur), otherwise
``False``
CLI Example:
.. code-block:: bash
salt '*' system.shutdown "System will shutdown in 5 minutes"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L228-L312
|
[
"def _to_unicode(instr):\n '''\n Converts from current users character encoding to unicode.\n When instr has a value of None, the return value of the function\n will also be None.\n '''\n if instr is None or isinstance(instr, six.text_type):\n return instr\n else:\n return six.text_type(instr, 'utf8')\n",
"def _convert_minutes_seconds(timeout, in_seconds=False):\n '''\n convert timeout to seconds\n '''\n return timeout if in_seconds else timeout*60\n",
"def get_pending_reboot():\n '''\n Determine whether there is a reboot pending.\n\n .. versionadded:: 2016.11.0\n\n Returns:\n bool: ``True`` if the system is pending reboot, otherwise ``False``\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' system.get_pending_reboot\n '''\n\n # Order the checks for reboot pending in most to least likely.\n checks = (get_pending_update,\n get_pending_file_rename,\n get_pending_servermanager,\n get_pending_component_servicing,\n get_reboot_required_witnessed,\n get_pending_computer_name,\n get_pending_domain_join)\n\n for check in checks:\n if check():\n return True\n\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Only works on Windows Systems with Win32 Modules
    '''
    # Both conditions must hold for this module to load
    if not salt.utils.platform.is_windows():
        return False, 'Module win_system: Requires Windows'

    if HAS_WIN32NET_MODS:
        return __virtualname__

    return False, 'Module win_system: Missing win32 modules'
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
def _to_unicode(instr):
    '''
    Convert ``instr`` from the current user's character encoding to unicode.

    ``None`` passes straight through, as does anything that is already a
    unicode string.
    '''
    if instr is None:
        return instr
    if isinstance(instr, six.text_type):
        return instr
    # Bytes / native str: decode assuming UTF-8
    return six.text_type(instr, 'utf8')
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system.

    Args:

        timeout (int):
            Number of seconds before halting the system. Default is 5 seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.halt 5 True
    '''
    # Halting is implemented as a plain (non-reboot) shutdown
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # Runlevels have no direct Windows equivalent; a mapping of runlevels to
    # corresponding Windows actions could live here in the future.
    # TODO: Create a mapping of runlevels to  # pylint: disable=fixme
    # corresponding Windows actions
    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system.

    Args:

        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.poweroff 5
    '''
    # Power-off is implemented as a plain (non-reboot) shutdown
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False,  # pylint: disable=redefined-outer-name
           only_on_pending_reboot=False):
    '''
    Reboot a running system.

    Args:

        timeout (int):
            The number of minutes/seconds before rebooting the system. Use of
            minutes or seconds depends on the value of ``in_seconds``. Default
            is 5 minutes.

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        wait_for_reboot (bool)
            ``True`` will sleep for timeout + 30 seconds after reboot has been
            initiated. This is useful for use in a highstate. For example, you
            may have states that you want to apply only after the reboot.
            Default is ``False``.

            .. versionadded:: 2015.8.0

        only_on_pending_reboot (bool):
            If this is set to ``True``, then the reboot will only proceed
            if the system reports a pending reboot. Setting this parameter to
            ``True`` could be useful when calling this function from a final
            housekeeping state intended to be executed at the end of a state run
            (using *order: last*). Default is ``False``.

    Returns:
        bool: ``True`` if successful (a reboot will occur), otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.reboot 5
        salt '*' system.reboot 5 True

    Invoking this function from a final housekeeping state:

    .. code-block:: yaml

        final_housekeeping:
           module.run:
              - name: system.reboot
              - only_on_pending_reboot: True
              - order: last
    '''
    # Delegate to shutdown() with the reboot flag set
    ret = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
                   only_on_pending_reboot=only_on_pending_reboot)

    if wait_for_reboot:
        # Block long enough for the reboot to actually happen so a highstate
        # does not continue on the old boot session
        time.sleep(_convert_minutes_seconds(timeout, in_seconds) + 30)

    return ret
def shutdown_hard():
    '''
    Shutdown a running system with no timeout or warning.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown_hard
    '''
    # timeout=0 shuts down immediately, with no dialog, and the shutdown
    # cannot be aborted (see shutdown() docstring)
    return shutdown(timeout=0)
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        number, context, message = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
def lock():
    '''
    Lock the workstation.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.lock
    '''
    # Delegates directly to the user32 LockWorkStation API
    return windll.user32.LockWorkStation()
def set_computer_name(name):
    '''
    Set the Windows computer name

    Args:

        name (str):
            The new name to give the computer. Requires a reboot to take effect.

    Returns:
        dict:
            Returns a dictionary containing the old and new names if successful.
            ``False`` if not.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_name 'DavesComputer'
    '''
    if six.PY2:
        name = _to_unicode(name)

    # SetComputerNameExW returns truthy on success; the new name only
    # becomes the active hostname after a reboot
    if windll.kernel32.SetComputerNameExW(
            win32con.ComputerNamePhysicalDnsHostname, name):
        ret = {'Computer Name': {'Current': get_computer_name()}}
        # Surface the queued rename (if any) so callers can see what will
        # take effect on the next reboot
        pending = get_pending_computer_name()
        if pending not in (None, False):
            ret['Computer Name']['Pending'] = pending
        return ret
    return False
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed, and the
    change is pending a system reboot, this function will return the pending
    computer name. Otherwise, ``None`` will be returned. If there was an error
    retrieving the pending computer name, ``False`` will be returned, and an
    error message will be logged to the minion log.

    Returns:
        str:
            Returns the pending name if pending restart. Returns ``None`` if not
            pending restart.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    # 'NV Hostname' holds the name that becomes active after a reboot
    pending = __utils__['reg.read_value'](
        'HKLM',
        r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
        'NV Hostname')['vdata']
    if pending:
        # A pending value equal to the current name means no rename is queued
        return pending if pending != current else None
    # NOTE(review): any falsy 'vdata' (including an empty value) falls through
    # to False here, which the docstring describes as the error case -- confirm
    # that read failures are the only way 'vdata' can be falsy
    return False
def get_computer_name():
    '''
    Get the Windows computer name

    Returns:
        str: Returns the computer name if found. Otherwise returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_name
    '''
    # An empty result is normalized to False for callers
    name = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname)
    return name or False
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description

    Args:

        desc (str):
            The computer description

    Returns:
        str: Description if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    # Nothing to do without a description; bail out before touching the
    # network API (previously the None check ran after NetServerGetInfo)
    if desc is None:
        return False

    if six.PY2:
        desc = _to_unicode(desc)

    # Fetch the current level-101 server info so only the comment changes
    system_info = win32net.NetServerGetInfo(None, 101)
    system_info['comment'] = desc

    # Apply new settings
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    return {'Computer Description': get_computer_desc()}
# Backwards-compatible alias exposing set_computer_desc under the longer name
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description')  # pylint: disable=invalid-name
def get_system_info():
    '''
    Get system information.

    Returns:
        dict: Dictionary containing information about the system to include
        name, description, version, etc...

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_info
    '''
    def byte_calc(val):
        # Render a raw byte count as a human readable string (B .. TB)
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)
    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}
    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}
    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}
    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}
    # Connect to WMI
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}
        system = conn.Win32_ComputerSystem()[0]
        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]
        # NOTE(review): ChassisSKUNumber appears to exist only on newer
        # Windows releases (10 / Server 2016+) -- confirm before relying on
        # this on older systems.
        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })
        # Get processor information
        # Counts are accumulated across all physical processor packages
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            # NOTE(review): NumberOfEnabledCore may be missing on older
            # Windows versions -- verify availability where this runs
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore
        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})
        # Convert the raw CIM datetime strings to 'YYYY-MM-DD HH:MM:SS'
        ret['install_date'] = _convert_date_time_string(ret['install_date'])
        ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
def get_computer_desc():
    '''
    Get the Windows computer description.

    Returns:
        str: The computer description if one is set, otherwise ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_desc
    '''
    desc = get_system_info()['description']
    if desc is None:
        return False
    return desc
get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description')  # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion.

    .. versionadded:: 2016.3.0

    Returns:
        str: The hostname of the windows minion

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the built-in ``hostname`` command
    return __salt__['cmd.run'](cmd='hostname')
def set_hostname(hostname):
    '''
    Set the hostname of the windows minion. Requires a restart before the
    change takes effect.

    .. versionadded:: 2016.3.0

    Args:
        hostname (str): The hostname to set

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_hostname newhostname
    '''
    with salt.utils.winapi.Com():
        # Rename via WMI on the first (local) Win32_ComputerSystem instance
        system = wmi.WMI().Win32_ComputerSystem()[0]
        return system.Rename(Name=hostname)
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.

    Args:
        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str):
            Username of an account which is authorized to join computers to
            the specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``

        password (str):
            Password of the specified user

        account_ou (str):
            The DN of the OU below which the account for this computer should
            be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``

        restart (bool):
            ``True`` will restart the computer after a successful join.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: A dict with the joined domain and restart status if successful,
        otherwise ``False``. A descriptive string is returned for no-op and
        validation failures.

    Raises:
        CommandExecutionError: If the underlying NetJoin call returns a
            nonzero Windows error code.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)
    # No-op if we are already joined to the requested domain
    status = get_domain_workgroup()
    if 'Domain' in status:
        if status['Domain'] == domain:
            return 'Already joined to {0}'.format(domain)
    # Qualify a bare username with the target domain
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)
    if username and password is None:
        return 'Must specify a password if you pass a username'
    # remove any escape characters (backslashes) from the OU DN
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.split('\\')
        account_ou = ''.join(account_ou)
    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)
    # _join_domain returns a Windows error code; 0 means success
    if not err:
        ret = {'Domain': domain,
               'Restart': False}
        if restart:
            ret['Restart'] = reboot()
        return ret
    raise CommandExecutionError(win32api.FormatMessage(err).rstrip())
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper function to join the domain via WMI.

    Args:
        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str): Username of an account which is authorized to join
            computers to the specified domain. Need to be either fully
            qualified like ``user@domain.tld`` or simply ``user``

        password (str): Password of the specified user

        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool): If set to ``True`` the computer will only join
            the domain if the account already exists. If set to ``False`` the
            computer account will be created if it does not exist, otherwise
            it will use the existing account. Default is False.

    Returns:
        int: The Windows error code returned by ``JoinDomainOrWorkgroup``
        (``0`` on success)
    '''
    # NETSETUP_* flag values from the Win32 NetJoinDomain API
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name
    join_options = 0x0
    join_options |= NETSETUP_JOIN_DOMAIN
    join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
    join_options |= NETSETUP_JOIN_WITH_NEW_NAME
    # Only request account creation when the caller did not assert that the
    # computer account already exists
    if not account_exists:
        join_options |= NETSETUP_ACCOUNT_CREATE
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
    # Return the results of the command as an error
    # JoinDomainOrWorkgroup returns a strangely formatted value that looks like
    # (0,) so return the first item
    return comp.JoinDomainOrWorkgroup(
        Name=domain, Password=password, UserName=username, AccountOU=account_ou,
        FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:
        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this
            can be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``

        restart (bool):
            ``True`` will restart the computer after successful unjoin.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: A dict with the new workgroup and restart status if successful,
        otherwise ``False``. A descriptive string is returned for no-op and
        validation failures.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)
    # No-op if we are already a member of the requested workgroup
    status = get_domain_workgroup()
    if 'Workgroup' in status:
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)
    # Qualify a bare username with the domain
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'
    if username and password is None:
        return 'Must specify a password if you pass a username'
    # NETSETUP flag from the Win32 NetUnjoinDomain API
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name
    unjoin_options = 0x0
    if disable:
        unjoin_options |= NETSETUP_ACCT_DELETE
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)
        # you have to do this because UnjoinDomainOrWorkgroup returns a
        # strangely formatted value that looks like (0,)
        if not err[0]:
            # Unjoin succeeded; now join the requested workgroup
            err = comp.JoinDomainOrWorkgroup(Name=workgroup)
            if not err[0]:
                ret = {'Workgroup': workgroup,
                       'Restart': False}
                if restart:
                    ret['Restart'] = reboot()
                return ret
            else:
                log.error(win32api.FormatMessage(err[0]).rstrip())
                log.error('Failed to join the computer to %s', workgroup)
                return False
        else:
            # NOTE(review): status['Domain'] raises KeyError here if the
            # machine was in a (different) workgroup rather than a domain --
            # confirm whether that path is reachable
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to unjoin computer from %s', status['Domain'])
            return False
def get_domain_workgroup():
    '''
    Get the domain or workgroup the computer belongs to.

    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2

    Returns:
        dict: ``{'Domain': <name>}`` when domain-joined, otherwise
        ``{'Workgroup': <name>}``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_domain_workgroup
    '''
    with salt.utils.winapi.Com():
        # Only the first (local) computer system entry matters
        for system in wmi.WMI().Win32_ComputerSystem():
            if system.PartOfDomain:
                return {'Domain': system.Domain}
            return {'Workgroup': system.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Set the workgroup the computer belongs to.

    .. versionadded:: 2019.2.0

    Args:
        workgroup (str): The name of the workgroup to join (upper-cased
            before use)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)
    # Initialize COM and join the workgroup via WMI
    with salt.utils.winapi.Com():
        system = wmi.WMI().Win32_ComputerSystem()[0]
        # JoinDomainOrWorkgroup returns a one-element tuple: (error_code,)
        result = system.JoinDomainOrWorkgroup(Name=workgroup.upper())
        return result[0] == 0
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the system time.

    Returns:
        str: The system time in ``HH:MM:SS AM/PM`` format.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    now = win32api.GetLocalTime()
    # Convert the 24-hour value (index 4) to 12-hour with meridian
    hour = int(now[4])
    if hour >= 12:
        meridian = 'PM'
        if hour > 12:
            hour -= 12
    else:
        meridian = 'AM'
        if hour == 0:
            # Midnight is 12 AM
            hour = 12
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hour, now[5], now[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.

    Args:
        newtime (str):
            The time to set. Can be any of the following formats:

            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_system_time 12:01
    '''
    # Accept both 12-hour (meridian) and 24-hour notations
    parsed = _try_parse_datetime(
        newtime, ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M'])
    if parsed is None:
        # Unrecognized time string
        return False
    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(hours=parsed.hour,
                                minutes=parsed.minute,
                                seconds=parsed.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for
    that element will be used. For example, if you don't pass the year, the
    current system year will be used. (Used by set_system_date and
    set_system_time)

    Args:
        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If ``SetLocalTime`` fails

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time so omitted elements keep their values
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    # Check for passed values. If not passed, use current values.
    # GetLocalTime tuple layout: (year, month, day_of_week, day, hour,
    # minute, second, millisecond) -- index 2 is the day of the week,
    # which is why days comes from index 3.
    if years is None:
        years = date_time[0]
    if months is None:
        months = date_time[1]
    if days is None:
        days = date_time[3]
    if hours is None:
        hours = date_time[4]
    if minutes is None:
        minutes = date_time[5]
    if seconds is None:
        seconds = date_time[6]
    try:
        # Mirror of the Win32 SYSTEMTIME structure expected by SetLocalTime
        class SYSTEMTIME(ctypes.Structure):
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # BUGFIX: was ``succeeded is not 0`` -- identity comparison with an
        # int literal is implementation-dependent (SyntaxWarning on modern
        # CPython); use a value comparison instead
        if succeeded != 0:
            return True
        # BUGFIX: SetLocalTime returns 0 on failure; the actual error code
        # comes from GetLastError, not from the (zero) return value
        log.error('Failed to set local time')
        raise CommandExecutionError(
            win32api.FormatMessage(win32api.GetLastError()).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date.

    Returns:
        str: The system date in ``MM/DD/YYYY`` format

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_system_date
    '''
    # GetLocalTime: (year, month, day_of_week, day, ...)
    year, month, _, day = win32api.GetLocalTime()[:4]
    return '{0:02d}/{1:02d}/{2:04d}'.format(month, day, year)
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.

    Args:
        newdate (str):
            The date to set. Can be any of the following formats

            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date '03-28-13'
    '''
    # Try every supported date layout in turn
    parsed = _try_parse_datetime(newdate,
                                 ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                                  '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d'])
    if parsed is None:
        # Unrecognized date string
        return False
    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(years=parsed.year,
                                months=parsed.month,
                                days=parsed.day)
def start_time_service():
    '''
    Start the Windows time (w32time) service.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.start_time_service
    '''
    # Delegate to the generic service module
    return __salt__['service.start']('w32time')
def stop_time_service():
    '''
    Stop the Windows time (w32time) service.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.stop_time_service
    '''
    # Delegate to the generic service module
    return __salt__['service.stop']('w32time')
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_component_servicing
    '''
    # The mere existence of this key means a reboot is pending
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'
    pending = __utils__['reg.key_exists']('HKLM', key)
    if pending:
        log.debug('Key exists: %s', key)
    else:
        log.debug('Key does not exist: %s', key)
    return bool(pending)
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'
    # The presence of either of these keys indicates a pending reboot
    for key in (r'{0}\AvoidSpnSet'.format(base_key),
                r'{0}\JoinDomain'.format(base_key)):
        if __utils__['reg.key_exists']('HKLM', key):
            log.debug('Key exists: %s', key)
            return True
        log.debug('Key does not exist: %s', key)
    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'
    # A reboot is pending if either value name exists with real value data
    for vname in ('PendingFileRenameOperations',
                  'PendingFileRenameOperations2'):
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)'):
            return True
    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    key = r'SOFTWARE\Microsoft\ServerManager'
    vname = 'CurrentRebootAttempts'
    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False
    log.debug('Found key: %s', key)
    # The value may be '(value not set)' or otherwise non-numeric, in which
    # case no reboot is actually pending
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
def get_pending_update():
    '''
    Determine whether there are pending updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    # The mere existence of this key means a reboot is pending
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'
    pending = __utils__['reg.key_exists']('HKLM', key)
    if pending:
        log.debug('Key exists: %s', key)
    else:
        log.debug('Key does not exist: %s', key)
    return bool(pending)
# Volatile registry key where the salt-minion records state that must not
# survive a reboot (created by set_reboot_required_witnessed below)
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
# Value name under MINION_VOLATILE_KEY used to flag a witnessed reboot need
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    Remember that an event indicating that a reboot is required was witnessed.

    This relies on the salt-minion's ability to create the following volatile
    registry key in the *HKLM* hive:

       *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*

    Because this registry key is volatile, it will not persist beyond the
    current boot session. Also, in the scope of this key, the name *'Reboot
    required'* will be assigned the value of *1*.

    For the time being, this function is being used whenever an install
    completes with exit code 3010 and can be extended where appropriate in
    the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    # volatile=True means the flag evaporates on the next boot
    return __utils__['reg.set_value'](hive='HKLM',
                                      key=MINION_VOLATILE_KEY,
                                      vname=REBOOT_REQUIRED_NAME,
                                      vdata=1,
                                      vtype='REG_DWORD',
                                      volatile=True)
def get_reboot_required_witnessed():
    '''
    Determine if at any time during the current boot session the salt minion
    witnessed an event indicating that a reboot is required.

    This function will return ``True`` if an install completed with exit
    code 3010 during the current boot session and can be extended where
    appropriate in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Reboot required`` registry flag is set to
        ``1``, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    reg_ret = __utils__['reg.read_value'](hive='HKLM',
                                          key=MINION_VOLATILE_KEY,
                                          vname=REBOOT_REQUIRED_NAME)
    return reg_ret['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Checks ordered from most to least likely; any() short-circuits on the
    # first truthy result
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)
    return any(check() for check in checks)
|
saltstack/salt
|
salt/modules/win_system.py
|
shutdown_abort
|
python
|
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        # Cancel the pending shutdown on the local machine
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        number, context, message = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
|
Abort a shutdown. Only available while the dialog box is being
displayed to the user. Once the shutdown has initiated, it cannot be
aborted.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.shutdown_abort
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L331-L355
| null |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Only load on Windows systems that ship the required win32 modules.
    '''
    if not salt.utils.platform.is_windows():
        return False, 'Module win_system: Requires Windows'
    if HAS_WIN32NET_MODS:
        return __virtualname__
    return False, 'Module win_system: Missing win32 modules'
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
def _to_unicode(instr):
    '''
    Convert ``instr`` from the current byte encoding to unicode (UTF-8
    decode). ``None`` and already-unicode values pass through unchanged.
    '''
    if instr is None:
        return None
    if isinstance(instr, six.text_type):
        return instr
    return six.text_type(instr, 'utf8')
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system.

    Args:
        timeout (int):
            Number of seconds before halting the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.halt 5 True
    '''
    # A halt is simply a shutdown without the reboot flag
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # Windows has no concept of sysV runlevels; the argument is accepted
    # only for cross-platform API compatibility.
    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system.

    Args:
        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.poweroff 5
    '''
    # A poweroff is simply a shutdown without the reboot flag
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False,  # pylint: disable=redefined-outer-name
           only_on_pending_reboot=False):
    '''
    Reboot a running system.

    Args:
        timeout (int):
            The number of minutes/seconds before rebooting the system. Use of
            minutes or seconds depends on the value of ``in_seconds``.
            Default is 5 minutes.

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        wait_for_reboot (bool):
            ``True`` will sleep for timeout + 30 seconds after reboot has
            been initiated. This is useful for use in a highstate. For
            example, you may have states that you want to apply only after
            the reboot. Default is ``False``.

            .. versionadded:: 2015.8.0

        only_on_pending_reboot (bool):
            If this is set to ``True``, then the reboot will only proceed
            if the system reports a pending reboot. Setting this parameter to
            ``True`` could be useful when calling this function from a final
            housekeeping state intended to be executed at the end of a state
            run (using *order: last*). Default is ``False``.

    Returns:
        bool: ``True`` if successful (a reboot will occur), otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.reboot 5
        salt '*' system.reboot 5 True

    Invoking this function from a final housekeeping state:

    .. code-block:: yaml

        final_housekeeping:
           module.run:
              - name: system.reboot
              - only_on_pending_reboot: True
              - order: last
    '''
    ret = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
                   only_on_pending_reboot=only_on_pending_reboot)
    if wait_for_reboot:
        # Block long enough for the reboot to actually begin so a highstate
        # does not keep executing states on the old boot session
        time.sleep(_convert_minutes_seconds(timeout, in_seconds) + 30)
    return ret
def shutdown(message=None, timeout=5, force_close=True, reboot=False,  # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.

    Args:
        message (str):
            The message to display to the user before shutting down.

        timeout (int):
            The length of time (in seconds) that the shutdown dialog box
            should be displayed. While this dialog box is displayed, the
            shutdown can be aborted using the ``system.shutdown_abort``
            function.

            If timeout is not zero, InitiateSystemShutdown displays a dialog
            box on the specified computer. The dialog box displays the name
            of the user who called the function, the message specified by the
            lpMessage parameter, and prompts the user to log off. The dialog
            box beeps when it is created and remains on top of other windows
            (system modal). The dialog box can be moved but not closed. A
            timer counts down the remaining time before the shutdown occurs.

            If timeout is zero, the computer shuts down immediately without
            displaying the dialog box and cannot be stopped by
            ``system.shutdown_abort``.

            Default is 5 minutes

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        force_close (bool):
            ``True`` will force close all open applications. ``False`` will
            display a dialog box instructing the user to close open
            applications. Default is ``True``.

        reboot (bool):
            ``True`` restarts the computer immediately after shutdown.
            ``False`` powers down the system. Default is ``False``.

        only_on_pending_reboot (bool):
            If ``True`` the shutdown will only proceed if there is a reboot
            pending; ``False`` shuts down unconditionally. To optionally
            shutdown in a highstate, consider using the shutdown state
            instead of this module. Default is ``False``.

    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur),
            otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        message = _to_unicode(message)
    # Normalize the timeout to seconds for the Win32 API
    timeout = _convert_minutes_seconds(timeout, in_seconds)
    if only_on_pending_reboot and not get_pending_reboot():
        return False
    # Decode a bytes message so the API receives text
    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
                                        force_close, reboot)
        return True
    except pywintypes.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def shutdown_hard():
    '''
    Immediately shut down the running system with no warning dialog and no
    grace period.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown_hard
    '''
    # A zero timeout skips the countdown dialog entirely
    return shutdown(timeout=0)
def lock():
    '''
    Lock the current workstation session.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.lock
    '''
    # Thin wrapper over the Win32 LockWorkStation API
    return windll.user32.LockWorkStation()
def set_computer_name(name):
    '''
    Set the Windows computer name.

    Args:
        name (str):
            The new name to give the computer. Requires a reboot to take
            effect.

    Returns:
        dict:
            A dictionary containing the current and (if applicable) pending
            names if successful. ``False`` if not.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_name 'DavesComputer'
    '''
    if six.PY2:
        name = _to_unicode(name)

    # SetComputerNameExW returns non-zero on success
    if not windll.kernel32.SetComputerNameExW(
            win32con.ComputerNamePhysicalDnsHostname, name):
        return False

    result = {'Computer Name': {'Current': get_computer_name()}}
    pending = get_pending_computer_name()
    if pending not in (None, False):
        result['Computer Name']['Pending'] = pending
    return result
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed and the
    change is pending a system reboot, return the pending name. Return
    ``None`` when no rename is pending, or ``False`` if the pending name could
    not be read from the registry.

    Returns:
        str:
            The pending name if a restart is pending, otherwise ``None``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    pending = __utils__['reg.read_value'](
        'HKLM',
        r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
        'NV Hostname')['vdata']
    if not pending:
        return False
    # A pending value equal to the current name means no rename is queued
    return None if pending == current else pending
def get_computer_name():
    '''
    Get the Windows computer name.

    Returns:
        str: The computer name if found, otherwise ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_name
    '''
    hostname = win32api.GetComputerNameEx(
        win32con.ComputerNamePhysicalDnsHostname)
    if not hostname:
        return False
    return hostname
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description.

    Args:
        desc (str):
            The computer description. If ``None``, nothing is changed and
            ``False`` is returned.

    Returns:
        dict: ``{'Computer Description': <new description>}`` if successful,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    if six.PY2:
        desc = _to_unicode(desc)

    # Bail out before touching the network API when there is nothing to set
    # (the old code queried NetServerGetInfo first and only then returned)
    if desc is None:
        return False

    # Info level 101 includes the 'comment' (description) field
    system_info = win32net.NetServerGetInfo(None, 101)
    system_info['comment'] = desc

    # Apply new settings
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    return {'Computer Description': get_computer_desc()}
# Backwards-compatible alias for set_computer_desc
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description') # pylint: disable=invalid-name
def get_system_info():
    '''
    Get system information.

    Returns:
        dict: Dictionary containing information about the system to include
        name, description, version, etc...

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_info
    '''
    def byte_calc(val):
        # Render a raw byte count with the largest unit that keeps the
        # value below 1024 (B, KB, MB, GB, TB)
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)

    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}

    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}

    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}

    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}

    # Connect to WMI
    with salt.utils.winapi.Com():
        conn = wmi.WMI()

        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}

        system = conn.Win32_ComputerSystem()[0]

        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]

        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })

        # Get processor information; totals are accumulated across all
        # physical processors, manufacturer/speed come from the first one
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore

        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})

    # Normalize WMI CIM datetimes into 'YYYY-MM-DD HH:MM:SS'
    ret['install_date'] = _convert_date_time_string(ret['install_date'])
    ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
def get_computer_desc():
    '''
    Get the Windows computer description.

    Returns:
        str: The computer description if set, otherwise ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_desc
    '''
    description = get_system_info()['description']
    if description is None:
        return False
    return description
# Backwards-compatible alias for get_computer_desc
get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description') # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion.

    .. versionadded:: 2016.3.0

    Returns:
        str: The hostname of the windows minion

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the native 'hostname' utility
    return __salt__['cmd.run'](cmd='hostname')
def set_hostname(hostname):
    '''
    Set the hostname of the windows minion. A restart is required before the
    change takes effect.

    .. versionadded:: 2016.3.0

    Args:
        hostname (str): The hostname to set

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_hostname newhostname
    '''
    with salt.utils.winapi.Com():
        computer = wmi.WMI().Win32_ComputerSystem()[0]
        return computer.Rename(Name=hostname)
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.

    Args:

        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str):
            Username of an account which is authorized to join computers to the
            specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``

        password (str):
            Password of the specified user

        account_ou (str):
            The DN of the OU below which the account for this computer should be
            created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``

        restart (bool):
            ``True`` will restart the computer after a successful join. Default
            is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the underlying join call fails

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        # Normalize CLI-supplied strings to unicode on Python 2
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)

    # No-op if already a member of the requested domain
    status = get_domain_workgroup()
    if 'Domain' in status:
        if status['Domain'] == domain:
            return 'Already joined to {0}'.format(domain)

    # Qualify a bare username with the target domain
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # remove any escape characters
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.split('\\')
        account_ou = ''.join(account_ou)

    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)

    if not err:
        ret = {'Domain': domain,
               'Restart': False}
        if restart:
            ret['Restart'] = reboot()
        return ret

    raise CommandExecutionError(win32api.FormatMessage(err).rstrip())
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper function to join the domain.

    Args:
        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str): Username of an account which is authorized to join
            computers to the specified domain. Need to be either fully qualified
            like ``user@domain.tld`` or simply ``user``

        password (str): Password of the specified user

        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool): If set to ``True`` the computer will only join
            the domain if the account already exists. If set to ``False`` the
            computer account will be created if it does not exist, otherwise it
            will use the existing account. Default is False.

    Returns:
        int: The Win32 error code returned by JoinDomainOrWorkgroup
        (0 means success)
    '''
    # NetJoinDomain option flags (see lmjoin.h)
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name

    join_options = 0x0
    join_options |= NETSETUP_JOIN_DOMAIN
    join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
    join_options |= NETSETUP_JOIN_WITH_NEW_NAME
    if not account_exists:
        join_options |= NETSETUP_ACCOUNT_CREATE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]

        # Return the results of the command as an error
        # JoinDomainOrWorkgroup returns a strangely formatted value that looks like
        # (0,) so return the first item
        return comp.JoinDomainOrWorkgroup(
            Name=domain, Password=password, UserName=username, AccountOU=account_ou,
            FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:

        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this can
            be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``

        restart (bool):
            ``True`` will restart the computer after successful unjoin. Default
            is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)

    status = get_domain_workgroup()
    if 'Workgroup' in status:
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)

    # Qualify a bare username with the supplied domain
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # NETSETUP_ACCT_DELETE disables the computer account in the domain
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name

    unjoin_options = 0x0
    if disable:
        unjoin_options |= NETSETUP_ACCT_DELETE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)

        # you have to do this because UnjoinDomainOrWorkgroup returns a
        # strangely formatted value that looks like (0,)
        if not err[0]:
            err = comp.JoinDomainOrWorkgroup(Name=workgroup)
            if not err[0]:
                ret = {'Workgroup': workgroup,
                       'Restart': False}
                if restart:
                    ret['Restart'] = reboot()
                return ret
            else:
                log.error(win32api.FormatMessage(err[0]).rstrip())
                log.error('Failed to join the computer to %s', workgroup)
                return False
        else:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            # BUGFIX: status may not contain a 'Domain' key (e.g. the machine
            # is in a different workgroup); avoid a KeyError while logging
            log.error('Failed to unjoin computer from %s',
                      status.get('Domain', 'the current domain'))
            return False
def get_domain_workgroup():
    '''
    Get the domain or workgroup the computer belongs to.

    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2

    Returns:
        dict: ``{'Domain': name}`` when domain-joined, otherwise
        ``{'Workgroup': name}``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_domain_workgroup
    '''
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        # Only the first Win32_ComputerSystem instance is relevant
        for computer in conn.Win32_ComputerSystem():
            if computer.PartOfDomain:
                return {'Domain': computer.Domain}
            return {'Workgroup': computer.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Set the domain or workgroup the computer belongs to.

    .. versionadded:: 2019.2.0

    Args:
        workgroup (str): The name of the workgroup to join

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)

    # Initialize COM and grab the first Win32_ComputerSystem object
    with salt.utils.winapi.Com():
        computer = wmi.WMI().Win32_ComputerSystem()[0]

        # JoinDomainOrWorkgroup returns a one-element tuple; 0 means success
        result = computer.JoinDomainOrWorkgroup(Name=workgroup.upper())

    return not result[0]
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the system time.

    Returns:
        str: The system time in ``HH:MM:SS AM/PM`` format.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    now = win32api.GetLocalTime()
    # Index 4 is the hour in 24-hour form; convert to 12-hour plus meridian
    hours = int(now[4])
    meridian = 'AM' if hours < 12 else 'PM'
    hours = hours % 12
    if hours == 0:
        hours = 12
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.

    Args:
        newtime (str):
            The time to set. Can be any of the following formats:

            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_system_time 12:01
    '''
    # Accept both 12-hour (with AM/PM) and 24-hour formats
    time_formats = ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M']
    parsed = _try_parse_datetime(newtime, time_formats)
    if parsed is None:
        return False

    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(hours=parsed.hour,
                                minutes=parsed.minute,
                                seconds=parsed.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for that
    element will be used. For example, if you don't pass the year, the current
    system year will be used. (Used by set_system_date and set_system_time)

    Args:
        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If SetLocalTime fails

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time so unspecified elements keep their values
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    # Check for passed values. If not passed, use current values.
    # Note: index 2 of GetLocalTime is the day-of-week; index 3 is the day.
    if years is None:
        years = date_time[0]
    if months is None:
        months = date_time[1]
    if days is None:
        days = date_time[3]
    if hours is None:
        hours = date_time[4]
    if minutes is None:
        minutes = date_time[5]
    if seconds is None:
        seconds = date_time[6]

    try:
        class SYSTEMTIME(ctypes.Structure):
            # Mirrors the Win32 SYSTEMTIME structure
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # BUGFIX: compare with '!=' rather than 'is not' — identity
        # comparison on small ints is an implementation detail
        if succeeded != 0:
            return True
        log.error('Failed to set local time')
        # BUGFIX: SetLocalTime returns 0 on failure; the real error code
        # must come from GetLastError (the old code formatted the zero
        # return value, which always rendered a success message)
        raise CommandExecutionError(
            win32api.FormatMessage(win32api.GetLastError()).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date.

    Returns:
        str: The system date in ``MM/DD/YYYY`` format

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_system_date
    '''
    # GetLocalTime: index 0 = year, 1 = month, 3 = day of month
    today = win32api.GetLocalTime()
    return '{0:02d}/{1:02d}/{2:04d}'.format(today[1], today[3], today[0])
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.

    Args:
        newdate (str):
            The date to set. Can be any of the following formats

            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date '03-28-13'
    '''
    date_formats = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                    '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']

    parsed = _try_parse_datetime(newdate, date_formats)
    if parsed is None:
        return False

    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(years=parsed.year,
                                months=parsed.month,
                                days=parsed.day)
def start_time_service():
    '''
    Start the Windows time (w32time) service.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.start_time_service
    '''
    # Delegate to the generic service module
    return __salt__['service.start']('w32time')
def stop_time_service():
    '''
    Stop the Windows time (w32time) service.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.stop_time_service
    '''
    # Delegate to the generic service module
    return __salt__['service.stop']('w32time')
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_component_servicing
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'

    # The mere presence of this key signals a pending reboot
    pending = __utils__['reg.key_exists']('HKLM', key)
    if pending:
        log.debug('Key exists: %s', key)
    else:
        log.debug('Key does not exist: %s', key)
    return pending
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'

    # The presence of either subkey signals a pending reboot
    for subkey in ('AvoidSpnSet', 'JoinDomain'):
        key = r'{0}\{1}'.format(base_key, subkey)
        if __utils__['reg.key_exists']('HKLM', key):
            log.debug('Key exists: %s', key)
            return True
        log.debug('Key does not exist: %s', key)

    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'

    # If either value exists and has data set, a reboot is pending
    for vname in ('PendingFileRenameOperations',
                  'PendingFileRenameOperations2'):
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)'):
            return True

    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    vname = 'CurrentRebootAttempts'
    key = r'SOFTWARE\Microsoft\ServerManager'

    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False
    log.debug('Found key: %s', key)

    # The value data can be '(value not set)'; treat any non-numeric data
    # as "no reboot pending" instead of raising
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
def get_pending_update():
    '''
    Determine whether there are pending updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'

    # The mere presence of this key signals a pending reboot
    pending = __utils__['reg.key_exists']('HKLM', key)
    if pending:
        log.debug('Key exists: %s', key)
    else:
        log.debug('Key does not exist: %s', key)
    return pending
# Volatile registry key the minion uses to flag a witnessed reboot
# requirement; being volatile it does not survive a reboot
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
# Value name under MINION_VOLATILE_KEY that is set to 1 when a reboot is required
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    Record that an event indicating a required reboot was witnessed during
    this boot session.

    The flag is stored as the value *'Reboot required'* = *1* under the
    volatile registry key
    *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data* in the
    *HKLM* hive. Because the key is volatile it does not persist beyond the
    current boot session.

    For the time being, this function is being used whenever an install
    completes with exit code 3010 and can be extended where appropriate in the
    future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    return __utils__['reg.set_value'](hive='HKLM',
                                      key=MINION_VOLATILE_KEY,
                                      volatile=True,
                                      vname=REBOOT_REQUIRED_NAME,
                                      vdata=1,
                                      vtype='REG_DWORD')
def get_reboot_required_witnessed():
    '''
    Determine if, at any time during the current boot session, the salt
    minion witnessed an event indicating that a reboot is required.

    This returns ``True`` if an install completed with exit code 3010 during
    the current boot session and can be extended where appropriate in the
    future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Reboot required`` registry flag is set to
        ``1``, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    reg_ret = __utils__['reg.read_value'](hive='HKLM',
                                          key=MINION_VOLATILE_KEY,
                                          vname=REBOOT_REQUIRED_NAME)
    return reg_ret['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending, consulting every known
    pending-reboot indicator.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Ordered from most to least likely to be the cause; any() short-circuits
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)
    return any(check() for check in checks)
|
saltstack/salt
|
salt/modules/win_system.py
|
set_computer_name
|
python
|
def set_computer_name(name):
'''
Set the Windows computer name
Args:
name (str):
The new name to give the computer. Requires a reboot to take effect.
Returns:
dict:
Returns a dictionary containing the old and new names if successful.
``False`` if not.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_name 'DavesComputer'
'''
if six.PY2:
name = _to_unicode(name)
if windll.kernel32.SetComputerNameExW(
win32con.ComputerNamePhysicalDnsHostname, name):
ret = {'Computer Name': {'Current': get_computer_name()}}
pending = get_pending_computer_name()
if pending not in (None, False):
ret['Computer Name']['Pending'] = pending
return ret
return False
|
Set the Windows computer name
Args:
name (str):
The new name to give the computer. Requires a reboot to take effect.
Returns:
dict:
Returns a dictionary containing the old and new names if successful.
``False`` if not.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_name 'DavesComputer'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L374-L405
|
[
"def get_computer_name():\n '''\n Get the Windows computer name\n\n Returns:\n str: Returns the computer name if found. Otherwise returns ``False``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'minion-id' system.get_computer_name\n '''\n name = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname)\n return name if name else False\n",
"def _to_unicode(instr):\n '''\n Converts from current users character encoding to unicode.\n When instr has a value of None, the return value of the function\n will also be None.\n '''\n if instr is None or isinstance(instr, six.text_type):\n return instr\n else:\n return six.text_type(instr, 'utf8')\n",
"def get_pending_computer_name():\n '''\n Get a pending computer name. If the computer name has been changed, and the\n change is pending a system reboot, this function will return the pending\n computer name. Otherwise, ``None`` will be returned. If there was an error\n retrieving the pending computer name, ``False`` will be returned, and an\n error message will be logged to the minion log.\n\n Returns:\n str:\n Returns the pending name if pending restart. Returns ``None`` if not\n pending restart.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'minion-id' system.get_pending_computer_name\n '''\n current = get_computer_name()\n pending = __utils__['reg.read_value'](\n 'HKLM',\n r'SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters',\n 'NV Hostname')['vdata']\n if pending:\n return pending if pending != current else None\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Load only on Windows systems where the required Win32 modules imported
    successfully.
    '''
    if salt.utils.platform.is_windows() and HAS_WIN32NET_MODS:
        return __virtualname__
    if not salt.utils.platform.is_windows():
        return False, 'Module win_system: Requires Windows'
    return False, 'Module win_system: Missing win32 modules'
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
def _to_unicode(instr):
    '''
    Decode ``instr`` from UTF-8 to unicode.

    ``None`` and values that are already unicode are returned unchanged.
    '''
    if instr is None:
        return instr
    if isinstance(instr, six.text_type):
        return instr
    return six.text_type(instr, 'utf8')
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system. Thin wrapper around ``shutdown``.

    Args:

        timeout (int):
            Number of seconds before halting the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.halt 5 True
    '''
    return shutdown(in_seconds=in_seconds, timeout=timeout)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows, so this is a stub that only reports that fact.

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # TODO: Create a mapping of runlevels to # pylint: disable=fixme
    # corresponding Windows actions
    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system. Thin wrapper around ``shutdown``.

    Args:

        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.poweroff 5
    '''
    return shutdown(in_seconds=in_seconds, timeout=timeout)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False,  # pylint: disable=redefined-outer-name
           only_on_pending_reboot=False):
    '''
    Reboot a running system.

    Args:

        timeout (int):
            The number of minutes/seconds before rebooting the system. Use of
            minutes or seconds depends on the value of ``in_seconds``. Default
            is 5 minutes.

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        wait_for_reboot (bool)
            ``True`` will sleep for timeout + 30 seconds after reboot has been
            initiated. This is useful for use in a highstate. For example, you
            may have states that you want to apply only after the reboot.
            Default is ``False``.

            .. versionadded:: 2015.8.0

        only_on_pending_reboot (bool):
            If this is set to ``True``, then the reboot will only proceed
            if the system reports a pending reboot. Default is ``False``.

    Returns:
        bool: ``True`` if successful (a reboot will occur), otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.reboot 5
        salt '*' system.reboot 5 True

    Invoking this function from a final housekeeping state:

    .. code-block:: yaml

        final_housekeeping:
           module.run:
              - name: system.reboot
              - only_on_pending_reboot: True
              - order: last
    '''
    success = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
                       only_on_pending_reboot=only_on_pending_reboot)
    if wait_for_reboot:
        # Block long enough for the reboot to actually begin so that any
        # subsequent states only run on the rebooted system.
        time.sleep(_convert_minutes_seconds(timeout, in_seconds) + 30)
    return success
def shutdown(message=None, timeout=5, force_close=True, reboot=False,  # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.

    Args:

        message (str):
            The message to display to the user before shutting down.

        timeout (int):
            The length of time (in seconds) that the shutdown dialog box should
            be displayed. While this dialog box is displayed, the shutdown can
            be aborted using the ``system.shutdown_abort`` function.

            If timeout is not zero, InitiateSystemShutdown displays a dialog
            box on the specified computer. The dialog box displays the name of
            the user who called the function, the message specified by the
            lpMessage parameter, and prompts the user to log off. The dialog
            box beeps when it is created and remains on top of other windows
            (system modal). The dialog box can be moved but not closed. A timer
            counts down the remaining time before the shutdown occurs.

            If timeout is zero, the computer shuts down immediately without
            displaying the dialog box and cannot be stopped by
            ``system.shutdown_abort``.

            Default is 5 minutes

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        force_close (bool):
            ``True`` will force close all open applications. ``False`` will
            display a dialog box instructing the user to close open
            applications. Default is ``True``.

        reboot (bool):
            ``True`` restarts the computer immediately after shutdown. ``False``
            powers down the system. Default is ``False``.

        only_on_pending_reboot (bool):
            If ``True`` the shutdown will only proceed if there is a reboot
            pending. ``False`` will shutdown the system. Default is ``False``.
            To optionally shutdown in a highstate, consider using the shutdown
            state instead of this module.

    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur), otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        message = _to_unicode(message)
    # InitiateSystemShutdown always takes the timeout in seconds.
    timeout = _convert_minutes_seconds(timeout, in_seconds)
    if only_on_pending_reboot and not get_pending_reboot():
        return False
    # Non-text message payloads (e.g. bytes) are decoded before the API call.
    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
                                        force_close, reboot)
        return True
    except pywintypes.error as exc:
        # NOTE: this rebinds ``message`` to the win32 error text.
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def shutdown_hard():
    '''
    Shutdown a running system immediately, with no timeout or warning dialog.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown_hard
    '''
    # A zero timeout skips the shutdown dialog and cannot be aborted.
    return shutdown(timeout=0)
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        number, context, message = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
def lock():
    '''
    Lock the workstation.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.lock
    '''
    # LockWorkStation returns a nonzero value on success -- the result is
    # truthy/falsy rather than a strict Python bool.
    return windll.user32.LockWorkStation()
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed, and
    the change is pending a system reboot, this function will return the
    pending computer name. Otherwise, ``None`` will be returned. If there was
    an error retrieving the pending computer name, ``False`` will be
    returned, and an error message will be logged to the minion log.

    Returns:
        str:
            Returns the pending name if pending restart. Returns ``None`` if
            not pending restart.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    # NOTE(review): 'NV Hostname' holds the hostname that takes effect after
    # the next reboot -- presumably it differs from the active name only when
    # a rename is pending; confirm on a renamed-but-not-yet-rebooted host.
    pending = __utils__['reg.read_value'](
                    'HKLM',
                    r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
                    'NV Hostname')['vdata']
    if pending:
        return pending if pending != current else None
    # An empty/missing registry value is reported as the error case (False).
    return False
def get_computer_name():
    '''
    Get the Windows computer name.

    Returns:
        str: Returns the computer name if found. Otherwise returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_name
    '''
    hostname = win32api.GetComputerNameEx(
        win32con.ComputerNamePhysicalDnsHostname)
    if not hostname:
        return False
    return hostname
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description.

    Args:

        desc (str):
            The computer description

    Returns:
        str: Description if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    # Bail out before doing any work: the original implementation performed
    # the NetServerGetInfo RPC call (and PY2 unicode conversion) even when
    # there was nothing to set.
    if desc is None:
        return False
    if six.PY2:
        desc = _to_unicode(desc)
    # Fetch the current server info block (level 101) so that only the
    # comment field is modified.
    system_info = win32net.NetServerGetInfo(None, 101)
    system_info['comment'] = desc
    # Apply new settings
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return {'Computer Description': get_computer_desc()}
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description') # pylint: disable=invalid-name
def get_system_info():
    '''
    Get system information.

    Returns:
        dict: Dictionary containing information about the system to include
        name, description, version, etc...

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_info
    '''
    def byte_calc(val):
        # Render a raw byte count with a binary-unit suffix (B..TB).
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)

    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}

    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}

    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}

    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}

    # Connect to WMI
    with salt.utils.winapi.Com():
        conn = wmi.WMI()

        # Operating-system level facts from Win32_OperatingSystem.
        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}

        # Hardware/chassis facts from Win32_ComputerSystem.
        system = conn.Win32_ComputerSystem()[0]
        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]
        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })
        # Get processor information: totals are accumulated over all
        # physical processors; manufacturer/clock come from the first one.
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            # NOTE(review): presumably NumberOfEnabledCore can be None on
            # some platforms, which would raise here -- verify before
            # relying on this field.
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore

        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})
        # Normalize the raw WMI timestamps to 'YYYY-MM-DD HH:MM:SS'.
        ret['install_date'] = _convert_date_time_string(ret['install_date'])
        ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
def get_computer_desc():
    '''
    Get the Windows computer description.

    Returns:
        str: Returns the computer description if found. Otherwise returns
        ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_desc
    '''
    description = get_system_info()['description']
    if description is None:
        return False
    return description
get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description') # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion.

    .. versionadded:: 2016.3.0

    Returns:
        str: Returns the hostname of the windows minion

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the native 'hostname' utility.
    return __salt__['cmd.run'](cmd='hostname')
def set_hostname(hostname):
    '''
    Set the hostname of the windows minion; a restart is required before
    the change takes effect.

    .. versionadded:: 2016.3.0

    Args:

        hostname (str): The hostname to set

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_hostname newhostname
    '''
    with salt.utils.winapi.Com():
        computer = wmi.WMI().Win32_ComputerSystem()[0]
        return computer.Rename(Name=hostname)
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.

    Args:

        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str):
            Username of an account which is authorized to join computers to
            the specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``

        password (str):
            Password of the specified user

        account_ou (str):
            The DN of the OU below which the account for this computer should
            be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``

        restart (bool):
            ``True`` will restart the computer after a successful join.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the underlying NetJoin call fails; the
            message is the Windows error text for the returned code.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)
    # Short-circuit if we are already a member of the requested domain.
    status = get_domain_workgroup()
    if 'Domain' in status:
        if status['Domain'] == domain:
            return 'Already joined to {0}'.format(domain)
    # Qualify a bare username with the target domain.
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)
    if username and password is None:
        return 'Must specify a password if you pass a username'
    # remove any escape characters
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.split('\\')
        account_ou = ''.join(account_ou)
    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)
    if not err:
        # Zero error code: the join succeeded.
        ret = {'Domain': domain,
               'Restart': False}
        if restart:
            ret['Restart'] = reboot()
        return ret
    raise CommandExecutionError(win32api.FormatMessage(err).rstrip())
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper function to join the domain.

    Args:

        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str): Username of an account which is authorized to join
            computers to the specified domain. Need to be either fully
            qualified like ``user@domain.tld`` or simply ``user``

        password (str): Password of the specified user

        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool): If set to ``True`` the computer will only join
            the domain if the account already exists. If set to ``False`` the
            computer account will be created if it does not exist, otherwise
            it will use the existing account. Default is False.

    Returns:
        int: The error code returned by JoinDomainOrWorkgroup; zero on
            success.
    '''
    # NETSETUP_* flags from the Win32 NetJoinDomain API.
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name

    join_options = 0x0
    join_options |= NETSETUP_JOIN_DOMAIN
    join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
    join_options |= NETSETUP_JOIN_WITH_NEW_NAME
    if not account_exists:
        join_options |= NETSETUP_ACCOUNT_CREATE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]

        # Return the results of the command as an error
        # JoinDomainOrWorkgroup returns a strangely formatted value that looks
        # like (0,) so return the first item
        return comp.JoinDomainOrWorkgroup(
            Name=domain, Password=password, UserName=username, AccountOU=account_ou,
            FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:

        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this
            can be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``

        restart (bool):
            ``True`` will restart the computer after successful unjoin.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)

    # Short-circuit if already in the requested workgroup.
    status = get_domain_workgroup()
    if 'Workgroup' in status:
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)

    # Qualify a bare username with the domain, which must then be supplied.
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # NOTE(review): the constant name says DELETE but the docstring promises
    # the account is only *disabled* -- confirm the intended NETSETUP flag.
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name

    unjoin_options = 0x0
    if disable:
        unjoin_options |= NETSETUP_ACCT_DELETE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)

        # you have to do this because UnjoinDomainOrWorkgroup returns a
        # strangely formatted value that looks like (0,)
        if not err[0]:
            # Unjoin succeeded; now join the requested workgroup.
            err = comp.JoinDomainOrWorkgroup(Name=workgroup)
            if not err[0]:
                ret = {'Workgroup': workgroup,
                       'Restart': False}
                if restart:
                    ret['Restart'] = reboot()
                return ret
            else:
                log.error(win32api.FormatMessage(err[0]).rstrip())
                log.error('Failed to join the computer to %s', workgroup)
                return False
        else:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to unjoin computer from %s', status['Domain'])
            return False
def get_domain_workgroup():
    '''
    Get the domain or workgroup the computer belongs to.

    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2

    Returns:
        dict: ``{'Domain': <name>}`` when the machine is domain-joined,
        otherwise ``{'Workgroup': <name>}``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_domain_workgroup
    '''
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        # Only the first Win32_ComputerSystem instance is examined: the loop
        # returns on its first iteration either way.
        for computer in conn.Win32_ComputerSystem():
            if computer.PartOfDomain:
                return {'Domain': computer.Domain}
            else:
                return {'Workgroup': computer.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Set the domain or workgroup the computer belongs to.

    .. versionadded:: 2019.2.0

    Args:

        workgroup (str):
            The name of the workgroup to join. The name is uppercased
            before the join, matching Windows convention.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)

    # Initialize COM
    with salt.utils.winapi.Com():
        # Grab the first Win32_ComputerSystem object from wmi
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]

        # JoinDomainOrWorkgroup returns a one-element tuple whose first item
        # is a win32 error code; zero means success. A direct comparison is
        # clearer than the original ``True if not res[0] else False``.
        res = comp.JoinDomainOrWorkgroup(Name=workgroup.upper())
        return res[0] == 0
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the system time.

    Returns:
        str: Returns the system time in HH:MM:SS AM/PM format.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    now = win32api.GetLocalTime()
    hours = int(now[4])
    # Convert the 24-hour clock value to a 12-hour clock plus meridian.
    if hours >= 12:
        meridian = 'PM'
        if hours > 12:
            hours -= 12
    else:
        meridian = 'AM'
        if hours == 0:
            hours = 12
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.

    Args:

        newtime (str):
            The time to set. Can be any of the following formats:

            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_system_time 12:01
    '''
    accepted_fmts = ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M']
    parsed = _try_parse_datetime(newtime, accepted_fmts)
    if parsed is None:
        # Unrecognized time string
        return False
    # Delegate the actual clock change to set_system_date_time().
    return set_system_date_time(hours=parsed.hour,
                                minutes=parsed.minute,
                                seconds=parsed.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for
    that element will be used. For example, if you don't pass the year, the
    current system year will be used. (Used by set_system_date and
    set_system_time)

    Args:

        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If SetLocalTime fails or raises an OSError.

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    # Check for passed values. If not passed, use current values
    if years is None:
        years = date_time[0]
    if months is None:
        months = date_time[1]
    # date_time[2] is the day-of-week field; the day of the month is index 3.
    if days is None:
        days = date_time[3]
    if hours is None:
        hours = date_time[4]
    if minutes is None:
        minutes = date_time[5]
    if seconds is None:
        seconds = date_time[6]

    try:
        class SYSTEMTIME(ctypes.Structure):
            # Mirrors the Win32 SYSTEMTIME struct layout.
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # BUGFIX: was ``if succeeded is not 0`` -- identity comparison with
        # an int literal is implementation-defined and a SyntaxWarning on
        # Python >= 3.8. SetLocalTime returns nonzero on success.
        if succeeded != 0:
            return True
        log.error('Failed to set local time')
        # NOTE(review): FormatMessage is passed the (zero) return value, not
        # GetLastError() -- kept for backward compatibility; the resulting
        # message text may be unhelpful.
        raise CommandExecutionError(
            win32api.FormatMessage(succeeded).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date.

    Returns:
        str: Returns the system date in ``MM/DD/YYYY`` format.

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_system_date
    '''
    # GetLocalTime tuple layout: (year, month, day-of-week, day, ...)
    year, month, _, day = win32api.GetLocalTime()[:4]
    return '{0:02d}/{1:02d}/{2:04d}'.format(month, day, year)
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.

    Args:

        newdate (str):
            The date to set. Can be any of the following formats

            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date '03-28-13'
    '''
    accepted_fmts = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                     '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']
    parsed = _try_parse_datetime(newdate, accepted_fmts)
    if parsed is None:
        # Unrecognized date string
        return False
    # Delegate the actual clock change to set_system_date_time().
    return set_system_date_time(years=parsed.year,
                                months=parsed.month,
                                days=parsed.day)
def start_time_service():
    '''
    Start the Windows time service.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.start_time_service
    '''
    # 'w32time' is the service name of the Windows Time service.
    return __salt__['service.start']('w32time')
def stop_time_service():
    '''
    Stop the Windows time service.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.stop_time_service
    '''
    # 'w32time' is the service name of the Windows Time service.
    return __salt__['service.stop']('w32time')
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_component_servicing
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'
    # The mere presence of the RebootPending key signals a pending reboot.
    if __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key exists: %s', key)
        return True
    log.debug('Key does not exist: %s', key)
    return False
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'
    # A pending domain join is signaled by either of these Netlogon subkeys.
    for subkey in ('AvoidSpnSet', 'JoinDomain'):
        key = '{0}\\{1}'.format(base_key, subkey)
        if __utils__['reg.key_exists']('HKLM', key):
            log.debug('Key exists: %s', key)
            return True
        log.debug('Key does not exist: %s', key)
    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'
    # A reboot is pending if either value name exists and carries real data.
    for vname in ('PendingFileRenameOperations', 'PendingFileRenameOperations2'):
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)'):
            return True
    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    key = r'SOFTWARE\Microsoft\ServerManager'
    vname = 'CurrentRebootAttempts'
    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False
    log.debug('Found key: %s', key)
    # The data may be '(value not set)' or otherwise non-numeric; only a
    # positive integer count indicates a pending reboot.
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
def get_pending_update():
    '''
    Determine whether there are pending updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'
    # Presence of the RebootRequired key means updates are waiting on reboot.
    if __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key exists: %s', key)
        return True
    log.debug('Key does not exist: %s', key)
    return False
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    This function is used to remember that an event indicating that a reboot is
    required was witnessed. This function relies on the salt-minion's ability to
    create the following volatile registry key in the *HKLM* hive:

       *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*

    Because this registry key is volatile, it will not persist beyond the
    current boot session. Also, in the scope of this key, the name *'Reboot
    required'* will be assigned the value of *1*.

    For the time being, this function is being used whenever an install
    completes with exit code 3010 and can be extended where appropriate in the
    future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    # volatile=True makes the key disappear on the next boot, so the flag
    # only ever describes the current boot session.
    return __utils__['reg.set_value'](
        hive='HKLM',
        key=MINION_VOLATILE_KEY,
        volatile=True,
        vname=REBOOT_REQUIRED_NAME,
        vdata=1,
        vtype='REG_DWORD')
def get_reboot_required_witnessed():
    '''
    Determine if at any time during the current boot session the salt minion
    witnessed an event indicating that a reboot is required.

    This function will return ``True`` if an install completed with exit
    code 3010 during the current boot session and can be extended where
    appropriate in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Requires reboot`` registry flag is set to ``1``,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    # Read the volatile flag written by set_reboot_required_witnessed; a
    # missing key simply yields vdata != 1 and therefore False.
    value_dict = __utils__['reg.read_value'](
        hive='HKLM',
        key=MINION_VOLATILE_KEY,
        vname=REBOOT_REQUIRED_NAME)
    return value_dict['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Probe every reboot-pending indicator, ordered from most to least
    # likely, and short-circuit as soon as one reports truthy.
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)
    return any(check() for check in checks)
|
saltstack/salt
|
salt/modules/win_system.py
|
get_pending_computer_name
|
python
|
def get_pending_computer_name():
'''
Get a pending computer name. If the computer name has been changed, and the
change is pending a system reboot, this function will return the pending
computer name. Otherwise, ``None`` will be returned. If there was an error
retrieving the pending computer name, ``False`` will be returned, and an
error message will be logged to the minion log.
Returns:
str:
Returns the pending name if pending restart. Returns ``None`` if not
pending restart.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_pending_computer_name
'''
current = get_computer_name()
pending = __utils__['reg.read_value'](
'HKLM',
r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
'NV Hostname')['vdata']
if pending:
return pending if pending != current else None
return False
|
Get a pending computer name. If the computer name has been changed, and the
change is pending a system reboot, this function will return the pending
computer name. Otherwise, ``None`` will be returned. If there was an error
retrieving the pending computer name, ``False`` will be returned, and an
error message will be logged to the minion log.
Returns:
str:
Returns the pending name if pending restart. Returns ``None`` if not
pending restart.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_pending_computer_name
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L408-L434
|
[
"def get_computer_name():\n '''\n Get the Windows computer name\n\n Returns:\n str: Returns the computer name if found. Otherwise returns ``False``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'minion-id' system.get_computer_name\n '''\n name = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname)\n return name if name else False\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Only works on Windows Systems with Win32 Modules
    '''
    # Loader gate: this module is meaningless off Windows...
    if not salt.utils.platform.is_windows():
        return False, 'Module win_system: Requires Windows'

    # ...and useless without the pywin32/wmi stack imported at module top.
    if not HAS_WIN32NET_MODS:
        return False, 'Module win_system: Missing win32 modules'

    return __virtualname__
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
def _to_unicode(instr):
    '''
    Converts from current users character encoding to unicode.

    When instr has a value of None, the return value of the function
    will also be None.
    '''
    # Pass through None and already-decoded text untouched; only byte
    # strings are decoded, assumed to be UTF-8.
    if instr is None or isinstance(instr, six.text_type):
        return instr
    else:
        return six.text_type(instr, 'utf8')
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system.

    Args:

        timeout (int):
            Number of seconds before halting the system. Default is 5 seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.halt 5 True
    '''
    # Halting is implemented as an ordinary shutdown without reboot.
    return shutdown(in_seconds=in_seconds, timeout=timeout)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # Runlevels are a sysV concept with no direct Windows equivalent, so the
    # requested level is ignored and we report the operation as unsupported.
    # TODO: Create a mapping of runlevels to  # pylint: disable=fixme
    # corresponding Windows actions
    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system.

    Args:

        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.poweroff 5
    '''
    # Powering off is a plain shutdown (reboot flag left at its default).
    return shutdown(in_seconds=in_seconds, timeout=timeout)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False,  # pylint: disable=redefined-outer-name
           only_on_pending_reboot=False):
    '''
    Reboot a running system.

    Args:

        timeout (int):
            The number of minutes/seconds before rebooting the system. Use of
            minutes or seconds depends on the value of ``in_seconds``. Default
            is 5 minutes.

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        wait_for_reboot (bool)
            ``True`` will sleep for timeout + 30 seconds after reboot has been
            initiated. This is useful for use in a highstate. For example, you
            may have states that you want to apply only after the reboot.
            Default is ``False``.

            .. versionadded:: 2015.8.0

        only_on_pending_reboot (bool):
            If this is set to ``True``, then the reboot will only proceed
            if the system reports a pending reboot. Setting this parameter to
            ``True`` could be useful when calling this function from a final
            housekeeping state intended to be executed at the end of a state run
            (using *order: last*). Default is ``False``.

    Returns:
        bool: ``True`` if successful (a reboot will occur), otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.reboot 5
        salt '*' system.reboot 5 True

    Invoking this function from a final housekeeping state:

    .. code-block:: yaml

        final_housekeeping:
           module.run:
              - name: system.reboot
              - only_on_pending_reboot: True
              - order: last
    '''
    # Delegate the actual work to shutdown() with the reboot flag set
    ret = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
                   only_on_pending_reboot=only_on_pending_reboot)

    if wait_for_reboot:
        # Block past the shutdown countdown plus a 30-second safety margin
        # so a highstate run does not continue before the machine goes down.
        seconds = _convert_minutes_seconds(timeout, in_seconds)
        time.sleep(seconds + 30)

    return ret
def shutdown(message=None, timeout=5, force_close=True, reboot=False,  # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.

    Args:

        message (str):
            The message to display to the user before shutting down.

        timeout (int):
            The length of time (in seconds) that the shutdown dialog box should
            be displayed. While this dialog box is displayed, the shutdown can
            be aborted using the ``system.shutdown_abort`` function.

            If timeout is not zero, InitiateSystemShutdown displays a dialog box
            on the specified computer. The dialog box displays the name of the
            user who called the function, the message specified by the lpMessage
            parameter, and prompts the user to log off. The dialog box beeps
            when it is created and remains on top of other windows (system
            modal). The dialog box can be moved but not closed. A timer counts
            down the remaining time before the shutdown occurs.

            If timeout is zero, the computer shuts down immediately without
            displaying the dialog box and cannot be stopped by
            ``system.shutdown_abort``.

            Default is 5 minutes

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        force_close (bool):
            ``True`` will force close all open applications. ``False`` will
            display a dialog box instructing the user to close open
            applications. Default is ``True``.

        reboot (bool):
            ``True`` restarts the computer immediately after shutdown. ``False``
            powers down the system. Default is ``False``.

        only_on_pending_reboot (bool):
            If ``True`` the shutdown will only proceed if there is a reboot
            pending. ``False`` will shutdown the system regardless. To
            optionally shutdown in a highstate, consider using the shutdown
            state instead of this module. Default is ``False``.

    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur), otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        message = _to_unicode(message)

    # Normalize the timeout to seconds, which is what the Win32 API expects
    timeout = _convert_minutes_seconds(timeout, in_seconds)

    if only_on_pending_reboot and not get_pending_reboot():
        return False

    # NOTE(review): on Python 3 a non-str message here would be bytes and
    # this decode keeps the API tolerant of byte-string input.
    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
                                        force_close, reboot)
        return True
    except pywintypes.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def shutdown_hard():
    '''
    Shutdown a running system with no timeout or warning.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown_hard
    '''
    # A zero timeout skips the warning dialog and makes the shutdown
    # unabortable (see shutdown()).
    return shutdown(timeout=0)
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        # Cancel the countdown started by shutdown() on the local machine
        win32api.AbortSystemShutdown('127.0.0.1')
        return True
    except pywintypes.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def lock():
    '''
    Lock the workstation.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.lock
    '''
    # Thin wrapper around the Win32 LockWorkStation API
    return windll.user32.LockWorkStation()
def set_computer_name(name):
    '''
    Set the Windows computer name

    Args:

        name (str):
            The new name to give the computer. Requires a reboot to take effect.

    Returns:
        dict:
            Returns a dictionary containing the old and new names if successful.
            ``False`` if not.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_name 'DavesComputer'
    '''
    if six.PY2:
        name = _to_unicode(name)

    if windll.kernel32.SetComputerNameExW(
            win32con.ComputerNamePhysicalDnsHostname, name):
        # Report both the still-active name and, until the next reboot,
        # the newly assigned pending name.
        ret = {'Computer Name': {'Current': get_computer_name()}}
        pending = get_pending_computer_name()
        if pending not in (None, False):
            ret['Computer Name']['Pending'] = pending
        return ret
    return False
def get_computer_name():
    '''
    Get the Windows computer name

    Returns:
        str: Returns the computer name if found. Otherwise returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_name
    '''
    # An empty lookup result is reported as ``False`` to the caller.
    hostname = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname)
    if not hostname:
        return False
    return hostname
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description

    Args:

        desc (str):
            The computer description

    Returns:
        dict: ``{'Computer Description': <new description>}`` if successful,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    # Nothing to set: bail out before touching the network API at all.
    # (Previously the None check ran only after NetServerGetInfo, making a
    # needless server-info call on every no-op invocation.)
    if desc is None:
        return False

    if six.PY2:
        desc = _to_unicode(desc)

    # Fetch the current level-101 server info so we only change the comment
    system_info = win32net.NetServerGetInfo(None, 101)
    system_info['comment'] = desc

    # Apply new settings
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    return {'Computer Description': get_computer_desc()}
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description') # pylint: disable=invalid-name
def get_system_info():
    '''
    Get system information.

    Returns:
        dict: Dictionary containing information about the system to include
        name, description, version, etc...

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_info
    '''
    def byte_calc(val):
        # Render a raw byte count with the largest suitable binary unit
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)

    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}
    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}
    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}
    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}

    # Connect to WMI
    with salt.utils.winapi.Com():
        conn = wmi.WMI()

        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}

        system = conn.Win32_ComputerSystem()[0]

        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]

        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })

        # Get processor information: counts are summed across all physical
        # processor packages, manufacturer/clock come from the first one
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore

        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})

        # Reformat the raw WMI timestamps into YYYY-MM-DD HH:MM:SS
        ret['install_date'] = _convert_date_time_string(ret['install_date'])
        ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
def get_computer_desc():
    '''
    Get the Windows computer description

    Returns:
        str: Returns the computer description if found. Otherwise returns
        ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_desc
    '''
    # WMI reports a missing description as None; normalize that to False.
    description = get_system_info()['description']
    if description is None:
        return False
    return description
get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description') # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion

    .. versionadded:: 2016.3.0

    Returns:
        str: Returns the hostname of the windows minion

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the native 'hostname' utility via cmd.run
    cmd = 'hostname'
    ret = __salt__['cmd.run'](cmd=cmd)
    return ret
def set_hostname(hostname):
    '''
    Set the hostname of the windows minion, requires a restart before this will
    be updated.

    .. versionadded:: 2016.3.0

    Args:
        hostname (str): The hostname to set

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_hostname newhostname
    '''
    # Rename through WMI; takes effect only after the next reboot
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        return comp.Rename(Name=hostname)
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.

    Args:

        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str):
            Username of an account which is authorized to join computers to the
            specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``

        password (str):
            Password of the specified user

        account_ou (str):
            The DN of the OU below which the account for this computer should be
            created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``

        restart (bool):
            ``True`` will restart the computer after a successful join. Default
            is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the underlying join call returns an error
        code.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)

    # No-op if we are already a member of the requested domain
    status = get_domain_workgroup()
    if 'Domain' in status:
        if status['Domain'] == domain:
            return 'Already joined to {0}'.format(domain)

    # Qualify a bare username with the target domain
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # remove any escape characters
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.split('\\')
        account_ou = ''.join(account_ou)

    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)

    if not err:
        ret = {'Domain': domain,
               'Restart': False}
        if restart:
            ret['Restart'] = reboot()
        return ret

    # Non-zero Win32 error code: translate it to a readable message
    raise CommandExecutionError(win32api.FormatMessage(err).rstrip())
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper function to join the domain.

    Args:
        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str): Username of an account which is authorized to join
            computers to the specified domain. Need to be either fully qualified
            like ``user@domain.tld`` or simply ``user``

        password (str): Password of the specified user

        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool): If set to ``True`` the computer will only join
            the domain if the account already exists. If set to ``False`` the
            computer account will be created if it does not exist, otherwise it
            will use the existing account. Default is False.

    Returns:
        int: The Win32 error code from JoinDomainOrWorkgroup (0 on success).
    '''
    # NETSETUP_* option flags as documented for the Win32
    # JoinDomainOrWorkgroup API
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name

    join_options = 0x0
    join_options |= NETSETUP_JOIN_DOMAIN
    join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
    join_options |= NETSETUP_JOIN_WITH_NEW_NAME
    if not account_exists:
        join_options |= NETSETUP_ACCOUNT_CREATE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]

        # Return the results of the command as an error
        # JoinDomainOrWorkgroup returns a strangely formatted value that looks like
        # (0,) so return the first item
        return comp.JoinDomainOrWorkgroup(
            Name=domain, Password=password, UserName=username, AccountOU=account_ou,
            FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:

        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this can
            be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``

        restart (bool):
            ``True`` will restart the computer after successful unjoin. Default
            is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)

    # No-op if we are already a member of the requested workgroup
    status = get_domain_workgroup()
    if 'Workgroup' in status:
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)

    # Qualify a bare username with the supplied domain
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # NETSETUP option flag for UnjoinDomainOrWorkgroup: disable (not delete,
    # despite the constant's name here) the AD computer account on unjoin
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name

    unjoin_options = 0x0
    if disable:
        unjoin_options |= NETSETUP_ACCT_DELETE

    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)

        # you have to do this because UnjoinDomainOrWorkgroup returns a
        # strangely formatted value that looks like (0,)
        if not err[0]:
            # Unjoin succeeded; now join the requested workgroup
            err = comp.JoinDomainOrWorkgroup(Name=workgroup)
            if not err[0]:
                ret = {'Workgroup': workgroup,
                       'Restart': False}
                if restart:
                    ret['Restart'] = reboot()

                return ret
            else:
                log.error(win32api.FormatMessage(err[0]).rstrip())
                log.error('Failed to join the computer to %s', workgroup)
                return False
        else:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to unjoin computer from %s', status['Domain'])
            return False
def get_domain_workgroup():
    '''
    Get the domain or workgroup the computer belongs to.

    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2

    Returns:
        str: The name of the domain or workgroup

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_domain_workgroup
    '''
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        # Only one Win32_ComputerSystem instance is expected; we return
        # from the first iteration either way.
        for computer in conn.Win32_ComputerSystem():
            if computer.PartOfDomain:
                return {'Domain': computer.Domain}
            else:
                return {'Workgroup': computer.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Set the domain or workgroup the computer belongs to.

    .. versionadded:: 2019.2.0

    Args:
        workgroup (str): The workgroup to join the computer to.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)

    # Initialize COM
    with salt.utils.winapi.Com():
        # Grab the first Win32_ComputerSystem object from wmi
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]

        # Now we can join the new workgroup; a zero first element in the
        # returned tuple signals success
        res = comp.JoinDomainOrWorkgroup(Name=workgroup.upper())

    return True if not res[0] else False
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the system time.

    Returns:
        str: Returns the system time in HH:MM:SS AM/PM format.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    # GetLocalTime tuple layout: (year, month, weekday, day, hour, min, sec, ms)
    now = win32api.GetLocalTime()
    meridian = 'AM'
    hours = int(now[4])
    # Convert the 24-hour value to a 12-hour clock with meridian
    if hours == 12:
        meridian = 'PM'
    elif hours == 0:
        hours = 12
    elif hours > 12:
        hours = hours - 12
        meridian = 'PM'
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.

    Args:

        newtime (str):
            The time to set. Can be any of the following formats:

            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_system_time 12:01
    '''
    # Accept both 12-hour (with AM/PM) and 24-hour notations
    accepted_formats = ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M']
    parsed = _try_parse_datetime(newtime, accepted_formats)
    if parsed is None:
        # Unrecognized time string
        return False

    # Delegate to the combined date/time setter, changing only the time part
    return set_system_date_time(hours=parsed.hour,
                                minutes=parsed.minute,
                                seconds=parsed.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for that
    element will be used. For example, if you don't pass the year, the current
    system year will be used. (Used by set_system_date and set_system_time)

    Args:

        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If SetLocalTime fails or an OS-level error
        occurs while setting the time.

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time so unspecified fields can be preserved
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    # Check for passed values. If not passed, use current values.
    # Index 2 of the GetLocalTime tuple is the day-of-week, which
    # SetLocalTime ignores on input, hence the skip from [1] to [3].
    if years is None:
        years = date_time[0]
    if months is None:
        months = date_time[1]
    if days is None:
        days = date_time[3]
    if hours is None:
        hours = date_time[4]
    if minutes is None:
        minutes = date_time[5]
    if seconds is None:
        seconds = date_time[6]

    try:
        class SYSTEMTIME(ctypes.Structure):
            # Mirrors the Win32 SYSTEMTIME struct layout expected by
            # kernel32!SetLocalTime
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # BUGFIX: compare by value, not identity. ``succeeded is not 0``
        # tested object identity against the literal 0, which is an
        # implementation detail of CPython int caching and raises
        # SyntaxWarning on Python >= 3.8.
        if succeeded != 0:
            return True
        else:
            log.error('Failed to set local time')
            raise CommandExecutionError(
                win32api.FormatMessage(succeeded).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date

    Returns:
        str: Returns the system date

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_system_date
    '''
    # GetLocalTime tuple layout: (year, month, weekday, day, ...);
    # format the date portion as MM/DD/YYYY.
    local_time = win32api.GetLocalTime()
    return '{0:02d}/{1:02d}/{2:04d}'.format(local_time[1], local_time[3], local_time[0])
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.

    Args:
        newdate (str):
            The date to set. Can be any of the following formats

            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date '03-28-13'
    '''
    accepted_formats = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                        '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']
    # Parse the passed string against every accepted format
    parsed = _try_parse_datetime(newdate, accepted_formats)
    if parsed is None:
        return False
    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(years=parsed.year,
                                months=parsed.month,
                                days=parsed.day)
def start_time_service():
    '''
    Start the Windows time (w32time) service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.start_time_service
    '''
    svc_start = __salt__['service.start']
    return svc_start('w32time')
def stop_time_service():
    '''
    Stop the Windows time (w32time) service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.stop_time_service
    '''
    svc_stop = __salt__['service.stop']
    return svc_stop('w32time')
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
            otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_component_servicing
    '''
    cbs_key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'
    # The mere presence of this registry key means a reboot is pending.
    if not __utils__['reg.key_exists']('HKLM', cbs_key):
        log.debug('Key does not exist: %s', cbs_key)
        return False
    log.debug('Key exists: %s', cbs_key)
    return True
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'
    avoid_key = r'{0}\AvoidSpnSet'.format(base_key)
    join_key = r'{0}\JoinDomain'.format(base_key)
    # The presence of either subkey indicates a pending reboot. Check the
    # AvoidSpnSet key first, matching the original lookup order.
    for reg_key in (avoid_key, join_key):
        if __utils__['reg.key_exists']('HKLM', reg_key):
            log.debug('Key exists: %s', reg_key)
            return True
        log.debug('Key does not exist: %s', reg_key)
    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    vnames = ('PendingFileRenameOperations', 'PendingFileRenameOperations2')
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'
    # A reboot is pending if either value name exists with real data.
    for vname in vnames:
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)'):
            return True
    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    vname = 'CurrentRebootAttempts'
    key = r'SOFTWARE\Microsoft\ServerManager'
    # There are situations where it's possible to have '(value not set)' as
    # the value data, and since an actual reboot won't be pending in that
    # instance, just catch instances where the cast to int fails.
    # NOTE: int() raises TypeError (not ValueError) when vdata is None, so
    # both must be caught or a missing value would crash the check.
    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
    if reg_ret['success']:
        log.debug('Found key: %s', key)
        try:
            if int(reg_ret['vdata']) > 0:
                return True
        except (TypeError, ValueError):
            pass
    else:
        log.debug('Unable to access key: %s', key)
    return False
def get_pending_update():
    '''
    Determine whether there are pending updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    wu_key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'
    # The mere presence of this registry key means a reboot is pending.
    if not __utils__['reg.key_exists']('HKLM', wu_key):
        log.debug('Key does not exist: %s', wu_key)
        return False
    log.debug('Key exists: %s', wu_key)
    return True
# Volatile registry key (created fresh each boot by the salt-minion service)
# used to remember that a reboot-requiring event was witnessed this session.
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
# Value name under MINION_VOLATILE_KEY holding the reboot-required flag.
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    Remember that an event indicating a required reboot was witnessed. This
    relies on the salt-minion's ability to create the following volatile
    registry key in the *HKLM* hive:

       *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*

    Because the key is volatile it does not persist beyond the current boot
    session. Within it, the name *'Reboot required'* is set to *1*.

    For the time being, this function is used whenever an install completes
    with exit code 3010 and can be extended where appropriate in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    reg_args = {'hive': 'HKLM',
                'key': MINION_VOLATILE_KEY,
                'volatile': True,
                'vname': REBOOT_REQUIRED_NAME,
                'vdata': 1,
                'vtype': 'REG_DWORD'}
    return __utils__['reg.set_value'](**reg_args)
def get_reboot_required_witnessed():
    '''
    Determine if, at any time during the current boot session, the salt
    minion witnessed an event indicating that a reboot is required.

    Returns ``True`` if an install completed with exit code 3010 during the
    current boot session; can be extended where appropriate in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Reboot required`` registry flag is set to
            ``1``, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    reg_ret = __utils__['reg.read_value'](hive='HKLM',
                                          key=MINION_VOLATILE_KEY,
                                          vname=REBOOT_REQUIRED_NAME)
    return reg_ret['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Ordered from most to least likely so the common case short-circuits
    # early; any() stops at the first check that reports a pending reboot.
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)
    return any(check() for check in checks)
|
saltstack/salt
|
salt/modules/win_system.py
|
set_computer_desc
|
python
|
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description

    Args:
        desc (str):
            The computer description

    Returns:
        dict: ``{'Computer Description': <desc>}`` if successful, otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    # Nothing to do without a description -- bail out before calling the
    # network API (the original fetched the server info record first, doing
    # needless work when desc was None).
    if desc is None:
        return False
    if six.PY2:
        desc = _to_unicode(desc)
    # Fetch the current level-101 server info record so the remaining fields
    # are preserved when the comment is written back.
    system_info = win32net.NetServerGetInfo(None, 101)
    system_info['comment'] = desc
    # Apply new settings
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return {'Computer Description': get_computer_desc()}
|
Set the Windows computer description
Args:
desc (str):
The computer description
Returns:
str: Description if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L454-L496
|
[
"def _to_unicode(instr):\n '''\n Converts from current users character encoding to unicode.\n When instr has a value of None, the return value of the function\n will also be None.\n '''\n if instr is None or isinstance(instr, six.text_type):\n return instr\n else:\n return six.text_type(instr, 'utf8')\n",
"def get_computer_desc():\n '''\n Get the Windows computer description\n\n Returns:\n str: Returns the computer description if found. Otherwise returns\n ``False``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'minion-id' system.get_computer_desc\n '''\n desc = get_system_info()['description']\n return False if desc is None else desc\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Only load on Windows systems where the win32 modules are importable.
    '''
    if not salt.utils.platform.is_windows():
        return False, 'Module win_system: Requires Windows'
    if not HAS_WIN32NET_MODS:
        return False, 'Module win_system: Missing win32 modules'
    return __virtualname__
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
def _to_unicode(instr):
    '''
    Decode ``instr`` from UTF-8 to a unicode string.

    ``None`` and values that are already unicode are returned unchanged.
    '''
    if instr is None:
        return instr
    if isinstance(instr, six.text_type):
        return instr
    return six.text_type(instr, 'utf8')
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system.

    Args:
        timeout (int):
            Number of seconds before halting the system. Default is 5 seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.halt 5 True
    '''
    # Halting is just a shutdown without a reboot.
    return shutdown(in_seconds=in_seconds, timeout=timeout)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # Windows has no SysV runlevel concept; a mapping of runlevels to
    # Windows actions may be added here in the future.  # pylint: disable=fixme
    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system.

    Args:
        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.poweroff 5
    '''
    # Powering off is just a shutdown without a reboot.
    return shutdown(in_seconds=in_seconds, timeout=timeout)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False,  # pylint: disable=redefined-outer-name
           only_on_pending_reboot=False):
    '''
    Reboot a running system.

    Args:
        timeout (int):
            The number of minutes/seconds before rebooting the system. Use of
            minutes or seconds depends on the value of ``in_seconds``. Default
            is 5 minutes.

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        wait_for_reboot (bool)
            ``True`` will sleep for timeout + 30 seconds after reboot has been
            initiated. This is useful for use in a highstate. For example, you
            may have states that you want to apply only after the reboot.
            Default is ``False``.

            .. versionadded:: 2015.8.0

        only_on_pending_reboot (bool):
            If ``True``, the reboot will only proceed if the system reports a
            pending reboot. Useful in a final housekeeping state executed at
            the end of a state run (using *order: last*). Default is ``False``.

    Returns:
        bool: ``True`` if successful (a reboot will occur), otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.reboot 5
        salt '*' system.reboot 5 True

    Invoking this function from a final housekeeping state:

    .. code-block:: yaml

        final_housekeeping:
           module.run:
              - name: system.reboot
              - only_on_pending_reboot: True
              - order: last
    '''
    initiated = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
                         only_on_pending_reboot=only_on_pending_reboot)
    if wait_for_reboot:
        # Block until the machine should be going down (plus a 30s margin).
        time.sleep(_convert_minutes_seconds(timeout, in_seconds) + 30)
    return initiated
def shutdown(message=None, timeout=5, force_close=True, reboot=False,  # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.

    Args:
        message (str):
            The message to display to the user before shutting down.

        timeout (int):
            The length of time (in seconds) that the shutdown dialog box should
            be displayed. While this dialog box is displayed, the shutdown can
            be aborted using the ``system.shutdown_abort`` function.

            If timeout is not zero, InitiateSystemShutdown displays a dialog box
            on the specified computer. The dialog box displays the name of the
            user who called the function, the message specified by the lpMessage
            parameter, and prompts the user to log off. The dialog box beeps
            when it is created and remains on top of other windows (system
            modal). The dialog box can be moved but not closed. A timer counts
            down the remaining time before the shutdown occurs.

            If timeout is zero, the computer shuts down immediately without
            displaying the dialog box and cannot be stopped by
            ``system.shutdown_abort``.

            Default is 5 minutes

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        force_close (bool):
            ``True`` will force close all open applications. ``False`` will
            display a dialog box instructing the user to close open
            applications. Default is ``True``.

        reboot (bool):
            ``True`` restarts the computer immediately after shutdown. ``False``
            powers down the system. Default is ``False``.

        only_on_pending_reboot (bool):
            If ``True`` the shutdown will only proceed if there is a reboot
            pending; otherwise nothing happens and ``False`` is returned. To
            optionally shutdown in a highstate, consider using the shutdown
            state instead of this module. Default is ``False``.

    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur), otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        message = _to_unicode(message)
    # Normalize the timeout to seconds before handing it to the win32 API.
    timeout = _convert_minutes_seconds(timeout, in_seconds)
    if only_on_pending_reboot and not get_pending_reboot():
        return False
    # Coerce a bytes message to text so InitiateSystemShutdown accepts it.
    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
                                        force_close, reboot)
        return True
    except pywintypes.error as exc:
        # exc.args is (error number, failing API context, message text)
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def shutdown_hard():
    '''
    Shutdown a running system with no timeout or warning.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown_hard
    '''
    # A zero timeout shuts down immediately and cannot be aborted.
    return shutdown(timeout=0)
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        # exc.args is (error number, failing API context, message text)
        (number, context, message) = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
def lock():
    '''
    Lock the workstation.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.lock
    '''
    user32 = windll.user32
    return user32.LockWorkStation()
def set_computer_name(name):
    '''
    Set the Windows computer name

    Args:
        name (str):
            The new name to give the computer. Requires a reboot to take
            effect.

    Returns:
        dict:
            Returns a dictionary containing the old and new names if
            successful. ``False`` if not.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_name 'DavesComputer'
    '''
    if six.PY2:
        name = _to_unicode(name)
    renamed = windll.kernel32.SetComputerNameExW(
        win32con.ComputerNamePhysicalDnsHostname, name)
    if not renamed:
        return False
    ret = {'Computer Name': {'Current': get_computer_name()}}
    # The new name only takes effect after a reboot; report it as pending.
    pending = get_pending_computer_name()
    if pending not in (None, False):
        ret['Computer Name']['Pending'] = pending
    return ret
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed, and
    the change is pending a system reboot, this function will return the
    pending computer name. Otherwise, ``None`` will be returned. If there was
    an error retrieving the pending computer name, ``False`` will be returned.

    Returns:
        str:
            Returns the pending name if pending restart. Returns ``None`` if
            not pending restart.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    pending = __utils__['reg.read_value'](
        'HKLM',
        r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
        'NV Hostname')['vdata']
    if not pending:
        return False
    # A pending value equal to the current name means no rename is queued.
    return pending if pending != current else None
def get_computer_name():
    '''
    Get the Windows computer name

    Returns:
        str: Returns the computer name if found. Otherwise returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_name
    '''
    dns_hostname = win32api.GetComputerNameEx(
        win32con.ComputerNamePhysicalDnsHostname)
    if dns_hostname:
        return dns_hostname
    return False
# Backwards-compatible alias: expose set_computer_desc as
# system.set_computer_description as well.
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description')  # pylint: disable=invalid-name
def get_system_info():
    '''
    Get system information.

    Returns:
        dict: Dictionary containing information about the system to include
        name, description, version, etc...

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_info
    '''
    # Render a raw byte count as a human-readable string (B/KB/MB/GB/TB).
    def byte_calc(val):
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)
    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}
    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}
    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}
    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}
    # Connect to WMI (COM must be initialized for the current thread).
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}
        system = conn.Win32_ComputerSystem()[0]
        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]
        # NOTE(review): ChassisSKUNumber is presumably only present on newer
        # Windows builds -- confirm before relying on it everywhere.
        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })
        # Get processor information: totals are summed over every physical
        # processor; manufacturer/clock come from the first one.
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore
        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})
    # Reformat WMI timestamps to 'YYYY-MM-DD HH:MM:SS'.
    ret['install_date'] = _convert_date_time_string(ret['install_date'])
    ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
def get_computer_desc():
    '''
    Get the Windows computer description

    Returns:
        str: Returns the computer description if found. Otherwise returns
            ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_desc
    '''
    description = get_system_info()['description']
    if description is None:
        return False
    return description
# Backwards-compatible alias: expose get_computer_desc as
# system.get_computer_description as well.
get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description')  # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion

    .. versionadded:: 2016.3.0

    Returns:
        str: Returns the hostname of the windows minion

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the native 'hostname' utility.
    return __salt__['cmd.run'](cmd='hostname')
def set_hostname(hostname):
    '''
    Set the hostname of the windows minion, requires a restart before this
    will be updated.

    .. versionadded:: 2016.3.0

    Args:
        hostname (str): The hostname to set

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_hostname newhostname
    '''
    with salt.utils.winapi.Com():
        computer = wmi.WMI().Win32_ComputerSystem()[0]
        return computer.Rename(Name=hostname)
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.

    Args:

        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str):
            Username of an account which is authorized to join computers to
            the specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``

        password (str):
            Password of the specified user

        account_ou (str):
            The DN of the OU below which the account for this computer should
            be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``

        restart (bool):
            ``True`` will restart the computer after a successful join.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the underlying JoinDomainOrWorkgroup call
            returns a nonzero Windows error code.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)
    # Short-circuit if the machine is already joined to this domain.
    status = get_domain_workgroup()
    if 'Domain' in status:
        if status['Domain'] == domain:
            return 'Already joined to {0}'.format(domain)
    # Qualify a bare username with the target domain.
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)
    if username and password is None:
        return 'Must specify a password if you pass a username'
    # remove any escape characters
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.split('\\')
        account_ou = ''.join(account_ou)
    # err is 0 on success, otherwise a Windows error code.
    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)
    if not err:
        ret = {'Domain': domain,
               'Restart': False}
        if restart:
            ret['Restart'] = reboot()
        return ret
    raise CommandExecutionError(win32api.FormatMessage(err).rstrip())
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper function to join the domain.

    Args:
        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str): Username of an account which is authorized to join
            computers to the specified domain. Need to be either fully
            qualified like ``user@domain.tld`` or simply ``user``

        password (str): Password of the specified user

        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool): If set to ``True`` the computer will only join
            the domain if the account already exists. If set to ``False`` the
            computer account will be created if it does not exist, otherwise
            it will use the existing account. Default is False.

    Returns:
        int: 0 on success, otherwise a Windows error code.
    '''
    # NetJoinDomain option flags (see lmjoin.h).
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name
    join_options = 0x0
    join_options |= NETSETUP_JOIN_DOMAIN
    join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
    join_options |= NETSETUP_JOIN_WITH_NEW_NAME
    if not account_exists:
        join_options |= NETSETUP_ACCOUNT_CREATE
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        # Return the results of the command as an error
        # JoinDomainOrWorkgroup returns a strangely formatted value that looks like
        # (0,) so return the first item
        return comp.JoinDomainOrWorkgroup(
            Name=domain, Password=password, UserName=username, AccountOU=account_ou,
            FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:

        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this
            can be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``

        restart (bool):
            ``True`` will restart the computer after successful unjoin.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)
    # Short-circuit if already a member of the target workgroup.
    status = get_domain_workgroup()
    if 'Workgroup' in status:
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)
    # Qualify a bare username with the passed domain.
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'
    if username and password is None:
        return 'Must specify a password if you pass a username'
    # NetUnjoinDomain option flag: also disable the AD computer account.
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name
    unjoin_options = 0x0
    if disable:
        unjoin_options |= NETSETUP_ACCT_DELETE
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)
        # you have to do this because UnjoinDomainOrWorkgroup returns a
        # strangely formatted value that looks like (0,)
        if not err[0]:
            # Unjoined successfully; now join the requested workgroup.
            err = comp.JoinDomainOrWorkgroup(Name=workgroup)
            if not err[0]:
                ret = {'Workgroup': workgroup,
                       'Restart': False}
                if restart:
                    ret['Restart'] = reboot()
                return ret
            else:
                log.error(win32api.FormatMessage(err[0]).rstrip())
                log.error('Failed to join the computer to %s', workgroup)
                return False
        else:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to unjoin computer from %s', status['Domain'])
            return False
def get_domain_workgroup():
    '''
    Return the domain or workgroup this computer belongs to.

    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2

    Returns:
        dict: ``{'Domain': name}`` when the machine is domain-joined,
        otherwise ``{'Workgroup': name}``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_domain_workgroup
    '''
    # WMI requires COM to be initialized on this thread
    with salt.utils.winapi.Com():
        wmi_conn = wmi.WMI()
        for host in wmi_conn.Win32_ComputerSystem():
            if host.PartOfDomain:
                return {'Domain': host.Domain}
            return {'Workgroup': host.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Change the workgroup this computer belongs to.

    .. versionadded:: 2019.2.0

    Args:
        workgroup (str): The name of the workgroup to join

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)

    # WMI access needs COM initialized on this thread
    with salt.utils.winapi.Com():
        wmi_conn = wmi.WMI()
        computer = wmi_conn.Win32_ComputerSystem()[0]
        # JoinDomainOrWorkgroup returns a tuple whose first element is the
        # Win32 error code; zero means success
        result = computer.JoinDomainOrWorkgroup(Name=workgroup.upper())
        return not bool(result[0])
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the current system time.

    Returns:
        str: The system time in HH:MM:SS AM/PM format

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    local_time = win32api.GetLocalTime()
    hour = int(local_time[4])
    # Convert the 24-hour value returned by Windows to a 12-hour clock
    if hour == 0:
        hour, meridian = 12, 'AM'
    elif hour < 12:
        meridian = 'AM'
    elif hour == 12:
        meridian = 'PM'
    else:
        hour, meridian = hour - 12, 'PM'
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(
        hour, local_time[5], local_time[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.

    Args:
        newtime (str):
            The time to set. Any of the following formats is accepted:

            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_system_time 12:01
    '''
    # Accept both 12-hour and 24-hour notations, with optional seconds
    accepted_formats = ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M']
    parsed = _try_parse_datetime(newtime, accepted_formats)
    if parsed is None:
        # None of the accepted formats matched
        return False
    # Delegate the actual change to set_system_date_time
    return set_system_date_time(hours=parsed.hour,
                                minutes=parsed.minute,
                                seconds=parsed.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for
    that element will be used. For example, if you don't pass the year, the
    current system year will be used. (Used by set_system_date and
    set_system_time)

    Args:
        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If setting the local time fails

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time so omitted elements keep their values
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    # GetLocalTime returns a SYSTEMTIME tuple; index 2 is the day of the
    # week, which is why the indexes below skip from 1 to 3
    if years is None:
        years = date_time[0]
    if months is None:
        months = date_time[1]
    if days is None:
        days = date_time[3]
    if hours is None:
        hours = date_time[4]
    if minutes is None:
        minutes = date_time[5]
    if seconds is None:
        seconds = date_time[6]

    try:
        class SYSTEMTIME(ctypes.Structure):
            # Mirrors the layout of the Win32 SYSTEMTIME struct
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # BUGFIX: the original compared 'succeeded is not 0' — an identity
        # comparison with an int literal, which is unreliable and raises a
        # SyntaxWarning on Python 3.8+. Use a value comparison instead.
        if succeeded != 0:
            return True
        else:
            log.error('Failed to set local time')
            raise CommandExecutionError(
                win32api.FormatMessage(succeeded).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date.

    Returns:
        str: The system date in MM/DD/YYYY format

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_system_date
    '''
    # GetLocalTime: index 0 = year, 1 = month, 3 = day (2 is day-of-week)
    local_time = win32api.GetLocalTime()
    return '{0:02d}/{1:02d}/{2:04d}'.format(
        local_time[1], local_time[3], local_time[0])
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.

    Args:
        newdate (str):
            The date to set. Any of the following formats is accepted:

            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date '03-28-13'
    '''
    # Accepted date notations, dash- and slash-separated
    accepted_formats = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                        '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']
    parsed = _try_parse_datetime(newdate, accepted_formats)
    if parsed is None:
        # Input did not match any accepted format
        return False
    # Delegate the actual change to set_system_date_time
    return set_system_date_time(years=parsed.year,
                                months=parsed.month,
                                days=parsed.day)
def start_time_service():
    '''
    Start the Windows time service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.start_time_service
    '''
    # Delegate to the service execution module; 'w32time' is the
    # Windows Time service name
    return __salt__['service.start']('w32time')
def stop_time_service():
    '''
    Stop the Windows time service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.stop_time_service
    '''
    # Delegate to the service execution module; 'w32time' is the
    # Windows Time service name
    return __salt__['service.stop']('w32time')
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_component_servicing
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'

    # The mere presence of this registry key indicates a pending reboot
    if not __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key does not exist: %s', key)
        return False

    log.debug('Key exists: %s', key)
    return True
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'

    # A reboot is pending when either of these Netlogon subkeys is present
    for subkey in ('AvoidSpnSet', 'JoinDomain'):
        candidate = '{0}\\{1}'.format(base_key, subkey)
        if __utils__['reg.key_exists']('HKLM', candidate):
            log.debug('Key exists: %s', candidate)
            return True
        log.debug('Key does not exist: %s', candidate)

    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'

    # A reboot is pending when either value name exists with real data set
    for vname in ('PendingFileRenameOperations', 'PendingFileRenameOperations2'):
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)'):
            return True

    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    vname = 'CurrentRebootAttempts'
    key = r'SOFTWARE\Microsoft\ServerManager'

    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False

    log.debug('Found key: %s', key)
    # The data can be '(value not set)', so a failed int() cast simply
    # means no reboot is pending
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
def get_pending_update():
    '''
    Determine whether there are pending updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'

    # The mere presence of this registry key indicates a pending reboot
    if not __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key does not exist: %s', key)
        return False

    log.debug('Key exists: %s', key)
    return True
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    This function is used to remember that an event indicating that a reboot is
    required was witnessed. This function relies on the salt-minion's ability to
    create the following volatile registry key in the *HKLM* hive:

       *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*

    Because this registry key is volatile, it will not persist beyond the
    current boot session. Also, in the scope of this key, the name *'Reboot
    required'* will be assigned the value of *1*.

    For the time being, this function is being used whenever an install
    completes with exit code 3010 and can be extended where appropriate in the
    future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    # The key is created volatile so the flag automatically disappears at
    # the next boot, by which time the pending reboot has taken place
    return __utils__['reg.set_value'](
        hive='HKLM',
        key=MINION_VOLATILE_KEY,
        volatile=True,
        vname=REBOOT_REQUIRED_NAME,
        vdata=1,
        vtype='REG_DWORD')
def get_reboot_required_witnessed():
    '''
    Determine if at any time during the current boot session the salt minion
    witnessed an event indicating that a reboot is required.

    This function will return ``True`` if an install completed with exit
    code 3010 during the current boot session and can be extended where
    appropriate in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Reboot required`` registry flag is set to
        ``1``, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    # Read the volatile flag written by set_reboot_required_witnessed
    reg_ret = __utils__['reg.read_value'](hive='HKLM',
                                          key=MINION_VOLATILE_KEY,
                                          vname=REBOOT_REQUIRED_NAME)
    return reg_ret['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Ordered from most to least likely cause of a pending reboot;
    # any() short-circuits on the first check that reports one
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)
    return any(check() for check in checks)
|
saltstack/salt
|
salt/modules/win_system.py
|
get_system_info
|
python
|
def get_system_info():
    '''
    Get system information.

    Returns:
        dict: Dictionary containing information about the system to include
        name, description, version, etc...

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_info
    '''
    def byte_calc(val):
        # Render a raw byte count as a human-readable string (B/KB/MB/GB/TB)
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)

    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}
    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}
    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}
    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}

    # Connect to WMI
    with salt.utils.winapi.Com():
        conn = wmi.WMI()

        # Collect OS-level details from Win32_OperatingSystem
        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}

        # Hardware details from Win32_ComputerSystem
        system = conn.Win32_ComputerSystem()[0]
        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]
        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })
        # Get processor information
        # Counts are summed across all physical processors
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore

        # BIOS details from Win32_BIOS
        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})

    # Convert the raw WMI datetime strings to 'YYYY-MM-DD HH:MM:SS'
    ret['install_date'] = _convert_date_time_string(ret['install_date'])
    ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
|
Get system information.
Returns:
dict: Dictionary containing information about the system to include
name, description, version, etc...
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_system_info
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L502-L632
|
[
"def get_computer_name():\n '''\n Get the Windows computer name\n\n Returns:\n str: Returns the computer name if found. Otherwise returns ``False``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'minion-id' system.get_computer_name\n '''\n name = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname)\n return name if name else False\n",
"def _convert_date_time_string(dt_string):\n '''\n convert string to date time object\n '''\n dt_string = dt_string.split('.')[0]\n dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')\n return dt_obj.strftime('%Y-%m-%d %H:%M:%S')\n",
"def byte_calc(val):\n val = float(val)\n if val < 2**10:\n return '{0:.3f}B'.format(val)\n elif val < 2**20:\n return '{0:.3f}KB'.format(val / 2**10)\n elif val < 2**30:\n return '{0:.3f}MB'.format(val / 2**20)\n elif val < 2**40:\n return '{0:.3f}GB'.format(val / 2**30)\n else:\n return '{0:.3f}TB'.format(val / 2**40)\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Only works on Windows Systems with Win32 Modules
    '''
    # Refuse to load unless we are on Windows and the pywin32/wmi imports
    # at the top of this module succeeded
    if not salt.utils.platform.is_windows():
        return False, 'Module win_system: Requires Windows'

    if not HAS_WIN32NET_MODS:
        return False, 'Module win_system: Missing win32 modules'

    return __virtualname__
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
def _to_unicode(instr):
    '''
    Convert ``instr`` from the current character encoding to unicode.

    ``None`` and values that are already unicode are returned unchanged.
    '''
    if instr is None or isinstance(instr, six.text_type):
        return instr
    return six.text_type(instr, 'utf8')
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system.

    Args:
        timeout (int):
            Number of seconds before halting the system. Default is 5 seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.halt 5 True
    '''
    # Halting is just a shutdown without reboot
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # Kept as a stub so the cross-platform 'system.init' call does not
    # error on Windows minions
    # cmd = ['init', runlevel]
    # ret = __salt__['cmd.run'](cmd, python_shell=False)
    # return ret

    # TODO: Create a mapping of runlevels to  # pylint: disable=fixme
    # corresponding Windows actions

    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system.

    Args:
        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.poweroff 5
    '''
    # Powering off is just a shutdown without reboot
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False,  # pylint: disable=redefined-outer-name
           only_on_pending_reboot=False):
    '''
    Reboot a running system.

    Args:
        timeout (int):
            The number of minutes/seconds before rebooting the system. Use of
            minutes or seconds depends on the value of ``in_seconds``. Default
            is 5 minutes.

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        wait_for_reboot (bool)
            ``True`` will sleep for timeout + 30 seconds after reboot has been
            initiated. This is useful for use in a highstate. For example, you
            may have states that you want to apply only after the reboot.
            Default is ``False``.

            .. versionadded:: 2015.8.0

        only_on_pending_reboot (bool):
            If this is set to ``True``, then the reboot will only proceed
            if the system reports a pending reboot. Setting this parameter to
            ``True`` could be useful when calling this function from a final
            housekeeping state intended to be executed at the end of a state
            run (using *order: last*). Default is ``False``.

    Returns:
        bool: ``True`` if successful (a reboot will occur), otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.reboot 5
        salt '*' system.reboot 5 True

    Invoking this function from a final housekeeping state:

    .. code-block:: yaml

        final_housekeeping:
           module.run:
              - name: system.reboot
              - only_on_pending_reboot: True
              - order: last
    '''
    result = shutdown(timeout=timeout,
                      reboot=True,
                      in_seconds=in_seconds,
                      only_on_pending_reboot=only_on_pending_reboot)

    if wait_for_reboot:
        # Block long enough for the restart to actually begin so that a
        # highstate does not keep applying states on the pre-reboot system
        time.sleep(_convert_minutes_seconds(timeout, in_seconds) + 30)

    return result
def shutdown(message=None, timeout=5, force_close=True, reboot=False,  # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.

    Args:
        message (str):
            The message to display to the user before shutting down.

        timeout (int):
            The length of time (in seconds) that the shutdown dialog box should
            be displayed. While this dialog box is displayed, the shutdown can
            be aborted using the ``system.shutdown_abort`` function.

            If timeout is not zero, InitiateSystemShutdown displays a dialog
            box on the specified computer. The dialog box displays the name of
            the user who called the function, the message specified by the
            lpMessage parameter, and prompts the user to log off. The dialog
            box beeps when it is created and remains on top of other windows
            (system modal). The dialog box can be moved but not closed. A timer
            counts down the remaining time before the shutdown occurs.

            If timeout is zero, the computer shuts down immediately without
            displaying the dialog box and cannot be stopped by
            ``system.shutdown_abort``.

            Default is 5 minutes

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        force_close (bool):
            ``True`` will force close all open applications. ``False`` will
            display a dialog box instructing the user to close open
            applications. Default is ``True``.

        reboot (bool):
            ``True`` restarts the computer immediately after shutdown.
            ``False`` powers down the system. Default is ``False``.

        only_on_pending_reboot (bool):
            If ``True`` the shutdown will only proceed if the system reports a
            pending reboot; ``False`` shuts down unconditionally. To optionally
            shutdown in a highstate, consider using the shutdown state instead
            of this module. Default is ``False``.

    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur), otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        message = _to_unicode(message)

    # Normalize the timeout to seconds for the Win32 API call
    timeout = _convert_minutes_seconds(timeout, in_seconds)

    if only_on_pending_reboot and not get_pending_reboot():
        return False

    # NOTE(review): non-string messages are presumably bytes here — decoded
    # to text before the API call; verify against callers
    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
                                        force_close, reboot)
        return True
    except pywintypes.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def shutdown_hard():
    '''
    Shutdown a running system with no timeout or warning.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown_hard
    '''
    # A timeout of zero shuts down immediately and cannot be aborted
    return shutdown(timeout=0)
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        # Log the Win32 error details and report failure
        (number, context, message) = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
def lock():
    '''
    Lock the workstation.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.lock
    '''
    # Win32 BOOL result: nonzero on success, zero on failure
    return windll.user32.LockWorkStation()
def set_computer_name(name):
    '''
    Set the Windows computer name. Requires a reboot to take effect.

    Args:
        name (str): The new name to give the computer.

    Returns:
        dict:
            A dictionary containing the current and (if applicable) pending
            names if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_name 'DavesComputer'
    '''
    if six.PY2:
        name = _to_unicode(name)

    if not windll.kernel32.SetComputerNameExW(
            win32con.ComputerNamePhysicalDnsHostname, name):
        return False

    ret = {'Computer Name': {'Current': get_computer_name()}}
    # get_pending_computer_name returns None when no rename is pending and
    # False on error; only report a genuine pending name
    pending = get_pending_computer_name()
    if pending not in (None, False):
        ret['Computer Name']['Pending'] = pending
    return ret
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed, and
    the change is pending a system reboot, this function will return the
    pending computer name. Otherwise, ``None`` will be returned. If there was
    an error retrieving the pending computer name, ``False`` will be returned.

    Returns:
        str:
            Returns the pending name if pending restart. Returns ``None`` if
            not pending restart.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    # The hostname that will apply after the next reboot lives in this key
    pending = __utils__['reg.read_value'](
        'HKLM',
        r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
        'NV Hostname')['vdata']
    if not pending:
        return False
    return None if pending == current else pending
def get_computer_name():
    '''
    Get the Windows computer name.

    Returns:
        str: The computer name if found, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_name
    '''
    # Empty name falls back to False for callers that test the result
    return win32api.GetComputerNameEx(
        win32con.ComputerNamePhysicalDnsHostname) or False
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description.

    Args:
        desc (str): The computer description

    Returns:
        dict: ``{'Computer Description': desc}`` if successful, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    if six.PY2:
        desc = _to_unicode(desc)

    # Level 101 returns the current server info including the 'comment' field
    system_info = win32net.NetServerGetInfo(None, 101)

    if desc is None:
        return False

    system_info['comment'] = desc

    # Push the modified info back
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    return {'Computer Description': get_computer_desc()}
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description') # pylint: disable=invalid-name
def get_computer_desc():
    '''
    Get the Windows computer description.

    Returns:
        str: The computer description if set, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_desc
    '''
    desc = get_system_info()['description']
    return desc if desc is not None else False
get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description') # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion.

    .. versionadded:: 2016.3.0

    Returns:
        str: The hostname of the windows minion

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the native 'hostname' utility
    return __salt__['cmd.run'](cmd='hostname')
def set_hostname(hostname):
    '''
    Set the hostname of the windows minion. Requires a restart before the
    change takes effect.

    .. versionadded:: 2016.3.0

    Args:
        hostname (str): The hostname to set

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_hostname newhostname
    '''
    # Rename via WMI; COM must be initialized for the call
    with salt.utils.winapi.Com():
        computer = wmi.WMI().Win32_ComputerSystem()[0]
        return computer.Rename(Name=hostname)
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.

    Args:

        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str):
            Username of an account which is authorized to join computers to
            the specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``

        password (str):
            Password of the specified user

        account_ou (str):
            The DN of the OU below which the account for this computer should
            be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``

        restart (bool):
            ``True`` will restart the computer after a successful join.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the join attempt returns a Win32 error code

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)

    # Short-circuit when already joined to the target domain
    status = get_domain_workgroup()
    if 'Domain' in status:
        if status['Domain'] == domain:
            return 'Already joined to {0}'.format(domain)

    # Fully qualify a bare username with the target domain
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # remove any escape characters
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.split('\\')
        account_ou = ''.join(account_ou)

    # _join_domain returns the Win32 error code; zero means success
    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)

    if not err:
        ret = {'Domain': domain,
               'Restart': False}
        if restart:
            ret['Restart'] = reboot()
        return ret

    raise CommandExecutionError(win32api.FormatMessage(err).rstrip())
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper function to join the domain.

    Args:
        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``
        username (str): Username of an account which is authorized to join
            computers to the specified domain. Needs to be either fully
            qualified like ``user@domain.tld`` or simply ``user``
        password (str): Password of the specified user
        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``
        account_exists (bool): If set to ``True`` the computer will only join
            the domain if the account already exists. If set to ``False`` the
            computer account will be created if it does not exist, otherwise it
            will use the existing account. Default is False.

    Returns:
        int: The NETSETUP status code returned by Windows; ``0`` means success
    '''
    # NetSetup join flags passed through to JoinDomainOrWorkgroup
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name
    join_options = 0x0
    join_options |= NETSETUP_JOIN_DOMAIN
    join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
    join_options |= NETSETUP_JOIN_WITH_NEW_NAME
    if not account_exists:
        # Allow the join call to create the computer account if it is missing
        join_options |= NETSETUP_ACCOUNT_CREATE
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        # Return the results of the command as an error
        # JoinDomainOrWorkgroup returns a strangely formatted value that looks like
        # (0,) so return the first item
        return comp.JoinDomainOrWorkgroup(
            Name=domain, Password=password, UserName=username, AccountOU=account_ou,
            FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:
        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this can
            be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``

        restart (bool):
            ``True`` will restart the computer after successful unjoin. Default
            is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)
    status = get_domain_workgroup()
    if 'Workgroup' in status:
        if status['Workgroup'] == workgroup:
            return 'Already joined to {0}'.format(workgroup)
    # Build a fully qualified username when only a bare name was passed
    if username and '\\' not in username and '@' not in username:
        if domain:
            username = '{0}@{1}'.format(username, domain)
        else:
            return 'Must specify domain if not supplied in username'
    if username and password is None:
        return 'Must specify a password if you pass a username'
    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name
    unjoin_options = 0x0
    if disable:
        # Disable the computer account in AD after unjoining
        unjoin_options |= NETSETUP_ACCT_DELETE
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)
        # you have to do this because UnjoinDomainOrWorkgroup returns a
        # strangely formatted value that looks like (0,)
        if not err[0]:
            # Unjoined successfully; now join the requested workgroup
            err = comp.JoinDomainOrWorkgroup(Name=workgroup)
            if not err[0]:
                ret = {'Workgroup': workgroup,
                       'Restart': False}
                if restart:
                    ret['Restart'] = reboot()
                return ret
            else:
                log.error(win32api.FormatMessage(err[0]).rstrip())
                log.error('Failed to join the computer to %s', workgroup)
                return False
        else:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to unjoin computer from %s', status['Domain'])
            return False
def get_domain_workgroup():
    '''
    Get the domain or workgroup the computer belongs to.

    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2

    Returns:
        dict: ``{'Domain': <name>}`` when the machine is domain-joined,
        otherwise ``{'Workgroup': <name>}``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_domain_workgroup
    '''
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        # NOTE(review): only the first Win32_ComputerSystem instance is ever
        # examined -- both branches return on the first loop iteration
        for computer in conn.Win32_ComputerSystem():
            if computer.PartOfDomain:
                return {'Domain': computer.Domain}
            else:
                return {'Workgroup': computer.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Set the workgroup the computer belongs to.

    .. versionadded:: 2019.2.0

    Args:
        workgroup (str): The name of the workgroup to join; it is upper-cased
            before being applied

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)
    # Initialize COM and grab the first Win32_ComputerSystem object from wmi
    with salt.utils.winapi.Com():
        machine = wmi.WMI().Win32_ComputerSystem()[0]
        # JoinDomainOrWorkgroup returns a one-element tuple like (0,);
        # a zero status code means success
        result = machine.JoinDomainOrWorkgroup(Name=workgroup.upper())
        return not result[0]
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the system time.

    Returns:
        str: Returns the system time in HH:MM:SS AM/PM format.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    now = win32api.GetLocalTime()
    # Convert the 24-hour value at index 4 to a 12-hour clock with AM/PM
    hour_24 = int(now[4])
    meridian = 'PM' if hour_24 >= 12 else 'AM'
    hour_12 = hour_24 % 12
    if hour_12 == 0:
        # Midnight and noon both display as 12
        hour_12 = 12
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hour_12, now[5], now[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.

    Args:
        newtime (str):
            The time to set. Can be any of the following formats:

            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_system_time 12:01
    '''
    # Accept both 12-hour (AM/PM) and 24-hour notations
    accepted_formats = ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M']
    parsed = _try_parse_datetime(newtime, accepted_formats)
    if parsed is None:
        # Not a recognized time string
        return False
    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(hours=parsed.hour,
                                minutes=parsed.minute,
                                seconds=parsed.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for that
    element will be used. For example, if you don't pass the year, the current
    system year will be used. (Used by set_system_date and set_system_time)

    Args:
        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the underlying ``SetLocalTime`` call fails

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time so unspecified elements keep their value
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    # Check for passed values. If not passed, use current values.
    # NOTE: GetLocalTime() index 2 is the day-of-week, hence the skip from 1 to 3
    if years is None:
        years = date_time[0]
    if months is None:
        months = date_time[1]
    if days is None:
        days = date_time[3]
    if hours is None:
        hours = date_time[4]
    if minutes is None:
        minutes = date_time[5]
    if seconds is None:
        seconds = date_time[6]
    try:
        class SYSTEMTIME(ctypes.Structure):
            # Mirrors the Win32 SYSTEMTIME structure layout
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # BUGFIX: was ``succeeded is not 0`` -- identity comparison against an
        # int literal is implementation-dependent and a SyntaxWarning on
        # Python >= 3.8. SetLocalTime returns nonzero on success, 0 on failure.
        if succeeded != 0:
            return True
        else:
            log.error('Failed to set local time')
            raise CommandExecutionError(
                win32api.FormatMessage(succeeded).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date.

    Returns:
        str: The system date in MM/DD/YYYY format

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_system_date
    '''
    # GetLocalTime(): index 0 is the year, 1 the month, 3 the day
    # (index 2 is the day-of-week)
    local_time = win32api.GetLocalTime()
    return '{0:02d}/{1:02d}/{2:04d}'.format(
        local_time[1], local_time[3], local_time[0])
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.

    Args:
        newdate (str):
            The date to set. Can be any of the following formats

            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date '03-28-13'
    '''
    accepted_formats = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                        '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']
    parsed = _try_parse_datetime(newdate, accepted_formats)
    if parsed is None:
        # Not a recognized date string
        return False
    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(years=parsed.year,
                                months=parsed.month,
                                days=parsed.day)
def start_time_service():
    '''
    Start the Windows time service (the ``w32time`` service).

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.start_time_service
    '''
    return __salt__['service.start']('w32time')
def stop_time_service():
    '''
    Stop the Windows time service (the ``w32time`` service).

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.stop_time_service
    '''
    return __salt__['service.stop']('w32time')
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_component_servicing
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'
    # So long as the registry key exists, a reboot is pending
    key_found = __utils__['reg.key_exists']('HKLM', key)
    if key_found:
        log.debug('Key exists: %s', key)
    else:
        log.debug('Key does not exist: %s', key)
    return bool(key_found)
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'
    avoid_key = r'{0}\AvoidSpnSet'.format(base_key)
    join_key = r'{0}\JoinDomain'.format(base_key)
    # The presence of either subkey indicates a pending reboot
    for candidate in (avoid_key, join_key):
        if __utils__['reg.key_exists']('HKLM', candidate):
            log.debug('Key exists: %s', candidate)
            return True
        log.debug('Key does not exist: %s', candidate)
    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'
    # A reboot is pending if either value name exists with real value data set
    for vname in ('PendingFileRenameOperations', 'PendingFileRenameOperations2'):
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)'):
            return True
    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    key = r'SOFTWARE\Microsoft\ServerManager'
    reg_ret = __utils__['reg.read_value']('HKLM', key, 'CurrentRebootAttempts')
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False
    log.debug('Found key: %s', key)
    # The value data can be '(value not set)', in which case no reboot is
    # actually pending -- the int() conversion raising ValueError covers that
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
def get_pending_update():
    '''
    Determine whether there are pending updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'
    # So long as the registry key exists, a reboot is pending
    key_found = __utils__['reg.key_exists']('HKLM', key)
    if key_found:
        log.debug('Key exists: %s', key)
    else:
        log.debug('Key does not exist: %s', key)
    return bool(key_found)
# Volatile registry key created by the salt-minion service; being volatile it
# is cleared automatically at the next boot
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
# Value name under MINION_VOLATILE_KEY flagging that a reboot is required
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    This function is used to remember that an event indicating that a reboot is
    required was witnessed. This function relies on the salt-minion's ability to
    create the following volatile registry key in the *HKLM* hive:

       *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*

    Because this registry key is volatile, it will not persist beyond the
    current boot session. Also, in the scope of this key, the name *'Reboot
    required'* will be assigned the value of *1*.

    For the time being, this function is being used whenever an install
    completes with exit code 3010 and can be extended where appropriate in the
    future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    # volatile=True means the value disappears at the next boot, which is
    # exactly the lifetime a "reboot pending" flag should have
    return __utils__['reg.set_value'](
        hive='HKLM',
        key=MINION_VOLATILE_KEY,
        volatile=True,
        vname=REBOOT_REQUIRED_NAME,
        vdata=1,
        vtype='REG_DWORD')
def get_reboot_required_witnessed():
    '''
    Determine if at any time during the current boot session the salt minion
    witnessed an event indicating that a reboot is required.

    This function will return ``True`` if an install completed with exit
    code 3010 during the current boot session and can be extended where
    appropriate in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Reboot required`` registry flag is set to ``1``,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    value_dict = __utils__['reg.read_value'](
        hive='HKLM',
        key=MINION_VOLATILE_KEY,
        vname=REBOOT_REQUIRED_NAME)
    # vdata is 1 only when set_reboot_required_witnessed() ran this session
    return value_dict['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Ordered from most to least likely source of a pending reboot;
    # any() short-circuits on the first positive check
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)
    return any(check() for check in checks)
|
saltstack/salt
|
salt/modules/win_system.py
|
set_hostname
|
python
|
def set_hostname(hostname):
'''
Set the hostname of the windows minion, requires a restart before this will
be updated.
.. versionadded:: 2016.3.0
Args:
hostname (str): The hostname to set
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_hostname newhostname
'''
with salt.utils.winapi.Com():
conn = wmi.WMI()
comp = conn.Win32_ComputerSystem()[0]
return comp.Rename(Name=hostname)
|
Set the hostname of the windows minion, requires a restart before this will
be updated.
.. versionadded:: 2016.3.0
Args:
hostname (str): The hostname to set
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_hostname newhostname
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L676-L698
| null |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- wmi
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import ctypes
import logging
import time
import platform
from datetime import datetime
# Import salt libs
import salt.utils.functools
import salt.utils.locales
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
# Import 3rd-party Libs
from salt.ext import six
try:
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
    '''
    Only load on Windows systems where all the required win32 modules imported
    successfully.
    '''
    if not salt.utils.platform.is_windows():
        return False, 'Module win_system: Requires Windows'
    if not HAS_WIN32NET_MODS:
        return False, 'Module win_system: Missing win32 modules'
    return __virtualname__
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
def _to_unicode(instr):
    '''
    Convert a byte string to unicode.

    When ``instr`` is ``None`` (or already a unicode string) the value is
    returned unchanged.

    Args:
        instr: The value to convert; may be ``None``

    Returns:
        The unicode form of ``instr``, or the original value when no
        conversion is needed
    '''
    if instr is None or isinstance(instr, six.text_type):
        return instr
    else:
        # assumes byte strings are utf8-encoded -- TODO confirm this matches
        # the minion's configured encoding on all supported platforms
        return six.text_type(instr, 'utf8')
def halt(timeout=5, in_seconds=False):
    '''
    Halt a running system.

    Args:
        timeout (int):
            Number of seconds before halting the system. Default is 5 seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.halt 5 True
    '''
    # Halting is implemented as a shutdown without reboot
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel):  # pylint: disable=unused-argument
    '''
    Change the system runlevel on sysV compatible systems. Not applicable to
    Windows.

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # cmd = ['init', runlevel]
    # ret = __salt__['cmd.run'](cmd, python_shell=False)
    # return ret
    # TODO: Create a mapping of runlevels to  # pylint: disable=fixme
    # corresponding Windows actions
    return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
    '''
    Power off a running system.

    Args:
        timeout (int):
            Number of seconds before powering off the system. Default is 5
            seconds.

        in_seconds (bool):
            Whether to treat timeout as seconds or minutes.

            .. versionadded:: 2015.8.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.poweroff 5
    '''
    # Powering off is implemented as a shutdown without reboot
    return shutdown(timeout=timeout, in_seconds=in_seconds)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False,  # pylint: disable=redefined-outer-name
           only_on_pending_reboot=False):
    '''
    Reboot a running system.

    Args:
        timeout (int):
            The number of minutes/seconds before rebooting the system. Use of
            minutes or seconds depends on the value of ``in_seconds``. Default
            is 5 minutes.

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        wait_for_reboot (bool):
            ``True`` will sleep for timeout + 30 seconds after reboot has been
            initiated. This is useful for use in a highstate. For example, you
            may have states that you want to apply only after the reboot.
            Default is ``False``.

            .. versionadded:: 2015.8.0

        only_on_pending_reboot (bool):
            If this is set to ``True``, then the reboot will only proceed
            if the system reports a pending reboot. Setting this parameter to
            ``True`` could be useful when calling this function from a final
            housekeeping state intended to be executed at the end of a state run
            (using *order: last*). Default is ``False``.

    Returns:
        bool: ``True`` if successful (a reboot will occur), otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.reboot 5
        salt '*' system.reboot 5 True

    Invoking this function from a final housekeeping state:

    .. code-block:: yaml

        final_housekeeping:
           module.run:
              - name: system.reboot
              - only_on_pending_reboot: True
              - order: last
    '''
    ret = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
                   only_on_pending_reboot=only_on_pending_reboot)
    if wait_for_reboot:
        # Block long enough for the reboot to actually begin (the shutdown
        # timeout plus a 30 second grace period) so that anything run after
        # this call happens post-restart
        seconds = _convert_minutes_seconds(timeout, in_seconds)
        time.sleep(seconds + 30)
    return ret
def shutdown(message=None, timeout=5, force_close=True, reboot=False,  # pylint: disable=redefined-outer-name
             in_seconds=False, only_on_pending_reboot=False):
    '''
    Shutdown a running system.

    Args:
        message (str):
            The message to display to the user before shutting down.

        timeout (int):
            The length of time (in seconds) that the shutdown dialog box should
            be displayed. While this dialog box is displayed, the shutdown can
            be aborted using the ``system.shutdown_abort`` function.

            If timeout is not zero, InitiateSystemShutdown displays a dialog box
            on the specified computer. The dialog box displays the name of the
            user who called the function, the message specified by the lpMessage
            parameter, and prompts the user to log off. The dialog box beeps
            when it is created and remains on top of other windows (system
            modal). The dialog box can be moved but not closed. A timer counts
            down the remaining time before the shutdown occurs.

            If timeout is zero, the computer shuts down immediately without
            displaying the dialog box and cannot be stopped by
            ``system.shutdown_abort``.

            Default is 5 minutes

        in_seconds (bool):
            ``True`` will cause the ``timeout`` parameter to be in seconds.
            ``False`` will be in minutes. Default is ``False``.

            .. versionadded:: 2015.8.0

        force_close (bool):
            ``True`` will force close all open applications. ``False`` will
            display a dialog box instructing the user to close open
            applications. Default is ``True``.

        reboot (bool):
            ``True`` restarts the computer immediately after shutdown. ``False``
            powers down the system. Default is ``False``.

        only_on_pending_reboot (bool):
            If ``True`` the shutdown will only proceed if there is a reboot
            pending. ``False`` will shutdown the system. Default is ``False``.
            To optionally shutdown in a highstate, consider using the shutdown
            state instead of this module.

    Returns:
        bool:
            ``True`` if successful (a shutdown or reboot will occur), otherwise
            ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown "System will shutdown in 5 minutes"
    '''
    if six.PY2:
        message = _to_unicode(message)
    timeout = _convert_minutes_seconds(timeout, in_seconds)
    if only_on_pending_reboot and not get_pending_reboot():
        # Nothing pending; honor the caller's request to skip the shutdown
        return False
    if message and not isinstance(message, six.string_types):
        message = message.decode('utf-8')
    try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
                                        force_close, reboot)
        return True
    except pywintypes.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to shutdown the system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def shutdown_hard():
    '''
    Shutdown a running system with no timeout or warning.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.shutdown_hard
    '''
    # A zero timeout skips the dialog box and cannot be aborted
    return shutdown(timeout=0)
def shutdown_abort():
    '''
    Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        win32api.AbortSystemShutdown('127.0.0.1')
        return True
    except pywintypes.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
def lock():
    '''
    Lock the workstation.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.lock
    '''
    return windll.user32.LockWorkStation()
def set_computer_name(name):
    '''
    Set the Windows computer name.

    Args:
        name (str):
            The new name to give the computer. Requires a reboot to take effect.

    Returns:
        dict:
            Returns a dictionary containing the old and new names if successful.
            ``False`` if not.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_name 'DavesComputer'
    '''
    if six.PY2:
        name = _to_unicode(name)
    # SetComputerNameExW returns a nonzero value on success
    if windll.kernel32.SetComputerNameExW(
            win32con.ComputerNamePhysicalDnsHostname, name):
        ret = {'Computer Name': {'Current': get_computer_name()}}
        # The rename only takes effect after a reboot; report the pending name
        pending = get_pending_computer_name()
        if pending not in (None, False):
            ret['Computer Name']['Pending'] = pending
        return ret
    return False
def get_pending_computer_name():
    '''
    Get a pending computer name. If the computer name has been changed, and the
    change is pending a system reboot, this function will return the pending
    computer name. Otherwise, ``None`` will be returned. If there was an error
    retrieving the pending computer name, ``False`` will be returned, and an
    error message will be logged to the minion log.

    Returns:
        str:
            Returns the pending name if pending restart. Returns ``None`` if not
            pending restart.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_pending_computer_name
    '''
    current = get_computer_name()
    # 'NV Hostname' holds the name that takes effect after the next reboot
    pending = __utils__['reg.read_value'](
        'HKLM',
        r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
        'NV Hostname')['vdata']
    if pending:
        # Only report the pending name when it differs from the current one
        return pending if pending != current else None
    # NOTE(review): an empty/missing registry value falls through to False,
    # which the docstring describes as the error case -- confirm intended
    return False
def get_computer_name():
    '''
    Get the Windows computer name.

    Returns:
        str: The computer name if found, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_name
    '''
    # Query the machine's physical DNS hostname; an empty result is
    # reported as False
    hostname = win32api.GetComputerNameEx(
        win32con.ComputerNamePhysicalDnsHostname)
    return hostname or False
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description.

    Args:
        desc (str):
            The computer description

    Returns:
        dict: A dict containing the new description if successful, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    if six.PY2:
        desc = _to_unicode(desc)
    # Make sure the system exists
    # Return an object containing current information array for the computer
    system_info = win32net.NetServerGetInfo(None, 101)
    # Nothing to do when no description was passed
    if desc is None:
        return False
    system_info['comment'] = desc
    # Apply new settings
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return {'Computer Description': get_computer_desc()}
# Backwards-compatible alias kept for existing states and CLI usage
set_computer_description = salt.utils.functools.alias_function(set_computer_desc, 'set_computer_description')  # pylint: disable=invalid-name
def get_system_info():
    '''
    Get system information.

    Returns:
        dict: Dictionary containing information about the system to include
        name, description, version, etc...

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_info
    '''
    def byte_calc(val):
        # Render a raw byte count as a human-readable string (B/KB/MB/GB/TB)
        val = float(val)
        if val < 2**10:
            return '{0:.3f}B'.format(val)
        elif val < 2**20:
            return '{0:.3f}KB'.format(val / 2**10)
        elif val < 2**30:
            return '{0:.3f}MB'.format(val / 2**20)
        elif val < 2**40:
            return '{0:.3f}GB'.format(val / 2**30)
        else:
            return '{0:.3f}TB'.format(val / 2**40)
    # Lookup dicts for Win32_OperatingSystem
    os_type = {1: 'Work Station',
               2: 'Domain Controller',
               3: 'Server'}
    # lookup dicts for Win32_ComputerSystem
    domain_role = {0: 'Standalone Workstation',
                   1: 'Member Workstation',
                   2: 'Standalone Server',
                   3: 'Member Server',
                   4: 'Backup Domain Controller',
                   5: 'Primary Domain Controller'}
    warning_states = {1: 'Other',
                      2: 'Unknown',
                      3: 'Safe',
                      4: 'Warning',
                      5: 'Critical',
                      6: 'Non-recoverable'}
    pc_system_types = {0: 'Unspecified',
                       1: 'Desktop',
                       2: 'Mobile',
                       3: 'Workstation',
                       4: 'Enterprise Server',
                       5: 'SOHO Server',
                       6: 'Appliance PC',
                       7: 'Performance Server',
                       8: 'Maximum'}
    # Connect to WMI
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        system = conn.Win32_OperatingSystem()[0]
        ret = {'name': get_computer_name(),
               'description': system.Description,
               'install_date': system.InstallDate,
               'last_boot': system.LastBootUpTime,
               'os_manufacturer': system.Manufacturer,
               'os_name': system.Caption,
               'users': system.NumberOfUsers,
               'organization': system.Organization,
               'os_architecture': system.OSArchitecture,
               'primary': system.Primary,
               'os_type': os_type[system.ProductType],
               'registered_user': system.RegisteredUser,
               'system_directory': system.SystemDirectory,
               'system_drive': system.SystemDrive,
               'os_version': system.Version,
               'windows_directory': system.WindowsDirectory}
        system = conn.Win32_ComputerSystem()[0]
        # Get pc_system_type depending on Windows version
        if platform.release() in ['Vista', '7', '8']:
            # Types for Vista, 7, and 8
            pc_system_type = pc_system_types[system.PCSystemType]
        else:
            # New types were added with 8.1 and newer
            pc_system_types.update({8: 'Slate', 9: 'Maximum'})
            pc_system_type = pc_system_types[system.PCSystemType]
        ret.update({
            'bootup_state': system.BootupState,
            'caption': system.Caption,
            'chassis_bootup_state': warning_states[system.ChassisBootupState],
            'chassis_sku_number': system.ChassisSKUNumber,
            'dns_hostname': system.DNSHostname,
            'domain': system.Domain,
            'domain_role': domain_role[system.DomainRole],
            'hardware_manufacturer': system.Manufacturer,
            'hardware_model': system.Model,
            'network_server_mode_enabled': system.NetworkServerModeEnabled,
            'part_of_domain': system.PartOfDomain,
            'pc_system_type': pc_system_type,
            'power_state': system.PowerState,
            'status': system.Status,
            'system_type': system.SystemType,
            'total_physical_memory': byte_calc(system.TotalPhysicalMemory),
            'total_physical_memory_raw': system.TotalPhysicalMemory,
            'thermal_state': warning_states[system.ThermalState],
            'workgroup': system.Workgroup
        })
        # Get processor information
        processors = conn.Win32_Processor()
        ret['processors'] = 0
        ret['processors_logical'] = 0
        ret['processor_cores'] = 0
        ret['processor_cores_enabled'] = 0
        ret['processor_manufacturer'] = processors[0].Manufacturer
        ret['processor_max_clock_speed'] = six.text_type(processors[0].MaxClockSpeed) + 'MHz'
        # Aggregate counts across all physical processors
        for processor in processors:
            ret['processors'] += 1
            ret['processors_logical'] += processor.NumberOfLogicalProcessors
            ret['processor_cores'] += processor.NumberOfCores
            ret['processor_cores_enabled'] += processor.NumberOfEnabledCore
        bios = conn.Win32_BIOS()[0]
        ret.update({'hardware_serial': bios.SerialNumber,
                    'bios_manufacturer': bios.Manufacturer,
                    'bios_version': bios.Version,
                    'bios_details': bios.BIOSVersion,
                    'bios_caption': bios.Caption,
                    'bios_description': bios.Description})
    # Reformat the WMI timestamps into 'YYYY-MM-DD HH:MM:SS'
    ret['install_date'] = _convert_date_time_string(ret['install_date'])
    ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
    return ret
def get_computer_desc():
    '''
    Get the Windows computer description

    Returns:
        str: Returns the computer description if found. Otherwise returns
        ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_computer_desc
    '''
    # ``description`` may be None when no description is set; the
    # documented contract is to return False in that case.
    description = get_system_info()['description']
    if description is None:
        return False
    return description


get_computer_description = salt.utils.functools.alias_function(get_computer_desc, 'get_computer_description')  # pylint: disable=invalid-name
def get_hostname():
    '''
    Get the hostname of the windows minion

    .. versionadded:: 2016.3.0

    Returns:
        str: Returns the hostname of the windows minion

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_hostname
    '''
    # Shell out to the ``hostname`` utility rather than reading WMI
    return __salt__['cmd.run'](cmd='hostname')
def join_domain(domain,
                username=None,
                password=None,
                account_ou=None,
                account_exists=False,
                restart=False):
    '''
    Join a computer to an Active Directory domain. Requires a reboot.

    Args:

        domain (str):
            The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str):
            Username of an account which is authorized to join computers to
            the specified domain. Needs to be either fully qualified like
            ``user@domain.tld`` or simply ``user``

        password (str):
            Password of the specified user

        account_ou (str):
            The DN of the OU below which the account for this computer should
            be created when joining the domain, e.g.
            ``ou=computers,ou=departm_432,dc=my-company,dc=com``

        account_exists (bool):
            If set to ``True`` the computer will only join the domain if the
            account already exists. If set to ``False`` the computer account
            will be created if it does not exist, otherwise it will use the
            existing account. Default is ``False``

        restart (bool):
            ``True`` will restart the computer after a successful join.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.join_domain domain='domain.tld' \\
                         username='joinuser' password='joinpassword' \\
                         account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
                         account_exists=False, restart=True
    '''
    if six.PY2:
        domain = _to_unicode(domain)
        username = _to_unicode(username)
        password = _to_unicode(password)
        account_ou = _to_unicode(account_ou)

    # Nothing to do if we are already a member of the target domain
    status = get_domain_workgroup()
    if 'Domain' in status and status['Domain'] == domain:
        return 'Already joined to {0}'.format(domain)

    # Qualify a bare username with the target domain
    if username and '\\' not in username and '@' not in username:
        username = '{0}@{1}'.format(username, domain)

    if username and password is None:
        return 'Must specify a password if you pass a username'

    # Strip any backslash escape characters from the OU path
    if isinstance(account_ou, six.string_types):
        account_ou = account_ou.replace('\\', '')

    err = _join_domain(domain=domain, username=username, password=password,
                       account_ou=account_ou, account_exists=account_exists)

    if err:
        raise CommandExecutionError(win32api.FormatMessage(err).rstrip())

    return {'Domain': domain,
            'Restart': reboot() if restart else False}
def _join_domain(domain,
                 username=None,
                 password=None,
                 account_ou=None,
                 account_exists=False):
    '''
    Helper that performs the actual domain join via WMI.

    Args:
        domain (str): The domain to which the computer should be joined, e.g.
            ``example.com``

        username (str): Username of an account which is authorized to join
            computers to the specified domain. Needs to be either fully
            qualified like ``user@domain.tld`` or simply ``user``

        password (str): Password of the specified user

        account_ou (str): The DN of the OU below which the account for this
            computer should be created when joining the domain

        account_exists (bool): If ``True`` the computer only joins if the
            account already exists; if ``False`` the account is created when
            missing. Default is ``False``.

    Returns:
        int: The NET_API status code returned by the join call (0 == success)
    '''
    # NetJoinDomain flag values
    NETSETUP_JOIN_DOMAIN = 0x1  # pylint: disable=invalid-name
    NETSETUP_ACCOUNT_CREATE = 0x2  # pylint: disable=invalid-name
    NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20  # pylint: disable=invalid-name
    NETSETUP_JOIN_WITH_NEW_NAME = 0x400  # pylint: disable=invalid-name

    join_options = (NETSETUP_JOIN_DOMAIN |
                    NETSETUP_DOMAIN_JOIN_IF_JOINED |
                    NETSETUP_JOIN_WITH_NEW_NAME)
    if not account_exists:
        join_options |= NETSETUP_ACCOUNT_CREATE

    with salt.utils.winapi.Com():
        computer = wmi.WMI().Win32_ComputerSystem()[0]

        # JoinDomainOrWorkgroup returns a strangely formatted one-element
        # tuple such as (0,); the first item is the status code
        return computer.JoinDomainOrWorkgroup(
            Name=domain, Password=password, UserName=username,
            AccountOU=account_ou, FJoinOptions=join_options)[0]
def unjoin_domain(username=None,
                  password=None,
                  domain=None,
                  workgroup='WORKGROUP',
                  disable=False,
                  restart=False):
    # pylint: disable=anomalous-backslash-in-string
    r'''
    Unjoin a computer from an Active Directory Domain. Requires a restart.

    Args:

        username (str):
            Username of an account which is authorized to manage computer
            accounts on the domain. Needs to be a fully qualified name like
            ``user@domain.tld`` or ``domain.tld\\user``. If the domain is not
            specified, the passed domain will be used. If the computer account
            doesn't need to be disabled after the computer is unjoined, this
            can be ``None``.

        password (str):
            The password of the specified user

        domain (str):
            The domain from which to unjoin the computer. Can be ``None``

        workgroup (str):
            The workgroup to join the computer to. Default is ``WORKGROUP``

            .. versionadded:: 2015.8.2/2015.5.7

        disable (bool):
            ``True`` to disable the computer account in Active Directory.
            Default is ``False``

        restart (bool):
            ``True`` will restart the computer after successful unjoin.
            Default is ``False``

            .. versionadded:: 2015.8.2/2015.5.7

    Returns:
        dict: Returns a dictionary if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.unjoin_domain restart=True

        salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
                         password='unjoinpassword' disable=True \\
                         restart=True
    '''
    # pylint: enable=anomalous-backslash-in-string
    if six.PY2:
        username = _to_unicode(username)
        password = _to_unicode(password)
        domain = _to_unicode(domain)

    # Nothing to do if we are already in the target workgroup
    status = get_domain_workgroup()
    if 'Workgroup' in status and status['Workgroup'] == workgroup:
        return 'Already joined to {0}'.format(workgroup)

    # Qualify a bare username with the domain
    if username and '\\' not in username and '@' not in username:
        if not domain:
            return 'Must specify domain if not supplied in username'
        username = '{0}@{1}'.format(username, domain)

    if username and password is None:
        return 'Must specify a password if you pass a username'

    NETSETUP_ACCT_DELETE = 0x4  # pylint: disable=invalid-name

    unjoin_options = NETSETUP_ACCT_DELETE if disable else 0x0

    with salt.utils.winapi.Com():
        comp = wmi.WMI().Win32_ComputerSystem()[0]

        # Both WMI calls below return a strangely formatted one-element
        # tuple such as (0,); index 0 holds the status code (0 == success)
        err = comp.UnjoinDomainOrWorkgroup(Password=password,
                                           UserName=username,
                                           FUnjoinOptions=unjoin_options)
        if err[0]:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to unjoin computer from %s', status['Domain'])
            return False

        err = comp.JoinDomainOrWorkgroup(Name=workgroup)
        if err[0]:
            log.error(win32api.FormatMessage(err[0]).rstrip())
            log.error('Failed to join the computer to %s', workgroup)
            return False

        return {'Workgroup': workgroup,
                'Restart': reboot() if restart else False}
def get_domain_workgroup():
    '''
    Get the domain or workgroup the computer belongs to.

    .. versionadded:: 2015.5.7
    .. versionadded:: 2015.8.2

    Returns:
        str: The name of the domain or workgroup

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_domain_workgroup
    '''
    with salt.utils.winapi.Com():
        for computer in wmi.WMI().Win32_ComputerSystem():
            # A machine is either domain-joined or in a workgroup
            if computer.PartOfDomain:
                return {'Domain': computer.Domain}
            return {'Workgroup': computer.Workgroup}
def set_domain_workgroup(workgroup):
    '''
    Set the domain or workgroup the computer belongs to.

    .. versionadded:: 2019.2.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_domain_workgroup LOCAL
    '''
    if six.PY2:
        workgroup = _to_unicode(workgroup)

    # Initialize COM and grab the first Win32_ComputerSystem object
    with salt.utils.winapi.Com():
        comp = wmi.WMI().Win32_ComputerSystem()[0]

        # JoinDomainOrWorkgroup returns a one-element tuple; 0 means success
        res = comp.JoinDomainOrWorkgroup(Name=workgroup.upper())

    return not res[0]
def _try_parse_datetime(time_str, fmts):
'''
A helper function that attempts to parse the input time_str as a date.
Args:
time_str (str): A string representing the time
fmts (list): A list of date format strings
Returns:
datetime: Returns a datetime object if parsed properly, otherwise None
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
def get_system_time():
    '''
    Get the system time.

    Returns:
        str: Returns the system time in HH:MM:SS AM/PM format.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    now = win32api.GetLocalTime()

    # Convert the 24-hour value from GetLocalTime to 12-hour + meridian
    hours = int(now[4])
    if hours == 0:
        hours, meridian = 12, 'AM'
    elif hours == 12:
        meridian = 'PM'
    elif hours > 12:
        hours, meridian = hours - 12, 'PM'
    else:
        meridian = 'AM'

    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)
def set_system_time(newtime):
    '''
    Set the system time.

    Args:

        newtime (str):
            The time to set. Can be any of the following formats:

            - HH:MM:SS AM/PM
            - HH:MM AM/PM
            - HH:MM:SS (24 hour)
            - HH:MM (24 hour)

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_system_time 12:01
    '''
    # Parse the passed string into a datetime; bail out if no format matches
    parsed = _try_parse_datetime(
        newtime, ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M'])
    if parsed is None:
        return False

    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(hours=parsed.hour,
                                minutes=parsed.minute,
                                seconds=parsed.second)
def set_system_date_time(years=None,
                         months=None,
                         days=None,
                         hours=None,
                         minutes=None,
                         seconds=None):
    '''
    Set the system date and time. Each argument is an element of the date, but
    not required. If an element is not passed, the current system value for
    that element will be used. For example, if you don't pass the year, the
    current system year will be used. (Used by set_system_date and
    set_system_time)

    Args:

        years (int): Years digit, ie: 2015
        months (int): Months digit: 1 - 12
        days (int): Days digit: 1 - 31
        hours (int): Hours digit: 0 - 23
        minutes (int): Minutes digit: 0 - 59
        seconds (int): Seconds digit: 0 - 59

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the call to ``SetLocalTime`` fails

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date_time 2015 5 12 11 37 53
    '''
    # Get the current date/time so unspecified elements keep their value
    try:
        date_time = win32api.GetLocalTime()
    except win32api.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to get local time')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    # GetLocalTime returns (year, month, day-of-week, day, hour, minute,
    # second, millisecond); index 2 (day of week) is intentionally skipped
    if years is None:
        years = date_time[0]
    if months is None:
        months = date_time[1]
    if days is None:
        days = date_time[3]
    if hours is None:
        hours = date_time[4]
    if minutes is None:
        minutes = date_time[5]
    if seconds is None:
        seconds = date_time[6]

    try:
        class SYSTEMTIME(ctypes.Structure):
            # Mirrors the Win32 SYSTEMTIME struct expected by SetLocalTime
            _fields_ = [
                ('wYear', ctypes.c_int16),
                ('wMonth', ctypes.c_int16),
                ('wDayOfWeek', ctypes.c_int16),
                ('wDay', ctypes.c_int16),
                ('wHour', ctypes.c_int16),
                ('wMinute', ctypes.c_int16),
                ('wSecond', ctypes.c_int16),
                ('wMilliseconds', ctypes.c_int16)]
        system_time = SYSTEMTIME()
        system_time.wYear = int(years)
        system_time.wMonth = int(months)
        system_time.wDay = int(days)
        system_time.wHour = int(hours)
        system_time.wMinute = int(minutes)
        system_time.wSecond = int(seconds)
        system_time_ptr = ctypes.pointer(system_time)
        succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        # SetLocalTime returns nonzero on success. The previous code used
        # ``is not 0``, an identity comparison with an int literal that
        # raises a SyntaxWarning on modern CPython and is not guaranteed
        # to match by value; compare with ``!=`` instead.
        if succeeded != 0:
            return True
        else:
            log.error('Failed to set local time')
            # NOTE(review): ``succeeded`` is 0 here, so FormatMessage yields
            # the generic success text rather than the real error; consider
            # ctypes.GetLastError() for a meaningful message.
            raise CommandExecutionError(
                win32api.FormatMessage(succeeded).rstrip())
    except OSError as err:
        log.error('Failed to set local time')
        raise CommandExecutionError(err)
def get_system_date():
    '''
    Get the Windows system date

    Returns:
        str: Returns the system date

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_system_date
    '''
    # GetLocalTime layout: (year, month, day-of-week, day, ...)
    today = win32api.GetLocalTime()
    return '{0:02d}/{1:02d}/{2:04d}'.format(today[1], today[3], today[0])
def set_system_date(newdate):
    '''
    Set the Windows system date. Use <mm-dd-yy> format for the date.

    Args:
        newdate (str):
            The date to set. Can be any of the following formats

            - YYYY-MM-DD
            - MM-DD-YYYY
            - MM-DD-YY
            - MM/DD/YYYY
            - MM/DD/YY
            - YYYY/MM/DD

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_system_date '03-28-13'
    '''
    # Parse the passed string into a datetime; bail out if no format matches
    parsed = _try_parse_datetime(newdate,
                                 ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
                                  '%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d'])
    if parsed is None:
        return False

    # Delegate the actual change to set_system_date_time()
    return set_system_date_time(years=parsed.year,
                                months=parsed.month,
                                days=parsed.day)
def start_time_service():
    '''
    Start the Windows time service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.start_time_service
    '''
    # w32time is the built-in Windows Time service
    service_name = 'w32time'
    return __salt__['service.start'](service_name)
def stop_time_service():
    '''
    Stop the Windows time service

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.stop_time_service
    '''
    # w32time is the built-in Windows Time service
    service_name = 'w32time'
    return __salt__['service.stop'](service_name)
def get_pending_component_servicing():
    '''
    Determine whether there are pending Component Based Servicing tasks that
    require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Component Based Servicing tasks,
        otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_component_servicing
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'

    # The mere existence of this registry key means a reboot is pending
    if not __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key does not exist: %s', key)
        return False

    log.debug('Key exists: %s', key)
    return True
def get_pending_domain_join():
    '''
    Determine whether there is a pending domain join action that requires a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    '''
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'

    # The presence of either of these subkeys means a reboot is pending
    for subkey in ('AvoidSpnSet', 'JoinDomain'):
        key = r'{0}\{1}'.format(base_key, subkey)
        if __utils__['reg.key_exists']('HKLM', key):
            log.debug('Key exists: %s', key)
            return True
        log.debug('Key does not exist: %s', key)

    return False
def get_pending_file_rename():
    '''
    Determine whether there are pending file rename operations that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending file rename operations, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_file_rename
    '''
    key = r'SYSTEM\CurrentControlSet\Control\Session Manager'

    # A reboot is pending if either value name exists with real value data
    for vname in ('PendingFileRenameOperations',
                  'PendingFileRenameOperations2'):
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if not reg_ret['success']:
            log.debug('Unable to access key: %s', key)
            continue
        log.debug('Found key: %s', key)
        if reg_ret['vdata'] and reg_ret['vdata'] != '(value not set)':
            return True

    return False
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_servermanager
    '''
    key = r'SOFTWARE\Microsoft\ServerManager'
    vname = 'CurrentRebootAttempts'

    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False

    log.debug('Found key: %s', key)

    # The value data can be '(value not set)', in which case no actual
    # reboot is pending; treat anything that doesn't cast to int as False.
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
def get_pending_update():
    '''
    Determine whether there are pending updates that require a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there are pending updates, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_update
    '''
    key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'

    # The mere existence of this registry key means a reboot is pending
    if not __utils__['reg.key_exists']('HKLM', key):
        log.debug('Key does not exist: %s', key)
        return False

    log.debug('Key exists: %s', key)
    return True
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
    r'''
    This function is used to remember that an event indicating that a reboot
    is required was witnessed. This function relies on the salt-minion's
    ability to create the following volatile registry key in the *HKLM* hive:

       *SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*

    Because this registry key is volatile, it will not persist beyond the
    current boot session. Also, in the scope of this key, the name *'Reboot
    required'* will be assigned the value of *1*.

    For the time being, this function is being used whenever an install
    completes with exit code 3010 and can be extended where appropriate in the
    future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    '''
    # volatile=True means the flag vanishes on reboot, which is exactly
    # what we want for a "reboot pending" marker
    value_spec = {
        'hive': 'HKLM',
        'key': MINION_VOLATILE_KEY,
        'volatile': True,
        'vname': REBOOT_REQUIRED_NAME,
        'vdata': 1,
        'vtype': 'REG_DWORD',
    }
    return __utils__['reg.set_value'](**value_spec)
def get_reboot_required_witnessed():
    '''
    Determine if at any time during the current boot session the salt minion
    witnessed an event indicating that a reboot is required.

    This function will return ``True`` if an install completed with exit
    code 3010 during the current boot session and can be extended where
    appropriate in the future.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the ``Requires reboot`` registry flag is set to
        ``1``, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_reboot_required_witnessed
    '''
    reg_ret = __utils__['reg.read_value'](hive='HKLM',
                                          key=MINION_VOLATILE_KEY,
                                          vname=REBOOT_REQUIRED_NAME)
    # The flag is set by set_reboot_required_witnessed() with vdata=1
    return reg_ret['vdata'] == 1
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Checks are ordered from most to least likely; any() short-circuits
    # as soon as one reports a pending reboot.
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)

    return any(check() for check in checks)
|
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.