Dataset columns: text_prompt (string, 157 to 13.1k characters) and code_prompt (string, 7 to 19.8k characters).
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_institution_url(base_url): """ Clean up a given base URL. :param base_url: The base URL of the API. :type base_url: str :rtype: str """
base_url = base_url.rstrip('/')
index = base_url.find('/api/v1')
if index != -1:
    return base_url[0:index]
return base_url
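A quick illustrative check of the expected behaviour (the URLs below are made up for the example):

    assert get_institution_url('https://example.edu/api/v1/courses') == 'https://example.edu'
    assert get_institution_url('https://example.edu/') == 'https://example.edu'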
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def file_or_path(file): """ Open a file and return the handler if a path is given. If a file handler is given, return it directly. :param file: A file handler or path to a file. :returns: A tuple with the open file handler and whether it was a path. :rtype: (file, bool) """
is_path = False
if isinstance(file, string_types):
    if not os.path.exists(file):
        raise IOError('File at path ' + file + ' does not exist.')
    file = open(file, 'rb')
    is_path = True
return file, is_path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, **kwargs): """ Delete this calendar event. :calls: `DELETE /api/v1/calendar_events/:id \ <https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.destroy>`_ :rtype: :class:`canvasapi.calendar_event.CalendarEvent` """
response = self._requester.request(
    'DELETE',
    'calendar_events/{}'.format(self.id),
    _kwargs=combine_kwargs(**kwargs)
)
return CalendarEvent(self._requester, response.json())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def edit(self, **kwargs): """ Modify this calendar event. :calls: `PUT /api/v1/calendar_events/:id \ <https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.update>`_ :rtype: :class:`canvasapi.calendar_event.CalendarEvent` """
response = self._requester.request(
    'PUT',
    'calendar_events/{}'.format(self.id),
    _kwargs=combine_kwargs(**kwargs)
)
if 'title' in response.json():
    super(CalendarEvent, self).set_attributes(response.json())
return CalendarEvent(self._requester, response.json())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _wrap_layer(name, input_layer, build_func, dropout_rate=0.0, trainable=True): """Wrap layers with residual, normalization and dropout. :param name: Prefix of names for internal layers. :param input_layer: Input layer. :param build_func: A callable that takes the input tensor and generates the output tensor. :param dropout_rate: Dropout rate. :param trainable: Whether the layers are trainable. :return: Output layer. """
build_output = build_func(input_layer)
if dropout_rate > 0.0:
    dropout_layer = keras.layers.Dropout(
        rate=dropout_rate,
        name='%s-Dropout' % name,
    )(build_output)
else:
    dropout_layer = build_output
if isinstance(input_layer, list):
    input_layer = input_layer[0]
add_layer = keras.layers.Add(name='%s-Add' % name)([input_layer, dropout_layer])
normal_layer = LayerNormalization(
    trainable=trainable,
    name='%s-Norm' % name,
)(add_layer)
return normal_layer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def attention_builder(name, head_num, activation, history_only, trainable=True): """Get multi-head self-attention builder. :param name: Prefix of names for internal layers. :param head_num: Number of heads in multi-head self-attention. :param activation: Activation for multi-head self-attention. :param history_only: Only use history data. :param trainable: Whether the layer is trainable. :return: """
def _attention_builder(x):
    return MultiHeadAttention(
        head_num=head_num,
        activation=activation,
        history_only=history_only,
        trainable=trainable,
        name=name,
    )(x)
return _attention_builder
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def feed_forward_builder(name, hidden_dim, activation, trainable=True): """Get position-wise feed-forward layer builder. :param name: Prefix of names for internal layers. :param hidden_dim: Hidden dimension of feed forward layer. :param activation: Activation for feed-forward layer. :param trainable: Whether the layer is trainable. :return: """
def _feed_forward_builder(x):
    return FeedForward(
        units=hidden_dim,
        activation=activation,
        trainable=trainable,
        name=name,
    )(x)
return _feed_forward_builder
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_encoder_component(name, input_layer, head_num, hidden_dim, attention_activation=None, feed_forward_activation='relu', dropout_rate=0.0, trainable=True): """Multi-head self-attention and feed-forward layer. :param name: Prefix of names for internal layers. :param input_layer: Input layer. :param head_num: Number of heads in multi-head self-attention. :param hidden_dim: Hidden dimension of feed forward layer. :param attention_activation: Activation for multi-head self-attention. :param feed_forward_activation: Activation for feed-forward layer. :param dropout_rate: Dropout rate. :param trainable: Whether the layers are trainable. :return: Output layer. """
attention_name = '%s-MultiHeadSelfAttention' % name
feed_forward_name = '%s-FeedForward' % name
attention_layer = _wrap_layer(
    name=attention_name,
    input_layer=input_layer,
    build_func=attention_builder(
        name=attention_name,
        head_num=head_num,
        activation=attention_activation,
        history_only=False,
        trainable=trainable,
    ),
    dropout_rate=dropout_rate,
    trainable=trainable,
)
feed_forward_layer = _wrap_layer(
    name=feed_forward_name,
    input_layer=attention_layer,
    build_func=feed_forward_builder(
        name=feed_forward_name,
        hidden_dim=hidden_dim,
        activation=feed_forward_activation,
        trainable=trainable,
    ),
    dropout_rate=dropout_rate,
    trainable=trainable,
)
return feed_forward_layer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_decoder_component(name, input_layer, encoded_layer, head_num, hidden_dim, attention_activation=None, feed_forward_activation='relu', dropout_rate=0.0, trainable=True): """Multi-head self-attention, multi-head query attention and feed-forward layer. :param name: Prefix of names for internal layers. :param input_layer: Input layer. :param encoded_layer: Encoded layer from encoder. :param head_num: Number of heads in multi-head self-attention. :param hidden_dim: Hidden dimension of feed forward layer. :param attention_activation: Activation for multi-head self-attention. :param feed_forward_activation: Activation for feed-forward layer. :param dropout_rate: Dropout rate. :param trainable: Whether the layers are trainable. :return: Output layer. """
self_attention_name = '%s-MultiHeadSelfAttention' % name
query_attention_name = '%s-MultiHeadQueryAttention' % name
feed_forward_name = '%s-FeedForward' % name
self_attention_layer = _wrap_layer(
    name=self_attention_name,
    input_layer=input_layer,
    build_func=attention_builder(
        name=self_attention_name,
        head_num=head_num,
        activation=attention_activation,
        history_only=True,
        trainable=trainable,
    ),
    dropout_rate=dropout_rate,
    trainable=trainable,
)
query_attention_layer = _wrap_layer(
    name=query_attention_name,
    input_layer=[self_attention_layer, encoded_layer, encoded_layer],
    build_func=attention_builder(
        name=query_attention_name,
        head_num=head_num,
        activation=attention_activation,
        history_only=False,
        trainable=trainable,
    ),
    dropout_rate=dropout_rate,
    trainable=trainable,
)
feed_forward_layer = _wrap_layer(
    name=feed_forward_name,
    input_layer=query_attention_layer,
    build_func=feed_forward_builder(
        name=feed_forward_name,
        hidden_dim=hidden_dim,
        activation=feed_forward_activation,
        trainable=trainable,
    ),
    dropout_rate=dropout_rate,
    trainable=trainable,
)
return feed_forward_layer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_encoders(encoder_num, input_layer, head_num, hidden_dim, attention_activation=None, feed_forward_activation='relu', dropout_rate=0.0, trainable=True): """Get encoders. :param encoder_num: Number of encoder components. :param input_layer: Input layer. :param head_num: Number of heads in multi-head self-attention. :param hidden_dim: Hidden dimension of feed forward layer. :param attention_activation: Activation for multi-head self-attention. :param feed_forward_activation: Activation for feed-forward layer. :param dropout_rate: Dropout rate. :param trainable: Whether the layers are trainable. :return: Output layer. """
last_layer = input_layer
for i in range(encoder_num):
    last_layer = get_encoder_component(
        name='Encoder-%d' % (i + 1),
        input_layer=last_layer,
        head_num=head_num,
        hidden_dim=hidden_dim,
        attention_activation=attention_activation,
        feed_forward_activation=feed_forward_activation,
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
return last_layer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_decoders(decoder_num, input_layer, encoded_layer, head_num, hidden_dim, attention_activation=None, feed_forward_activation='relu', dropout_rate=0.0, trainable=True): """Get decoders. :param decoder_num: Number of decoder components. :param input_layer: Input layer. :param encoded_layer: Encoded layer from encoder. :param head_num: Number of heads in multi-head self-attention. :param hidden_dim: Hidden dimension of feed forward layer. :param attention_activation: Activation for multi-head self-attention. :param feed_forward_activation: Activation for feed-forward layer. :param dropout_rate: Dropout rate. :param trainable: Whether the layers are trainable. :return: Output layer. """
last_layer = input_layer
for i in range(decoder_num):
    last_layer = get_decoder_component(
        name='Decoder-%d' % (i + 1),
        input_layer=last_layer,
        encoded_layer=encoded_layer,
        head_num=head_num,
        hidden_dim=hidden_dim,
        attention_activation=attention_activation,
        feed_forward_activation=feed_forward_activation,
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
return last_layer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_model(token_num, embed_dim, encoder_num, decoder_num, head_num, hidden_dim, attention_activation=None, feed_forward_activation='relu', dropout_rate=0.0, use_same_embed=True, embed_weights=None, embed_trainable=None, trainable=True): """Get full model without compilation. :param token_num: Number of distinct tokens. :param embed_dim: Dimension of token embedding. :param encoder_num: Number of encoder components. :param decoder_num: Number of decoder components. :param head_num: Number of heads in multi-head self-attention. :param hidden_dim: Hidden dimension of feed forward layer. :param attention_activation: Activation for multi-head self-attention. :param feed_forward_activation: Activation for feed-forward layer. :param dropout_rate: Dropout rate. :param use_same_embed: Whether to use the same token embedding layer. `token_num`, `embed_weights` and `embed_trainable` should be lists of two elements if it is False. :param embed_weights: Initial weights of token embedding. :param embed_trainable: Whether the token embedding is trainable. It will automatically set to False if the given value is None when embedding weights has been provided. :param trainable: Whether the layers are trainable. :return: Keras model. """
if not isinstance(token_num, list):
    token_num = [token_num, token_num]
encoder_token_num, decoder_token_num = token_num

if not isinstance(embed_weights, list):
    embed_weights = [embed_weights, embed_weights]
encoder_embed_weights, decoder_embed_weights = embed_weights
if encoder_embed_weights is not None:
    encoder_embed_weights = [encoder_embed_weights]
if decoder_embed_weights is not None:
    decoder_embed_weights = [decoder_embed_weights]

if not isinstance(embed_trainable, list):
    embed_trainable = [embed_trainable, embed_trainable]
encoder_embed_trainable, decoder_embed_trainable = embed_trainable
if encoder_embed_trainable is None:
    encoder_embed_trainable = encoder_embed_weights is None
if decoder_embed_trainable is None:
    decoder_embed_trainable = decoder_embed_weights is None

if use_same_embed:
    encoder_embed_layer = decoder_embed_layer = EmbeddingRet(
        input_dim=encoder_token_num,
        output_dim=embed_dim,
        mask_zero=True,
        weights=encoder_embed_weights,
        trainable=encoder_embed_trainable,
        name='Token-Embedding',
    )
else:
    encoder_embed_layer = EmbeddingRet(
        input_dim=encoder_token_num,
        output_dim=embed_dim,
        mask_zero=True,
        weights=encoder_embed_weights,
        trainable=encoder_embed_trainable,
        name='Encoder-Token-Embedding',
    )
    decoder_embed_layer = EmbeddingRet(
        input_dim=decoder_token_num,
        output_dim=embed_dim,
        mask_zero=True,
        weights=decoder_embed_weights,
        trainable=decoder_embed_trainable,
        name='Decoder-Token-Embedding',
    )

encoder_input = keras.layers.Input(shape=(None,), name='Encoder-Input')
encoder_embed = TrigPosEmbedding(
    mode=TrigPosEmbedding.MODE_ADD,
    name='Encoder-Embedding',
)(encoder_embed_layer(encoder_input)[0])
encoded_layer = get_encoders(
    encoder_num=encoder_num,
    input_layer=encoder_embed,
    head_num=head_num,
    hidden_dim=hidden_dim,
    attention_activation=attention_activation,
    feed_forward_activation=feed_forward_activation,
    dropout_rate=dropout_rate,
    trainable=trainable,
)

decoder_input = keras.layers.Input(shape=(None,), name='Decoder-Input')
decoder_embed, decoder_embed_weights = decoder_embed_layer(decoder_input)
decoder_embed = TrigPosEmbedding(
    mode=TrigPosEmbedding.MODE_ADD,
    name='Decoder-Embedding',
)(decoder_embed)
decoded_layer = get_decoders(
    decoder_num=decoder_num,
    input_layer=decoder_embed,
    encoded_layer=encoded_layer,
    head_num=head_num,
    hidden_dim=hidden_dim,
    attention_activation=attention_activation,
    feed_forward_activation=feed_forward_activation,
    dropout_rate=dropout_rate,
    trainable=trainable,
)

dense_layer = EmbeddingSim(
    trainable=trainable,
    name='Output',
)([decoded_layer, decoder_embed_weights])
return keras.models.Model(inputs=[encoder_input, decoder_input], outputs=dense_layer)
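A minimal sketch of wiring the model together; the hyper-parameters below are illustrative only and assume the keras-transformer style helpers above are importable:

    model = get_model(
        token_num=10000,
        embed_dim=32,
        encoder_num=2,
        decoder_num=2,
        head_num=4,
        hidden_dim=128,
        dropout_rate=0.1,
    )
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
    model.summary()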
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode(model, tokens, start_token, end_token, pad_token, max_len=10000, max_repeat=10, max_repeat_block=10): """Decode with the given model and input tokens. :param model: The trained model. :param tokens: The input tokens of encoder. :param start_token: The token that represents the start of a sentence. :param end_token: The token that represents the end of a sentence. :param pad_token: The token that represents padding. :param max_len: Maximum length of decoded list. :param max_repeat: Maximum number of repeating blocks. :param max_repeat_block: Maximum length of the repeating block. :return: Decoded tokens. """
is_single = not isinstance(tokens[0], list)
if is_single:
    tokens = [tokens]
batch_size = len(tokens)
decoder_inputs = [[start_token] for _ in range(batch_size)]
outputs = [None for _ in range(batch_size)]
output_len = 1
while len(list(filter(lambda x: x is None, outputs))) > 0:
    output_len += 1
    batch_inputs, batch_outputs = [], []
    max_input_len = 0
    index_map = {}
    for i in range(batch_size):
        if outputs[i] is None:
            index_map[len(batch_inputs)] = i
            batch_inputs.append(tokens[i][:])
            batch_outputs.append(decoder_inputs[i])
            max_input_len = max(max_input_len, len(tokens[i]))
    for i in range(len(batch_inputs)):
        batch_inputs[i] += [pad_token] * (max_input_len - len(batch_inputs[i]))
    predicts = model.predict([np.asarray(batch_inputs), np.asarray(batch_outputs)])
    for i in range(len(predicts)):
        last_token = np.argmax(predicts[i][-1])
        decoder_inputs[index_map[i]].append(last_token)
        if last_token == end_token or \
                (max_len is not None and output_len >= max_len) or \
                _get_max_suffix_repeat_times(decoder_inputs, max_repeat * max_repeat_block) >= max_repeat:
            outputs[index_map[i]] = decoder_inputs[index_map[i]]
if is_single:
    outputs = outputs[0]
return outputs
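A usage sketch, assuming a vocabulary where 0 is the pad token, 1 the start token and 2 the end token (all illustrative values):

    decoded = decode(
        model,
        tokens=[[1, 5, 6, 7, 2]],
        start_token=1,
        end_token=2,
        pad_token=0,
        max_len=32,
    )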
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gelu(x): """An approximation of gelu. See: https://arxiv.org/pdf/1606.08415.pdf """
return 0.5 * x * (1.0 + K.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * K.pow(x, 3))))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prefixed(self, prefix): """Context manager for parsing envvars with a common prefix."""
old_prefix = self._prefix
if old_prefix is None:
    self._prefix = prefix
else:
    self._prefix = "{}{}".format(old_prefix, prefix)
yield self
self._prefix = old_prefix
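An illustrative use of the context manager, assuming an environs-style `env` instance and that FLASK_HOST and FLASK_PORT are set in the environment:

    with env.prefixed('FLASK_'):
        host = env('HOST', 'localhost')
        port = env.int('PORT', 5000)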
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_parser(self, name, func): """Register a new parser method with the name ``name``. ``func`` must receive the input value for an environment variable. """
self.__parser_map__[name] = _func2method(func, method_name=name)
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parser_for(self, name): """Decorator that registers a new parser method with the name ``name``. The decorated function must receive the input value for an environment variable. """
def decorator(func):
    self.add_parser(name, func)
    return func
return decorator
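A sketch of the decorator in use; `env` and the environment variable name are assumptions made for the example:

    @env.parser_for('upper')
    def upper_case(value):
        return value.upper()

    region = env.upper('AWS_REGION')  # e.g. 'us-west-2' becomes 'US-WEST-2'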
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_parser_from_field(self, name, field_cls): """Register a new parser method with name ``name``, given a marshmallow ``Field``."""
self.__parser_map__[name] = _field2method(field_cls, method_name=name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def open(self, fp, mode='r'): """ Open the NMEAFile. """
self._file = open(fp, mode=mode)
return self._file
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xml_records(filename): """ If the second return value is not None, then it is an Exception encountered during parsing. The first return value will be the XML string. @type filename str @rtype: generator of (etree.Element or str), (None or Exception) """
with Evtx(filename) as evtx:
    for xml, record in evtx_file_xml_view(evtx.get_file_header()):
        try:
            yield to_lxml(xml), None
        except etree.XMLSyntaxError as e:
            yield xml, e
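An illustrative caller that skips records which failed to parse (the file name is hypothetical):

    for node, err in xml_records('System.evtx'):
        if err is not None:
            continue
        print(node.tag)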
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def template_instance(self):
    '''
    parse the template instance node.
    this is used to compute the location of the template definition structure.

    Returns:
      TemplateInstanceNode: the template instance.
    '''
    ofs = self.offset()
    if self.unpack_byte(0x0) & 0x0F == 0xF:
        ofs += 4
    return TemplateInstanceNode(self._buf, ofs, self._chunk, self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def template(self):
    '''
    parse the template referenced by this root node.
    note, this template structure is not guaranteed to be located within the root node's boundaries.

    Returns:
      TemplateNode: the template.
    '''
    instance = self.template_instance()
    offset = self._chunk.offset() + instance.template_offset()
    node = TemplateNode(self._buf, offset, self._chunk, instance)
    return node
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evtx_chunk_xml_view(chunk): """ Generate XML representations of the records in an EVTX chunk. Records are ordered by chunk.records() Args: chunk (Evtx.Chunk): the chunk to render. Yields: tuple[str, Evtx.Record]: the rendered XML document and the raw record. """
for record in chunk.records():
    record_str = evtx_record_xml_view(record)
    yield record_str, record
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evtx_file_xml_view(file_header): """ Generate XML representations of the records in an EVTX file. Records are ordered by file_header.chunks(), and then by chunk.records() Args: chunk (Evtx.FileHeader): the file header to render. Yields: tuple[str, Evtx.Record]: the rendered XML document and the raw record. """
for chunk in file_header.chunks():
    for record in chunk.records():
        record_str = evtx_record_xml_view(record)
        yield record_str, record
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_record(self, record_num): """ Get a Record by record number. @type record_num: int @param record_num: The record number of the the record to fetch. @rtype Record or None @return The record request by record number, or None if the record is not found. """
for chunk in self.chunks():
    first_record = chunk.log_first_record_number()
    last_record = chunk.log_last_record_number()
    if not (first_record <= record_num <= last_record):
        continue
    for record in chunk.records():
        if record.record_num() == record_num:
            return record
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data(self): """ Return the raw data block which makes up this record as a bytestring. @rtype str @return A string that is a copy of the buffer that makes up this record. """
return self._buf[self.offset():self.offset() + self.size()]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def lxml(self):
    '''
    render the record into a lxml document.
    this is useful for querying data from the record using xpath, etc.

    note: lxml must be installed.

    Returns:
      lxml.etree.ElementTree: the rendered and parsed xml document.

    Raises:
      ImportError: if lxml is not installed.
    '''
    import lxml.etree
    return lxml.etree.fromstring((e_views.XML_HEADER + self.xml()).encode('utf-8'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self): """List all users"""
self.reqparse.add_argument('page', type=int, default=1, required=True)
self.reqparse.add_argument('count', type=int, default=50, choices=[25, 50, 100])
self.reqparse.add_argument('authSystem', type=str, default=None, action='append')
args = self.reqparse.parse_args()

qry = db.User.order_by(User.username)
if args['authSystem']:
    qry = qry.filter(User.auth_system.in_(args['authSystem']))

total = qry.count()
qry = qry.limit(args['count'])
if (args['page'] - 1) > 0:
    offset = (args['page'] - 1) * args['count']
    qry = qry.offset(offset)
users = qry.all()

return self.make_response({
    'users': [x.to_json() for x in users],
    'userCount': total,
    'authSystems': list(current_app.available_auth_systems.keys()),
    'activeAuthSystem': current_app.active_auth_system.name
})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def options(self): """Returns metadata information required for User Creation"""
roles = db.Role.all()
return self.make_response({
    'roles': roles,
    'authSystems': list(current_app.available_auth_systems.keys()),
    'activeAuthSystem': current_app.active_auth_system.name
})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, user_id): """Returns a specific user"""
user = db.User.find_one(User.user_id == user_id)
roles = db.Role.all()
if not user:
    return self.make_response('Unable to find the user requested, might have been removed', HTTP.NOT_FOUND)

return self.make_response({
    'user': user.to_json(),
    'roles': roles
}, HTTP.OK)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put(self, user_id): """Update a user object"""
self.reqparse.add_argument('roles', type=str, action='append')
args = self.reqparse.parse_args()
auditlog(event='user.create', actor=session['user'].username, data=args)

user = db.User.find_one(User.user_id == user_id)
roles = db.Role.find(Role.name.in_(args['roles']))
if not user:
    return self.make_response('No such user found: {}'.format(user_id), HTTP.NOT_FOUND)

if user.username == 'admin' and user.auth_system == 'builtin':
    return self.make_response('You cannot modify the built-in admin user', HTTP.FORBIDDEN)

user.roles = []
for role in roles:
    if role in args['roles']:
        user.roles.append(role)

db.session.add(user)
db.session.commit()

return self.make_response({'message': 'User roles updated'}, HTTP.OK)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def return_resource_name(self, record, resource_type): """ Removes the trailing AWS domain from a DNS record to return the resource name e.g bucketname.s3.amazonaws.com will return bucketname Args: record (str): DNS record resource_type: AWS Resource type (i.e. S3 Bucket, Elastic Beanstalk, etc..) """
try:
    if resource_type == 's3':
        regex = re.compile('.*(\.(?:s3-|s3){1}(?:.*)?\.amazonaws\.com)')
        bucket_name = record.replace(regex.match(record).group(1), '')
        return bucket_name
except Exception as e:
    self.log.error('Unable to parse DNS record {} for resource type {}/{}'.format(record, resource_type, e))

return record
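Illustrative input and output for the S3 case, assuming `auditor` is an instance of the class defining this method and the bucket name is made up:

    name = auditor.return_resource_name('my-bucket.s3.amazonaws.com', 's3')
    # name == 'my-bucket'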
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_json(self, is_admin=False): """Returns a dict representation of the object Args: is_admin (`bool`): If true, include information about the account that should be avaiable only to admins Returns: `dict` """
if is_admin:
    return {
        'accountId': self.account_id,
        'accountName': self.account_name,
        'accountType': self.account_type,
        'contacts': self.contacts,
        'enabled': True if self.enabled == 1 else False,
        'requiredRoles': self.required_roles,
        'properties': {to_camelcase(prop.name): prop.value for prop in self.account.properties}
    }
else:
    return {
        'accountId': self.account_id,
        'accountName': self.account_name,
        'contacts': self.contacts
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(account): """Returns the class object identified by `account_id` Args: account (`int`, `str`): Unique ID of the account to load from database Returns: `Account` object if found, else None """
account = Account.get(account)
if not account:
    return None

acct_type = AccountType.get(account.account_type_id).account_type
account_class = get_plugin_by_name(PLUGIN_NAMESPACES['accounts'], acct_type)
return account_class(account)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all(cls, include_disabled=True): """Returns a list of all accounts of a given type Args: include_disabled (`bool`): Include disabled accounts. Default: `True` Returns: list of account objects """
if cls == BaseAccount:
    raise InquisitorError('get_all on BaseAccount is not supported')

account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id
qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)

if not include_disabled:
    qry = qry.filter(Account.enabled == 1)

accounts = qry.find(Account.account_type_id == account_type_id)
return {res.account_id: cls(res) for res in accounts}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search(*, include_disabled=True, account_ids=None, account_type_id=None, properties=None, return_query=False): """Search for accounts based on the provided filters Args: include_disabled (`bool`): Include disabled accounts (default: True) account_ids: (`list` of `int`): List of account IDs account_type_id (`int`): Account Type ID to limit results to properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list of strings, in which case a boolean OR search is performed on the values return_query (`bool`): Returns the query object prior to adding the limit and offset functions. Allows for sub-classes to amend the search feature with extra conditions. The calling function must handle pagination on its own Returns: `list` of `Account`, `sqlalchemy.orm.Query` """
qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)

if not include_disabled:
    qry = qry.filter(Account.enabled == 1)

if account_ids:
    if type(account_ids) not in (list, tuple):
        account_ids = [account_ids]
    qry = qry.filter(Account.account_id.in_(account_ids))

if account_type_id:
    qry = qry.filter(Account.account_type_id == account_type_id)

if properties:
    for prop_name, value in properties.items():
        alias = aliased(AccountProperty)
        qry = qry.join(alias, Account.account_id == alias.account_id)
        if type(value) == list:
            where_clause = []
            for item in value:
                where_clause.append(alias.value == item)
            qry = qry.filter(
                and_(
                    alias.name == prop_name,
                    or_(*where_clause)
                ).self_group()
            )
        else:
            qry = qry.filter(
                and_(
                    alias.name == prop_name,
                    alias.value == value
                ).self_group()
            )

if return_query:
    return qry

total = qry.count()
return total, list(map(BaseAccount.get_typed_account, qry.all()))
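A hypothetical search combining the filters described above (the property names and values are made up for the example):

    total, accounts = BaseAccount.search(
        include_disabled=False,
        properties={'owner': ['team-a', 'team-b']},
    )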
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute_scheduler(self): """Main entry point for the scheduler. This method will start two scheduled jobs, `schedule_jobs` which takes care of scheduling the actual SQS messaging and `process_status_queue` which will track the current status of the jobs as workers are executing them Returns: `None` """
try:
    # Schedule periodic scheduling of jobs
    self.scheduler.add_job(
        self.schedule_jobs,
        trigger='interval',
        name='schedule_jobs',
        minutes=15,
        start_date=datetime.now() + timedelta(seconds=1)
    )
    self.scheduler.add_job(
        self.process_status_queue,
        trigger='interval',
        name='process_status_queue',
        seconds=30,
        start_date=datetime.now() + timedelta(seconds=5),
        max_instances=1
    )
    self.scheduler.start()
except KeyboardInterrupt:
    self.scheduler.shutdown()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_current_jobs(self): """Return a list of the currently scheduled jobs in APScheduler Returns: `dict` of `str`: :obj:`apscheduler/job:Job` """
jobs = {}
for job in self.scheduler.get_jobs():
    if job.name not in ('schedule_jobs', 'process_status_queue'):
        jobs[job.name] = job
return jobs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_worker_queue_message(self, *, batch_id, job_name, entry_point, worker_args, retry_count=0): """Send a message to the `worker_queue` for a worker to execute the requests job Args: batch_id (`str`): Unique ID of the batch the job belongs to job_name (`str`): Non-unique ID of the job. This is used to ensure that the same job is only scheduled a single time per batch entry_point (`dict`): A dictionary providing the entry point information for the worker to load the class worker_args (`dict`): A dictionary with the arguments required by the worker class (if any, can be an empty dictionary) retry_count (`int`): The number of times this one job has been attempted to be executed. If a job fails to execute after 3 retries it will be marked as failed Returns: `None` """
try:
    job_id = str(uuid4())
    self.job_queue.send_message(
        MessageBody=json.dumps({
            'batch_id': batch_id,
            'job_id': job_id,
            'job_name': job_name,
            'entry_point': entry_point,
            'worker_args': worker_args,
        }),
        MessageDeduplicationId=job_id,
        MessageGroupId=batch_id,
        MessageAttributes={
            'RetryCount': {
                'StringValue': str(retry_count),
                'DataType': 'Number'
            }
        }
    )
    if retry_count == 0:
        job = SchedulerJob()
        job.job_id = job_id
        job.batch_id = batch_id
        job.status = SchedulerStatus.PENDING
        job.data = worker_args
        db.session.add(job)
        db.session.commit()
except:
    self.log.exception('Error when processing worker task')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_status_message(self, object_id, status): """Send a message to the `status_queue` to update a job's status. Returns `True` if the message was sent, else `False` Args: object_id (`str`): ID of the job that was executed status (:obj:`SchedulerStatus`): Status of the job Returns: `bool` """
try:
    body = json.dumps({
        'id': object_id,
        'status': status
    })
    self.status_queue.send_message(
        MessageBody=body,
        MessageGroupId='job_status',
        MessageDeduplicationId=get_hash((object_id, status))
    )
    return True
except Exception as ex:
    print(ex)
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_status_queue(self): """Process all messages in the `status_queue` and check for any batches that needs to change status Returns: `None` """
self.log.debug('Start processing status queue')
while True:
    messages = self.status_queue.receive_messages(MaxNumberOfMessages=10)
    if not messages:
        break

    for message in messages:
        data = json.loads(message.body)
        job = SchedulerJob.get(data['id'])
        try:
            if job and job.update_status(data['status']):
                db.session.commit()
        except SchedulerError as ex:
            if hasattr(ex, 'message') and ex.message == 'Attempting to update already completed job':
                pass
        message.delete()

# Close any batch that is now complete
open_batches = db.SchedulerBatch.find(SchedulerBatch.status < SchedulerStatus.COMPLETED)
for batch in open_batches:
    open_jobs = list(filter(lambda x: x.status < SchedulerStatus.COMPLETED, batch.jobs))
    if not open_jobs:
        open_batches.remove(batch)
        batch.update_status(SchedulerStatus.COMPLETED)
        self.log.debug('Closed completed batch {}'.format(batch.batch_id))
    else:
        started_jobs = list(filter(lambda x: x.status > SchedulerStatus.PENDING, open_jobs))
        if batch.status == SchedulerStatus.PENDING and len(started_jobs) > 0:
            batch.update_status(SchedulerStatus.STARTED)
            self.log.debug('Started batch manually {}'.format(batch.batch_id))

# Check for stale batches / jobs
for batch in open_batches:
    if batch.started < datetime.now() - timedelta(hours=2):
        self.log.warning('Closing a stale scheduler batch: {}'.format(batch.batch_id))
        for job in batch.jobs:
            if job.status < SchedulerStatus.COMPLETED:
                job.update_status(SchedulerStatus.ABORTED)
        batch.update_status(SchedulerStatus.ABORTED)

db.session.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, *args, **kwargs): """Iterate through all AWS accounts and apply roles and policies from Github Args: *args: Optional list of arguments **kwargs: Optional list of keyword arguments Returns: `None` """
accounts = list(AWSAccount.get_all(include_disabled=False).values())
self.manage_policies(accounts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_policies_from_git(self): """Retrieve policies from the Git repo. Returns a dictionary containing all the roles and policies Returns: :obj:`dict` of `str`: `dict` """
fldr = mkdtemp()
try:
    url = 'https://{token}:x-oauth-basic@{server}/{repo}'.format(**{
        'token': self.dbconfig.get('git_auth_token', self.ns),
        'server': self.dbconfig.get('git_server', self.ns),
        'repo': self.dbconfig.get('git_repo', self.ns)
    })

    policies = {'GLOBAL': {}}
    if self.dbconfig.get('git_no_ssl_verify', self.ns, False):
        os.environ['GIT_SSL_NO_VERIFY'] = '1'

    repo = Repo.clone_from(url, fldr)

    for obj in repo.head.commit.tree:
        name, ext = os.path.splitext(obj.name)

        # Read the standard policies
        if ext == '.json':
            policies['GLOBAL'][name] = obj.data_stream.read()

        # Read any account role specific policies
        if name == 'roles' and obj.type == 'tree':
            for account in [x for x in obj.trees]:
                for role in [x for x in account.trees]:
                    role_policies = {
                        policy.name.replace('.json', ''): policy.data_stream.read()
                        for policy in role.blobs if policy.name.endswith('.json')
                    }

                    if account.name in policies:
                        if role.name in policies[account.name]:
                            policies[account.name][role.name] += role_policies
                        else:
                            policies[account.name][role.name] = role_policies
                    else:
                        policies[account.name] = {
                            role.name: role_policies
                        }

    return policies
finally:
    if os.path.exists(fldr) and os.path.isdir(fldr):
        shutil.rmtree(fldr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_policies_from_aws(client, scope='Local'): """Returns a list of all the policies currently applied to an AWS Account. Returns a list containing all the policies for the specified scope Args: client (:obj:`boto3.session.Session`): A boto3 Session object scope (`str`): The policy scope to use. Default: Local Returns: :obj:`list` of `dict` """
done = False
marker = None
policies = []

while not done:
    if marker:
        response = client.list_policies(Marker=marker, Scope=scope)
    else:
        response = client.list_policies(Scope=scope)

    policies += response['Policies']

    if response['IsTruncated']:
        marker = response['Marker']
    else:
        done = True

return policies
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_roles(client): """Returns a list of all the roles for an account. Returns a list containing all the roles for the account. Args: client (:obj:`boto3.session.Session`): A boto3 Session object Returns: :obj:`list` of `dict` """
done = False
marker = None
roles = []

while not done:
    if marker:
        response = client.list_roles(Marker=marker)
    else:
        response = client.list_roles()

    roles += response['Roles']

    if response['IsTruncated']:
        marker = response['Marker']
    else:
        done = True

return roles
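The same result could likely be obtained with boto3's built-in paginator; this sketch assumes `client` is a boto3 IAM client:

    def get_roles_paginated(client):
        # Let boto3 handle the Marker/IsTruncated loop shown above.
        roles = []
        for page in client.get_paginator('list_roles').paginate():
            roles += page['Roles']
        return roles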
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_policy(self, account, client, document, name, arn=None): """Create a new IAM policy. If the policy already exists, a new version will be added and if needed the oldest policy version not in use will be removed. Returns a dictionary containing the policy or version information Args: account (:obj:`Account`): Account to create the policy on client (:obj:`boto3.client`): A boto3 client object document (`str`): Policy document name (`str`): Name of the policy to create / update arn (`str`): Optional ARN for the policy to update Returns: `dict` """
if not arn and not name:
    raise ValueError('create_policy must be called with either arn or name in the argument list')

if arn:
    response = client.list_policy_versions(PolicyArn=arn)

    # If we're at the max of the 5 possible versions, remove the oldest version that is not
    # the currently active policy
    if len(response['Versions']) >= 5:
        version = [x for x in sorted(
            response['Versions'],
            key=lambda k: k['CreateDate']
        ) if not x['IsDefaultVersion']][0]

        self.log.info('Deleting oldest IAM Policy version {}/{}'.format(arn, version['VersionId']))
        client.delete_policy_version(PolicyArn=arn, VersionId=version['VersionId'])
        auditlog(
            event='iam.check_roles.delete_policy_version',
            actor=self.ns,
            data={
                'account': account.account_name,
                'policyName': name,
                'policyArn': arn,
                'versionId': version['VersionId']
            }
        )

    res = client.create_policy_version(
        PolicyArn=arn,
        PolicyDocument=document,
        SetAsDefault=True
    )
else:
    res = client.create_policy(
        PolicyName=name,
        PolicyDocument=document
    )

auditlog(
    event='iam.check_roles.create_policy',
    actor=self.ns,
    data={
        'account': account.account_name,
        'policyName': name,
        'policyArn': arn
    }
)
return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def notify(self, subsystem, recipient, subject, body_html, body_text): """You can send messages either to channels and private groups by using the following formats #channel-name @username-direct-message Args: subsystem (`str`): Name of the subsystem originating the notification recipient (`str`): Recipient subject (`str`): Subject / title of the notification, not used for this notifier body_html (`str)`: HTML formatted version of the message, not used for this notifier body_text (`str`): Text formatted version of the message Returns: `None` """
if not re.match(self.validation, recipient, re.I):
    raise ValueError('Invalid recipient provided')

if recipient.startswith('#'):
    target_type = 'channel'
elif recipient.find('@') != -1:
    target_type = 'user'
else:
    self.log.error('Unknown contact type for Slack: {}'.format(recipient))
    return

try:
    self._send_message(
        target_type=target_type,
        target=recipient,
        message=body_text,
        title=subject
    )
except SlackError as ex:
    self.log.error('Failed sending message to {}: {}'.format(recipient, ex))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_message(contacts, message): """List of contacts the send the message to. You can send messages either to channels and private groups by using the following formats #channel-name @username-direct-message If the channel is the name of a private group / channel, you must first invite the bot to the channel to ensure it is allowed to send messages to the group. Returns true if the message was sent, else `False` Args: contacts (:obj:`list` of `str`,`str`): List of contacts message (str): Message to send Returns: `bool` """
if type(contacts) == str:
    contacts = [contacts]

recipients = list(set(contacts))

send_notification(
    subsystem='UNKNOWN',
    recipients=[NotificationContact('slack', x) for x in recipients],
    subject=None,
    body_html=message,
    body_text=message
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _register_default_option(nsobj, opt): """ Register default ConfigOption value if it doesn't exist. If does exist, update the description if needed """
item = ConfigItem.get(nsobj.namespace_prefix, opt.name)
if not item:
    logger.info('Adding {} ({}) = {} to {}'.format(
        opt.name,
        opt.type,
        opt.default_value,
        nsobj.namespace_prefix
    ))
    item = ConfigItem()
    item.namespace_prefix = nsobj.namespace_prefix
    item.key = opt.name
    item.value = opt.default_value
    item.type = opt.type
    item.description = opt.description
    nsobj.config_items.append(item)
else:
    if item.description != opt.description:
        logger.info('Updating description of {} / {}'.format(item.namespace_prefix, item.key))
        item.description = opt.description
        db.session.add(item)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _import_templates(force=False): """Import templates from disk into database Reads all templates from disk and adds them to the database. By default, any template that has been modified by the user will not be updated. This can however be changed by setting `force` to `True`, which causes all templates to be imported regardless of status Args: force (`bool`): Force overwrite any templates with local changes made. Default: `False` Returns: `None` """
tmplpath = os.path.join(resource_filename('cloud_inquisitor', 'data'), 'templates')
disk_templates = {f: os.path.join(root, f) for root, directory, files in os.walk(tmplpath) for f in files}
db_templates = {tmpl.template_name: tmpl for tmpl in db.Template.find()}

for name, template_file in disk_templates.items():
    with open(template_file, 'r') as f:
        body = f.read()
        disk_hash = get_hash(body)

    if name not in db_templates:
        template = Template()
        template.template_name = name
        template.template = body

        db.session.add(template)
        auditlog(
            event='template.import',
            actor='init',
            data={
                'template_name': name,
                'template': body
            }
        )
        logger.info('Imported template {}'.format(name))
    else:
        template = db_templates[name]
        db_hash = get_hash(template.template)

        if db_hash != disk_hash:
            if force or not db_templates[name].is_modified:
                template.template = body
                db.session.add(template)
                auditlog(
                    event='template.update',
                    actor='init',
                    data={
                        'template_name': name,
                        'template_diff': diff(template.template, body)
                    }
                )
                logger.info('Updated template {}'.format(name))
            else:
                logger.warning(
                    'Updated template available for {}. Will not import as it would'
                    ' overwrite user edited content and force is not enabled'.format(name)
                )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initialize(): """Initialize the application configuration, adding any missing default configuration or roles Returns: `None` """
global __initialized
if __initialized:
    return

# Setup all the default base settings
try:
    for data in DEFAULT_CONFIG_OPTIONS:
        nsobj = _get_config_namespace(data['prefix'], data['name'], sort_order=data['sort_order'])
        for opt in data['options']:
            _register_default_option(nsobj, opt)
        db.session.add(nsobj)

    # Iterate over all of our plugins and setup their defaults
    for ns, info in CINQ_PLUGINS.items():
        if info['name'] == 'commands':
            continue

        for entry_point in info['plugins']:
            _cls = entry_point.load()
            if hasattr(_cls, 'ns'):
                ns_name = '{}: {}'.format(info['name'].capitalize(), _cls.name)
                if not isinstance(_cls.options, abstractproperty):
                    nsobj = _get_config_namespace(_cls.ns, ns_name)
                    if _cls.options:
                        for opt in _cls.options:
                            _register_default_option(nsobj, opt)
                    db.session.add(nsobj)

    # Create the default roles if they are missing and import any missing or updated templates,
    # if they havent been modified by the user
    _add_default_roles()
    _import_templates()

    db.session.commit()
    dbconfig.reload_data()
    __initialized = True
except ProgrammingError as ex:
    if str(ex).find('1146') != -1:
        logging.getLogger('cloud_inquisitor').error(
            'Missing required tables, please make sure you run `cloud-inquisitor db upgrade`'
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def before_request(): """Checks to ensure that the session is valid and validates the users CSRF token is present Returns: `None` """
if not request.path.startswith('/saml') and not request.path.startswith('/auth'):
    # Validate the session has the items we need
    if 'accounts' not in session:
        logger.debug('Missing \'accounts\' from session object, sending user to login page')
        return BaseView.make_unauth_response()

    # Require the CSRF token to be present if we are performing a change action (add, delete or modify objects)
    # but exclude the SAML endpoints from the CSRF check
    if request.method in ('POST', 'PUT', 'DELETE',):
        if session['csrf_token'] != request.headers.get('X-Csrf-Token'):
            logger.info('CSRF Token is missing or incorrect, sending user to login page')
            abort(403)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def after_request(response): """Modifies the response object prior to sending it to the client. Used to add CORS headers to the request Args: response (response): Flask response object Returns: `None` """
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')

return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_auth_system(self, auth_system): """Register a given authentication system with the framework. Returns `True` if the `auth_system` is registered as the active auth system, else `False` Args: auth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register Returns: `bool` """
auth_system_settings = dbconfig.get('auth_system')

if auth_system.name not in auth_system_settings['available']:
    auth_system_settings['available'].append(auth_system.name)
    dbconfig.set('default', 'auth_system', DBCChoice(auth_system_settings))

if auth_system.name == auth_system_settings['enabled'][0]:
    self.active_auth_system = auth_system
    auth_system().bootstrap()
    logger.debug('Registered {} as the active auth system'.format(auth_system.name))
    return True
else:
    logger.debug('Not trying to load the {} auth system as it is disabled by config'.format(auth_system.name))
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_menu_item(self, items): """Registers a views menu items into the metadata for the application. Skip if the item is already present Args: items (`list` of `MenuItem`): A list of `MenuItem`s Returns: `None` """
for itm in items:
    if itm.group in self.menu_items:
        # Only add the menu item if we don't already have it registered
        if itm not in self.menu_items[itm.group]['items']:
            self.menu_items[itm.group]['items'].append(itm)
    else:
        logger.warning('Tried registering menu item to unknown group {}'.format(itm.group))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __register_types(self): """Iterates all entry points for resource types and registers a `resource_type_id` to class mapping Returns: `None` """
try:
    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.types']['plugins']:
        cls = entry_point.load()
        self.types[ResourceType.get(cls.resource_type).resource_type_id] = cls
        logger.debug('Registered resource type {}'.format(cls.__name__))
except SQLAlchemyError as ex:
    logger.warning('Failed loading type information: {}'.format(ex))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __register_notifiers(self): """Lists all notifiers to be able to provide metadata for the frontend Returns: `list` of `dict` """
notifiers = {}
for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.notifiers']['plugins']:
    cls = entry_point.load()
    notifiers[cls.notifier_type] = cls.validation

return notifiers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_views(self, app): """Iterates all entry points for views and auth systems and dynamically load and register the routes with Flask Args: app (`CINQFlask`): CINQFlask object to register views for Returns: `None` """
self.add_resource(LoginRedirectView, '/auth/login')
self.add_resource(LogoutRedirectView, '/auth/logout')

for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auth']['plugins']:
    cls = entry_point.load()
    app.available_auth_systems[cls.name] = cls

    if app.register_auth_system(cls):
        for vcls in cls.views:
            self.add_resource(vcls, *vcls.URLS)
            logger.debug('Registered auth system view {} for paths: {}'.format(
                cls.__name__,
                ', '.join(vcls.URLS)
            ))

if not app.active_auth_system:
    logger.error('No auth systems active, please enable an auth system and then start the system again')
    sys.exit(-1)

for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.views']['plugins']:
    view = entry_point.load()
    self.add_resource(view, *view.URLS)
    app.register_menu_item(view.MENU_ITEMS)

    logger.debug('Registered view {} for paths: {}'.format(view.__name__, ', '.join(view.URLS)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_axfr_records(self, server, domains): """Return a `list` of `dict`s containing the zones and their records, obtained from the DNS server Returns: :obj:`list` of `dict` """
zones = []
for zoneName in domains:
    try:
        zone = {
            'zone_id': get_resource_id('axfrz', zoneName),
            'name': zoneName,
            'source': 'AXFR',
            'comment': None,
            'tags': {},
            'records': []
        }

        z = dns_zone.from_xfr(query.xfr(server, zoneName))
        rdata_fields = ('name', 'ttl', 'rdata')
        for rr in [dict(zip(rdata_fields, x)) for x in z.iterate_rdatas()]:
            record_name = rr['name'].derelativize(z.origin).to_text()
            zone['records'].append(
                {
                    'id': get_resource_id('axfrr', record_name, ['{}={}'.format(k, str(v)) for k, v in rr.items()]),
                    'zone_id': zone['zone_id'],
                    'name': record_name,
                    'value': sorted([rr['rdata'].to_text()]),
                    'type': type_to_text(rr['rdata'].rdtype)
                }
            )

        if len(zone['records']) > 0:
            zones.append(zone)

    except Exception as ex:
        self.log.exception('Failed fetching DNS zone information for {}: {}'.format(zoneName, ex))
        raise

return zones
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_cloudflare_records(self, *, account): """Return a `list` of `dict`s containing the zones and their records, obtained from the CloudFlare API Returns: account (:obj:`CloudFlareAccount`): A CloudFlare Account object :obj:`list` of `dict` """
zones = []
for zobj in self.__cloudflare_list_zones(account=account):
    try:
        self.log.debug('Processing DNS zone CloudFlare/{}'.format(zobj['name']))
        zone = {
            'zone_id': get_resource_id('cfz', zobj['name']),
            'name': zobj['name'],
            'source': 'CloudFlare',
            'comment': None,
            'tags': {},
            'records': []
        }

        for record in self.__cloudflare_list_zone_records(account=account, zoneID=zobj['id']):
            zone['records'].append({
                'id': get_resource_id('cfr', zobj['id'], ['{}={}'.format(k, v) for k, v in record.items()]),
                'zone_id': zone['zone_id'],
                'name': record['name'],
                'value': record['value'],
                'type': record['type']
            })

        if len(zone['records']) > 0:
            zones.append(zone)
    except CloudFlareError:
        self.log.exception('Failed getting records for CloudFlare zone {}'.format(zobj['name']))

return zones
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __cloudflare_request(self, *, account, path, args=None): """Helper function to interact with the CloudFlare API. Args: account (:obj:`CloudFlareAccount`): CloudFlare Account object path (`str`): URL endpoint to communicate with args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume Returns: `dict` """
if not args:
    args = {}

if not self.cloudflare_initialized[account.account_id]:
    self.cloudflare_session[account.account_id] = requests.Session()
    self.cloudflare_session[account.account_id].headers.update({
        'X-Auth-Email': account.email,
        'X-Auth-Key': account.api_key,
        'Content-Type': 'application/json'
    })
    self.cloudflare_initialized[account.account_id] = True

if 'per_page' not in args:
    args['per_page'] = 100

response = self.cloudflare_session[account.account_id].get(account.endpoint + path, params=args)

if response.status_code != 200:
    raise CloudFlareError('Request failed: {}'.format(response.text))

return response.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __cloudflare_list_zones(self, *, account, **kwargs): """Helper function to list all zones registered in the CloudFlare system. Returns a `list` of the zones Args: account (:obj:`CloudFlareAccount`): A CloudFlare Account object **kwargs (`dict`): Extra arguments to pass to the API endpoint Returns: `list` of `dict` """
done = False zones = [] page = 1 while not done: kwargs['page'] = page response = self.__cloudflare_request(account=account, path='/zones', args=kwargs) info = response['result_info'] if 'total_pages' not in info or page == info['total_pages']: done = True else: page += 1 zones += response['result'] return zones
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __cloudflare_list_zone_records(self, *, account, zoneID, **kwargs): """Helper function to list all records on a CloudFlare DNS Zone. Returns a `list` of `dict`s describing the records and their information. Args: account (:obj:`CloudFlareAccount`): A CloudFlare Account object zoneID (`int`): Internal CloudFlare ID of the DNS zone **kwargs (`dict`): Additional arguments to be consumed by the API endpoint Returns: :obj:`list` of `dict` """
done = False records = {} page = 1 while not done: kwargs['page'] = page response = self.__cloudflare_request( account=account, path='/zones/{}/dns_records'.format(zoneID), args=kwargs ) info = response['result_info'] # Check if we have received all records, and if not iterate over the result set if 'total_pages' not in info or page >= info['total_pages']: done = True else: page += 1 for record in response['result']: if record['name'] in records: records[record['name']]['value'] = sorted(records[record['name']]['value'] + [record['content']]) else: records[record['name']] = { 'name': record['name'], 'value': sorted([record['content']]), 'type': record['type'] } return list(records.values())
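The merge step above collapses multiple records that share a name into a single entry with a sorted list of values; the same idea in isolation, with made-up record dicts:

def merge_records(raw_records):
    merged = {}
    for record in raw_records:
        if record['name'] in merged:
            # Same name seen before: append the value and keep the list sorted
            merged[record['name']]['value'] = sorted(
                merged[record['name']]['value'] + [record['content']]
            )
        else:
            merged[record['name']] = {
                'name': record['name'],
                'value': sorted([record['content']]),
                'type': record['type'],
            }
    return list(merged.values())

# merge_records([
#     {'name': 'www.example.com', 'content': '192.0.2.1', 'type': 'A'},
#     {'name': 'www.example.com', 'content': '192.0.2.2', 'type': 'A'},
# ])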
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, *args, **kwargs): """Entry point for the scheduler Args: *args: Optional arguments **kwargs: Optional keyword arguments Returns: None """
accounts = list(AWSAccount.get_all(include_disabled=False).values()) # S3 Bucket config s3_acl = get_template('cloudtrail_s3_bucket_policy.json') s3_bucket_name = self.dbconfig.get('bucket_name', self.ns) s3_bucket_region = self.dbconfig.get('bucket_region', self.ns, 'us-west-2') s3_bucket_account = AWSAccount.get(self.dbconfig.get('bucket_account', self.ns)) CloudTrail.create_s3_bucket(s3_bucket_name, s3_bucket_region, s3_bucket_account, s3_acl) self.validate_sqs_policy(accounts) for account in accounts: ct = CloudTrail(account, s3_bucket_name, s3_bucket_region, self.log) ct.run()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_sqs_policy(self, accounts): """Given a list of accounts, ensures that the SQS policy allows all the accounts to write to the queue Args: accounts (`list` of :obj:`Account`): List of accounts Returns: `None` """
sqs_queue_name = self.dbconfig.get('sqs_queue_name', self.ns) sqs_queue_region = self.dbconfig.get('sqs_queue_region', self.ns) sqs_account = AWSAccount.get(self.dbconfig.get('sqs_queue_account', self.ns)) session = get_aws_session(sqs_account) sqs = session.client('sqs', region_name=sqs_queue_region) sqs_queue_url = sqs.get_queue_url(QueueName=sqs_queue_name, QueueOwnerAWSAccountId=sqs_account.account_number) sqs_attribs = sqs.get_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], AttributeNames=['Policy']) policy = json.loads(sqs_attribs['Attributes']['Policy']) for account in accounts: arn = 'arn:aws:sns:*:{}:{}'.format(account.account_number, sqs_queue_name) if arn not in policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn']: self.log.warning('SQS policy is missing condition for ARN {}'.format(arn)) policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn'].append(arn) sqs.set_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], Attributes={'Policy': json.dumps(policy)})
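The policy edit above relies on a ForAnyValue:ArnEquals condition inside the first policy statement; a trimmed-down sketch of reading and rewriting that condition with boto3 (queue name, owning account and account numbers are placeholders, and the policy is assumed to already contain the condition list):

import json
import boto3

def add_source_arns(queue_name, queue_owner_account, account_numbers, region='us-west-2'):
    sqs = boto3.client('sqs', region_name=region)
    queue_url = sqs.get_queue_url(
        QueueName=queue_name,
        QueueOwnerAWSAccountId=queue_owner_account
    )['QueueUrl']
    attribs = sqs.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['Policy'])
    policy = json.loads(attribs['Attributes']['Policy'])

    allowed = policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn']
    for account_number in account_numbers:
        arn = 'arn:aws:sns:*:{}:{}'.format(account_number, queue_name)
        if arn not in allowed:
            # Permit the new account's SNS topics to deliver to the queue
            allowed.append(arn)

    sqs.set_queue_attributes(QueueUrl=queue_url, Attributes={'Policy': json.dumps(policy)})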
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """Configures and enables a CloudTrail trail and logging on a single AWS Account. Has the capability to create both single region and multi-region trails. Will automatically create SNS topics, subscribe to SQS queues and turn on logging for the account in question, as well as reverting any manual changes to the trails if applicable. Returns: None """
for aws_region in AWS_REGIONS:
    self.log.debug('Checking trails for {}/{}'.format(
        self.account.account_name, aws_region
    ))
    ct = self.session.client('cloudtrail', region_name=aws_region)
    trails = ct.describe_trails()
    if len(trails['trailList']) == 0:
        if aws_region == self.global_ct_region:
            self.create_cloudtrail(aws_region)
    else:
        for trail in trails['trailList']:
            if trail['Name'] in ('Default', self.trail_name):
                if not trail['IsMultiRegionTrail']:
                    if trail['Name'] == self.trail_name and self.global_ct_region == aws_region:
                        ct.update_trail(
                            Name=trail['Name'],
                            IncludeGlobalServiceEvents=True,
                            IsMultiRegionTrail=True
                        )
                        auditlog(
                            event='cloudtrail.update_trail',
                            actor=self.ns,
                            data={
                                'trailName': trail['Name'],
                                'account': self.account.account_name,
                                'region': aws_region,
                                'changes': [
                                    {
                                        'setting': 'IsMultiRegionTrail',
                                        'oldValue': False,
                                        'newValue': True
                                    }
                                ]
                            }
                        )
                    else:
                        ct.delete_trail(Name=trail['Name'])
                        auditlog(
                            event='cloudtrail.delete_trail',
                            actor=self.ns,
                            data={
                                'trailName': trail['Name'],
                                'account': self.account.account_name,
                                'region': aws_region,
                                'reason': 'Incorrect region, name or not multi-regional'
                            }
                        )
                else:
                    if trail['HomeRegion'] == aws_region:
                        if self.global_ct_region != aws_region or trail['Name'] == 'Default':
                            ct.delete_trail(Name=trail['Name'])
                            auditlog(
                                event='cloudtrail.delete_trail',
                                actor=self.ns,
                                data={
                                    'trailName': trail['Name'],
                                    'account': self.account.account_name,
                                    'region': aws_region,
                                    'reason': 'Incorrect name or region for multi-region trail'
                                }
                            )
    trails = ct.describe_trails()
    for trail in trails['trailList']:
        if trail['Name'] == self.trail_name and trail['HomeRegion'] == aws_region:
            self.validate_trail_settings(ct, aws_region, trail)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_trail_settings(self, ct, aws_region, trail): """Validates logging, SNS and S3 settings for the global trail. Has the capability to: - start logging for the trail - create SNS topics & queues - configure or modify an S3 bucket for logging """
self.log.debug('Validating trail {}/{}/{}'.format( self.account.account_name, aws_region, trail['Name'] )) status = ct.get_trail_status(Name=trail['Name']) if not status['IsLogging']: self.log.warning('Logging is disabled for {}/{}/{}'.format( self.account.account_name, aws_region, trail['Name'] )) self.start_logging(aws_region, trail['Name']) if 'SnsTopicName' not in trail or not trail['SnsTopicName']: self.log.warning('SNS Notifications not enabled for {}/{}/{}'.format( self.account.account_name, aws_region, trail['Name'] )) self.create_sns_topic(aws_region) self.enable_sns_notification(aws_region, trail['Name']) if not self.validate_sns_topic_subscription(aws_region): self.log.warning( 'SNS Notification configured but not subscribed for {}/{}/{}'.format( self.account.account_name, aws_region, trail['Name'] ) ) self.subscribe_sns_topic_to_sqs(aws_region) if trail['S3BucketName'] != self.bucket_name: self.log.warning('CloudTrail is logging to an incorrect bucket for {}/{}/{}'.format( self.account.account_name, trail['S3BucketName'], trail['Name'] )) self.set_s3_bucket(aws_region, trail['Name'], self.bucket_name) if not trail.get('S3KeyPrefix') or trail['S3KeyPrefix'] != self.account.account_name: self.log.warning('Missing or incorrect S3KeyPrefix for {}/{}/{}'.format( self.account.account_name, aws_region, trail['Name'] )) self.set_s3_prefix(aws_region, trail['Name'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_sns_topic(self, region): """Creates an SNS topic if needed. Returns the ARN of the created SNS topic Args: region (str): Region name Returns: `str` """
sns = self.session.client('sns', region_name=region) self.log.info('Creating SNS topic for {}/{}'.format(self.account, region)) # Create the topic res = sns.create_topic(Name=self.topic_name) arn = res['TopicArn'] # Allow CloudTrail to publish messages with a policy update tmpl = get_template('cloudtrail_sns_policy.json') policy = tmpl.render(region=region, account_id=self.account.account_number, topic_name=self.topic_name) sns.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue=policy) auditlog( event='cloudtrail.create_sns_topic', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) return arn
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_sns_topic_subscription(self, region): """Validates SQS subscription to the SNS topic. Returns `True` if subscribed or `False` if not subscribed or topic is missing Args: region (str): Name of AWS Region Returns: `bool` """
sns = self.session.client('sns', region_name=region) arn = 'arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name) try: data = sns.list_subscriptions_by_topic(TopicArn=arn) except ClientError as ex: self.log.error('Failed to list subscriptions by topic in {} ({}): {}'.format( self.account.account_name, region, ex )) return False for sub in data['Subscriptions']: if sub['Endpoint'] == self.sqs_queue: if sub['SubscriptionArn'] == 'PendingConfirmation': self.log.warning('Subscription pending confirmation for {} in {}'.format( self.account.account_name, region )) return False return True return False
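A compact version of the subscription check, independent of the class state (the topic and queue ARNs are illustrative placeholders, and pagination of the subscription list is omitted for brevity):

import boto3

def is_queue_subscribed(topic_arn, queue_arn, region='us-west-2'):
    sns = boto3.client('sns', region_name=region)
    subs = sns.list_subscriptions_by_topic(TopicArn=topic_arn)['Subscriptions']
    for sub in subs:
        if sub['Endpoint'] == queue_arn:
            # A pending subscription is not treated as confirmed
            return sub['SubscriptionArn'] != 'PendingConfirmation'
    return False

# is_queue_subscribed('arn:aws:sns:us-west-2:123456789012:cloudtrail-topic',
#                     'arn:aws:sqs:us-west-2:123456789012:cloudtrail-queue')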
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subscribe_sns_topic_to_sqs(self, region): """Subscribe SQS to the SNS topic. Returns the ARN of the SNS Topic subscribed Args: region (`str`): Name of the AWS region Returns: `str` """
sns = self.session.resource('sns', region_name=region) topic = sns.Topic('arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name)) topic.subscribe(Protocol='sqs', Endpoint=self.sqs_queue) auditlog( event='cloudtrail.subscribe_sns_topic_to_sqs', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) return topic.attributes['TopicArn']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_cloudtrail(self, region): """Creates a new CloudTrail Trail Args: region (str): Name of the AWS region Returns: `None` """
ct = self.session.client('cloudtrail', region_name=region) # Creating the sns topic for the trail prior to creation self.create_sns_topic(region) ct.create_trail( Name=self.trail_name, S3BucketName=self.bucket_name, S3KeyPrefix=self.account.account_name, IsMultiRegionTrail=True, IncludeGlobalServiceEvents=True, SnsTopicName=self.topic_name ) self.subscribe_sns_topic_to_sqs(region) auditlog( event='cloudtrail.create_cloudtrail', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) self.log.info('Created CloudTrail for {} in {} ({})'.format(self.account, region, self.bucket_name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enable_sns_notification(self, region, trailName): """Enable SNS notifications for a Trail Args: region (`str`): Name of the AWS region trailName (`str`): Name of the CloudTrail Trail Returns: `None` """
ct = self.session.client('cloudtrail', region_name=region) ct.update_trail(Name=trailName, SnsTopicName=self.topic_name) auditlog( event='cloudtrail.enable_sns_notification', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) self.log.info('Enabled SNS notifications for trail {} in {}/{}'.format( trailName, self.account.account_name, region ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_logging(self, region, name): """Turn on logging for a CloudTrail Trail Args: region (`str`): Name of the AWS region name (`str`): Name of the CloudTrail Trail Returns: `None` """
ct = self.session.client('cloudtrail', region_name=region) ct.start_logging(Name=name) auditlog( event='cloudtrail.start_logging', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) self.log.info('Enabled logging for {} ({})'.format(name, region))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_s3_prefix(self, region, name): """Sets the S3 prefix for a CloudTrail Trail Args: region (`str`): Name of the AWS region name (`str`): Name of the CloudTrail Trail Returns: `None` """
ct = self.session.client('cloudtrail', region_name=region) ct.update_trail(Name=name, S3KeyPrefix=self.account.account_name) auditlog( event='cloudtrail.set_s3_prefix', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) self.log.info('Updated S3KeyPrefix to {0} for {0}/{1}'.format( self.account.account_name, region ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_s3_bucket(self, region, name, bucketName): """Sets the S3 bucket location for logfile delivery Args: region (`str`): Name of the AWS region name (`str`): Name of the CloudTrail Trail bucketName (`str`): Name of the S3 bucket to deliver log files to Returns: `None` """
ct = self.session.client('cloudtrail', region_name=region) ct.update_trail(Name=name, S3BucketName=bucketName) auditlog( event='cloudtrail.set_s3_bucket', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) self.log.info('Updated S3BucketName to {} for {} in {}/{}'.format( bucketName, name, self.account.account_name, region ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_s3_bucket(cls, bucket_name, bucket_region, bucket_account, template): """Creates the S3 bucket on the account specified as the destination account for log files Args: bucket_name (`str`): Name of the S3 bucket bucket_region (`str`): AWS Region for the bucket bucket_account (:obj:`Account`): Account to create the S3 bucket in template (:obj:`Template`): Jinja2 Template object for the bucket policy Returns: `None` """
s3 = get_aws_session(bucket_account).client('s3', region_name=bucket_region)

# Check to see if the bucket already exists and if we have access to it
try:
    s3.head_bucket(Bucket=bucket_name)
except ClientError as ex:
    status_code = ex.response['ResponseMetadata']['HTTPStatusCode']

    # Bucket exists and we do not have access
    if status_code == 403:
        raise Exception('Bucket {} already exists but we do not have access to it and so cannot continue'.format(
            bucket_name
        ))

    # Bucket does not exist, let's create one
    elif status_code == 404:
        try:
            s3.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={
                    'LocationConstraint': bucket_region
                }
            )

            auditlog(
                event='cloudtrail.create_s3_bucket',
                actor=cls.ns,
                data={
                    'account': bucket_account.account_name,
                    'bucket_region': bucket_region,
                    'bucket_name': bucket_name
                }
            )
        except Exception:
            raise Exception('An error occurred while trying to create the bucket, cannot continue')

try:
    bucket_acl = template.render(
        bucket_name=bucket_name,
        account_id=bucket_account.account_number
    )
    s3.put_bucket_policy(Bucket=bucket_name, Policy=bucket_acl)
except Exception as ex:
    raise Warning('An error occurred while setting bucket policy: {}'.format(ex))
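The existence check above hinges on the HTTP status code surfaced by head_bucket; the bare pattern looks roughly like this (bucket name and region are placeholders):

import boto3
from botocore.exceptions import ClientError

def ensure_bucket(bucket_name, region='us-west-2'):
    s3 = boto3.client('s3', region_name=region)
    try:
        s3.head_bucket(Bucket=bucket_name)
        return  # Bucket exists and we can reach it
    except ClientError as ex:
        status = ex.response['ResponseMetadata']['HTTPStatusCode']
        if status == 403:
            # Bucket exists but belongs to someone else
            raise Exception('Bucket {} exists but is not accessible'.format(bucket_name))
        if status == 404:
            # Bucket is missing, create it in the requested region
            s3.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': region}
            )
        else:
            raise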
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """Main entry point for the auditor worker. Returns: `None` """
# Loop through all accounts that are marked as enabled accounts = list(AWSAccount.get_all(include_disabled=False).values()) for account in accounts: self.log.debug('Updating VPC Flow Logs for {}'.format(account)) self.session = get_aws_session(account) role_arn = self.confirm_iam_role(account) # region specific for aws_region in AWS_REGIONS: try: vpc_list = VPC.get_all(account, aws_region).values() need_vpc_flow_logs = [x for x in vpc_list if x.vpc_flow_logs_status != 'ACTIVE'] for vpc in need_vpc_flow_logs: if self.confirm_cw_log(account, aws_region, vpc.id): self.create_vpc_flow_logs(account, aws_region, vpc.id, role_arn) else: self.log.info('Failed to confirm log group for {}/{}'.format( account, aws_region )) except Exception: self.log.exception('Failed processing VPCs for {}/{}.'.format( account, aws_region )) db.session.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def confirm_iam_role(self, account): """Return the ARN of the IAM Role on the provided account as a string, creating the role if it does not exist Args: account (:obj:`Account`): Account where to locate the role Returns: `str` """
try:
    iam = self.session.client('iam')
    rolearn = iam.get_role(RoleName=self.role_name)['Role']['Arn']
    return rolearn
except ClientError as e:
    if e.response['Error']['Code'] == 'NoSuchEntity':
        # The role does not exist yet; create it and return the new ARN
        return self.create_iam_role(account)
    else:
        raise
except Exception as e:
    self.log.exception('Failed validating IAM role for VPC Flow Log Auditing for {}'.format(e))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_iam_role(self, account): """Create a new IAM role. Returns the ARN of the newly created role Args: account (:obj:`Account`): Account where to create the IAM role Returns: `str` """
try: iam = self.session.client('iam') trust = get_template('vpc_flow_logs_iam_role_trust.json').render() policy = get_template('vpc_flow_logs_role_policy.json').render() newrole = iam.create_role( Path='/', RoleName=self.role_name, AssumeRolePolicyDocument=trust )['Role']['Arn'] # Attach an inline policy to the role to avoid conflicts or hitting the Managed Policy Limit iam.put_role_policy( RoleName=self.role_name, PolicyName='VpcFlowPolicy', PolicyDocument=policy ) self.log.debug('Created VPC Flow Logs role & policy for {}'.format(account.account_name)) auditlog( event='vpc_flow_logs.create_iam_role', actor=self.ns, data={ 'account': account.account_name, 'roleName': self.role_name, 'trustRelationship': trust, 'inlinePolicy': policy } ) return newrole except Exception: self.log.exception('Failed creating the VPC Flow Logs role for {}.'.format(account))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def confirm_cw_log(self, account, region, vpcname): """Create a new CloudWatch log group based on the VPC Name if none exists. Returns `True` if successful Args: account (:obj:`Account`): Account to create the log group in region (`str`): Region to create the log group in vpcname (`str`): Name of the VPC the log group is for Returns: `bool` """
try: cw = self.session.client('logs', region) token = None log_groups = [] while True: result = cw.describe_log_groups() if not token else cw.describe_log_groups(nextToken=token) token = result.get('nextToken') log_groups.extend([x['logGroupName'] for x in result.get('logGroups', [])]) if not token: break if vpcname not in log_groups: cw.create_log_group(logGroupName=vpcname) cw_vpc = VPC.get(vpcname) cw_vpc.set_property('vpc_flow_logs_log_group', vpcname) self.log.info('Created log group {}/{}/{}'.format(account.account_name, region, vpcname)) auditlog( event='vpc_flow_logs.create_cw_log_group', actor=self.ns, data={ 'account': account.account_name, 'region': region, 'log_group_name': vpcname, 'vpc': vpcname } ) return True except Exception: self.log.exception('Failed creating log group for {}/{}/{}.'.format( account, region, vpcname ))
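The nextToken loop above is the standard CloudWatch Logs pagination idiom; in isolation it looks like this (the group name is a placeholder):

import boto3

def log_group_exists(group_name, region='us-west-2'):
    cw = boto3.client('logs', region_name=region)
    token = None
    while True:
        # describe_log_groups rejects nextToken=None, so only pass it once set
        kwargs = {'nextToken': token} if token else {}
        page = cw.describe_log_groups(**kwargs)
        if any(group['logGroupName'] == group_name for group in page.get('logGroups', [])):
            return True
        token = page.get('nextToken')
        if not token:
            return False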
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_vpc_flow_logs(self, account, region, vpc_id, iam_role_arn): """Create a new VPC Flow log Args: account (:obj:`Account`): Account to create the flow in region (`str`): Region to create the flow in vpc_id (`str`): ID of the VPC to create the flow for iam_role_arn (`str`): ARN of the IAM role used to post logs to the log group Returns: `None` """
try: flow = self.session.client('ec2', region) flow.create_flow_logs( ResourceIds=[vpc_id], ResourceType='VPC', TrafficType='ALL', LogGroupName=vpc_id, DeliverLogsPermissionArn=iam_role_arn ) fvpc = VPC.get(vpc_id) fvpc.set_property('vpc_flow_logs_status', 'ACTIVE') self.log.info('Enabled VPC Logging {}/{}/{}'.format(account, region, vpc_id)) auditlog( event='vpc_flow_logs.create_vpc_flow', actor=self.ns, data={ 'account': account.account_name, 'region': region, 'vpcId': vpc_id, 'arn': iam_role_arn } ) except Exception: self.log.exception('Failed creating VPC Flow Logs for {}/{}/{}.'.format( account, region, vpc_id ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_contacts(self, issue): """Returns a list of contacts for an issue Args: issue (:obj:`RequiredTagsIssue`): Issue record Returns: `list` of `dict` """
# If the resource has been deleted, just return an empty list, to trigger issue deletion without notification
if not issue.resource:
    return []

account_contacts = issue.resource.account.contacts

try:
    resource_owners = issue.resource.get_owner_emails()
    # Double check get_owner_emails for its return value
    if type(resource_owners) is list:
        for resource_owner in resource_owners:
            account_contacts.append({'type': 'email', 'value': resource_owner})
except AttributeError:
    pass

return account_contacts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_actions(self, issues): """Returns a list of actions to executed Args: issues (`list` of :obj:`RequiredTagsIssue`): List of issues Returns: `list` of `dict` """
actions = [] try: for issue in issues: action_item = self.determine_action(issue) if action_item['action'] != AuditActions.IGNORE: action_item['owners'] = self.get_contacts(issue) actions.append(action_item) finally: db.session.rollback() return actions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def determine_alert(self, action_schedule, issue_creation_time, last_alert): """Determine if we need to trigger an alert Args: action_schedule (`list`): A list containing the alert schedule issue_creation_time (`int`): Time the issue was created last_alert (`str`): Time the last alert was sent Returns: (`None` or `str`) None if no alert should be sent, otherwise the schedule entry for the alert that should be sent """
issue_age = time.time() - issue_creation_time alert_schedule_lookup = {pytimeparse.parse(action_time): action_time for action_time in action_schedule} alert_schedule = sorted(alert_schedule_lookup.keys()) last_alert_time = pytimeparse.parse(last_alert) for alert_time in alert_schedule: if last_alert_time < alert_time <= issue_age and last_alert_time != alert_time: return alert_schedule_lookup[alert_time] else: return None
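A small worked example of the schedule logic, assuming pytimeparse is installed: with a schedule of 6 hours / 1 day / 1 week, an issue that is 30 hours old and was last alerted at the 6 hour mark should now trigger the 1 day alert.

import pytimeparse

schedule = ['6 hours', '1 day', '1 week']
issue_age = 30 * 3600            # issue created 30 hours ago
last_alert = pytimeparse.parse('6 hours')

lookup = {pytimeparse.parse(t): t for t in schedule}
due = [lookup[t] for t in sorted(lookup) if last_alert < t <= issue_age]
print(due[0] if due else None)   # -> '1 day'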
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def determine_action(self, issue): """Determine the action we should take for the issue Args: issue: Issue to determine action for Returns: `dict` """
resource_type = self.resource_types[issue.resource.resource_type_id] issue_alert_schedule = self.alert_schedule[resource_type] if \ resource_type in self.alert_schedule \ else self.alert_schedule['*'] action_item = { 'action': None, 'action_description': None, 'last_alert': issue.last_alert, 'issue': issue, 'resource': self.resource_classes[self.resource_types[issue.resource.resource_type_id]](issue.resource), 'owners': [], 'stop_after': issue_alert_schedule['stop'], 'remove_after': issue_alert_schedule['remove'], 'notes': issue.notes, 'missing_tags': issue.missing_tags } time_elapsed = time.time() - issue.created stop_schedule = pytimeparse.parse(issue_alert_schedule['stop']) remove_schedule = pytimeparse.parse(issue_alert_schedule['remove']) if self.collect_only: action_item['action'] = AuditActions.IGNORE elif remove_schedule and time_elapsed >= remove_schedule: action_item['action'] = AuditActions.REMOVE action_item['action_description'] = 'Resource removed' action_item['last_alert'] = remove_schedule if issue.update({'last_alert': remove_schedule}): db.session.add(issue.issue) elif stop_schedule and time_elapsed >= stop_schedule: action_item['action'] = AuditActions.STOP action_item['action_description'] = 'Resource stopped' action_item['last_alert'] = stop_schedule if issue.update({'last_alert': stop_schedule}): db.session.add(issue.issue) else: alert_selection = self.determine_alert( issue_alert_schedule['alert'], issue.get_property('created').value, issue.get_property('last_alert').value ) if alert_selection: action_item['action'] = AuditActions.ALERT action_item['action_description'] = '{} alert'.format(alert_selection) action_item['last_alert'] = alert_selection if issue.update({'last_alert': alert_selection}): db.session.add(issue.issue) else: action_item['action'] = AuditActions.IGNORE db.session.commit() return action_item
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_actions(self, actions): """Process the actions we want to take Args: actions (`list`): List of actions we want to take Returns: `list` of notifications """
notices = {} notification_contacts = {} for action in actions: resource = action['resource'] action_status = ActionStatus.SUCCEED try: if action['action'] == AuditActions.REMOVE: action_status = self.process_action( resource, AuditActions.REMOVE ) if action_status == ActionStatus.SUCCEED: db.session.delete(action['issue'].issue) elif action['action'] == AuditActions.STOP: action_status = self.process_action( resource, AuditActions.STOP ) if action_status == ActionStatus.SUCCEED: action['issue'].update({ 'missing_tags': action['missing_tags'], 'notes': action['notes'], 'last_alert': action['last_alert'], 'state': action['action'] }) elif action['action'] == AuditActions.FIXED: db.session.delete(action['issue'].issue) elif action['action'] == AuditActions.ALERT: action['issue'].update({ 'missing_tags': action['missing_tags'], 'notes': action['notes'], 'last_alert': action['last_alert'], 'state': action['action'] }) db.session.commit() if action_status == ActionStatus.SUCCEED: for owner in [ dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)} ]: if owner['value'] not in notification_contacts: contact = NotificationContact(type=owner['type'], value=owner['value']) notification_contacts[owner['value']] = contact notices[contact] = { 'fixed': [], 'not_fixed': [] } else: contact = notification_contacts[owner['value']] if action['action'] == AuditActions.FIXED: notices[contact]['fixed'].append(action) else: notices[contact]['not_fixed'].append(action) except Exception as ex: self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format( action['resource'].account.account_name, action['resource'].id, action['resource'], ex )) return notices
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_tag(self, key, value): """Check whether a tag value is valid Args: key: A tag key value: A tag value Returns: `(True or False)` A boolean indicating whether or not the value is valid """
if key == 'owner': return validate_email(value, self.partial_owner_match) elif key == self.gdpr_tag: return value in self.gdpr_tag_values else: return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_required_tags_compliance(self, resource): """Check whether a resource is compliant Args: resource: A single resource Returns: `(list, list)` A tuple containing missing tags (if there were any) and notes """
missing_tags = [] notes = [] resource_tags = {tag.key.lower(): tag.value for tag in resource.tags} # Do not audit this resource if it is not in the Account scope if resource.resource_type in self.alert_schedule: target_accounts = self.alert_schedule[resource.resource_type]['scope'] else: target_accounts = self.alert_schedule['*']['scope'] if not (resource.account.account_name in target_accounts or '*' in target_accounts): return missing_tags, notes # Do not audit this resource if the ignore tag was set if self.audit_ignore_tag.lower() in resource_tags: return missing_tags, notes required_tags = list(self.required_tags) # Add GDPR tag to required tags if the account must be GDPR compliant if self.gdpr_enabled and resource.account.account_name in self.gdpr_accounts: required_tags.append(self.gdpr_tag) ''' # Do not audit this resource if it is still in grace period if (datetime.utcnow() - resource.resource_creation_date).total_seconds() // 3600 < self.grace_period: return missing_tags, notes ''' # Check if the resource is missing required tags or has invalid tag values for key in [tag.lower() for tag in required_tags]: if key not in resource_tags: missing_tags.append(key) elif not self.validate_tag(key, resource_tags[key]): missing_tags.append(key) notes.append('{} tag is not valid'.format(key)) return missing_tags, notes
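Stripped of the account scoping and GDPR handling, the core tag check reduces to a loop over the required keys; a self-contained sketch under that assumption (the tag data and validators are illustrative):

def find_missing_tags(resource_tags, required_tags, validators=None):
    """resource_tags: dict of lower-case key -> value; required_tags: list of keys."""
    validators = validators or {}
    missing, notes = [], []
    for key in (tag.lower() for tag in required_tags):
        if key not in resource_tags:
            missing.append(key)
        elif key in validators and not validators[key](resource_tags[key]):
            # Tag is present but its value fails the supplied validator
            missing.append(key)
            notes.append('{} tag is not valid'.format(key))
    return missing, notes

# find_missing_tags({'owner': 'someone@example.com'}, ['owner', 'accounting'])
# -> (['accounting'], [])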
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def notify(self, notices): """Send notifications to the recipients provided Args: notices (:obj:`dict` of `str`: `list`): A dictionary mapping recipients to the notification data to send Returns: `None` """
tmpl_html = get_template('required_tags_notice.html') tmpl_text = get_template('required_tags_notice.txt') for recipient, data in list(notices.items()): body_html = tmpl_html.render(data=data) body_text = tmpl_text.render(data=data) send_notification( subsystem=self.ns, recipients=[recipient], subject=self.email_subject, body_html=body_html, body_text=body_text )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_one(cls, enforcement_id): """ Return the properties of any enforcement action"""
qry = db.Enforcements.filter(enforcement_id == Enforcements.enforcement_id) return qry
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all(cls, account_id=None, location=None): """ Return all Enforcements args: `account_id` : Unique Account Identifier `location` : Region associated with the Resource returns: list of enforcement objects """
qry = db.Enforcements.filter() if account_id: qry = qry.filter(account_id == Enforcements.account_id) if location: qry = qry.join(Resource, Resource.location == location) return qry
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(cls, account_id, resource_id, action, timestamp, metrics): """ Set properties for an enforcement action"""
enforcement = Enforcements() enforcement.account_id = account_id enforcement.resource_id = resource_id enforcement.action = action enforcement.timestamp = timestamp enforcement.metrics = metrics try: db.session.add(enforcement) except SQLAlchemyError as e: logging.error('Could not add enforcement entry to database. {}'.format(e))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(cls, issue_type): """Returns the IssueType object for `issue_type`. If no existing object was found, a new type will be created in the database and returned Args: issue_type (str,int,IssueType): Issue type name, id or class Returns: :obj:`IssueType` """
if isinstance(issue_type, str): obj = getattr(db, cls.__name__).find_one(cls.issue_type == issue_type) elif isinstance(issue_type, int): obj = getattr(db, cls.__name__).find_one(cls.issue_type_id == issue_type) elif isinstance(issue_type, cls): return issue_type else: obj = None if not obj: obj = cls() obj.issue_type = issue_type db.session.add(obj) db.session.commit() db.session.refresh(obj) return obj
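The method above is a get-or-create lookup; the same idiom with a plain SQLAlchemy session looks roughly like this (the model and session names are placeholders, not the project's own helpers):

def get_or_create_issue_type(session, IssueTypeModel, name):
    # Look the type up by name, creating it on first use
    obj = session.query(IssueTypeModel).filter_by(issue_type=name).first()
    if obj is None:
        obj = IssueTypeModel(issue_type=name)
        session.add(obj)
        session.commit()
        session.refresh(obj)
    return obj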
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(issue_id, issue_type_id): """Return issue by ID Args: issue_id (str): Unique Issue identifier issue_type_id (str): Type of issue to get Returns: :obj:`Issue`: Returns Issue object if found, else None """
return db.Issue.find_one( Issue.issue_id == issue_id, Issue.issue_type_id == issue_type_id )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, *args, **kwargs): """Main execution point for the auditor Args: *args: **kwargs: Returns: `None` """
self.log.debug('Starting EBSAuditor') data = self.update_data() notices = defaultdict(list) for account, issues in data.items(): for issue in issues: for recipient in account.contacts: notices[NotificationContact(type=recipient['type'], value=recipient['value'])].append(issue) self.notify(notices)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_unattached_volumes(self): """Build a `dict` of all unattached volumes that are not ignored, keyed by the issue_id with the volume as the value Returns: :obj:`dict` of `str`: `EBSVolume` """
volumes = {} ignored_tags = dbconfig.get('ignore_tags', self.ns) for volume in EBSVolume.get_all().values(): issue_id = get_resource_id('evai', volume.id) if len(volume.attachments) == 0: if len(list(filter(set(ignored_tags).__contains__, [tag.key for tag in volume.tags]))): continue volumes[issue_id] = volume return volumes
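The ignore-tag filter above checks whether any of the configured ignore tags appears among the volume's tag keys; the equivalent test expressed as a set intersection:

def should_skip(volume_tag_keys, ignored_tags):
    # Skip the volume if any of its tag keys appears in the ignore list
    return bool(set(ignored_tags) & set(volume_tag_keys))

# should_skip(['Name', 'cinq_ignore'], ['cinq_ignore'])  -> True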
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_new_issues(self, volumes, existing_issues): """Takes a dict of unattached volumes and a dict of existing issues, and finds any new or updated issues. Args: volumes (:obj:`dict` of `str`: `EBSVolume`): Dict of current volumes with issues existing_issues (:obj:`dict` of `str`: `EBSVolumeAuditIssue`): Current list of issues Returns: :obj:`dict` of `str`: `EBSVolumeAuditIssue` """
new_issues = {} for issue_id, volume in volumes.items(): state = EBSIssueState.DETECTED.value if issue_id in existing_issues: issue = existing_issues[issue_id] data = { 'state': state, 'notes': issue.notes, 'last_notice': issue.last_notice } if issue.update(data): new_issues.setdefault(issue.volume.account, []).append(issue) self.log.debug('Updated EBSVolumeAuditIssue {}'.format( issue_id )) else: properties = { 'volume_id': volume.id, 'account_id': volume.account_id, 'location': volume.location, 'state': state, 'last_change': datetime.now(), 'last_notice': None, 'notes': [] } issue = EBSVolumeAuditIssue.create(issue_id, properties=properties) new_issues.setdefault(issue.volume.account, []).append(issue) return new_issues
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_fixed_issues(self, volumes, existing_issues): """Provided a list of volumes and existing issues, returns a list of fixed issues to be deleted Args: volumes (`dict`): A dictionary keyed on the issue id, with the :obj:`Volume` object as the value existing_issues (`dict`): A dictionary keyed on the issue id, with the :obj:`EBSVolumeAuditIssue` object as the value Returns: :obj:`list` of :obj:`EBSVolumeAuditIssue` """
fixed_issues = [] for issue_id, issue in list(existing_issues.items()): if issue_id not in volumes: fixed_issues.append(issue) return fixed_issues
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def notify(self, notices): """Send notifications to the users via the provided methods Args: notices (:obj:`dict` of `str`: `dict`): List of the notifications to send Returns: `None` """
issues_html = get_template('unattached_ebs_volume.html') issues_text = get_template('unattached_ebs_volume.txt') for recipient, issues in list(notices.items()): if issues: message_html = issues_html.render(issues=issues) message_text = issues_text.render(issues=issues) send_notification( subsystem=self.name, recipients=[recipient], subject=self.subject, body_html=message_html, body_text=message_text )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_local_aws_session(): """Returns a session for the local instance, not for a remote account Returns: :obj:`boto3:boto3.session.Session` """
if not all((app_config.aws_api.access_key, app_config.aws_api.secret_key)):
    return boto3.session.Session()
else:
    # Static credentials were provided in the configuration, so build the
    # session from them, including the session token if one was supplied
    session_args = [app_config.aws_api.access_key, app_config.aws_api.secret_key]
    if app_config.aws_api.session_token:
        session_args.append(app_config.aws_api.session_token)

    return boto3.session.Session(*session_args)
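For illustration, the two branches correspond roughly to these boto3 calls (the key values are obviously placeholders):

import boto3

# No static keys configured: fall back to the default credential chain
# (environment variables, shared config, or the EC2 instance profile).
default_session = boto3.session.Session()

# Static keys configured: build the session from them, with an optional token.
static_session = boto3.session.Session(
    aws_access_key_id='AKIAEXAMPLE',
    aws_secret_access_key='not-a-real-secret',
    aws_session_token=None,
)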