docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Return the offset aligned to the nearest greater given alignment
Arguments:
- `offset`: An integer
- `alignment`: An integer
|
def align(offset, alignment):
    """
    Return `offset` rounded up to the nearest multiple of `alignment`.

    Arguments:
    - `offset`: An integer offset.
    - `alignment`: An integer alignment (must be positive).
    """
    remainder = offset % alignment
    if remainder:
        return offset + alignment - remainder
    return offset
| 331,184
|
Constructor.
Arguments:
- `value`: A string description.
|
def __init__(self, value):
    """
    Initialize the exception with a human-readable description.

    Arguments:
    - `value`: A string description of the parse failure.
    """
    # Store the message first, then run the base-class initializer.
    self._value = value
    super(BinaryParserException, self).__init__()
| 331,188
|
Constructor.
Arguments:
- `buf`: Byte string containing stuff to parse.
- `offset`: The offset into the buffer at which the block starts.
|
def __init__(self, buf, offset):
    """
    Initialize a parsing block over a byte buffer.

    Arguments:
    - `buf`: Byte string containing the data to parse.
    - `offset`: Absolute offset into `buf` at which this block starts.
    """
    # _implicit_offset tracks where the next declared field would land
    # when no explicit offset is given.
    self._implicit_offset = 0
    self._buf = buf
    self._offset = offset
| 331,190
|
Declaratively add fields to this block.
This method will dynamically add corresponding
offset and unpacker methods to this block.
Arguments:
- `type`: A string. Should be one of the unpack_* types.
- `name`: A string.
- `offset`: A number.
- `length`: (Optional) A number. For (w)strings, length in chars.
|
def declare_field(self, type, name, offset=None, length=None):
    """
    Declaratively add a field to this block.

    Dynamically attaches an unpacker method named `name` and an offset
    attribute `_off_<name>` to this instance, and advances the implicit
    offset past the field so subsequent declarations chain naturally.

    Arguments:
    - `type`: A string. Should be one of the unpack_* types.
      (Note: intentionally shadows the builtin to preserve the public
      keyword-argument name.)
    - `name`: A string attribute name for the generated accessor.
    - `offset`: (Optional) A number; defaults to the implicit offset.
    - `length`: (Optional) A number. For (w)strings, length in chars;
      required for "binary".

    Raises:
    - `ParseException`: when the implicit offset cannot be computed for
      the given type/length combination.
    """
    if offset is None:
        offset = self._implicit_offset

    # The accessor resolves "unpack_<type>" lazily so subclasses may
    # provide additional unpackers.
    if length is None:
        def handler():
            f = getattr(self, "unpack_" + type)
            return f(offset)
    else:
        def handler():
            f = getattr(self, "unpack_" + type)
            return f(offset, length)
    setattr(self, name, handler)
    setattr(self, "_off_" + name, offset)

    # Fixed sizes (in bytes) for the self-describing types; replaces the
    # previous 18-branch if/elif chain.
    fixed_sizes = {
        "byte": 1, "int8": 1,
        "word": 2, "word_be": 2, "int16": 2,
        "dword": 4, "dword_be": 4, "int32": 4, "float": 4, "dosdate": 4,
        "qword": 8, "int64": 8, "double": 8, "filetime": 8, "systemtime": 8,
        "guid": 16,
    }
    if type in fixed_sizes:
        self._implicit_offset = offset + fixed_sizes[type]
    elif type == "binary" and length is not None:
        self._implicit_offset = offset + length
    elif type == "string" and length is not None:
        self._implicit_offset = offset + length
    elif type == "wstring" and length is not None:
        self._implicit_offset = offset + (2 * length)
    elif "string" in type and length is None:
        raise ParseException("Implicit offset not supported "
                             "for dynamic length strings")
    else:
        # Also covers "binary" without a length, which previously crashed
        # with a TypeError (offset + None) instead of a ParseException.
        raise ParseException("Implicit offset not supported "
                             "for type: {}".format(type))
| 331,191
|
Returns a little-endian unsigned byte from the relative offset.
Arguments:
- `offset`: The relative offset from the start of the block.
Throws:
- `OverrunBufferException`
|
def unpack_byte(self, offset):
    """
    Return the unsigned byte at the given relative offset.

    Arguments:
    - `offset`: The relative offset from the start of the block.

    Throws:
    - `OverrunBufferException`: if the read falls outside the buffer.
    """
    pos = self._offset + offset
    try:
        (value,) = struct.unpack_from("<B", self._buf, pos)
    except struct.error:
        raise OverrunBufferException(pos, len(self._buf))
    return value
| 331,192
|
Applies the little-endian WORD (2 bytes) to the relative offset.
Arguments:
- `offset`: The relative offset from the start of the block.
- `word`: The data to apply.
|
def pack_word(self, offset, word):
    """
    Write `word` as a little-endian WORD (2 bytes) at the relative offset.

    Arguments:
    - `offset`: The relative offset from the start of the block.
    - `word`: The integer value to write; the buffer must be mutable.
    """
    target = self._offset + offset
    return struct.pack_into("<H", self._buf, target, word)
| 331,193
|
Returns raw binary data from the relative offset with the given length.
Arguments:
- `offset`: The relative offset from the start of the block.
- `length`: The length of the binary blob. If zero, an empty
byte string is returned.
Throws:
- `OverrunBufferException`
|
def unpack_binary(self, offset, length=0):
    """
    Return raw binary data from the relative offset with the given length.

    Arguments:
    - `offset`: The relative offset from the start of the block.
    - `length`: The length of the binary blob. If zero (or falsy, to stay
      compatible with the previous `length=False` default), an empty byte
      string is returned.

    Throws:
    - `OverrunBufferException`: if the read falls outside the buffer.
    """
    if not length:
        # b"" replaces the old convoluted bytes("".encode("ascii")).
        return b""
    o = self._offset + offset
    try:
        return bytes(struct.unpack_from("<{}s".format(length), self._buf, o)[0])
    except struct.error:
        raise OverrunBufferException(o, len(self._buf))
| 331,194
|
Returns a string from the relative offset with the given length,
where each character is a wchar (2 bytes)
Arguments:
- `offset`: The relative offset from the start of the block.
- `length`: The length of the string.
Throws:
- `UnicodeDecodeError`
|
def unpack_wstring(self, offset, length):
    """
    Return a string decoded from UTF-16 at the relative offset, where each
    character is a wchar (2 bytes).

    Arguments:
    - `offset`: The relative offset from the start of the block.
    - `length`: The length of the string in characters.

    Throws:
    - `UnicodeDecodeError`: if the bytes are not valid UTF-16.
    """
    start = self._offset + offset
    end = start + 2 * length
    # The previous `except AttributeError` fallback executed the exact same
    # expression as the try body, so it was dead code and has been removed.
    return bytes(self._buf[start:end]).decode("utf16")
| 331,195
|
Returns a datetime from the DOSDATE and DOSTIME starting at
the relative offset.
Arguments:
- `offset`: The relative offset from the start of the block.
Throws:
- `OverrunBufferException`
|
def unpack_dosdate(self, offset):
    """
    Return a datetime built from the DOSDATE and DOSTIME pair starting at
    the relative offset (2 bytes date, 2 bytes time).

    Arguments:
    - `offset`: The relative offset from the start of the block.

    Throws:
    - `OverrunBufferException`: if decoding overruns the buffer.
    """
    pos = self._offset + offset
    date_bytes = self._buf[pos:pos + 2]
    time_bytes = self._buf[pos + 2:pos + 4]
    try:
        return dosdate(date_bytes, time_bytes)
    except struct.error:
        raise OverrunBufferException(pos, len(self._buf))
| 331,196
|
Returns a datetime from the QWORD Windows SYSTEMTIME timestamp
starting at the relative offset.
See http://msdn.microsoft.com/en-us/library/ms724950%28VS.85%29.aspx
Arguments:
- `offset`: The relative offset from the start of the block.
Throws:
- `OverrunBufferException`
|
def unpack_systemtime(self, offset):
    """
    Return a datetime from the Windows SYSTEMTIME structure (eight
    little-endian WORDs) starting at the relative offset.
    See http://msdn.microsoft.com/en-us/library/ms724950%28VS.85%29.aspx

    Arguments:
    - `offset`: The relative offset from the start of the block.

    Throws:
    - `OverrunBufferException`: if the read falls outside the buffer.
    """
    pos = self._offset + offset
    try:
        fields = struct.unpack_from("<HHHHHHHH", self._buf, pos)
    except struct.error:
        raise OverrunBufferException(pos, len(self._buf))
    # SYSTEMTIME layout: year, month, day-of-week, day, hour, minute,
    # second, milliseconds.  The day-of-week word is skipped.
    year, month, _, day, hour, minute, second, frac = fields
    # NOTE(review): SYSTEMTIME's last field is milliseconds but it is
    # passed to datetime() as microseconds — preserved as-is; confirm.
    return datetime(year, month, day, hour, minute, second, frac)
| 331,197
|
Returns a string containing a GUID starting at the relative offset.
Arguments:
- `offset`: The relative offset from the start of the block.
Throws:
- `OverrunBufferException`
|
def unpack_guid(self, offset):
    """
    Return a string containing the GUID starting at the relative offset,
    formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".

    Arguments:
    - `offset`: The relative offset from the start of the block.

    Throws:
    - `OverrunBufferException`: if fewer than 16 bytes are available.
    """
    o = self._offset + offset
    try:
        _bin = bytes(self._buf[o:o + 16])
    except IndexError:
        raise OverrunBufferException(o, len(self._buf))
    # bytearray indexing yields ints on both Python 2 and 3, replacing
    # the previous six.indexbytes() workaround.
    h = bytearray(_bin)
    # The format string literal was missing here (syntax error); restored
    # to the standard GUID text form.  The first three groups are stored
    # little-endian, the final two big-endian (mixed-endian GUID layout).
    return ("{:02x}{:02x}{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-"
            "{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}").format(
        h[3], h[2], h[1], h[0],
        h[5], h[4],
        h[7], h[6],
        h[8], h[9],
        h[10], h[11], h[12], h[13], h[14], h[15])
| 331,198
|
escape the given string such that it can be placed in an XML attribute, like:
<foo bar='$value'>
Args:
s (str): the string to escape.
Returns:
str: the escaped string.
|
def escape_attr(s):
    """
    Escape the given string so it can be placed in an XML attribute, like:

        <foo bar='$value'>

    Args:
      s (str): the string to escape.

    Returns:
      str: the escaped string.
    """
    quoted = xml.sax.saxutils.quoteattr(s)
    # Force pure-ASCII output, replacing anything else with XML char refs.
    ascii_safe = quoted.encode('ascii', 'xmlcharrefreplace').decode('ascii')
    # Strip characters that are not allowed in XML at all.
    return RESTRICTED_CHARS.sub('', ascii_safe)
| 331,211
|
escape the given string such that it can be placed in an XML value location, like:
<foo>
$value
</foo>
Args:
s (str): the string to escape.
Returns:
str: the escaped string.
|
def escape_value(s):
    """
    Escape the given string so it can be placed in an XML text node, like:

        <foo>
          $value
        </foo>

    Args:
      s (str): the string to escape.

    Returns:
      str: the escaped string.
    """
    escaped = xml.sax.saxutils.escape(s)
    # Force pure-ASCII output, replacing anything else with XML char refs.
    ascii_safe = escaped.encode('ascii', 'xmlcharrefreplace').decode('ascii')
    # Strip characters that are not allowed in XML at all.
    return RESTRICTED_CHARS.sub('', ascii_safe)
| 331,212
|
render the given root node using the given substitutions into XML.
Args:
root_node (e_nodes.RootNode): the node to render.
subs (list[str]): the substitutions that maybe included in the XML.
Returns:
str: the rendered XML document.
|
def render_root_node_with_subs(root_node, subs):
    """
    Render the given root node into XML text, resolving template
    substitutions from `subs`.

    Args:
      root_node (e_nodes.RootNode): the node to render.
      subs (list): substitution values; entries may be BXmlTypeNode
          (rendered recursively via render_root_node) or string-bearing
          nodes (escaped as text).

    Returns:
      str: the rendered XML fragment.
    """
    def rec(node, acc):
        # Depth-first walk appending text fragments to the accumulator.
        if isinstance(node, e_nodes.EndOfStreamNode):
            pass # intended
        elif isinstance(node, e_nodes.OpenStartElementNode):
            acc.append("<")
            # NOTE(review): the opening tag name is emitted unvalidated while
            # the closing tag below goes through validate_name() — confirm
            # this asymmetry is intentional.
            acc.append(node.tag_name())
            for child in node.children():
                if isinstance(child, e_nodes.AttributeNode):
                    acc.append(" ")
                    acc.append(validate_name(child.attribute_name().string()))
                    acc.append("=\"")
                    # TODO: should use xml.sax.saxutils.quoteattr here
                    # but to do so, we'd need to ensure we're not double-quoting this value.
                    rec(child.attribute_value(), acc)
                    acc.append("\"")
            acc.append(">")
            for child in node.children():
                rec(child, acc)
            acc.append("</")
            acc.append(validate_name(node.tag_name()))
            acc.append(">\n")
        elif isinstance(node, e_nodes.CloseStartElementNode):
            pass # intended
        elif isinstance(node, e_nodes.CloseEmptyElementNode):
            pass # intended
        elif isinstance(node, e_nodes.CloseElementNode):
            pass # intended
        elif isinstance(node, e_nodes.ValueNode):
            acc.append(escape_value(node.children()[0].string()))
        elif isinstance(node, e_nodes.AttributeNode):
            # Attributes are rendered inline by their parent element above.
            pass # intended
        elif isinstance(node, e_nodes.CDataSectionNode):
            acc.append("<![CDATA[")
            # TODO: is this correct escaping???
            acc.append(escape_value(node.cdata()))
            acc.append("]]>")
        elif isinstance(node, e_nodes.EntityReferenceNode):
            acc.append(escape_value(node.entity_reference()))
        elif isinstance(node, e_nodes.ProcessingInstructionTargetNode):
            acc.append(escape_value(node.processing_instruction_target()))
        elif isinstance(node, e_nodes.ProcessingInstructionDataNode):
            acc.append(escape_value(node.string()))
        elif isinstance(node, e_nodes.TemplateInstanceNode):
            # Nested template instances are not expected during rendering.
            raise UnexpectedElementException("TemplateInstanceNode")
        elif isinstance(node, e_nodes.NormalSubstitutionNode):
            # Replace the placeholder with the substitution at its index;
            # BXml substitutions are rendered recursively, others escaped.
            sub = subs[node.index()]
            if isinstance(sub, e_nodes.BXmlTypeNode):
                sub = render_root_node(sub.root())
            else:
                sub = escape_value(sub.string())
            acc.append(sub)
        elif isinstance(node, e_nodes.ConditionalSubstitutionNode):
            sub = subs[node.index()]
            if isinstance(sub, e_nodes.BXmlTypeNode):
                sub = render_root_node(sub.root())
            else:
                sub = escape_value(sub.string())
            acc.append(sub)
        elif isinstance(node, e_nodes.StreamStartNode):
            pass # intended
    acc = []
    for c in root_node.template().children():
        rec(c, acc)
    return "".join(acc)
| 331,213
|
Generate XML representations of the records in an EVTX chunk.
Does not include the XML <?xml... header.
Records are ordered by chunk.records()
Args:
chunk (Evtx.Chunk): the chunk to render.
Yields:
tuple[str, Evtx.Record]: the rendered XML document and the raw record.
|
def evtx_chunk_xml_view(chunk):
    """
    Generate XML representations of the records in an EVTX chunk.
    Does not include the XML <?xml... header.
    Records are yielded in chunk.records() order.

    Args:
      chunk (Evtx.Chunk): the chunk to render.

    Yields:
      tuple[str, Evtx.Record]: the rendered XML document and the raw record.
    """
    for record in chunk.records():
        yield evtx_record_xml_view(record), record
| 331,215
|
Generate XML representations of the records in an EVTX file.
Does not include the XML <?xml... header.
Records are ordered by file_header.chunks(), and then by chunk.records()
Args:
chunk (Evtx.FileHeader): the file header to render.
Yields:
tuple[str, Evtx.Record]: the rendered XML document and the raw record.
|
def evtx_file_xml_view(file_header):
    """
    Generate XML representations of the records in an EVTX file.
    Does not include the XML <?xml... header.
    Records are yielded by file_header.chunks(), then by chunk.records().

    Args:
      file_header (Evtx.FileHeader): the file header to render.

    Yields:
      tuple[str, Evtx.Record]: the rendered XML document and the raw record.
    """
    for chunk in file_header.chunks():
        for record in chunk.records():
            yield evtx_record_xml_view(record), record
| 331,216
|
Update the cache of all DNS entries and perform checks
Args:
*args: Optional list of arguments
**kwargs: Optional list of keyword arguments
Returns:
None
|
def run(self, *args, **kwargs):
    """
    Update the cache of all DNS entries and perform domain hijacking checks.

    Builds the current set of hijack issues from DNS zone records and
    CloudFront origins, reconciles them against issues already stored in the
    database (new / existing / fixed), and sends notifications for new
    issues, issues due for a re-alert, and fixed issues.

    Args:
        *args: Optional list of arguments
        **kwargs: Optional list of keyword arguments

    Returns:
        None
    """
    try:
        zones = list(DNSZone.get_all().values())
        # Keys are lower-cased: bucket/beanstalk names are matched
        # case-insensitively against DNS records.
        buckets = {k.lower(): v for k, v in S3Bucket.get_all().items()}
        dists = list(CloudFrontDist.get_all().values())
        ec2_public_ips = [x.public_ip for x in EC2Instance.get_all().values() if x.public_ip]
        beanstalks = {x.cname.lower(): x for x in BeanStalk.get_all().values()}
        existing_issues = DomainHijackIssue.get_all()
        issues = []
        # List of different types of domain audits
        auditors = [
            ElasticBeanstalkAudit(beanstalks),
            S3Audit(buckets),
            S3WithoutEndpointAudit(buckets),
            EC2PublicDns(ec2_public_ips),
        ]
        # region Build list of active issues
        # Run every auditor over every DNS record in every zone.
        for zone in zones:
            for record in zone.records:
                for auditor in auditors:
                    if auditor.match(record):
                        issues.extend(auditor.audit(record, zone))
        # Flag CloudFront distributions whose S3 origin bucket is not a
        # bucket we know about (a hijackable dangling origin).
        for dist in dists:
            for org in dist.origins:
                if org['type'] == 's3':
                    bucket = self.return_resource_name(org['source'], 's3')
                    if bucket not in buckets:
                        key = '{} ({})'.format(bucket, dist.type)
                        issues.append({
                            'key': key,
                            'value': 'S3Bucket {} doesnt exist on any known account. Referenced by {} on {}'.format(
                                bucket,
                                dist.domain_name,
                                dist.account,
                            )
                        })
        # endregion
        # region Process new, old, fixed issue lists
        old_issues = {}
        new_issues = {}
        fixed_issues = []
        for data in issues:
            # Deterministic issue id derived from the issue's key/value pairs.
            issue_id = get_resource_id('dhi', ['{}={}'.format(k, v) for k, v in data.items()])
            if issue_id in existing_issues:
                # Already known: mark as still present and clear any end time.
                issue = existing_issues[issue_id]
                if issue.update({'state': 'EXISTING', 'end': None}):
                    db.session.add(issue.issue)
                old_issues[issue_id] = issue
            else:
                properties = {
                    'issue_hash': issue_id,
                    'state': 'NEW',
                    'start': datetime.now(),
                    'end': None,
                    'source': data['key'],
                    'description': data['value']
                }
                new_issues[issue_id] = DomainHijackIssue.create(issue_id, properties=properties)
        db.session.commit()
        # Anything stored that was not rediscovered this run is fixed.
        for issue in list(existing_issues.values()):
            if issue.id not in new_issues and issue.id not in old_issues:
                fixed_issues.append(issue.to_json())
                db.session.delete(issue.issue)
        # endregion
        # Only alert if its been more than a day since the last alert
        alert_cutoff = datetime.now() - timedelta(hours=self.alert_frequency)
        old_alerts = []
        for issue_id, issue in old_issues.items():
            if issue.last_alert and issue.last_alert < alert_cutoff:
                if issue.update({'last_alert': datetime.now()}):
                    db.session.add(issue.issue)
                old_alerts.append(issue)
        db.session.commit()
        self.notify(
            [x.to_json() for x in new_issues.values()],
            [x.to_json() for x in old_alerts],
            fixed_issues
        )
    finally:
        # Discard any uncommitted state so the session is clean for the
        # next job regardless of how this run exited.
        db.session.rollback()
| 331,256
|
Send notifications (email, slack, etc.) for any issues that are currently open or have just been closed
Args:
new_issues (`list` of :obj:`DomainHijackIssue`): List of newly discovered issues
existing_issues (`list` of :obj:`DomainHijackIssue`): List of existing open issues
fixed_issues (`list` of `dict`): List of fixed issues
Returns:
None
|
def notify(self, new_issues, existing_issues, fixed_issues):
    """
    Send notifications (email, slack, etc.) for any issues that are
    currently open or have just been closed.  Does nothing when all three
    lists are empty.

    Args:
        new_issues (`list` of :obj:`DomainHijackIssue`): List of newly discovered issues
        existing_issues (`list` of :obj:`DomainHijackIssue`): List of existing open issues
        fixed_issues (`list` of `dict`): List of fixed issues

    Returns:
        None
    """
    combined = new_issues + existing_issues + fixed_issues
    if not combined:
        return
    # Pad the source column so the plain-text rendering lines up.
    maxlen = max(len(x['properties']['source']) for x in combined) + 2
    render_args = {
        'new_issues': new_issues,
        'existing_issues': existing_issues,
        'fixed_issues': fixed_issues,
        'maxlen': maxlen
    }
    issues_text = get_template('domain_hijacking.txt').render(**render_args)
    issues_html = get_template('domain_hijacking.html').render(**render_args)
    try:
        send_notification(
            subsystem=self.name,
            recipients=[NotificationContact('email', addr) for addr in self.recipients],
            subject=self.subject,
            body_html=issues_html,
            body_text=issues_text
        )
    except Exception as ex:
        self.log.exception('Failed sending notification email: {}'.format(ex))
| 331,257
|
Removes the trailing AWS domain from a DNS record
to return the resource name
e.g. bucketname.s3.amazonaws.com will return bucketname
Args:
record (str): DNS record
resource_type: AWS Resource type (i.e. S3 Bucket, Elastic Beanstalk, etc..)
|
def return_resource_name(self, record, resource_type):
    """
    Remove the trailing AWS domain from a DNS record to return the
    resource name, e.g. bucketname.s3.amazonaws.com returns bucketname.
    Falls back to returning the record unchanged for unknown resource
    types or unparseable records.

    Args:
        record (str): DNS record
        resource_type: AWS Resource type (i.e. S3 Bucket, Elastic Beanstalk, etc..)

    Returns:
        `str`: the extracted resource name, or `record` unchanged.
    """
    try:
        if resource_type == 's3':
            # Raw string: the previous non-raw literal contained invalid
            # escape sequences (\., \c) which are deprecated in Python 3.
            regex = re.compile(r'.*(\.(?:s3-|s3){1}(?:.*)?\.amazonaws\.com)')
            return record.replace(regex.match(record).group(1), '')
    except Exception as e:
        self.log.error('Unable to parse DNS record {} for resource type {}/{}'.format(record, resource_type, e))
    return record
| 331,258
|
Returns a dict representation of the object
Args:
is_admin (`bool`): If true, include information about the account that should be available only to admins
Returns:
`dict`
|
def to_json(self, is_admin=False):
    """
    Return a dict representation of the account object.

    Args:
        is_admin (`bool`): If true, include information about the account
            that should be available only to admins

    Returns:
        `dict`
    """
    # Fields visible to everyone.
    data = {
        'accountId': self.account_id,
        'accountName': self.account_name,
        'contacts': self.contacts,
    }
    if is_admin:
        # Admin-only fields, including all custom properties.
        data.update({
            'accountType': self.account_type,
            'enabled': self.enabled == 1,
            'requiredRoles': self.required_roles,
            'properties': {to_camelcase(prop.name): prop.value for prop in self.account.properties},
        })
    return data
| 331,272
|
Updates the object information based on live data, if there were any changes made. Any changes will be
automatically applied to the object, but will not be automatically persisted. You must manually call
`db.session.add(object)` on the object.
Args:
**kwargs (:obj:): AWS API Resource object fetched from AWS API
Returns:
`bool`
|
def update(self, **kwargs):
    """
    Update the object from live data if any changes were made.  Changes are
    applied to the object but not persisted; the caller must run
    `db.session.add(object)` to persist.

    Args:
        **kwargs (:obj:): AWS API Resource object fetched from AWS API

    Returns:
        `bool`: True if any property changed.
    """
    changed = False
    for prop in self.class_properties:
        key = prop['key']
        camel_key = to_camelcase(key)
        if camel_key not in kwargs:
            continue
        value = kwargs[camel_key]
        # Required properties must not be set to an empty/falsy value.
        if prop['required'] and not value:
            raise InquisitorError('Missing required property {}'.format(prop['name']))
        changed |= self.set_property(key, value)
    return changed
| 331,273
|
Returns the class object identified by `account_id`
Args:
account (`int`, `str`): Unique ID of the account to load from database
Returns:
`Account` object if found, else None
|
def get(account):
    """
    Return the typed account class object identified by `account`.

    Args:
        account (`int`, `str`): Unique ID of the account to load from database

    Returns:
        `Account` object if found, else None
    """
    account = Account.get(account)
    if account:
        # Resolve the concrete plugin class for this account's type.
        acct_type = AccountType.get(account.account_type_id).account_type
        account_class = get_plugin_by_name(PLUGIN_NAMESPACES['accounts'], acct_type)
        return account_class(account)
    return None
| 331,274
|
Returns a list of all accounts of a given type
Args:
include_disabled (`bool`): Include disabled accounts. Default: `True`
Returns:
list of account objects
|
def get_all(cls, include_disabled=True):
    """
    Return all accounts of this class's type, keyed by account id.

    Args:
        include_disabled (`bool`): Include disabled accounts. Default: `True`

    Returns:
        `dict` of account_id -> account object
    """
    # The abstract base cannot be queried directly.
    if cls == BaseAccount:
        raise InquisitorError('get_all on BaseAccount is not supported')
    type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id
    query = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)
    if not include_disabled:
        query = query.filter(Account.enabled == 1)
    rows = query.find(Account.account_type_id == type_id)
    return {row.account_id: cls(row) for row in rows}
| 331,276
|
Send a message to the `status_queue` to update a job's status.
Returns `True` if the message was sent, else `False`
Args:
object_id (`str`): ID of the job that was executed
status (:obj:`SchedulerStatus`): Status of the job
Returns:
`bool`
|
def send_status_message(self, object_id, status):
    """
    Send a message to the `status_queue` to update a job's status.

    Args:
        object_id (`str`): ID of the job that was executed
        status (:obj:`SchedulerStatus`): Status of the job

    Returns:
        `bool`: `True` if the message was sent, else `False`
    """
    try:
        payload = json.dumps({
            'id': object_id,
            'status': status
        })
        # FIFO queue delivery: requires a group id and a dedup id.
        self.status_queue.send_message(
            MessageBody=payload,
            MessageGroupId='job_status',
            MessageDeduplicationId=get_hash((object_id, status))
        )
        return True
    except Exception as ex:
        # NOTE(review): consider routing this through a logger instead of
        # print; preserved as-is to avoid changing behavior.
        print(ex)
        return False
| 331,285
|
Iterate through all AWS accounts and apply roles and policies from Github
Args:
*args: Optional list of arguments
**kwargs: Optional list of keyword arguments
Returns:
`None`
|
def run(self, *args, **kwargs):
    """
    Iterate through all enabled AWS accounts and apply roles and policies
    from Github.

    Args:
        *args: Optional list of arguments
        **kwargs: Optional list of keyword arguments

    Returns:
        `None`
    """
    enabled_accounts = AWSAccount.get_all(include_disabled=False)
    self.manage_policies(list(enabled_accounts.values()))
| 331,287
|
Returns a list of all the policies currently applied to an AWS Account. Returns a list containing all the
policies for the specified scope
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
scope (`str`): The policy scope to use. Default: Local
Returns:
:obj:`list` of `dict`
|
def get_policies_from_aws(client, scope='Local'):
    """
    Return all IAM policies for the given scope, following pagination.

    Args:
        client (:obj:`boto3.session.Session`): A boto3 IAM client
        scope (`str`): The policy scope to use. Default: Local

    Returns:
        :obj:`list` of `dict`
    """
    policies = []
    marker = None
    while True:
        if marker:
            page = client.list_policies(Marker=marker, Scope=scope)
        else:
            page = client.list_policies(Scope=scope)
        policies.extend(page['Policies'])
        # IsTruncated indicates more pages remain; Marker resumes the listing.
        if not page['IsTruncated']:
            return policies
        marker = page['Marker']
| 331,292
|
Returns a list of all the roles for an account. Returns a list containing all the roles for the account.
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
Returns:
:obj:`list` of `dict`
|
def get_roles(client):
    """
    Return all IAM roles for an account, following pagination.

    Args:
        client (:obj:`boto3.session.Session`): A boto3 IAM client

    Returns:
        :obj:`list` of `dict`
    """
    roles = []
    marker = None
    while True:
        if marker:
            page = client.list_roles(Marker=marker)
        else:
            page = client.list_roles()
        roles.extend(page['Roles'])
        # IsTruncated indicates more pages remain; Marker resumes the listing.
        if not page['IsTruncated']:
            return roles
        marker = page['Marker']
| 331,293
|
Import templates from disk into database
Reads all templates from disk and adds them to the database. By default, any template that has been modified by
the user will not be updated. This can however be changed by setting `force` to `True`, which causes all templates
to be imported regardless of status
Args:
force (`bool`): Force overwrite any templates with local changes made. Default: `False`
Returns:
`None`
|
def _import_templates(force=False):
    """
    Import templates from disk into the database.

    Reads all templates from disk and adds them to the database. By default,
    any template that has been modified by the user will not be updated.
    Setting `force` to `True` imports all templates regardless of status.

    Args:
        force (`bool`): Force overwrite any templates with local changes made. Default: `False`

    Returns:
        `None`
    """
    tmplpath = os.path.join(resource_filename('cloud_inquisitor', 'data'), 'templates')
    # Map template file name -> full path for everything found on disk.
    disk_templates = {f: os.path.join(root, f) for root, directory, files in os.walk(tmplpath) for f in files}
    db_templates = {tmpl.template_name: tmpl for tmpl in db.Template.find()}
    for name, template_file in disk_templates.items():
        with open(template_file, 'r') as f:
            body = f.read()
            disk_hash = get_hash(body)
        if name not in db_templates:
            # Brand-new template: insert and audit it.
            template = Template()
            template.template_name = name
            template.template = body
            db.session.add(template)
            auditlog(
                event='template.import',
                actor='init',
                data={
                    'template_name': name,
                    'template': body
                }
            )
            logger.info('Imported template {}'.format(name))
        else:
            template = db_templates[name]
            db_hash = get_hash(template.template)
            # Only touch templates whose content differs from disk.
            if db_hash != disk_hash:
                # Respect user modifications unless force is set.
                if force or not db_templates[name].is_modified:
                    template.template = body
                    db.session.add(template)
                    auditlog(
                        event='template.update',
                        actor='init',
                        data={
                            'template_name': name,
                            # NOTE(review): template.template was already set
                            # to body above, so this diff compares identical
                            # strings — likely should diff the old content.
                            'template_diff': diff(template.template, body)
                        }
                    )
                    logger.info('Updated template {}'.format(name))
                else:
                    logger.warning(
                        'Updated template available for {}. Will not import as it would'
                        ' overwrite user edited content and force is not enabled'.format(name)
                    )
| 331,307
|
Modifies the response object prior to sending it to the client. Used to add CORS headers to the request
Args:
response (response): Flask response object
Returns:
`None`
|
def after_request(response):
    """
    Modify the response object prior to sending it to the client by adding
    CORS headers.

    Args:
        response (response): Flask response object

    Returns:
        The same response object, with CORS headers attached.
    """
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE'),
    )
    for header, value in cors_headers:
        response.headers.add(header, value)
    return response
| 331,310
|
Register a given authentication system with the framework. Returns `True` if the `auth_system` is registered
as the active auth system, else `False`
Args:
auth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register
Returns:
`bool`
|
def register_auth_system(self, auth_system):
    """
    Register a given authentication system with the framework.

    Args:
        auth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register

    Returns:
        `bool`: `True` if `auth_system` was registered as the active auth
        system, else `False`.
    """
    settings = dbconfig.get('auth_system')
    # Make sure the system is listed as available in the stored config.
    if auth_system.name not in settings['available']:
        settings['available'].append(auth_system.name)
        dbconfig.set('default', 'auth_system', DBCChoice(settings))
    # Only the configured (enabled) system becomes active.
    if auth_system.name != settings['enabled'][0]:
        logger.debug('Not trying to load the {} auth system as it is disabled by config'.format(auth_system.name))
        return False
    self.active_auth_system = auth_system
    auth_system().bootstrap()
    logger.debug('Registered {} as the active auth system'.format(auth_system.name))
    return True
| 331,314
|
Registers a views menu items into the metadata for the application. Skip if the item is already present
Args:
items (`list` of `MenuItem`): A list of `MenuItem`s
Returns:
`None`
|
def register_menu_item(self, items):
    """
    Register a view's menu items into the application metadata, skipping
    any item already present in its group.

    Args:
        items (`list` of `MenuItem`): A list of `MenuItem`s

    Returns:
        `None`
    """
    for item in items:
        if item.group not in self.menu_items:
            logger.warning('Tried registering menu item to unknown group {}'.format(item.group))
            continue
        bucket = self.menu_items[item.group]['items']
        # Only add the menu item if we don't already have it registered.
        if item not in bucket:
            bucket.append(item)
| 331,315
|
Iterates all entry points for views and auth systems and dynamically load and register the routes with Flask
Args:
app (`CINQFlask`): CINQFlask object to register views for
Returns:
`None`
|
def register_views(self, app):
    """
    Iterate all entry points for views and auth systems and dynamically
    load and register their routes with Flask.  Exits the process if no
    auth system ends up active.

    Args:
        app (`CINQFlask`): CINQFlask object to register views for

    Returns:
        `None`
    """
    self.add_resource(LoginRedirectView, '/auth/login')
    self.add_resource(LogoutRedirectView, '/auth/logout')
    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auth']['plugins']:
        cls = entry_point.load()
        app.available_auth_systems[cls.name] = cls
        # Only the active auth system gets its views registered.
        if app.register_auth_system(cls):
            for vcls in cls.views:
                self.add_resource(vcls, *vcls.URLS)
                logger.debug('Registered auth system view {} for paths: {}'.format(
                    cls.__name__,
                    ', '.join(vcls.URLS)
                ))
    # Refuse to start without a working auth system.
    if not app.active_auth_system:
        logger.error('No auth systems active, please enable an auth system and then start the system again')
        sys.exit(-1)
    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.views']['plugins']:
        view = entry_point.load()
        self.add_resource(view, *view.URLS)
        app.register_menu_item(view.MENU_ITEMS)
        logger.debug('Registered view {} for paths: {}'.format(view.__name__, ', '.join(view.URLS)))
| 331,318
|
Helper function to interact with the CloudFlare API.
Args:
account (:obj:`CloudFlareAccount`): CloudFlare Account object
path (`str`): URL endpoint to communicate with
args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume
Returns:
`dict`
|
def __cloudflare_request(self, *, account, path, args=None):
    """
    Helper to interact with the CloudFlare API, lazily creating one
    authenticated session per account.

    Args:
        account (:obj:`CloudFlareAccount`): CloudFlare Account object
        path (`str`): URL endpoint to communicate with
        args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume

    Returns:
        `dict`: the decoded JSON response body.
    """
    args = args or {}
    # Create and cache an authenticated session the first time this
    # account is used.
    if not self.cloudflare_initialized[account.account_id]:
        sess = requests.Session()
        sess.headers.update({
            'X-Auth-Email': account.email,
            'X-Auth-Key': account.api_key,
            'Content-Type': 'application/json'
        })
        self.cloudflare_session[account.account_id] = sess
        self.cloudflare_initialized[account.account_id] = True
    # Default to the maximum page size unless the caller overrides it.
    args.setdefault('per_page', 100)
    response = self.cloudflare_session[account.account_id].get(account.endpoint + path, params=args)
    if response.status_code != 200:
        raise CloudFlareError('Request failed: {}'.format(response.text))
    return response.json()
| 331,327
|
Helper function to list all zones registered in the CloudFlare system. Returns a `list` of the zones
Args:
account (:obj:`CloudFlareAccount`): A CloudFlare Account object
**kwargs (`dict`): Extra arguments to pass to the API endpoint
Returns:
`list` of `dict`
|
def __cloudflare_list_zones(self, *, account, **kwargs):
    """
    Helper to list all zones registered in the CloudFlare system,
    following pagination.

    Args:
        account (:obj:`CloudFlareAccount`): A CloudFlare Account object
        **kwargs (`dict`): Extra arguments to pass to the API endpoint

    Returns:
        `list` of `dict`
    """
    done = False
    zones = []
    page = 1
    while not done:
        kwargs['page'] = page
        response = self.__cloudflare_request(account=account, path='/zones', args=kwargs)
        info = response['result_info']
        # ">=" (was "==") so the loop terminates even if total_pages
        # shrinks between requests; matches __cloudflare_list_zone_records.
        if 'total_pages' not in info or page >= info['total_pages']:
            done = True
        else:
            page += 1
        zones += response['result']
    return zones
| 331,328
|
Helper function to list all records on a CloudFlare DNS Zone. Returns a `dict` containing the records and
their information.
Args:
account (:obj:`CloudFlareAccount`): A CloudFlare Account object
zoneID (`int`): Internal CloudFlare ID of the DNS zone
**kwargs (`dict`): Additional arguments to be consumed by the API endpoint
Returns:
:obj:`list` of `dict`
|
def __cloudflare_list_zone_records(self, *, account, zoneID, **kwargs):
    """
    Helper to list all records on a CloudFlare DNS zone, following
    pagination and merging records that share a name.

    Args:
        account (:obj:`CloudFlareAccount`): A CloudFlare Account object
        zoneID (`int`): Internal CloudFlare ID of the DNS zone
        **kwargs (`dict`): Additional arguments to be consumed by the API endpoint

    Returns:
        `list` of `dict`: one entry per record name with sorted values.
    """
    records = {}
    page = 1
    while True:
        kwargs['page'] = page
        response = self.__cloudflare_request(
            account=account,
            path='/zones/{}/dns_records'.format(zoneID),
            args=kwargs
        )
        # Merge this page's records; same-name records accumulate their
        # content values in sorted order.
        for record in response['result']:
            name = record['name']
            if name in records:
                records[name]['value'] = sorted(records[name]['value'] + [record['content']])
            else:
                records[name] = {
                    'name': name,
                    'value': sorted([record['content']]),
                    'type': record['type']
                }
        # Stop once every page has been consumed.
        info = response['result_info']
        if 'total_pages' not in info or page >= info['total_pages']:
            break
        page += 1
    return list(records.values())
| 331,329
|
Entry point for the scheduler
Args:
*args: Optional arguments
**kwargs: Optional keyword arguments
Returns:
None
|
def run(self, *args, **kwargs):
    """
    Entry point for the CloudTrail scheduler: ensures the log bucket and
    SQS policy exist, then runs CloudTrail enforcement per account.

    Args:
        *args: Optional arguments
        **kwargs: Optional keyword arguments

    Returns:
        None
    """
    accounts = list(AWSAccount.get_all(include_disabled=False).values())
    # S3 Bucket config
    bucket_acl = get_template('cloudtrail_s3_bucket_policy.json')
    bucket_name = self.dbconfig.get('bucket_name', self.ns)
    bucket_region = self.dbconfig.get('bucket_region', self.ns, 'us-west-2')
    bucket_account = AWSAccount.get(self.dbconfig.get('bucket_account', self.ns))
    CloudTrail.create_s3_bucket(bucket_name, bucket_region, bucket_account, bucket_acl)
    self.validate_sqs_policy(accounts)
    for account in accounts:
        CloudTrail(account, bucket_name, bucket_region, self.log).run()
| 331,330
|
Given a list of accounts, ensures that the SQS policy allows all the accounts to write to the queue
Args:
accounts (`list` of :obj:`Account`): List of accounts
Returns:
`None`
|
def validate_sqs_policy(self, accounts):
    """
    Given a list of accounts, ensure the SQS queue policy allows every
    account's SNS topic to write to the queue, adding any missing ARNs.

    Args:
        accounts (`list` of :obj:`Account`): List of accounts

    Returns:
        `None`
    """
    sqs_queue_name = self.dbconfig.get('sqs_queue_name', self.ns)
    sqs_queue_region = self.dbconfig.get('sqs_queue_region', self.ns)
    sqs_account = AWSAccount.get(self.dbconfig.get('sqs_queue_account', self.ns))
    session = get_aws_session(sqs_account)
    sqs = session.client('sqs', region_name=sqs_queue_region)
    sqs_queue_url = sqs.get_queue_url(QueueName=sqs_queue_name, QueueOwnerAWSAccountId=sqs_account.account_number)
    sqs_attribs = sqs.get_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], AttributeNames=['Policy'])
    policy = json.loads(sqs_attribs['Attributes']['Policy'])
    for account in accounts:
        arn = 'arn:aws:sns:*:{}:{}'.format(account.account_number, sqs_queue_name)
        # NOTE(review): assumes the ArnEquals condition lives on
        # Statement[0] of the queue policy — confirm the policy template
        # guarantees that shape.
        if arn not in policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn']:
            self.log.warning('SQS policy is missing condition for ARN {}'.format(arn))
            policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn'].append(arn)
            sqs.set_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], Attributes={'Policy': json.dumps(policy)})
| 331,331
|
Creates an SNS topic if needed. Returns the ARN if the created SNS topic
Args:
region (str): Region name
Returns:
`str`
|
def create_sns_topic(self, region):
    """
    Create (or return the existing) SNS topic for CloudTrail in a region
    and attach a policy allowing CloudTrail to publish to it.

    Args:
        region (str): Region name

    Returns:
        `str`: ARN of the SNS topic.
    """
    sns = self.session.client('sns', region_name=region)
    self.log.info('Creating SNS topic for {}/{}'.format(self.account, region))
    # create_topic is idempotent: it returns the existing topic's ARN.
    topic_arn = sns.create_topic(Name=self.topic_name)['TopicArn']
    # Allow CloudTrail to publish messages with a policy update.
    policy = get_template('cloudtrail_sns_policy.json').render(
        region=region,
        account_id=self.account.account_number,
        topic_name=self.topic_name
    )
    sns.set_topic_attributes(TopicArn=topic_arn, AttributeName='Policy', AttributeValue=policy)
    auditlog(
        event='cloudtrail.create_sns_topic',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    return topic_arn
| 331,335
|
Validates SQS subscription to the SNS topic. Returns `True` if subscribed or `False` if not subscribed
or topic is missing
Args:
region (str): Name of AWS Region
Returns:
`bool`
|
def validate_sns_topic_subscription(self, region):
    """
    Validate the SQS subscription to the SNS topic.

    Args:
        region (str): Name of AWS Region

    Returns:
        `bool`: `True` if a confirmed subscription exists; `False` if the
        subscription is missing, pending confirmation, or the topic cannot
        be listed.
    """
    sns = self.session.client('sns', region_name=region)
    arn = 'arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name)
    try:
        data = sns.list_subscriptions_by_topic(TopicArn=arn)
    except ClientError as ex:
        self.log.error('Failed to list subscriptions by topic in {} ({}): {}'.format(
            self.account.account_name,
            region,
            ex
        ))
        return False
    for sub in data['Subscriptions']:
        if sub['Endpoint'] == self.sqs_queue:
            # A pending subscription is not usable yet.
            if sub['SubscriptionArn'] == 'PendingConfirmation':
                self.log.warning('Subscription pending confirmation for {} in {}'.format(
                    self.account.account_name,
                    region
                ))
                return False
            return True
    return False
| 331,336
|
Subscribe SQS to the SNS topic. Returns the ARN of the SNS Topic subscribed
Args:
region (`str`): Name of the AWS region
Returns:
`str`
|
def subscribe_sns_topic_to_sqs(self, region):
    """
    Subscribe the SQS queue to the region's SNS topic.

    Args:
        region (`str`): Name of the AWS region

    Returns:
        `str`: ARN of the SNS topic subscribed.
    """
    arn = 'arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name)
    topic = self.session.resource('sns', region_name=region).Topic(arn)
    topic.subscribe(Protocol='sqs', Endpoint=self.sqs_queue)
    auditlog(
        event='cloudtrail.subscribe_sns_topic_to_sqs',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    return topic.attributes['TopicArn']
| 331,337
|
Creates a new CloudTrail Trail
Args:
region (str): Name of the AWS region
Returns:
`None`
|
def create_cloudtrail(self, region):
    """
    Create a new multi-region CloudTrail Trail delivering to the shared S3
    bucket and SNS topic, and wire the topic up to the SQS queue.

    Args:
        region (str): Name of the AWS region

    Returns:
        `None`
    """
    ct = self.session.client('cloudtrail', region_name=region)
    # Creating the sns topic for the trail prior to creation
    self.create_sns_topic(region)
    ct.create_trail(
        Name=self.trail_name,
        S3BucketName=self.bucket_name,
        S3KeyPrefix=self.account.account_name,
        IsMultiRegionTrail=True,
        IncludeGlobalServiceEvents=True,
        SnsTopicName=self.topic_name
    )
    # Deliver the trail's SNS notifications into our SQS queue.
    self.subscribe_sns_topic_to_sqs(region)
    auditlog(
        event='cloudtrail.create_cloudtrail',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Created CloudTrail for {} in {} ({})'.format(self.account, region, self.bucket_name))
| 331,338
|
Enable SNS notifications for a Trail
Args:
region (`str`): Name of the AWS region
trailName (`str`): Name of the CloudTrail Trail
Returns:
`None`
|
def enable_sns_notification(self, region, trailName):
    """Turn on SNS notifications for an existing CloudTrail Trail.

    Args:
        region (`str`): Name of the AWS region
        trailName (`str`): Name of the CloudTrail Trail

    Returns:
        `None`
    """
    cloudtrail = self.session.client('cloudtrail', region_name=region)
    cloudtrail.update_trail(Name=trailName, SnsTopicName=self.topic_name)

    auditlog(
        event='cloudtrail.enable_sns_notification',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Enabled SNS notifications for trail {} in {}/{}'.format(
        trailName,
        self.account.account_name,
        region
    ))
| 331,339
|
Turn on logging for a CloudTrail Trail
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
Returns:
`None`
|
def start_logging(self, region, name):
    """Turn on logging for a CloudTrail Trail.

    Args:
        region (`str`): Name of the AWS region
        name (`str`): Name of the CloudTrail Trail

    Returns:
        `None`
    """
    cloudtrail = self.session.client('cloudtrail', region_name=region)
    cloudtrail.start_logging(Name=name)

    auditlog(
        event='cloudtrail.start_logging',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Enabled logging for {} ({})'.format(name, region))
| 331,340
|
Sets the S3 prefix for a CloudTrail Trail
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
Returns:
`None`
|
def set_s3_prefix(self, region, name):
    """Set a CloudTrail Trail's S3 key prefix to the account name.

    Args:
        region (`str`): Name of the AWS region
        name (`str`): Name of the CloudTrail Trail

    Returns:
        `None`
    """
    cloudtrail = self.session.client('cloudtrail', region_name=region)
    cloudtrail.update_trail(Name=name, S3KeyPrefix=self.account.account_name)

    auditlog(
        event='cloudtrail.set_s3_prefix',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Updated S3KeyPrefix to {0} for {0}/{1}'.format(
        self.account.account_name,
        region
    ))
| 331,341
|
Sets the S3 bucket location for logfile delivery
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
bucketName (`str`): Name of the S3 bucket to deliver log files to
Returns:
`None`
|
def set_s3_bucket(self, region, name, bucketName):
    """Point a CloudTrail Trail's log file delivery at an S3 bucket.

    Args:
        region (`str`): Name of the AWS region
        name (`str`): Name of the CloudTrail Trail
        bucketName (`str`): Name of the S3 bucket to deliver log files to

    Returns:
        `None`
    """
    cloudtrail = self.session.client('cloudtrail', region_name=region)
    cloudtrail.update_trail(Name=name, S3BucketName=bucketName)

    auditlog(
        event='cloudtrail.set_s3_bucket',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Updated S3BucketName to {} for {} in {}/{}'.format(
        bucketName,
        name,
        self.account.account_name,
        region
    ))
| 331,342
|
Creates the S3 bucket on the account specified as the destination account for log files
Args:
bucket_name (`str`): Name of the S3 bucket
bucket_region (`str`): AWS Region for the bucket
bucket_account (:obj:`Account`): Account to create the S3 bucket in
template (:obj:`Template`): Jinja2 Template object for the bucket policy
Returns:
`None`
|
def create_s3_bucket(cls, bucket_name, bucket_region, bucket_account, template):
    """Ensure the log-delivery S3 bucket exists and apply the rendered bucket policy.

    Args:
        bucket_name (`str`): Name of the S3 bucket
        bucket_region (`str`): AWS Region for the bucket
        bucket_account (:obj:`Account`): Account to create the S3 bucket in
        template (:obj:`Template`): Jinja2 Template object for the bucket policy

    Returns:
        `None`

    Raises:
        Exception: If the bucket exists but is not accessible, or creation fails
        Warning: If applying the bucket policy fails
    """
    s3 = get_aws_session(bucket_account).client('s3', region_name=bucket_region)

    # Check to see if the bucket already exists and if we have access to it
    try:
        s3.head_bucket(Bucket=bucket_name)
    except ClientError as ex:
        status_code = ex.response['ResponseMetadata']['HTTPStatusCode']

        # Bucket exists and we do not have access
        if status_code == 403:
            raise Exception('Bucket {} already exists but we do not have access to it and so cannot continue'.format(
                bucket_name
            ))

        # Bucket does not exist, lets create one
        elif status_code == 404:
            try:
                s3.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={
                        'LocationConstraint': bucket_region
                    }
                )

                auditlog(
                    event='cloudtrail.create_s3_bucket',
                    actor=cls.ns,
                    data={
                        'account': bucket_account.account_name,
                        'bucket_region': bucket_region,
                        'bucket_name': bucket_name
                    }
                )
            except Exception:
                raise Exception('An error occured while trying to create the bucket, cannot continue')
        # NOTE(review): any other status code falls through silently and we still
        # attempt to apply the bucket policy below — confirm this is intentional

    # The policy is (re)applied even when the bucket already existed
    try:
        bucket_acl = template.render(
            bucket_name=bucket_name,
            account_id=bucket_account.account_number
        )

        s3.put_bucket_policy(Bucket=bucket_name, Policy=bucket_acl)
    except Exception as ex:
        raise Warning('An error occurred while setting bucket policy: {}'.format(ex))
| 331,343
|
Return the ARN of the IAM Role on the provided account as a string
Args:
account (:obj:`Account`): Account where to locate the role
Returns:
`str`
|
def confirm_iam_role(self, account):
    """Return the ARN of the auditing IAM role, creating the role if it is missing.

    Args:
        account (:obj:`Account`): Account where to locate (or create) the role

    Returns:
        `str`: ARN of the IAM role, or `None` if an unexpected error occurred
    """
    try:
        iam = self.session.client('iam')
        rolearn = iam.get_role(RoleName=self.role_name)['Role']['Arn']
        return rolearn
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchEntity':
            # Role does not exist yet: create it and return the new ARN to the
            # caller (previously the created role's ARN was discarded and the
            # function returned None despite its documented contract)
            return self.create_iam_role(account)
        else:
            raise
    except Exception as e:
        self.log.exception('Failed validating IAM role for VPC Flow Log Auditing for {}'.format(e))
| 331,345
|
Create a new IAM role. Returns the ARN of the newly created role
Args:
account (:obj:`Account`): Account where to create the IAM role
Returns:
`str`
|
def create_iam_role(self, account):
    """Create the VPC Flow Logs IAM role with its inline policy.

    Args:
        account (:obj:`Account`): Account where to create the IAM role

    Returns:
        `str`: ARN of the newly created role, or `None` on failure
    """
    try:
        trust_doc = get_template('vpc_flow_logs_iam_role_trust.json').render()
        policy_doc = get_template('vpc_flow_logs_role_policy.json').render()

        iam = self.session.client('iam')
        role_arn = iam.create_role(
            Path='/',
            RoleName=self.role_name,
            AssumeRolePolicyDocument=trust_doc
        )['Role']['Arn']

        # An inline policy avoids conflicts and the managed-policy attachment limit
        iam.put_role_policy(
            RoleName=self.role_name,
            PolicyName='VpcFlowPolicy',
            PolicyDocument=policy_doc
        )

        self.log.debug('Created VPC Flow Logs role & policy for {}'.format(account.account_name))
        auditlog(
            event='vpc_flow_logs.create_iam_role',
            actor=self.ns,
            data={
                'account': account.account_name,
                'roleName': self.role_name,
                'trustRelationship': trust_doc,
                'inlinePolicy': policy_doc
            }
        )
        return role_arn
    except Exception:
        self.log.exception('Failed creating the VPC Flow Logs role for {}.'.format(account))
| 331,346
|
Create a new CloudWatch log group based on the VPC Name if none exists. Returns `True` if successful
Args:
account (:obj:`Account`): Account to create the log group in
region (`str`): Region to create the log group in
vpcname (`str`): Name of the VPC the log group is for
Returns:
`bool`
|
def confirm_cw_log(self, account, region, vpcname):
    """Ensure a CloudWatch Logs log group named after the VPC exists, creating it if needed.

    Args:
        account (:obj:`Account`): Account to create the log group in
        region (`str`): Region to create the log group in
        vpcname (`str`): Name of the VPC the log group is for

    Returns:
        `bool`: True on success; None (implicitly) if an exception occurred
    """
    try:
        cw = self.session.client('logs', region)

        # Paginate through every existing log group in the region
        token = None
        log_groups = []
        while True:
            result = cw.describe_log_groups() if not token else cw.describe_log_groups(nextToken=token)
            token = result.get('nextToken')
            log_groups.extend([x['logGroupName'] for x in result.get('logGroups', [])])

            if not token:
                break

        if vpcname not in log_groups:
            cw.create_log_group(logGroupName=vpcname)

            # Record the log group name on the VPC resource in our database
            cw_vpc = VPC.get(vpcname)
            cw_vpc.set_property('vpc_flow_logs_log_group', vpcname)

            self.log.info('Created log group {}/{}/{}'.format(account.account_name, region, vpcname))
            auditlog(
                event='vpc_flow_logs.create_cw_log_group',
                actor=self.ns,
                data={
                    'account': account.account_name,
                    'region': region,
                    'log_group_name': vpcname,
                    'vpc': vpcname
                }
            )
        return True
    except Exception:
        self.log.exception('Failed creating log group for {}/{}/{}.'.format(
            account,
            region, vpcname
        ))
| 331,347
|
Create a new VPC Flow log
Args:
account (:obj:`Account`): Account to create the flow in
region (`str`): Region to create the flow in
vpc_id (`str`): ID of the VPC to create the flow for
iam_role_arn (`str`): ARN of the IAM role used to post logs to the log group
Returns:
`None`
|
def create_vpc_flow_logs(self, account, region, vpc_id, iam_role_arn):
    """Enable VPC Flow Logs for a VPC, delivering to a log group named after the VPC.

    Args:
        account (:obj:`Account`): Account to create the flow in
        region (`str`): Region to create the flow in
        vpc_id (`str`): ID of the VPC to create the flow for
        iam_role_arn (`str`): ARN of the IAM role used to post logs to the log group

    Returns:
        `None`
    """
    try:
        ec2 = self.session.client('ec2', region)
        ec2.create_flow_logs(
            ResourceIds=[vpc_id],
            ResourceType='VPC',
            TrafficType='ALL',
            LogGroupName=vpc_id,
            DeliverLogsPermissionArn=iam_role_arn
        )

        # Mark the VPC resource as actively logging in our database
        vpc_resource = VPC.get(vpc_id)
        vpc_resource.set_property('vpc_flow_logs_status', 'ACTIVE')

        self.log.info('Enabled VPC Logging {}/{}/{}'.format(account, region, vpc_id))
        auditlog(
            event='vpc_flow_logs.create_vpc_flow',
            actor=self.ns,
            data={
                'account': account.account_name,
                'region': region,
                'vpcId': vpc_id,
                'arn': iam_role_arn
            }
        )
    except Exception:
        self.log.exception('Failed creating VPC Flow Logs for {}/{}/{}.'.format(
            account,
            region,
            vpc_id
        ))
| 331,348
|
Returns a list of contacts for an issue
Args:
issue (:obj:`RequiredTagsIssue`): Issue record
Returns:
`list` of `dict`
|
def get_contacts(self, issue):
    """Return the list of notification contacts for an issue.

    Combines the account-level contacts with any per-resource owner emails.

    Args:
        issue (:obj:`RequiredTagsIssue`): Issue record

    Returns:
        `list` of `dict`
    """
    # If the resource has been deleted, return an empty list to trigger
    # issue deletion without notification
    if not issue.resource:
        return []

    # Copy the account contact list so appending resource owners never
    # mutates the shared account.contacts object (previously this appended
    # directly to the account's own list)
    contacts = list(issue.resource.account.contacts)
    try:
        resource_owners = issue.resource.get_owner_emails()
        # get_owner_emails may return something other than a list; only
        # iterate when it actually is one
        if isinstance(resource_owners, list):
            for owner_email in resource_owners:
                contacts.append({'type': 'email', 'value': owner_email})
    except AttributeError:
        # Resource type does not implement get_owner_emails
        pass

    return contacts
| 331,362
|
Returns a list of actions to be executed
Args:
issues (`list` of :obj:`RequiredTagsIssue`): List of issues
Returns:
`list` of `dict`
|
def get_actions(self, issues):
    """Build the list of actions to execute for the given issues.

    Args:
        issues (`list` of :obj:`RequiredTagsIssue`): List of issues

    Returns:
        `list` of `dict`
    """
    actions = []
    try:
        for issue in issues:
            item = self.determine_action(issue)
            # Ignored issues are dropped; everything else gets its owners attached
            if item['action'] == AuditActions.IGNORE:
                continue
            item['owners'] = self.get_contacts(issue)
            actions.append(item)
    finally:
        # Discard any uncommitted changes left behind by determine_action
        db.session.rollback()
    return actions
| 331,363
|
Determine if we need to trigger an alert
Args:
action_schedule (`list`): A list contains the alert schedule
issue_creation_time (`int`): Time we create the issue
last_alert (`str`): Time we sent the last alert
Returns:
(`None` or `str`)
None if no alert should be sent. Otherwise return the alert we should send
|
def determine_alert(self, action_schedule, issue_creation_time, last_alert):
    """Decide whether an alert is due for an issue.

    Args:
        action_schedule (`list`): List of alert schedule strings (e.g. '2 days')
        issue_creation_time (`int`): Timestamp the issue was created
        last_alert (`str`): Schedule entry for the most recent alert sent

    Returns:
        (`None` or `str`): The schedule entry to alert on, or None if no alert is due
    """
    issue_age = time.time() - issue_creation_time
    last_alert_age = pytimeparse.parse(last_alert)

    # Map each schedule entry to its duration in seconds and check smallest first
    schedule_by_seconds = {pytimeparse.parse(entry): entry for entry in action_schedule}
    for seconds in sorted(schedule_by_seconds):
        if last_alert_age < seconds <= issue_age and last_alert_age != seconds:
            return schedule_by_seconds[seconds]
    return None
| 331,364
|
Determine the action we should take for the issue
Args:
issue: Issue to determine action for
Returns:
`dict`
|
def determine_action(self, issue):
    """Determine the audit action (ignore/alert/stop/remove) to take for an issue.

    May update the issue's `last_alert` property and commits the DB session
    before returning.

    Args:
        issue: Issue to determine action for

    Returns:
        `dict`: Action descriptor (action, description, resource wrapper,
        schedule info, owners placeholder, notes and missing tags)
    """
    resource_type = self.resource_types[issue.resource.resource_type_id]
    # Use the per-resource-type schedule when defined, else the wildcard entry
    issue_alert_schedule = self.alert_schedule[resource_type] if \
        resource_type in self.alert_schedule \
        else self.alert_schedule['*']

    action_item = {
        'action': None,
        'action_description': None,
        'last_alert': issue.last_alert,
        'issue': issue,
        'resource': self.resource_classes[self.resource_types[issue.resource.resource_type_id]](issue.resource),
        'owners': [],
        'stop_after': issue_alert_schedule['stop'],
        'remove_after': issue_alert_schedule['remove'],
        'notes': issue.notes,
        'missing_tags': issue.missing_tags
    }

    time_elapsed = time.time() - issue.created
    stop_schedule = pytimeparse.parse(issue_alert_schedule['stop'])
    remove_schedule = pytimeparse.parse(issue_alert_schedule['remove'])

    if self.collect_only:
        # Collect-only mode never takes action on resources
        action_item['action'] = AuditActions.IGNORE
    elif remove_schedule and time_elapsed >= remove_schedule:
        action_item['action'] = AuditActions.REMOVE
        action_item['action_description'] = 'Resource removed'
        # last_alert is stored as the schedule offset in seconds, not a timestamp
        action_item['last_alert'] = remove_schedule
        if issue.update({'last_alert': remove_schedule}):
            db.session.add(issue.issue)
    elif stop_schedule and time_elapsed >= stop_schedule:
        action_item['action'] = AuditActions.STOP
        action_item['action_description'] = 'Resource stopped'
        action_item['last_alert'] = stop_schedule
        if issue.update({'last_alert': stop_schedule}):
            db.session.add(issue.issue)
    else:
        # Not yet old enough to stop/remove; see whether an alert is due
        alert_selection = self.determine_alert(
            issue_alert_schedule['alert'],
            issue.get_property('created').value,
            issue.get_property('last_alert').value
        )
        if alert_selection:
            action_item['action'] = AuditActions.ALERT
            action_item['action_description'] = '{} alert'.format(alert_selection)
            action_item['last_alert'] = alert_selection
            if issue.update({'last_alert': alert_selection}):
                db.session.add(issue.issue)
        else:
            # No alert due yet
            action_item['action'] = AuditActions.IGNORE
    db.session.commit()
    return action_item
| 331,365
|
Process the actions we want to take
Args:
actions (`list`): List of actions we want to take
Returns:
`list` of notifications
|
def process_actions(self, actions):
    """Execute the given audit actions and build the per-contact notification map.

    Args:
        actions (`list`): List of action dicts as produced by get_actions

    Returns:
        `dict`: NotificationContact -> {'fixed': [...], 'not_fixed': [...]}
    """
    notices = {}
    # Cache of NotificationContact objects keyed on the contact value
    notification_contacts = {}
    for action in actions:
        resource = action['resource']
        action_status = ActionStatus.SUCCEED
        try:
            if action['action'] == AuditActions.REMOVE:
                action_status = self.process_action(
                    resource,
                    AuditActions.REMOVE
                )
                # Removing the resource closes out the issue
                if action_status == ActionStatus.SUCCEED:
                    db.session.delete(action['issue'].issue)

            elif action['action'] == AuditActions.STOP:
                action_status = self.process_action(
                    resource,
                    AuditActions.STOP
                )
                if action_status == ActionStatus.SUCCEED:
                    action['issue'].update({
                        'missing_tags': action['missing_tags'],
                        'notes': action['notes'],
                        'last_alert': action['last_alert'],
                        'state': action['action']
                    })

            elif action['action'] == AuditActions.FIXED:
                db.session.delete(action['issue'].issue)

            elif action['action'] == AuditActions.ALERT:
                action['issue'].update({
                    'missing_tags': action['missing_tags'],
                    'notes': action['notes'],
                    'last_alert': action['last_alert'],
                    'state': action['action']
                })
            db.session.commit()

            if action_status == ActionStatus.SUCCEED:
                # De-duplicate contacts (issue owners + permanent recipients) by
                # round-tripping the dicts through hashable tuples
                for owner in [
                    dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)}
                ]:
                    if owner['value'] not in notification_contacts:
                        contact = NotificationContact(type=owner['type'], value=owner['value'])
                        notification_contacts[owner['value']] = contact
                        notices[contact] = {
                            'fixed': [],
                            'not_fixed': []
                        }
                    else:
                        contact = notification_contacts[owner['value']]

                    if action['action'] == AuditActions.FIXED:
                        notices[contact]['fixed'].append(action)
                    else:
                        notices[contact]['not_fixed'].append(action)
        except Exception as ex:
            # Best-effort: a failure on one resource must not block the rest
            self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format(
                action['resource'].account.account_name,
                action['resource'].id,
                action['resource'],
                ex
            ))
    return notices
| 331,366
|
Check whether a tag value is valid
Args:
key: A tag key
value: A tag value
Returns:
`(True or False)`
A boolean indicating whether or not the value is valid
|
def validate_tag(self, key, value):
    """Check whether a single tag value is valid for its key.

    Args:
        key: A tag key
        value: A tag value

    Returns:
        `bool`: True when the value is acceptable for the key
    """
    # Only the 'owner' and GDPR tags have value constraints; anything else passes
    if key == 'owner':
        return validate_email(value, self.partial_owner_match)
    if key == self.gdpr_tag:
        return value in self.gdpr_tag_values
    return True
| 331,367
|
Check whether a resource is compliant
Args:
resource: A single resource
Returns:
`(list, list)`
A tuple containing missing tags (if there were any) and notes
|
def check_required_tags_compliance(self, resource):
    """Audit a single resource for required-tag compliance.

    Args:
        resource: A single resource

    Returns:
        `(list, list)`: Missing/invalid tag keys and human-readable notes
    """
    missing_tags = []
    notes = []
    resource_tags = {tag.key.lower(): tag.value for tag in resource.tags}

    # Resolve the audit scope for this resource type, falling back to the wildcard entry
    schedule = self.alert_schedule.get(resource.resource_type, self.alert_schedule['*'])
    target_accounts = schedule['scope']
    if not ('*' in target_accounts or resource.account.account_name in target_accounts):
        return missing_tags, notes

    # Resources explicitly opted out via the ignore tag are never audited
    if self.audit_ignore_tag.lower() in resource_tags:
        return missing_tags, notes

    required_tags = list(self.required_tags)
    # GDPR-covered accounts must also carry the GDPR tag
    if self.gdpr_enabled and resource.account.account_name in self.gdpr_accounts:
        required_tags.append(self.gdpr_tag)

    # Flag required tags that are absent or carry an invalid value
    for key in (tag.lower() for tag in required_tags):
        if key not in resource_tags:
            missing_tags.append(key)
        elif not self.validate_tag(key, resource_tags[key]):
            missing_tags.append(key)
            notes.append('{} tag is not valid'.format(key))

    return missing_tags, notes
| 331,368
|
Send notifications to the recipients provided
Args:
notices (:obj:`dict` of `str`: `list`): A dictionary mapping notification messages to the recipient.
Returns:
`None`
|
def notify(self, notices):
    """Send the required-tags notification emails to the recipients provided.

    Args:
        notices (:obj:`dict` of `str`: `list`): Map of recipient to notification data

    Returns:
        `None`
    """
    html_template = get_template('required_tags_notice.html')
    text_template = get_template('required_tags_notice.txt')

    for recipient, data in list(notices.items()):
        send_notification(
            subsystem=self.ns,
            recipients=[recipient],
            subject=self.email_subject,
            body_html=html_template.render(data=data),
            body_text=text_template.render(data=data)
        )
| 331,369
|
Return all Enforcements
args:
`account_id` : Unique Account Identifier
`location` : Region associated with the Resource
returns:
list of enforcement objects
|
def get_all(cls, account_id=None, location=None):
    """Return all Enforcements, optionally filtered by account and/or resource location.

    Args:
        account_id: Unique Account Identifier
        location: Region associated with the Resource

    Returns:
        Query object yielding enforcement rows
    """
    qry = db.Enforcements.filter()

    if account_id:
        # NOTE(review): literal is on the left, relying on SQLAlchemy's reflected
        # __eq__ to build the column comparison — confirm this produces the
        # intended filter
        qry = qry.filter(account_id == Enforcements.account_id)

    if location:
        # Join against Resource to constrain enforcements by the resource's region
        qry = qry.join(Resource, Resource.location == location)

    return qry
| 331,373
|
Returns the IssueType object for `issue_type`. If no existing object was found, a new type will
be created in the database and returned
Args:
issue_type (str,int,IssueType): Issue type name, id or class
Returns:
:obj:`IssueType`
|
def get(cls, issue_type):
    """Resolve `issue_type` to an IssueType object, creating it if necessary.

    Args:
        issue_type (str,int,IssueType): Issue type name, id or class

    Returns:
        :obj:`IssueType`
    """
    if isinstance(issue_type, cls):
        return issue_type

    model = getattr(db, cls.__name__)
    if isinstance(issue_type, str):
        obj = model.find_one(cls.issue_type == issue_type)
    elif isinstance(issue_type, int):
        obj = model.find_one(cls.issue_type_id == issue_type)
    else:
        obj = None

    if not obj:
        # Unknown type: register it so future lookups succeed
        obj = cls()
        obj.issue_type = issue_type
        db.session.add(obj)
        db.session.commit()
        db.session.refresh(obj)

    return obj
| 331,375
|
Return issue by ID
Args:
issue_id (str): Unique Issue identifier
issue_type_id (str): Type of issue to get
Returns:
:obj:`Issue`: Returns Issue object if found, else None
|
def get(issue_id, issue_type_id):
    """Look up a single issue by its ID and type.

    Args:
        issue_id (str): Unique Issue identifier
        issue_type_id (str): Type of issue to get

    Returns:
        :obj:`Issue`: The matching Issue, or None if not found
    """
    filters = (
        Issue.issue_id == issue_id,
        Issue.issue_type_id == issue_type_id
    )
    return db.Issue.find_one(*filters)
| 331,377
|
Main execution point for the auditor
Args:
*args:
**kwargs:
Returns:
`None`
|
def run(self, *args, **kwargs):
    """Entry point for the EBS auditor: collect issues and notify account contacts.

    Args:
        *args: Unused
        **kwargs: Unused

    Returns:
        `None`
    """
    self.log.debug('Starting EBSAuditor')
    account_issues = self.update_data()

    # Fan each account's issues out to every contact on that account
    notices = defaultdict(list)
    for account, issues in account_issues.items():
        for issue in issues:
            for contact in account.contacts:
                key = NotificationContact(type=contact['type'], value=contact['value'])
                notices[key].append(issue)

    self.notify(notices)
| 331,381
|
Takes a dict of existing volumes missing tags and a dict of existing issues, and finds any new or updated
issues.
Args:
volumes (:obj:`dict` of `str`: `EBSVolume`): Dict of current volumes with issues
existing_issues (:obj:`dict` of `str`: `EBSVolumeAuditIssue`): Current list of issues
Returns:
:obj:`dict` of `str`: `EBSVolumeAuditIssue`
|
def process_new_issues(self, volumes, existing_issues):
    """Find any new or updated issues among the currently detected volumes.

    Args:
        volumes (:obj:`dict` of `str`: `EBSVolume`): Dict of current volumes with issues
        existing_issues (:obj:`dict` of `str`: `EBSVolumeAuditIssue`): Current list of issues

    Returns:
        :obj:`dict` of account to `list` of :obj:`EBSVolumeAuditIssue`
    """
    new_issues = {}
    for issue_id, volume in volumes.items():
        state = EBSIssueState.DETECTED.value

        if issue_id in existing_issues:
            issue = existing_issues[issue_id]

            # Re-apply the current state; update() reports whether anything changed
            data = {
                'state': state,
                'notes': issue.notes,
                'last_notice': issue.last_notice
            }
            if issue.update(data):
                new_issues.setdefault(issue.volume.account, []).append(issue)
                self.log.debug('Updated EBSVolumeAuditIssue {}'.format(
                    issue_id
                ))
        else:
            # First time this volume shows up: create a fresh issue record
            properties = {
                'volume_id': volume.id,
                'account_id': volume.account_id,
                'location': volume.location,
                'state': state,
                'last_change': datetime.now(),
                'last_notice': None,
                'notes': []
            }

            issue = EBSVolumeAuditIssue.create(issue_id, properties=properties)
            new_issues.setdefault(issue.volume.account, []).append(issue)
    return new_issues
| 331,384
|
Provided a list of volumes and existing issues, returns a list of fixed issues to be deleted
Args:
volumes (`dict`): A dictionary keyed on the issue id, with the :obj:`Volume` object as the value
existing_issues (`dict`): A dictionary keyed on the issue id, with the :obj:`EBSVolumeAuditIssue` object as
the value
Returns:
:obj:`list` of :obj:`EBSVolumeAuditIssue`
|
def process_fixed_issues(self, volumes, existing_issues):
    """Return the issues whose volumes no longer appear in the current scan.

    Args:
        volumes (`dict`): Current volumes with issues, keyed on issue id
        existing_issues (`dict`): Existing issues, keyed on issue id

    Returns:
        :obj:`list` of :obj:`EBSVolumeAuditIssue`
    """
    return [
        issue for issue_id, issue in list(existing_issues.items())
        if issue_id not in volumes
    ]
| 331,385
|
Send notifications to the users via the provided methods
Args:
notices (:obj:`dict` of `str`: `dict`): List of the notifications to send
Returns:
`None`
|
def notify(self, notices):
    """Send unattached-EBS-volume notifications to each recipient.

    Args:
        notices (:obj:`dict` of `str`: `dict`): Notifications to send, keyed by recipient

    Returns:
        `None`
    """
    html_tmpl = get_template('unattached_ebs_volume.html')
    text_tmpl = get_template('unattached_ebs_volume.txt')

    for recipient, issues in list(notices.items()):
        # Skip recipients that ended up with no issues at all
        if not issues:
            continue
        send_notification(
            subsystem=self.name,
            recipients=[recipient],
            subject=self.subject,
            body_html=html_tmpl.render(issues=issues),
            body_text=text_tmpl.render(issues=issues)
        )
| 331,386
|
Function to return a boto3 Session based on the account passed in the first argument.
Args:
account (:obj:`Account`): Account to create the session object for
Returns:
:obj:`boto3:boto3.session.Session`
|
def get_aws_session(account):
    """Return a boto3 Session holding temporary credentials for `account`.

    Assumes the account's configured role via STS, optionally hopping through
    the instance role first when local credentials are not suitable.

    Args:
        account (:obj:`Account`): Account to create the session object for

    Returns:
        :obj:`boto3:boto3.session.Session`

    Raises:
        InquisitorError: If `account` is not an AWSAccount
    """
    from cloud_inquisitor.config import dbconfig
    from cloud_inquisitor.plugins.types.accounts import AWSAccount

    if not isinstance(account, AWSAccount):
        raise InquisitorError('Non AWSAccount passed to get_aws_session, got {}'.format(account.__class__.__name__))

    # If no keys are on supplied for the account, use sts.assume_role instead
    session = get_local_aws_session()
    if session.get_credentials().method in ['iam-role', 'env', 'explicit']:
        # Instance-profile / environment / explicit credentials can assume the
        # remote account role directly
        sts = session.client('sts')
    else:
        # If we are not running on an EC2 instance, assume the instance role
        # first, then assume the remote role
        temp_sts = session.client('sts')

        audit_sts_role = temp_sts.assume_role(
            RoleArn=app_config.aws_api.instance_role_arn,
            RoleSessionName='inquisitor'
        )
        sts = boto3.session.Session(
            audit_sts_role['Credentials']['AccessKeyId'],
            audit_sts_role['Credentials']['SecretAccessKey'],
            audit_sts_role['Credentials']['SessionToken']
        ).client('sts')

    # Assume the per-account audit role (name is configurable, default cinq_role)
    role = sts.assume_role(
        RoleArn='arn:aws:iam::{}:role/{}'.format(
            account.account_number,
            dbconfig.get('role_name', default='cinq_role')
        ),
        RoleSessionName='inquisitor'
    )

    sess = boto3.session.Session(
        role['Credentials']['AccessKeyId'],
        role['Credentials']['SecretAccessKey'],
        role['Credentials']['SessionToken']
    )

    return sess
| 331,400
|
Load a list of AWS regions from the AWS static data.
Args:
force (`bool`): Force fetch list of regions even if we already have a cached version
Returns:
:obj:`list` of `str`
|
def get_aws_regions(*, force=False):
    """Return the cached list of AWS region names, loading from AWS static data when needed.

    Args:
        force (`bool`): Re-fetch the region list even if a cached copy exists

    Returns:
        :obj:`list` of `str`
    """
    from cloud_inquisitor.config import dbconfig
    global __regions

    if __regions and not force:
        return __regions

    logger.debug('Loading list of AWS regions from static data')
    ip_ranges = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json').json()
    # Drop regions matching the configured ignore pattern (China, GovCloud, GLOBAL)
    ignored = re.compile(dbconfig.get('ignored_aws_regions_regexp', default='(^cn-|GLOBAL|-gov)'), re.I)
    region_names = {prefix['region'] for prefix in ip_ranges['prefixes'] if not ignored.search(prefix['region'])}
    __regions = sorted(region_names)
    return __regions
| 331,401
|
Returns a dict containing the tags for a CloudFront distribution
Args:
client (botocore.client.CloudFront): Boto3 CloudFront client object
arn (str): ARN of the distribution to get tags for
Returns:
`dict`
|
def __get_distribution_tags(self, client, arn):
    """Return the tags of a CloudFront distribution as a plain dict.

    Args:
        client (botocore.client.CloudFront): Boto3 CloudFront client object
        arn (str): ARN of the distribution to get tags for

    Returns:
        `dict`
    """
    tag_items = client.list_tags_for_resource(Resource=arn)['Tags']['Items']
    return {item['Key']: item['Value'] for item in tag_items}
| 331,416
|
Return all resource records for a specific Route53 zone
Args:
zone_id (`str`): Name / ID of the hosted zone
Returns:
`dict`
|
def __fetch_route53_zone_records(self, zone_id):
    """Return all resource records for a specific Route53 hosted zone.

    Args:
        zone_id (`str`): Name / ID of the hosted zone

    Returns:
        `list` of `dict`: One entry per unique record; ALIAS records get type
        'ALIAS' with TTL 0
    """
    route53 = self.session.client('route53')

    done = False
    nextName = nextType = None
    records = {}
    try:
        # Paginate using the StartRecordName/StartRecordType continuation markers
        while not done:
            if nextName and nextType:
                response = route53.list_resource_record_sets(
                    HostedZoneId=zone_id,
                    StartRecordName=nextName,
                    StartRecordType=nextType
                )
            else:
                response = route53.list_resource_record_sets(HostedZoneId=zone_id)

            if response['IsTruncated']:
                nextName = response['NextRecordName']
                nextType = response['NextRecordType']
            else:
                done = True

            if 'ResourceRecordSets' in response:
                for record in response['ResourceRecordSets']:
                    # Cannot make this a list, due to a race-condition in the AWS api that might return the same
                    # record more than once, so we use a dict instead to ensure that if we get duplicate records
                    # we simply just overwrite the one already there with the same info.
                    record_id = self._get_resource_hash(zone_id, record)

                    if 'AliasTarget' in record:
                        # Alias records have no TTL / ResourceRecords of their own
                        value = record['AliasTarget']['DNSName']
                        records[record_id] = {
                            'id': record_id,
                            'name': record['Name'].rstrip('.'),
                            'type': 'ALIAS',
                            'ttl': 0,
                            'value': [value]
                        }
                    else:
                        value = [y['Value'] for y in record['ResourceRecords']]
                        records[record_id] = {
                            'id': record_id,
                            'name': record['Name'].rstrip('.'),
                            'type': record['Type'],
                            'ttl': record['TTL'],
                            'value': value
                        }

        return list(records.values())
    finally:
        del route53
| 331,418
|
Return a dict with the tags for the zone
Args:
zone_id (`str`): ID of the hosted zone
Returns:
:obj:`dict` of `str`: `str`
|
def __fetch_route53_zone_tags(self, zone_id):
    """Return the tags attached to a Route53 hosted zone.

    Args:
        zone_id (`str`): ID of the hosted zone (may be the full '/hostedzone/XYZ' form)

    Returns:
        :obj:`dict` of `str`: `str`
    """
    route53 = self.session.client('route53')
    try:
        # The API wants the bare zone id, so strip any '/hostedzone/' prefix
        response = route53.list_tags_for_resource(
            ResourceType='hostedzone',
            ResourceId=zone_id.split('/')[-1]
        )
        return {tag['Key']: tag['Value'] for tag in response['ResourceTagSet']['Tags']}
    finally:
        del route53
| 331,419
|
Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique
resource IDs
Args:
zone_name (`str`): The name of the DNS Zone the record belongs to
record (`dict`): A record dict to generate the hash from
Returns:
`str`
|
def _get_resource_hash(zone_name, record):
    """Return a stable resource ID for a DNS record.

    The ID is derived from the zone name plus the attributes that make a record
    unique within a zone (name, type, weight, region, geolocation, failover,
    health check and traffic policy instance).

    Args:
        zone_name (`str`): The name of the DNS Zone the record belongs to
        record (`dict`): A record dict to generate the hash from

    Returns:
        `str`
    """
    # Missing attributes default to 0 so absent fields hash deterministically
    record_data = defaultdict(int, record)
    # isinstance instead of a type() comparison so dict subclasses are handled too
    if isinstance(record_data['GeoLocation'], dict):
        # Flatten the GeoLocation dict into a string; ordering follows dict
        # insertion order
        record_data['GeoLocation'] = ":".join(["{}={}".format(k, v) for k, v in record_data['GeoLocation'].items()])

    args = [
        zone_name,
        record_data['Name'],
        record_data['Type'],
        record_data['Weight'],
        record_data['Region'],
        record_data['GeoLocation'],
        record_data['Failover'],
        record_data['HealthCheckId'],
        record_data['TrafficPolicyInstanceId']
    ]
    return get_resource_id('r53r', args)
| 331,420
|
Returns datapoints from cloudwatch for bucket statistics.
Args:
bucket_name `(str)`: The name of the bucket
statistic `(str)`: The statistic you want to fetch from
days `(int)`: Sample period for the statistic
|
def _get_bucket_statistics(self, bucket_name, bucket_region, storage_type, statistic, days):
    """Return the average of a CloudWatch S3 bucket metric over the sample period.

    Args:
        bucket_name `(str)`: The name of the bucket
        bucket_region `(str)`: Region the bucket lives in
        storage_type `(str)`: CloudWatch StorageType dimension value
        statistic `(str)`: The statistic you want to fetch from
        days `(int)`: Sample period for the statistic

    Returns:
        The average of the first returned datapoint, the string 'NO_DATA' when
        CloudWatch returned no datapoints, or None (implicitly) on error
    """
    cw = self.session.client('cloudwatch', region_name=bucket_region)

    # gather cw stats
    try:
        obj_stats = cw.get_metric_statistics(
            Namespace='AWS/S3',
            MetricName=statistic,
            Dimensions=[
                {
                    'Name': 'StorageType',
                    'Value': storage_type
                },
                {
                    'Name': 'BucketName',
                    'Value': bucket_name
                }
            ],
            Period=86400,
            StartTime=datetime.utcnow() - timedelta(days=days),
            EndTime=datetime.utcnow(),
            Statistics=[
                'Average'
            ]
        )

        # NOTE(review): takes the first datapoint, which is not necessarily the
        # most recent — confirm ordering assumptions
        stat_value = obj_stats['Datapoints'][0]['Average'] if obj_stats['Datapoints'] else 'NO_DATA'
        return stat_value
    except Exception as e:
        self.log.error(
            'Could not get bucket statistic for account {} / bucket {} / {}'.format(self.account.account_name,
                                                                                    bucket_name, e))
    finally:
        del cw
| 331,421
|
Returns the ResourceType object for `resource_type`. If no existing object was found, a new type will
be created in the database and returned
Args:
resource_type (str): Resource type name
Returns:
:obj:`ResourceType`
|
def get(cls, resource_type):
    """Resolve `resource_type` to a ResourceType object, creating it if necessary.

    Args:
        resource_type (str): Resource type name (also accepts an id or instance)

    Returns:
        :obj:`ResourceType`
    """
    if isinstance(resource_type, cls):
        return resource_type

    model = getattr(db, cls.__name__)
    if isinstance(resource_type, str):
        obj = model.find_one(cls.resource_type == resource_type)
    elif isinstance(resource_type, int):
        obj = model.find_one(cls.resource_type_id == resource_type)
    else:
        obj = None

    if not obj:
        # Unknown type: register it so future lookups succeed
        obj = cls()
        obj.resource_type = resource_type
        db.session.add(obj)
        db.session.commit()
        db.session.refresh(obj)

    return obj
| 331,424
|
Return account by ID and type
Args:
account_id (`int`, `str`): Unique Account identifier
account_type_id (str): Type of account to get
Returns:
:obj:`Account`: Returns an Account object if found, else None
|
def get(account_id, account_type_id=None):
    """Return account by ID (or name) and optionally type.

    Args:
        account_id (`int`, `str`): Unique Account identifier; strings are looked up as account names
        account_type_id (str): Type of account to get

    Returns:
        :obj:`Account`: Returns an Account object if found, else None
    """
    # isinstance instead of a type() comparison so str subclasses work too
    if isinstance(account_id, str):
        args = {'account_name': account_id}
    else:
        args = {'account_id': account_id}

    if account_type_id:
        args['account_type_id'] = account_type_id

    return db.Account.find_one(**args)
| 331,442
|
Check if a user has access to view information for the account
Args:
user (:obj:`User`): User object to check
Returns:
True if user has access to the account, else false
|
def user_has_access(self, user):
    """Check if a user may view this account's information.

    Args:
        user (:obj:`User`): User object to check

    Returns:
        `bool`: True if the user has access to the account, else False
    """
    # Admins always have access
    if ROLE_ADMIN in user.roles:
        return True

    # Non-admin users only ever see enabled accounts
    if not self.enabled:
        return False

    # No role restriction means any authenticated user may view it
    if not self.required_roles:
        return True

    return any(role in user.roles for role in self.required_roles)
| 331,443
|
Process an audit action for a resource, if possible
Args:
resource (:obj:`Resource`): A resource object to perform the action on
action (`str`): Type of action to perform (`kill` or `stop`)
action_issuer (`str`): The issuer of the action
Returns:
`ActionStatus`
|
def process_action(resource, action, action_issuer='unknown'):
    """Apply an audit action to a resource, if supported, with audit logging.

    Args:
        resource (:obj:`Resource`): A resource object to perform the action on
        action (`str`): Type of action to perform (`kill` or `stop`)
        action_issuer (`str`): The issuer of the action

    Returns:
        `ActionStatus`
    """
    from cinq_collector_aws import AWSRegionCollector

    # Look up the handler registered for this resource type / action pair
    func_action = action_mapper[resource.resource_type][action]
    extra_info = {}
    action_status = ActionStatus.UNKNOWN

    if func_action:
        if action_mapper[resource.resource_type]['service_name'] == 'lambda':
            # Lambda actions run from the central collector account/region
            client = get_aws_session(
                AWSAccount.get(dbconfig.get('rds_collector_account', AWSRegionCollector.ns, ''))
            ).client(
                'lambda',
                dbconfig.get('rds_collector_region', AWSRegionCollector.ns, '')
            )
        else:
            # All other actions run directly in the resource's own account/region
            client = get_aws_session(AWSAccount(resource.account)).client(
                action_mapper[resource.resource_type]['service_name'],
                region_name=resource.location
            )
        try:
            logger.info(f'Trying to {action} resource {resource.id} for account {resource.account.account_name} / region {resource.location}')
            action_status, extra_info = func_action(client, resource)
            # Record the enforcement action for later reporting
            Enforcement.create(resource.account.account_id, resource.id, action, datetime.now(), extra_info)
        except Exception as ex:
            action_status = ActionStatus.FAILED
            logger.exception('Failed to apply action {} to {}: {}'.format(action, resource.id, ex))
        finally:
            # The audit log entry is written regardless of the action's outcome
            auditlog(
                event='{}.{}.{}.{}'.format(action_issuer, resource.resource_type, action, action_status),
                actor=action_issuer,
                data={
                    'resource_id': resource.id,
                    'account_name': resource.account.account_name,
                    'location': resource.location,
                    'info': extra_info
                }
            )
        return action_status
    else:
        logger.error('Failed to apply action {} to {}: Not supported'.format(action, resource.id))
        return ActionStatus.FAILED
| 331,458
|
Stop an EC2 Instance
This function will attempt to stop a running instance.
Args:
client (:obj:`boto3.session.Session.client`): A boto3 client object
resource (:obj:`Resource`): The resource object to stop
Returns:
`ActionStatus`
|
def stop_ec2_instance(client, resource):
    """Stop a running EC2 instance.

    Args:
        client (:obj:`boto3.session.Session.client`): A boto3 EC2 client object
        resource (:obj:`Resource`): The resource object to stop

    Returns:
        `tuple` of (`ActionStatus`, `dict`)
    """
    # Nothing to do if the instance is already stopped or gone
    if EC2Instance.get(resource.id).state in ('stopped', 'terminated'):
        return ActionStatus.IGNORED, {}

    client.stop_instances(InstanceIds=[resource.id])
    return ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip}
| 331,459
|
Terminate an EC2 Instance
This function will terminate an EC2 Instance.
Args:
client (:obj:`boto3.session.Session.client`): A boto3 client object
resource (:obj:`Resource`): The resource object to terminate
Returns:
`ActionStatus`
|
def terminate_ec2_instance(client, resource):
    """Terminate an EC2 instance.

    Args:
        client (:obj:`boto3.session.Session.client`): A boto3 EC2 client object
        resource (:obj:`Resource`): The resource object to terminate

    Returns:
        `tuple` of (`ActionStatus`, `dict`)
    """
    # TODO: Implement disabling of TerminationProtection
    # Nothing to do if the instance is already gone
    if EC2Instance.get(resource.id).state == 'terminated':
        return ActionStatus.IGNORED, {}

    client.terminate_instances(InstanceIds=[resource.id])
    return ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip}
| 331,460
|
Delete an S3 bucket
This function will try to delete an S3 bucket
Args:
client (:obj:`boto3.session.Session.client`): A boto3 client object
resource (:obj:`Resource`): The resource object to terminate
Returns:
`ActionStatus`
|
def delete_s3_bucket(client, resource):
    """Delete an S3 bucket, if bucket deletion is enabled in the configuration.

    Args:
        client (:obj:`boto3.session.Session.client`): A boto3 S3 client object
        resource (:obj:`Resource`): The bucket resource to delete

    Returns:
        `tuple` of (`ActionStatus`, metrics dict)
    """
    # Bucket deletion is gated behind a config flag as a safety net
    if dbconfig.get('enable_delete_s3_buckets', NS_AUDITOR_REQUIRED_TAGS, False):
        client.delete_bucket(Bucket=resource.id)

    return ActionStatus.SUCCEED, resource.metrics()
| 331,462
|
Returns the class object identified by `resource_id`
Args:
resource_id (str): Unique EC2 Instance ID to load from database
Returns:
EC2 Instance object if found, else None
|
def get(cls, resource_id):
    """Return the instance wrapper identified by `resource_id`, or None.

    Args:
        resource_id (str): Unique EC2 Instance ID to load from database

    Returns:
        EC2 Instance object if found, else None
    """
    resource = Resource.get(resource_id)
    if not resource:
        return None
    return cls(resource)
| 331,465
|
Return a named property for a resource, if available. Will raise an `AttributeError` if the property
does not exist
Args:
name (str): Name of the property to return
Returns:
`ResourceProperty`
|
def get_property(self, name):
    """Return the named property of this resource.

    Args:
        name (str): Name of the property to return

    Returns:
        `ResourceProperty`

    Raises:
        AttributeError: If no property with that name exists
    """
    matches = (prop for prop in self.resource.properties if prop.name == name)
    found = next(matches, None)
    if found is None:
        raise AttributeError(name)
    return found
| 331,470
|
Create or set the value of a property. Returns `True` if the property was created or updated, or `False` if
there were no changes to the value of the property.
Args:
name (str): Name of the property to create or update
value (any): Value of the property. This can be any type of JSON serializable data
update_session (bool): Automatically add the change to the SQLAlchemy session. Default: True
Returns:
`bool`
|
def set_property(self, name, value, update_session=True):
    """Create or update a resource property.

    Args:
        name (str): Name of the property to create or update
        value (any): Value of the property; any JSON-serializable data.
            `datetime` values are stored as ISO-8601 strings
        update_session (bool): Automatically add the change to the SQLAlchemy
            session. Default: `True`

    Returns:
        `bool`: `True` if the property was created or changed, else `False`
    """
    # Normalize datetimes to ISO-8601 strings so the value is JSON friendly.
    # isinstance (instead of an exact type check) also covers datetime
    # subclasses; the original redundant `else: value = value` is dropped.
    if isinstance(value, datetime):
        value = value.isoformat()

    try:
        prop = self.get_property(name)
        if prop.value == value:
            # No change, nothing to persist
            return False
        prop.value = value
    except AttributeError:
        # Property does not exist yet; create it
        prop = ResourceProperty()
        prop.resource_id = self.id
        prop.name = name
        prop.value = value

    if update_session:
        db.session.add(prop)

    return True
| 331,471
|
Return a tag by key, if found
Args:
key (str): Name/key of the tag to locate
case_sensitive (bool): Should tag keys be treated case-sensitive (default: true)
Returns:
`Tag`,`None`
|
def get_tag(self, key, *, case_sensitive=True):
    """Look up a tag on this resource by its key.

    Args:
        key (str): Name/key of the tag to locate
        case_sensitive (bool): Treat tag keys as case-sensitive (default: `True`)

    Returns:
        `Tag` if found, else `None`
    """
    if case_sensitive:
        return next(
            (tag for tag in self.resource.tags if tag.key == key),
            None
        )

    wanted = key.lower()
    return next(
        (tag for tag in self.resource.tags if tag.key.lower() == wanted),
        None
    )
| 331,472
|
Create or set the value of the tag with `key` to `value`. Returns `True` if the tag was created or updated or
`False` if there were no changes to be made.
Args:
key (str): Key of the tag
value (str): Value of the tag
update_session (bool): Automatically add the change to the SQLAlchemy session. Default: True
Returns:
`bool`
|
def set_tag(self, key, value, update_session=True):
    """Create or update the tag *key* with *value*.

    Args:
        key (str): Key of the tag
        value (str): Value of the tag
        update_session (bool): Automatically add the change to the SQLAlchemy
            session. Default: `True`

    Returns:
        `bool`: `True` if the tag was created or changed, else `False`
    """
    current = {tag.key: tag for tag in self.tags}.get(key)

    if current is not None:
        if current.value == value:
            # Tag already holds this value; nothing to do
            return False
        current.value = value
        tag = current
    else:
        tag = Tag()
        tag.resource_id = self.id
        tag.key = key
        tag.value = value
        self.tags.append(tag)

    if update_session:
        db.session.add(tag)

    return True
| 331,473
|
Removes a tag from a resource based on the tag key. Returns `True` if the tag was removed or `False` if the
tag didn't exist
Args:
key (str): Key of the tag to delete
update_session (bool): Automatically add the change to the SQLAlchemy session. Default: True
Returns:
`bool`
|
def delete_tag(self, key, update_session=True):
    """Remove the tag with *key* from the resource, if present.

    Args:
        key (str): Key of the tag to delete
        update_session (bool): Automatically add the change to the SQLAlchemy
            session. Default: `True`

    Returns:
        `bool`: `True` if a tag was removed, else `False`
    """
    victim = {tag.key: tag for tag in self.tags}.get(key)
    if victim is None:
        return False

    if update_session:
        db.session.delete(victim)
    self.tags.remove(victim)
    return True
| 331,474
|
Save the resource to the database
Args:
auto_commit (bool): Automatically commit the transaction. Default: `False`
Returns:
`None`
|
def save(self, *, auto_commit=False):
    """Persist the resource to the database.

    On failure the error is logged and the session is rolled back; the
    exception is not propagated.

    Args:
        auto_commit (bool): Automatically commit the transaction. Default: `False`

    Returns:
        `None`
    """
    try:
        db.session.add(self.resource)
        if auto_commit:
            db.session.commit()
    except SQLAlchemyError as err:
        self.log.exception('Failed updating resource: {}'.format(err))
        db.session.rollback()
| 331,475
|
Removes a resource from the database
Args:
auto_commit (bool): Automatically commit the transaction. Default: `False`
Returns:
`None`
|
def delete(self, *, auto_commit=False):
    """Remove the resource from the database.

    On failure the error is logged and the session is rolled back; the
    exception is not propagated.

    Args:
        auto_commit (bool): Automatically commit the transaction. Default: `False`

    Returns:
        `None`
    """
    try:
        db.session.delete(self.resource)
        if auto_commit:
            db.session.commit()
    except SQLAlchemyError:
        self.log.exception('Failed deleting resource: {}'.format(self.id))
        db.session.rollback()
| 331,476
|
Updates the object information based on live data, if there were any changes made. Any changes will be
automatically applied to the object, but will not be automatically persisted. You must manually call
`db.session.add(instance)` on the object.
Args:
data (:obj:): AWS API Resource object fetched from AWS API
Returns:
True if there were any changes to the object, else false
|
def update(self, data):
    """Synchronize this instance with live AWS API data.

    Any changes are applied to the object but are not persisted; callers must
    add the instance to the SQLAlchemy session themselves.

    Args:
        data (:obj:): AWS API Resource object fetched from AWS API

    Returns:
        `bool`: `True` if anything changed, else `False`
    """
    # A terminated instance is removed from the database entirely
    if data.state['Name'] == 'terminated':
        self.delete(auto_commit=False)
        return True

    changed = self.set_property('launch_date', to_utc_date(data.launch_time).isoformat())
    for prop_name, prop_value in (
        ('state', data.state['Name']),
        ('instance_type', data.instance_type),
        ('public_ip', data.public_ip_address or None),
        ('public_dns', data.public_dns_name or None),
    ):
        changed |= self.set_property(prop_name, prop_value)

    live_tags = {tag['Key']: tag['Value'] for tag in data.tags or {}}
    known_keys = [tag.key for tag in self.tags]

    # Create / update tags present in the live data
    for key, value in live_tags.items():
        changed |= self.set_tag(key, value)

    # Drop tags that no longer exist in AWS
    for key in known_keys:
        if key not in live_tags:
            changed |= self.delete_tag(key)

    return changed
| 331,479
|
Returns the name of an instance if it exists, else returns the instance id
Args:
with_id (bool): Include the instance ID even if the name is found (default: False)
Returns:
Name and/or instance ID of the instance object
|
def get_name_or_instance_id(self, with_id=False):
    """Return the instance's Name tag when set, otherwise its instance ID.

    Args:
        with_id (bool): Append the instance ID even when a name is found
            (default: `False`)

    Returns:
        `str`: Name and/or instance ID of the instance object
    """
    name_tag = self.get_tag('Name', case_sensitive=False)
    if not name_tag or not name_tag.value.strip():
        return self.id

    if with_id:
        return '{0} ({1})'.format(name_tag.value, self.id)
    return name_tag.value
| 331,480
|
Updates the object information based on live data, if there were any changes made. Any changes will be
automatically applied to the object, but will not be automatically persisted. You must manually call
`db.session.add(ami)` on the object.
Args:
data (bunch): Data fetched from AWS API
Returns:
True if there were any changes to the object, else false
|
def update(self, data):
    """Synchronize this object with live AWS API data.

    Any changes are applied to the object but are not persisted; callers must
    add the object to the SQLAlchemy session themselves.

    Args:
        data (bunch): Data fetched from AWS API

    Returns:
        `bool`: `True` if anything changed, else `False`
    """
    changed = self.set_property('description', data.description)
    changed |= self.set_property('state', data.state)

    live_tags = {tag['Key']: tag['Value'] for tag in data.tags or {}}
    known_keys = [tag.key for tag in self.tags]

    # Create / update tags present in the live data
    for key, value in live_tags.items():
        changed |= self.set_tag(key, value)

    # Drop tags that no longer exist in AWS
    for key in known_keys:
        if key not in live_tags:
            changed |= self.delete_tag(key)

    return changed
| 331,484
|
Remove a DNSRecord
Args:
record (:obj:`DNSRecord`): :obj:`DNSRecord` to remove
Returns:
`None`
|
def delete_record(self, record):
    """Detach and delete a DNS record from this object.

    Args:
        record (:obj:`DNSRecord`): :obj:`DNSRecord` to remove

    Returns:
        `None`
    """
    # Unlink from the parent before removing the record itself
    self.children.remove(record.resource)
    record.delete()
| 331,485
|
Take an event type argument and return a python logging format
In order to properly format the syslog messages to current standard, load the template and perform necessary
replacements and return the string.
Args:
event_type (str): Event type name
Returns:
`str`
|
def _get_syslog_format(event_type):
    """Build a syslog-compatible logging format string for *event_type*.

    Renders the syslog format template with the event type and instance name,
    then round-trips the result through the JSON parser to strip any
    extraneous whitespace.

    Args:
        event_type (str): Event type name

    Returns:
        `str`
    """
    template = get_template('syslog_format.json')
    rendered = template.render(
        event_type=event_type,
        host=dbconfig.get('instance_name', default='local')
    )
    return json.dumps(json.loads(rendered))
| 331,499
|
Generate and insert a new event
Args:
event (`str`): Action performed
actor (`str`): Actor (user or subsystem) triggering the event
data (`dict`): Any extra data necessary for describing the event
level (`str` or `int`): Log level for the message. Uses standard python logging level names / numbers
Returns:
`None`
|
def auditlog(*, event, actor, data, level=logging.INFO):
    """Generate and insert a new audit event, then emit it to the audit logger.

    Failures are logged and the database session is rolled back; they are
    never propagated to the caller.

    Args:
        event (`str`): Action performed
        actor (`str`): Actor (user or subsystem) triggering the event
        data (`dict`): Any extra data necessary for describing the event
        level (`str` or `int`): Log level for the message. Uses standard python
            logging level names / numbers

    Returns:
        `None`
    """
    try:
        entry = AuditLog()
        entry.event = event
        entry.actor = actor
        entry.data = data
        db.session.add(entry)
        db.session.commit()

        # isinstance instead of an exact type check also accepts str
        # subclasses; getLevelName maps a level name to its numeric value
        _AUDIT_LOGGER.log(
            logging.getLevelName(level) if isinstance(level, str) else level,
            {
                'event': event,
                'actor': actor,
                'data': data,
            }
        )
    except Exception:
        logging.getLogger(__name__).exception('Failed adding audit log event')
        db.session.rollback()
| 331,501
|
Persist a record into the database
Args:
record (`logging.Record`): The logging.Record object to store
Returns:
`None`
|
def emit(self, record):
    """Persist a logging record into the database as a LogEvent row.

    Args:
        record (`logging.LogRecord`): The logging record object to store

    Returns:
        `None`
    """
    # Skip records less than min_level
    # NOTE(review): getLevelName maps a level *name* to its number, so this
    # assumes self.min_level is a string such as 'INFO' — confirm; an int
    # here would come back as the string 'Level N' and break the comparison
    if record.levelno < logging.getLevelName(self.min_level):
        return
    evt = LogEvent()
    evt.level = record.levelname
    evt.levelno = record.levelno
    evt.timestamp = datetime.fromtimestamp(record.created)
    # record.message only exists after the record has been formatted
    # (record.getMessage() has run) — presumably a formatter runs first
    evt.message = record.message
    evt.filename = record.filename
    evt.lineno = record.lineno
    evt.module = record.module
    evt.funcname = record.funcName
    evt.pathname = record.pathname
    evt.process_id = record.process
    # Only log stacktraces if its the level is ERROR or higher
    # (40 == logging.ERROR). format_exc() reads the exception currently being
    # handled, which is only meaningful when logging from inside an except
    # block — NOTE(review): record.exc_info may be more reliable; confirm
    if record.levelno >= 40:
        evt.stacktrace = traceback.format_exc()
    try:
        db.session.add(evt)
        db.session.commit()
    except Exception:
        # Best-effort persistence: never let logging failures propagate
        db.session.rollback()
| 331,502
|
Exports the object to a JSON friendly dict
Args:
include_body (bool): Include the body of the message in the output
Returns:
Dict representation of object type
|
def to_json(self, include_body=False):
    """Export the object to a JSON-friendly dict.

    Args:
        include_body (bool): Include the body of the message in the output

    Returns:
        `dict`: Dict representation of object type
    """
    # Message bodies are omitted (None) unless explicitly requested
    return {
        'emailId': self.email_id,
        'timestamp': isoformat(self.timestamp),
        'subsystem': self.subsystem,
        'subject': self.subject,
        'sender': self.sender,
        'recipients': self.recipients,
        'uuid': self.uuid,
        'messageHtml': self.message_html if include_body else None,
        'messageText': self.message_text if include_body else None,
    }
| 331,505
|
Fetch an item by namespace and key
Args:
ns (str): Namespace prefix
key (str): Item key
Returns:
:obj:`ConfigItem`: Returns config item object if found, else `None`
|
def get(cls, ns, key):
    """Fetch a config item by namespace and key.

    Args:
        ns (str): Namespace prefix
        key (str): Item key

    Returns:
        :obj:`ConfigItem`: Returns config item object if found, else `None`
    """
    # The db facade exposes one query helper per model, keyed by class name
    model = getattr(db, cls.__name__)
    return model.find_one(
        ConfigItem.namespace_prefix == ns,
        ConfigItem.key == key
    )
| 331,507
|
Return object based on JSON / dict input
Args:
data (dict): Dictionary containing a serialized Role object
Returns:
:obj:`Role`: Role object representing the data
|
def from_json(cls, data):
    """Build a Role object from its serialized (JSON / dict) form.

    Args:
        data (dict): Dictionary containing a serialized Role object

    Returns:
        :obj:`Role`: Role object representing the data
    """
    obj = cls()
    obj.role_id = data['roleId']
    obj.name = data['name']
    obj.color = data['color']
    return obj
| 331,509
|
Map roles for user in database
Args:
user (User): User to add roles to
roles ([Role]): List of roles to add
Returns:
None
|
def add_role(user, roles):
    """Map one or more roles to a user in the database.

    Each mapping is committed individually, preserving the original
    per-role-commit behavior.

    Args:
        user (User): User to add roles to
        roles ([Role]): List of roles to add

    Returns:
        `None`
    """
    # Plain loop instead of the original list comprehension, which built and
    # discarded a list purely for its side effects
    for role in roles:
        mapping = UserRole()
        mapping.user_id = user.user_id
        mapping.role_id = role.role_id
        db.session.add(mapping)
        db.session.commit()
| 331,511
|
Return object based on JSON / dict input
Args:
data (dict): Dictionary containing a serialized User object
Returns:
:obj:`User`: User object representing the data
|
def from_json(cls, data):
    """Build a User object from its serialized (JSON / dict) form.

    Args:
        data (dict): Dictionary containing a serialized User object

    Returns:
        :obj:`User`: User object representing the data
    """
    obj = cls()
    obj.user_id = data['userId']
    obj.username = data['username']
    obj.auth_system = data['authSystem']
    obj.roles = data['roles']
    return obj
| 331,512
|
Generate and insert a new event
Args:
event (str): Action performed
actor (str): Actor (user or subsystem) triggering the event
data (dict): Any extra data necessary for describing the event
Returns:
`None`
|
def log(cls, event=None, actor=None, data=None):
    """Record an audit event via the audit logging subsystem.

    Args:
        event (str): Action performed
        actor (str): Actor (user or subsystem) triggering the event
        data (dict): Any extra data necessary for describing the event

    Returns:
        `None`
    """
    # Imported inside the method, presumably to avoid a circular import at
    # module load time — keep it local
    from cloud_inquisitor.log import auditlog

    auditlog(event=event, actor=actor, data=data)
| 331,514
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.