id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
162,124 | import gzip
import json
import sys
import time
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from consoleme.config import config
from consoleme.exceptions.exceptions import (
DataNotRetrievable,
ExpiredData,
UnsupportedRedisDataType,
)
from consoleme.lib.asyncio import run_in_parallel
from consoleme.lib.json_encoder import SetEncoder
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler
from consoleme.lib.s3_helpers import get_object, put_object
async def retrieve_json_data_from_redis_or_s3(
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    cache_to_redis_if_data_in_s3: bool = True,
    max_age: Optional[int] = None,
    default: Optional = None,
    json_object_hook: Optional = None,
    json_encoder: Optional = None,
):
    """
    Retrieve data from Redis as a priority. If data is unavailable in Redis, fall back to S3 and attempt to store
    data in Redis for quicker retrieval later.

    :param redis_data_type: "str" or "hash", depending on how the data is stored in Redis
    :param redis_key: Redis Key to retrieve data from
    :param s3_bucket: S3 bucket to retrieve data from
    :param s3_key: S3 key to retrieve data from
    :param cache_to_redis_if_data_in_s3: Cache the data in Redis if the data is in S3 but not Redis
    :param max_age: Maximum acceptable age of cached data in seconds; older data is treated as expired
    :param default: Value returned instead of raising when no (fresh) data can be found
    :param json_object_hook: ``object_hook`` passed to ``json.loads`` when deserializing
    :param json_encoder: JSON encoder passed through when re-caching S3 data into Redis
    :return: the deserialized data, or ``default``
    :raises UnsupportedRedisDataType: for an unrecognized ``redis_data_type``
    :raises ExpiredData: when data exists but exceeds ``max_age`` and no fallback applies
    :raises DataNotRetrievable: when nothing could be retrieved and no ``default`` was given
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    data = None
    if redis_key:
        if redis_data_type == "str":
            data_s = red.get(redis_key)
            if data_s:
                data = json.loads(data_s, object_hook=json_object_hook)
        elif redis_data_type == "hash":
            data = red.hgetall(redis_key)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        if data and max_age:
            current_time = int(time.time())
            # NOTE(review): red.hget returns None when the last-updated entry is
            # missing, which would make int() raise TypeError here — confirm the
            # timestamp entry is always written alongside the cached data.
            last_updated = int(red.hget(last_updated_redis_key, redis_key))
            if current_time - last_updated > max_age:
                data = None
                # Fall back to S3 if expired.
                if not s3_bucket or not s3_key:
                    raise ExpiredData(f"Data in Redis is older than {max_age} seconds.")
    # Fall back to S3 if there's no data
    if not data and s3_bucket and s3_key:
        try:
            s3_object = get_object(Bucket=s3_bucket, Key=s3_key)
        except ClientError as e:
            # Only a missing key falls through to the default; any other S3 error re-raises.
            if str(e) == (
                "An error occurred (NoSuchKey) when calling the GetObject operation: "
                "The specified key does not exist."
            ):
                if default is not None:
                    return default
            raise
        s3_object_content = await sync_to_async(s3_object["Body"].read)()
        if s3_key.endswith(".gz"):
            s3_object_content = gzip.decompress(s3_object_content)
        # S3 payloads are wrapped as {"last_updated": <epoch seconds>, "data": <payload>}.
        data_object = json.loads(s3_object_content, object_hook=json_object_hook)
        data = data_object["data"]
        if data and max_age:
            current_time = int(time.time())
            last_updated = data_object["last_updated"]
            if current_time - last_updated > max_age:
                raise ExpiredData(f"Data in S3 is older than {max_age} seconds.")
        if redis_key and cache_to_redis_if_data_in_s3:
            # Re-populate Redis so the next read is served from cache.
            await store_json_results_in_redis_and_s3(
                data,
                redis_key=redis_key,
                redis_data_type=redis_data_type,
                json_encoder=json_encoder,
            )
    if data is not None:
        return data
    if default is not None:
        return default
    raise DataNotRetrievable("Unable to retrieve expected data.")
async def run_in_parallel(task_list: List, threads=os.cpu_count(), sync=True):
    """Run a list of task descriptors concurrently, bounded by a semaphore.

    Each task is a dict with keys ``fn`` (callable), optional ``args`` (tuple)
    and optional ``kwargs`` (dict). At most ``threads`` tasks run at once.

    :param task_list: task descriptor dicts as described above
    :param threads: maximum number of concurrently running tasks
    :param sync: when True, wrap synchronous callables via ``bound_fetch_sync``;
        when False, the callables are coroutines run through ``bound_fetch``
    :return: list of per-task results, in task order
    """

    async def _execute():
        semaphore = asyncio.Semaphore(threads)
        # The sync flag is constant for the whole batch, so pick the wrapper once.
        wrapper = bound_fetch_sync if sync else bound_fetch
        futures = [
            asyncio.ensure_future(
                wrapper(
                    semaphore,
                    task.get("fn"),
                    task.get("args", ()),
                    task.get("kwargs", {}),
                )
            )
            for task in task_list
        ]
        return await asyncio.gather(*futures)

    return await _execute()
The provided code snippet includes necessary dependencies for implementing the `retrieve_json_data_from_s3_bulk` function. Write a Python function `async def retrieve_json_data_from_s3_bulk( s3_bucket: str = None, s3_keys: Optional[List[str]] = None, max_age: Optional[int] = None, json_object_hook: Optional = None, json_encoder: Optional = None, )` to solve the following problem:
Retrieve data from multiple S3 keys in the same bucket, and combine the data. Useful for combining output of disparate resource caching functions (ex: combining the output of functions that determine IAM users on each of your accounts) :param s3_bucket: S3 bucket to retrieve data from :param s3_keys: S3 keys to retrieve data from :return:
Here is the function:
async def retrieve_json_data_from_s3_bulk(
    s3_bucket: str = None,
    s3_keys: Optional[List[str]] = None,
    max_age: Optional[int] = None,
    json_object_hook: Optional = None,
    json_encoder: Optional = None,
):
    """
    Retrieve data from multiple S3 keys in the same bucket, and combine the data. Useful for combining output of
    disparate resource caching functions (ex: combining the output of functions that determine IAM users on each of
    your accounts)

    :param s3_bucket: S3 bucket to retrieve data from
    :param s3_keys: S3 keys to retrieve data from
    :param max_age: maximum acceptable age, in seconds, of each cached object
    :param json_object_hook: ``object_hook`` forwarded to JSON deserialization
    :param json_encoder: JSON encoder forwarded to the per-key retrieval
    :return: a single list combining the per-key results
    """
    # One retrieval task per key; all forwarded to the shared Redis/S3 fetcher.
    tasks = [
        {
            "fn": retrieve_json_data_from_redis_or_s3,
            "kwargs": {
                "s3_bucket": s3_bucket,
                "s3_key": s3_key,
                "max_age": max_age,
                "json_object_hook": json_object_hook,
                "json_encoder": json_encoder,
            },
        }
        for s3_key in s3_keys
    ]
    task_results = await run_in_parallel(tasks, sync=False)
    combined = []
    for task_result in task_results:
        combined.extend(task_result["result"])
    return combined
162,125 | import pkgutil
from typing import Any
import pkg_resources
def iter_namespace(ns_pkg):
    """Yield ModuleInfo entries for every module inside the package *ns_pkg*.

    Specifying the prefix (second argument) to iter_modules makes the returned
    names absolute instead of relative, so import_module can consume them
    directly without additional name manipulation.
    """
    absolute_prefix = ns_pkg.__name__ + "."
    return pkgutil.iter_modules(ns_pkg.__path__, absolute_prefix)
162,126 | import pkgutil
from typing import Any
import pkg_resources
The provided code snippet includes necessary dependencies for implementing the `import_class_by_name` function. Write a Python function `def import_class_by_name(class_full_path: str)` to solve the following problem:
Import a class by a dot-delimited class name. i.e: import_class("consoleme.default_plugins.plugins.metrics.default_metrics.DefaultMetric") --> <class 'consoleme.default_plugins.plugins.metrics.default_metrics.DefaultMetric'>
Here is the function:
def import_class_by_name(class_full_path: str):
    """
    Import a class by a dot-delimited class name.

    i.e: import_class("consoleme.default_plugins.plugins.metrics.default_metrics.DefaultMetric")
    --> <class 'consoleme.default_plugins.plugins.metrics.default_metrics.DefaultMetric'>
    """
    dot_index = class_full_path.rfind(".")
    class_name = class_full_path[dot_index + 1 :]
    # fromlist forces __import__ to return the leaf submodule, not the top package.
    module = __import__(class_full_path[:dot_index], globals(), locals(), [class_name])
    return getattr(module, class_name)
162,127 | from consoleme.config import config
async def authenticate_user_by_credentials(request):
    """Stub for username/password authentication.

    Intended flow (largely unimplemented):
      * POST to /auth with credentials: validate them and set the auth cookie.
      * Any other path: redirect the user to the /auth login page.

    :param request: Tornado request handler wrapper
    """
    # If request is a POST and it has username, password, etc, then validate and return
    # If request doesn't have these parameters, render login page
    email = None
    groups = None
    if request.request.path == "/auth":
        # If request is post and has credentials, validate or return error
        pass
    if email and groups and config.get("auth.set_auth_cookie"):
        # TODO: issue a JWT auth cookie once credential validation is implemented.
        pass
        # encoded_cookie = await generate_jwt_token(email, groups)
        # request.set_cookie(config.get("auth_cookie_name", "consoleme_auth"), encoded_cookie)
    if request.request.path != "/auth":
        request.redirect("/auth")
162,128 | import base64
import boto3
def get_aws_secret(secret_name, region):
    """Fetch a secret value from AWS Secrets Manager.

    :param secret_name: SecretId to retrieve
    :param region: AWS region hosting the secret
    :return: the secret string, or the decoded bytes for binary secrets
    """
    aws_session = boto3.session.Session()
    secrets_client = aws_session.client(
        service_name="secretsmanager",
        region_name=region,
    )
    response = secrets_client.get_secret_value(SecretId=secret_name)
    # Secrets Manager returns exactly one of SecretString / SecretBinary.
    if "SecretString" in response:
        return response["SecretString"]
    return base64.b64decode(response["SecretBinary"])
162,129 | import base64
import sys
from datetime import datetime, timedelta
import jwt
import pytz
import tornado.httpclient
import ujson as json
from jwt.algorithms import ECAlgorithm, RSAAlgorithm
from jwt.exceptions import DecodeError
from tornado import httputil
from consoleme.config import config
from consoleme.exceptions.exceptions import (
InvalidRedirectUrl,
MissingConfigurationValue,
UnableToAuthenticate,
)
from consoleme.lib.generic import should_force_redirect
from consoleme.lib.jwt import generate_jwt_token
# Module-level structured logger provided by the ConsoleMe configuration.
log = config.get_logger()
async def populate_oidc_config():
    """Build the OIDC configuration used for login.

    Reads either a discovery metadata URL or explicit authorization/token/jwks
    endpoints from config, attaches the client credentials, and pre-fetches all
    JWKS signing keys so tokens can be validated locally.

    :return: dict with endpoints, client credentials, and ``jwt_keys`` keyed by ``kid``
    :raises MissingConfigurationValue: when endpoints, secrets, or a key type are missing/unsupported
    """
    http_client = tornado.httpclient.AsyncHTTPClient()
    metadata_url = config.get("get_user_by_oidc_settings.metadata_url")
    if metadata_url:
        res = await http_client.fetch(
            metadata_url,
            method="GET",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Accept": "application/json",
            },
        )
        oidc_config = json.loads(res.body)
    else:
        authorization_endpoint = config.get(
            "get_user_by_oidc_settings.authorization_endpoint"
        )
        token_endpoint = config.get("get_user_by_oidc_settings.token_endpoint")
        jwks_uri = config.get("get_user_by_oidc_settings.jwks_uri")
        # NOTE(review): this only raises when ALL THREE endpoints are missing;
        # if every endpoint is required, the intended operator may be ``and``.
        if not (authorization_endpoint or token_endpoint or jwks_uri):
            raise MissingConfigurationValue("Missing OIDC Configuration.")
        oidc_config = {
            "authorization_endpoint": authorization_endpoint,
            "token_endpoint": token_endpoint,
            "jwks_uri": jwks_uri,
        }
    client_id = config.get("oidc_secrets.client_id")
    client_secret = config.get("oidc_secrets.secret")
    # NOTE(review): raises only when BOTH secrets are missing — confirm whether
    # both client_id and client_secret should be mandatory.
    if not (client_id or client_secret):
        raise MissingConfigurationValue("Missing OIDC Secrets")
    oidc_config["client_id"] = client_id
    oidc_config["client_secret"] = client_secret
    oidc_config["jwt_keys"] = {}
    jwks_uris = [oidc_config["jwks_uri"]]
    jwks_uris.extend(config.get("get_user_by_oidc_settings.extra_jwks_uri", []))
    for jwks_uri in jwks_uris:
        # Fetch jwks_uri for jwt validation
        res = await http_client.fetch(
            jwks_uri,
            method="GET",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Accept": "application/json",
            },
        )
        jwks_data = json.loads(res.body)
        for k in jwks_data["keys"]:
            key_type = k["kty"]
            key_id = k["kid"]
            if key_type == "RSA":
                oidc_config["jwt_keys"][key_id] = RSAAlgorithm.from_jwk(json.dumps(k))
            elif key_type == "EC":
                oidc_config["jwt_keys"][key_id] = ECAlgorithm.from_jwk(json.dumps(k))
            else:
                raise MissingConfigurationValue(
                    f"OIDC/OAuth2 key type not recognized. Detected key type: {key_type}."
                )
    return oidc_config
async def validate_redirect_uri(request, redirect_uri: str):
    """Ensure a redirect URI points back at the ConsoleMe server.

    A plain prefix check alone is bypassable: with server URL
    ``https://example.com``, a crafted ``https://example.com.evil.com`` passes
    ``startswith``. We therefore also compare the parsed scheme and host
    against the configured server URL.

    :param request: Tornado request handler (unused, kept for interface compatibility)
    :param redirect_uri: the URI the user would be redirected to after auth
    :raises InvalidRedirectUrl: when the URI does not point at this server
    """
    from urllib.parse import urlparse

    base_url = config.get("url")
    base = urlparse(base_url)
    target = urlparse(redirect_uri)
    if (
        not redirect_uri.startswith(base_url)
        or target.scheme != base.scheme
        or target.netloc != base.netloc
    ):
        raise InvalidRedirectUrl(
            f"invalid redirect url {redirect_uri}, must start with server address {base_url}"
        )
import jwt
async def generate_jwt_token(
    email,
    groups,
    nbf=None,
    iat=None,
    exp=None,
):
    """Create a signed (HS256) JWT session cookie value for *email* / *groups*.

    Bug fix: the previous defaults (``nbf=datetime.utcnow() - ...`` etc.) were
    evaluated ONCE at import time, so every token issued by a long-running
    process carried stale issued-at/expiry claims. Defaults are now ``None``
    and computed per call.

    :param email: subject email stored under the configured email claim
    :param groups: group list stored under the configured groups claim
    :param nbf: not-before time; defaults to now minus 5 seconds
    :param iat: issued-at time; defaults to now
    :param exp: expiry; defaults to now plus ``jwt.expiration_hours`` (1h)
    :return: the encoded JWT string
    """
    now = datetime.utcnow()
    if nbf is None:
        nbf = now - timedelta(seconds=5)
    if iat is None:
        iat = now
    if exp is None:
        exp = now + timedelta(hours=config.get("jwt.expiration_hours", 1))
    session = {
        "nbf": nbf,
        "iat": iat,
        "exp": exp,
        config.get("jwt.attributes.email", "email"): email,
        config.get("jwt.attributes.groups", "groups"): groups,
    }
    encoded_cookie = await sync_to_async(jwt.encode)(
        session, jwt_secret, algorithm="HS256"
    )
    return encoded_cookie
class UnableToAuthenticate(BaseException):
    """Unable to authenticate user or app"""

    # NOTE(review): subclassing BaseException (not Exception) means generic
    # ``except Exception`` handlers will NOT catch this — confirm intentional.
    def __init__(self, msg=""):
        # Emit a metric every time this failure is raised.
        stats.count("UnableToAuthenticate")
        super().__init__(msg)
async def should_force_redirect(req):
    """
    ConsoleMe should only force a 302 redirect for non-XHR requests.

    XHR callers (``X-Requested-With: XMLHttpRequest``) and JSON API callers
    (``Accept: application/json``) get a JSON payload instead of a redirect.
    """
    is_xhr = req.headers.get("X-Requested-With", "") == "XMLHttpRequest"
    wants_json = req.headers.get("Accept") == "application/json"
    return not (is_xhr or wants_json)
async def authenticate_user_by_oidc(request):
    """Authenticate the requester via an OIDC/OAuth2 authorization-code flow.

    Flow: if no ``code`` query argument is present, redirect (or 403 with a
    redirect payload for XHR clients) to the provider's authorization
    endpoint. Otherwise exchange the code for tokens, validate them, extract
    the user's email and groups, optionally set the auth cookie, and redirect
    the user to their original destination.

    :param request: Tornado request handler
    """
    email = None
    groups = []
    decoded_access_token = {}
    oidc_config = await populate_oidc_config()
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    log_data = {"function": function}
    code = request.get_argument("code", None)
    protocol = request.request.protocol
    if "https://" in config.get("url"):
        # If we're behind a load balancer that terminates tls for us, request.request.protocol will be "http://" and our
        # oidc redirect will be invalid
        protocol = "https"
    force_redirect = await should_force_redirect(request.request)
    # The endpoint where we want our OIDC provider to redirect us back to perform auth
    oidc_redirect_uri = f"{protocol}://{request.request.host}/auth"
    # The endpoint where the user wants to be sent after authentication. This will be stored in the state
    after_redirect_uri = request.request.arguments.get("redirect_url", [""])[0]
    if not after_redirect_uri:
        after_redirect_uri = request.request.arguments.get("state", [""])[0]
    if after_redirect_uri and isinstance(after_redirect_uri, bytes):
        after_redirect_uri = after_redirect_uri.decode("utf-8")
    if not after_redirect_uri:
        after_redirect_uri = config.get("url", f"{protocol}://{request.request.host}/")
    await validate_redirect_uri(request, after_redirect_uri)
    if not code:
        # No authorization code yet: send the user to the provider.
        args = {"response_type": "code"}
        client_scope = config.get("oidc_secrets.client_scope")
        if request.request.uri is not None:
            args["redirect_uri"] = oidc_redirect_uri
        args["client_id"] = oidc_config["client_id"]
        if client_scope:
            args["scope"] = " ".join(client_scope)
        # TODO: Sign and verify redirect URI with expiration
        args["state"] = after_redirect_uri
        if force_redirect:
            request.redirect(
                httputil.url_concat(oidc_config["authorization_endpoint"], args)
            )
        else:
            # XHR/JSON clients get a 403 with the redirect URL in the body.
            request.set_status(403)
            request.write(
                {
                    "type": "redirect",
                    "redirect_url": httputil.url_concat(
                        oidc_config["authorization_endpoint"], args
                    ),
                    "reason": "unauthenticated",
                    "message": "User is not authenticated. Redirect to authenticate",
                }
            )
        request.finish()
        return
    try:
        # exchange the authorization code with the access token
        http_client = tornado.httpclient.AsyncHTTPClient()
        grant_type = config.get(
            "get_user_by_oidc_settings.grant_type", "authorization_code"
        )
        authorization_header = (
            f"{oidc_config['client_id']}:{oidc_config['client_secret']}"
        )
        authorization_header_encoded = base64.b64encode(
            authorization_header.encode("UTF-8")
        ).decode("UTF-8")
        url = f"{oidc_config['token_endpoint']}"
        client_scope = config.get("oidc_secrets.client_scope")
        if client_scope:
            client_scope = " ".join(client_scope)
        token_exchange_response = await http_client.fetch(
            url,
            method="POST",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Authorization": "Basic %s" % authorization_header_encoded,
                "Accept": "application/json",
            },
            body=f"grant_type={grant_type}&code={code}&redirect_uri={oidc_redirect_uri}&scope={client_scope}",
        )
        token_exchange_response_body_dict = json.loads(token_exchange_response.body)
        id_token = token_exchange_response_body_dict.get(
            config.get("get_user_by_oidc_settings.id_token_response_key", "id_token")
        )
        access_token = token_exchange_response_body_dict.get(
            config.get(
                "get_user_by_oidc_settings.access_token_response_key", "access_token"
            )
        )
        jwt_verify = config.get("get_user_by_oidc_settings.jwt_verify", True)
        if jwt_verify:
            # Reject tokens whose header claims no signing algorithm ("alg": "none").
            header = jwt.get_unverified_header(id_token)
            key_id = header["kid"]
            algorithm = header["alg"]
            if algorithm == "none" or not algorithm:
                raise UnableToAuthenticate(
                    "ID Token header does not specify a signing algorithm."
                )
            pub_key = oidc_config["jwt_keys"][key_id]
            # This will raises errors if the audience isn't right or if the token is expired or has other errors.
            decoded_id_token = jwt.decode(
                id_token,
                pub_key,
                audience=oidc_config["client_id"],
                algorithms=algorithm,
            )
            email = decoded_id_token.get(
                config.get("get_user_by_oidc_settings.jwt_email_key", "email")
            )
            # For google auth, the access_token does not contain JWT-parsable claims.
            if config.get(
                "get_user_by_oidc_settings.get_groups_from_access_token", True
            ):
                try:
                    header = jwt.get_unverified_header(access_token)
                    key_id = header["kid"]
                    algorithm = header["alg"]
                    if algorithm == "none" or not algorithm:
                        raise UnableToAuthenticate(
                            "Access Token header does not specify a signing algorithm."
                        )
                    pub_key = oidc_config["jwt_keys"][key_id]
                    # This will raises errors if the audience isn't right or if the token is expired or has other
                    # errors.
                    decoded_access_token = jwt.decode(
                        access_token,
                        pub_key,
                        audience=config.get(
                            "get_user_by_oidc_settings.access_token_audience"
                        ),
                        algorithms=algorithm,
                    )
                except (DecodeError, KeyError) as e:
                    # This exception occurs when the access token is opaque or otherwise not JWT-parsable.
                    # It is expected with some IdPs.
                    log.debug(
                        {
                            **log_data,
                            "message": (
                                "Unable to derive user's groups from access_token. Attempting to get groups through "
                                "userinfo endpoint. "
                            ),
                            "error": e,
                            "user": email,
                        }
                    )
                    log.debug(log_data, exc_info=True)
                    groups = []
        else:
            # Verification explicitly disabled via configuration.
            decoded_id_token = jwt.decode(id_token, verify=jwt_verify)
            decoded_access_token = jwt.decode(access_token, verify=jwt_verify)
        email = email or decoded_id_token.get(
            config.get("get_user_by_oidc_settings.jwt_email_key", "email")
        )
        if not email:
            raise UnableToAuthenticate("Unable to determine user from ID Token")
        # Prefer groups from the access token, then the ID token.
        groups = (
            groups
            or decoded_access_token.get(
                config.get("get_user_by_oidc_settings.jwt_groups_key", "groups"),
            )
            or decoded_id_token.get(
                config.get("get_user_by_oidc_settings.jwt_groups_key", "groups"), []
            )
        )
        # Last resort: query the provider's userinfo endpoint for groups.
        if (
            not groups
            and oidc_config.get("userinfo_endpoint")
            and config.get(
                "get_user_by_oidc_settings.get_groups_from_userinfo_endpoint", True
            )
        ):
            user_info = await http_client.fetch(
                oidc_config["userinfo_endpoint"],
                method="GET",
                headers={
                    "Content-Type": "application/x-www-form-urlencoded",
                    "Authorization": "Bearer %s" % access_token,
                    "Accept": "application/json",
                },
            )
            groups = json.loads(user_info.body).get(
                config.get("get_user_by_oidc_settings.user_info_groups_key", "groups"),
                [],
            )
        if config.get("auth.set_auth_cookie"):
            expiration = datetime.utcnow().replace(tzinfo=pytz.UTC) + timedelta(
                minutes=config.get("jwt.expiration_minutes", 60)
            )
            encoded_cookie = await generate_jwt_token(email, groups, exp=expiration)
            request.set_cookie(
                config.get("auth_cookie_name", "consoleme_auth"),
                encoded_cookie,
                expires=expiration,
                secure=config.get(
                    "auth.cookie.secure",
                    "https://" in config.get("url"),
                ),
                httponly=config.get("auth.cookie.httponly", True),
                samesite=config.get("auth.cookie.samesite", True),
            )
        request.redirect(after_redirect_uri)
        request.finish()
    except Exception as e:
        # NOTE(review): this broad handler logs and silently returns, leaving
        # the response unfinished on failure — confirm that is the intended
        # behavior rather than surfacing an error page to the user.
        log_data["error"] = e
        log.error(log_data, exc_info=True)
        return
162,130 | from typing import Any, Dict, List
from consoleme.config import config
from consoleme.models import WebResponse
# Module-level structured logger provided by the ConsoleMe configuration.
log = config.get_logger()
class WebResponse(BaseModel):
    """Standard envelope for JSON responses returned to the frontend."""

    # Overall outcome of the request; Status2 is a project-defined enum.
    status: Optional[Status2] = None
    # One-line machine-readable reason (easier for the frontend to branch on).
    reason: Optional[str] = Field(
        None,
        example=["authenticated_redirect", "authentication_failure", "not_configured"],
    )
    # Where the client should navigate next, when applicable.
    redirect_url: Optional[str] = None
    # HTTP status code mirrored into the body.
    status_code: Optional[int] = None
    # Human-readable message for display.
    message: Optional[str] = None
    # Error strings surfaced to the user.
    errors: Optional[List[str]] = None
    # Pagination metadata for list responses.
    count: Optional[int] = None
    total: Optional[int] = None
    page: Optional[int] = None
    last_page: Optional[int] = None
    # Response payload: either a single object or a list.
    data: Optional[Union[Dict[str, Any], List]] = None
The provided code snippet includes necessary dependencies for implementing the `handle_generic_error_response` function. Write a Python function `async def handle_generic_error_response( request, message: str, errors: List[str], status_code: int, reason: str, log_data: Dict[str, Any], ) -> bool` to solve the following problem:
Args: request: Tornado web request message: Message to be logged reason: One line reason for the response (easier for frontend to parse) errors: List of errors to be logged, and to be returned to user status_code: Status code to return to end-user log_data: Dictionary of data to log, typically containing function and information about the user. Returns: boolean
Here is the function:
async def handle_generic_error_response(
    request,
    message: str,
    errors: List[str],
    status_code: int,
    reason: str,
    log_data: Dict[str, Any],
) -> bool:
    """Log an error and send a standardized error envelope to the client.

    Args:
        request: Tornado web request
        message: Message to be logged
        reason: One line reason for the response (easier for frontend to parse)
        errors: List of errors to be logged, and to be returned to user
        status_code: Status code to return to end-user
        log_data: Dictionary of data to log, typically containing function and
            information about the user.

    Returns:
        boolean (always True once the response has been finished)
    """
    log.error({**log_data, "message": message, "errors": errors})
    error_response = WebResponse(
        status="error", status_code=status_code, errors=errors, reason=reason
    )
    request.set_status(status_code)
    # exclude_unset keeps the payload minimal — only explicitly set fields go out.
    request.write(error_response.json(exclude_unset=True))
    await request.finish()
    return True
162,131 | from typing import Any
from consoleme.config import config
def can_user_request_group_based_on_domain(user: str, group_info: Any) -> bool:
    """Decide whether *user* may request *group_info* based on email domain.

    Bug fix: ``user.split("@")[1]`` raised IndexError for identifiers without
    an "@". Such users have no domain, so they are now rejected instead.

    :param user: user identifier, normally an email address
    :param group_info: object with ``allow_cross_domain_users`` and ``domain`` attributes
    :return: True when cross-domain users are allowed, or the user's domain matches
    """
    if group_info.allow_cross_domain_users:
        return True
    # partition yields "" for the domain when no "@" is present → mismatch → False.
    _, _, user_domain = user.partition("@")
    return user_domain == group_info.domain
162,132 | from typing import Any
from consoleme.config import config
def get_accessui_group_url(group):
    """Build the Access UI URL for *group* from the configured base URL."""
    base_url = config.get("accessui_url")
    return f"{base_url}/groups/{group}"
162,133 | from typing import Dict, Set
from pydantic import BaseModel
class RoleAuthorizations(BaseModel):
    """Sets of AWS roles a user is authorized to obtain credentials for."""

    # roles that the user can get credentials for via CLI. Users will see these roles in the ConsoleMe UI and can
    # receive an authenticated web console url for the role
    authorized_roles: Set[str] = set()
    # roles that the user can get credentials for only via CLI (They won't see these in the consoleme web UI)
    authorized_roles_cli_only: Set[str] = set()
def RoleAuthorizationsDecoder(obj):
    """JSON ``object_hook``: inflate dicts shaped like RoleAuthorizations.

    Any other dict is returned unchanged.
    """
    required_keys = ("authorized_roles", "authorized_roles_cli_only")
    if all(key in obj for key in required_keys):
        return RoleAuthorizations.parse_obj(obj)
    return obj
162,134 | import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
def can_admin_all(user: str, user_groups: List[str]):
    """Return True when *user* is a global ConsoleMe admin.

    A user qualifies when they are (or belong to) the configured
    ``application_admin``, or when they are in any statically- or
    dynamically-configured ``can_admin`` group.
    """
    application_admin = config.get("application_admin")
    if application_admin:
        is_admin_user = user == application_admin
        in_admin_group = application_admin in user_groups
        if is_admin_user or in_admin_group:
            return True
    admin_groups = [
        *config.get("groups.can_admin", []),
        *config.get("dynamic_config.groups.can_admin", []),
    ]
    if is_in_group(user, user_groups, admin_groups):
        return True
    return False
async def can_approve_reject_request(user, secondary_approvers, groups):
    """Return True when *user* may approve or reject a request.

    Global admins may act on every request; otherwise the user must be one of
    the secondary approvers, or belong to a secondary-approver group.
    """
    # Allow admins to approve and reject all requests
    if can_admin_all(user, groups):
        return True
    for approver in secondary_approvers or []:
        if approver == user or approver in groups:
            return True
    return False
162,135 | import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
def can_admin_all(user: str, user_groups: List[str]):
    """Return True when *user* is a global ConsoleMe admin."""
    application_admin = config.get("application_admin")
    # The configured application admin may be either a user or a group name.
    if application_admin:
        if user == application_admin or application_admin in user_groups:
            return True
    # Otherwise, membership in any configured can_admin group grants admin.
    if is_in_group(
        user,
        user_groups,
        [
            *config.get("groups.can_admin", []),
            *config.get("dynamic_config.groups.can_admin", []),
        ],
    ):
        return True
    return False
async def can_cancel_request(current_user, requesting_user, groups):
    """Return True when *current_user* may cancel *requesting_user*'s request.

    Users may cancel their own requests; global and restricted admins may
    cancel anyone's.
    """
    # Allow the requesting user to cancel their own request
    if current_user == requesting_user:
        return True
    # Allow admins to cancel requests
    if can_admin_all(current_user, groups):
        return True
    # Allow restricted admins to cancel requests
    restricted_admin_groups = config.get("groups.can_admin_restricted")
    return any(g in groups for g in restricted_admin_groups)
162,136 | import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
def can_admin_all(user: str, user_groups: List[str]):
    """Return True when *user* is a global ConsoleMe admin."""
    application_admin = config.get("application_admin")
    # The configured application admin may be either a user or a group name.
    if application_admin:
        if user == application_admin or application_admin in user_groups:
            return True
    # Otherwise, membership in any configured can_admin group grants admin.
    if is_in_group(
        user,
        user_groups,
        [
            *config.get("groups.can_admin", []),
            *config.get("dynamic_config.groups.can_admin", []),
        ],
    ):
        return True
    return False
async def can_move_back_to_pending(current_user, request, groups):
    """Return True when *current_user* may return *request* to pending.

    Only admins may do so, and only within one day of the last update.
    """
    # Don't allow returning requests to pending state if more than a day has
    # passed since the last update.
    one_day_ago = int(time.time()) - 86400
    if request.get("last_updated", 0) < one_day_ago:
        return False
    # Only admins may move a request back to pending.
    return can_admin_all(current_user, groups)
162,137 | import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
# Authentication plugin instance, resolved from config at import time
# (falls back to the bundled "default_auth" plugin).
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
class NoMatchingRequest(BaseException):
    """Cannot find a matching request"""

    # NOTE(review): subclassing BaseException (not Exception) means generic
    # ``except Exception`` handlers will NOT catch this — confirm intentional.
    def __init__(self, msg=""):
        # Emit a metric every time this failure is raised.
        stats.count("NoMatchingRequest")
        super().__init__(msg)
class UserDynamoHandler(BaseDynamoHandler):
    def __init__(self, user_email: Optional[str] = None) -> None:
        """Connect to the ConsoleMe DynamoDB tables and optionally load a user.

        :param user_email: when given, the user's record is fetched (or created)
            and stored on ``self.user`` / ``self.affected_user``.
        """
        try:
            self.requests_table = self._get_dynamo_table(
                config.get("aws.requests_dynamo_table", "consoleme_requests_global")
            )
            self.users_table = self._get_dynamo_table(
                config.get("aws.users_dynamo_table", "consoleme_users_global")
            )
            self.group_log = self._get_dynamo_table(
                config.get("aws.group_log_dynamo_table", "consoleme_audit_global")
            )
            # NOTE(review): this reuses the "aws.group_log_dynamo_table" config key
            # with a different default table name — looks like a copy/paste slip;
            # confirm whether a dedicated dynamic-config key was intended.
            self.dynamic_config = self._get_dynamo_table(
                config.get("aws.group_log_dynamo_table", "consoleme_config_global")
            )
            self.policy_requests_table = self._get_dynamo_table(
                config.get(
                    "aws.policy_requests_dynamo_table", "consoleme_policy_requests"
                )
            )
            self.api_health_roles_table = self._get_dynamo_table(
                config.get(
                    "aws.api_health_apps_table_dynamo_table",
                    "consoleme_api_health_apps",
                )
            )
            self.resource_cache_table = self._get_dynamo_table(
                config.get(
                    "aws.resource_cache_dynamo_table", "consoleme_resource_cache"
                )
            )
            self.cloudtrail_table = self._get_dynamo_table(
                config.get("aws.cloudtrail_table", "consoleme_cloudtrail")
            )
            self.notifications_table = self._get_dynamo_table(
                config.get("aws.notifications_table", "consoleme_notifications")
            )
            if user_email:
                self.user = self.get_or_create_user(user_email)
                self.affected_user = self.user
        except Exception:
            if config.get("development"):
                # In development, fall back to a locally signed user record so
                # ConsoleMe can run without a reachable DynamoDB.
                log.error(
                    "Unable to connect to Dynamo. Trying to set user via development configuration",
                    exc_info=True,
                )
                self.user = self.sign_request(
                    {
                        "last_updated": int(time.time()),
                        "username": user_email,
                        "requests": [],
                    }
                )
                self.affected_user = self.user
            else:
                log.error("Unable to get Dynamo table.", exc_info=True)
                raise
    def write_resource_cache_data(self, data):
        """Bulk-write resource cache entries, keyed by (resourceId, resourceType)."""
        self.parallel_write_table(
            self.resource_cache_table, data, ["resourceId", "resourceType"]
        )
    async def get_dynamic_config_yaml(self) -> bytes:
        """Retrieve dynamic configuration yaml."""
        # Delegates to the synchronous implementation in a worker thread.
        return await sync_to_async(self.get_dynamic_config_yaml_sync)()
    def get_dynamic_config_yaml_sync(self) -> bytes:
        """Retrieve dynamic configuration yaml synchronously.

        Reads the "master" row from the dynamic-config table and decompresses
        its zlib-compressed ``config`` attribute. Returns ``b""`` on any
        failure; errors are reported to Sentry rather than raised.
        """
        c = b""
        try:
            current_config = self.dynamic_config.get_item(Key={"id": "master"})
            if not current_config:
                return c
            compressed_config = current_config.get("Item", {}).get("config", "")
            if not compressed_config:
                return c
            # The stored value is a DynamoDB Binary; .value is the raw bytes.
            c = zlib.decompress(compressed_config.value)
        except Exception:  # noqa
            sentry_sdk.capture_exception()
        return c
    def get_dynamic_config_dict(self) -> dict:
        """Retrieve dynamic configuration dictionary that can be merged with primary configuration dictionary."""
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:  # if cleanup: 'RuntimeError: There is no current event loop..'
            loop = None
        if loop and loop.is_running():
            # Already inside a running event loop: call the sync variant directly,
            # since asyncio.run() would raise in this situation.
            current_config_yaml = self.get_dynamic_config_yaml_sync()
        else:
            current_config_yaml = asyncio.run(self.get_dynamic_config_yaml())
        config_d = yaml.safe_load(current_config_yaml)
        return config_d
async def get_all_api_health_alerts(self) -> list:
"""Return all requests. If a status is specified, only requests with the specified status will be returned.
:param status:
:return:
"""
response: dict = self.api_health_roles_table.scan()
items = response.get("Items", [])
while "LastEvaluatedKey" in response:
response = self.api_health_roles_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
async def get_api_health_alert_app(self, app_name) -> dict:
resp: dict = await sync_to_async(self.api_health_roles_table.get_item)(
Key={"appName": app_name}
)
return resp.get("Item", None)
    async def write_api_health_alert_info(self, request, user_email: str):
        """
        Writes a health alert role to the appropriate DynamoDB table

        :param request: mutable dict describing the health alert app; enriched in place
        :param user_email: recorded as the author of the change
        :return: the enriched request dict
        """
        function: str = (
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
        )
        # enrich request
        request["app_create_time"]: int = int(time.time())
        request["updated_by"]: str = user_email
        request["last_updated"]: int = int(time.time())
        try:
            await sync_to_async(self.api_health_roles_table.put_item)(
                Item=self._data_to_dynamo_replace(request)
            )
        except Exception:
            error = {
                "message": "Unable to add new api_health info request",
                "request": request,
                "function": function,
            }
            log.error(error, exc_info=True)
            raise
        return request
    async def update_api_health_alert_info(
        self, request: dict, user_email=None, update_by=None, last_updated=None
    ):
        """
        Update api_health_alert_info by roleName

        :param request: mutable dict describing the health alert app; enriched in place
        :param user_email: used as ``updated_by`` when *update_by* is not given
        :param update_by: explicit ``updated_by`` value; takes precedence
        :param last_updated: explicit timestamp; defaults to now
        :return: the enriched request dict
        """
        function: str = (
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
        )
        # enrich request
        if update_by:
            request["updated_by"] = update_by
        else:
            request["updated_by"] = user_email
        if last_updated:
            request["last_updated"] = last_updated
        else:
            request["last_updated"] = int(time.time())
        try:
            await sync_to_async(self.api_health_roles_table.put_item)(
                Item=self._data_to_dynamo_replace(request)
            )
        except Exception as e:
            error: dict = {
                "function": function,
                "message": "Unable to update api_health_info request",
                "request": request,
                "error": str(e),
            }
            log.error(error, exc_info=True)
            raise Exception(error)
        return request
async def delete_api_health_alert_info(self, app: str) -> None:
    """
    Delete api_health_alert_info by roleName.

    :param app: value of the ``appName`` key to delete
    :raises Exception: re-raises DynamoDB failures after logging them
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    try:
        delete_item = sync_to_async(self.api_health_roles_table.delete_item)
        await delete_item(Key={"appName": app})
    except Exception:
        log.error(
            {
                "function": function,
                "message": "Unable to delete api_health info",
                "app": app,
            },
            exc_info=True,
        )
        raise
async def write_policy_request(
    self,
    user_email: str,
    justification: str,
    arn: str,
    policy_name: str,
    policy_changes: dict,
    resources: List[str],
    resource_policies: List[Dict],
    request_time: int = None,
    request_uuid=None,
    policy_status="pending",
    cross_account_request: bool = False,
    dry_run: bool = False,
):
    """
    Writes a policy request to the appropriate DynamoDB table.
    dry_run will create the request format, but won't actually write it.

    Sample run:
        write_policy_request(policy_changes)

    :param user_email: requester; recorded as both ``username`` and ``updated_by``
    :param justification: free-text reason for the request
    :param arn: ARN of the principal the policy change applies to
    :param policy_name: name of the policy being changed
    :param policy_changes: policy change document (stored JSON-serialized)
    :param resources: resources affected by the change
    :param resource_policies: resource policies affected by the change
    :param request_time: request creation epoch; defaults to now
    :param request_uuid: request id; a fresh uuid4 is generated when omitted
    :param policy_status: initial status, defaults to "pending"
    :param cross_account_request: whether the change spans AWS accounts
    :param dry_run: when True, build and return the record without writing
    :return: the request dict as (or as-if) persisted
    :raises Exception: when the DynamoDB write fails
    """
    request_time = request_time or int(time.time())
    # Craft the new request json
    timestamp = int(time.time())
    request_id = request_uuid or str(uuid.uuid4())
    new_request = {
        "request_id": request_id,
        "arn": arn,
        "status": policy_status,
        "justification": justification,
        "request_time": request_time,
        "updated_by": user_email,
        "last_updated": timestamp,
        "username": user_email,
        # Stored as a JSON string, not a nested Dynamo map.
        "policy_changes": json.dumps(policy_changes),
        "resources": resources,
        "resource_policies": resource_policies,
        "cross_account_request": cross_account_request,
    }
    if not dry_run:
        try:
            await sync_to_async(self.policy_requests_table.put_item)(
                Item=self._data_to_dynamo_replace(new_request)
            )
        except Exception as e:
            error = f"Unable to add new policy request: {new_request}: {str(e)}"
            log.error(error, exc_info=True)
            raise Exception(error)
    else:
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "request": new_request,
            "message": "Dry run, skipping adding request to dynamo",
        }
        log.debug(log_data)
    return new_request
async def write_policy_request_v2(self, extended_request: ExtendedRequestModel):
    """
    Writes a policy request v2 to the appropriate DynamoDB table.

    The full extended request is stored as a JSON blob; a flattened ``arn``
    attribute is derived from the principal for querying.

    Sample run:
        write_policy_request_v2(request)

    :param extended_request: validated v2 request model
    :return: the record as written to DynamoDB
    :raises Exception: for unknown principal types or DynamoDB failures
    """
    new_request = {
        "request_id": extended_request.id,
        "principal": extended_request.principal.dict(),
        "status": extended_request.request_status.value,
        "justification": extended_request.justification,
        "request_time": extended_request.timestamp,
        "last_updated": int(time.time()),
        "version": "2",
        "extended_request": json.loads(extended_request.json()),
        "username": extended_request.requester_email,
    }
    if extended_request.principal.principal_type == "AwsResource":
        new_request["arn"] = extended_request.principal.principal_arn
    elif extended_request.principal.principal_type == "HoneybeeAwsResourceTemplate":
        # Honeybee templates have no real ARN; synthesize a stable key from
        # the repository and resource identifier instead.
        repository_name = extended_request.principal.repository_name
        resource_identifier = extended_request.principal.resource_identifier
        new_request["arn"] = f"{repository_name}-{resource_identifier}"
    else:
        raise Exception("Invalid principal type")
    log_data = {
        "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
        "message": "Writing policy request v2 to Dynamo",
        "request": new_request,
    }
    log.debug(log_data)
    try:
        await sync_to_async(self.policy_requests_table.put_item)(
            Item=self._data_to_dynamo_replace(new_request)
        )
        log_data[
            "message"
        ] = "Successfully finished writing policy request v2 to Dynamo"
        log.debug(log_data)
    except Exception as e:
        log_data["message"] = "Error occurred writing policy request v2 to Dynamo"
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        error = f"{log_data['message']}: {str(e)}"
        raise Exception(error)
    return new_request
async def update_policy_request(self, updated_request):
    """
    Persist an updated policy request (keyed by its request ID), bumping
    its ``last_updated`` timestamp.

    :param updated_request: full request record to overwrite in Dynamo
    :return: the updated request
    :raises Exception: wraps and re-raises DynamoDB failures
    """
    updated_request["last_updated"] = int(time.time())
    try:
        put_item = sync_to_async(self.policy_requests_table.put_item)
        await put_item(Item=self._data_to_dynamo_replace(updated_request))
    except Exception as e:
        error = f"Unable to add updated policy request: {updated_request}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
    return updated_request
async def get_policy_requests(self, arn=None, request_id=None):
    """Read policy requests by request ID (preferred) or by ARN.

    :param arn: principal ARN to query on
    :param request_id: request ID to query on; takes precedence over arn
    :return: list of matching requests, upgraded to the v3 format
    :raises Exception: when neither arn nor request_id is supplied
    """
    if not arn and not request_id:
        raise Exception("Must pass in ARN or policy request ID")
    if request_id:
        key_expression, values = "request_id = :ri", {":ri": request_id}
    else:
        key_expression, values = "arn = :arn", {":arn": arn}
    requests = self.policy_requests_table.query(
        KeyConditionExpression=key_expression,
        ExpressionAttributeValues=values,
    )
    matching_requests = []
    if requests["Items"]:
        items = self._data_from_dynamo_replace(requests["Items"])
        matching_requests.extend(await self.convert_policy_requests_to_v3(items))
    return matching_requests
async def convert_policy_requests_to_v3(self, requests):
    """
    Upgrade legacy (version "2") policy requests to the v3 principal format
    in place, persisting any converted requests back to DynamoDB.

    TODO: Remove this function and calls to it after a suitable grace period.

    :param requests: list of policy request dicts (mutated in place)
    :return: the same list, with legacy entries converted
    """
    changed = False
    for request in requests:
        if request.get("version") not in ["2"]:
            continue
        if request.get("extended_request") and not request.get("principal"):
            # Move the flat "arn" field into the structured principal dict,
            # on both the top-level record and the embedded extended_request.
            principal_arn = request.pop("arn")
            request["principal"] = {
                "principal_arn": principal_arn,
                "principal_type": "AwsResource",
            }
            request["extended_request"]["principal"] = {
                "principal_arn": principal_arn,
                "principal_type": "AwsResource",
            }
            request.pop("arn", None)
            # Bug fix: mark the batch dirty here too — previously only the
            # per-change conversion below set `changed`, so a request whose
            # nested changes needed no conversion was mutated in memory but
            # never written back.
            changed = True
        changes = (
            request.get("extended_request", {})
            .get("changes", {})
            .get("changes", [])
        )
        for change in changes:
            if not change.get("principal_arn"):
                continue
            if change.get("version") not in ["2.0", "2", 2]:
                continue
            change["principal"] = {
                "principal_arn": change["principal_arn"],
                "principal_type": "AwsResource",
            }
            change.pop("principal_arn")
            change["version"] = "3.0"
            changed = True
    if changed:
        self.parallel_write_table(self.policy_requests_table, requests)
    return requests
async def get_all_policy_requests(
    self, status: Optional[str] = "pending"
) -> List[Dict[str, Union[int, List[str], str]]]:
    """Return all policy requests, optionally filtered to a single status.

    :param status: status to keep; pass a falsy value to return everything
    :return: list of policy request dicts (upgraded to v3 format)
    """
    requests = await sync_to_async(self.parallel_scan_table)(
        self.policy_requests_table
    )
    requests = await self.convert_policy_requests_to_v3(requests)
    if not status:
        return requests
    return [item for item in requests if item["status"] == status]
async def update_dynamic_config(self, config: str, updated_by: str) -> None:
    """Take a YAML config and writes to DDB (The reason we use YAML instead of JSON is to preserve comments).

    NOTE(review): within this method the ``config`` parameter shadows the
    imported configuration module; here it is the raw YAML string.

    :param config: YAML document to store as the new "master" entry
    :param updated_by: user recorded on the new entry
    """
    # Validate that config loads as yaml, raises exception if not
    yaml.safe_load(config)
    stats.count("update_dynamic_config", tags={"updated_by": updated_by})
    current_config_entry = self.dynamic_config.get_item(Key={"id": "master"})
    if current_config_entry.get("Item"):
        # Archive the previous "master" entry under its own updated_at value
        # so configuration history is preserved alongside the live entry.
        old_config = {
            "id": current_config_entry["Item"]["updated_at"],
            "updated_by": current_config_entry["Item"]["updated_by"],
            "config": current_config_entry["Item"]["config"],
            "updated_at": str(int(time.time())),
        }
        self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(old_config))
    # New config is stored zlib-compressed; readers decompress on fetch.
    new_config = {
        "id": "master",
        "config": zlib.compress(config.encode()),
        "updated_by": updated_by,
        "updated_at": str(int(time.time())),
    }
    self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(new_config))
def validate_signature(self, items):
    """Verify the embedded signature on a DynamoDB entry.

    Removes the ``signature`` key from ``items`` (mutating it) and verifies
    the signature against the canonical sorted-key JSON of the rest.

    :raises Exception: when the signature does not verify
    """
    signature = items.pop("signature")
    if isinstance(signature, Binary):
        signature = signature.value
    json_request = json.dumps(items, sort_keys=True)
    if not crypto.verify(json_request, signature):
        raise Exception(f"Invalid signature for request: {json_request}")
def sign_request(
    self, user_entry: Dict[str, Union[Decimal, List[str], Binary, str]]
) -> Dict[str, Union[Decimal, List[str], str, bytes]]:
    """
    Sign a user entry and return it with a fresh ``signature`` field.

    :param user_entry: record to sign (mutated: any old signature removed)
    :return: the entry with ``signature`` set
    """
    # Remove old signature if it exists
    user_entry.pop("signature", None)
    user_entry = self._data_from_dynamo_replace(user_entry)
    # NOTE(review): ``use_decimal`` is a simplejson-only keyword, so the
    # module presumably imports simplejson as ``json`` — confirm at the
    # file header before touching serialization here.
    json_request = json.dumps(user_entry, sort_keys=True, use_decimal=True)
    sig = crypto.sign(json_request)
    user_entry["signature"] = sig
    return user_entry
async def authenticate_user(self, login_attempt) -> AuthenticationResponse:
    """Authenticate a username/password login attempt against the users table.

    Every failure path first waits via ``wait_after_authentication_failure``
    (rate limiting) and returns the same generic error text, so callers
    cannot distinguish an unknown user from a wrong password.

    :param login_attempt: object carrying ``username``, ``password`` and
        ``after_redirect_uri``
    :return: AuthenticationResponse; on success includes username and groups
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log_data = {
        "function": function,
        "user_email": login_attempt.username,
        "after_redirect_uri": login_attempt.after_redirect_uri,
    }
    user_entry = await sync_to_async(self.users_table.query)(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": login_attempt.username},
    )
    user = None
    # Deliberately vague message returned for every failure mode.
    generic_error = ["User doesn't exist, or password is incorrect. "]
    if user_entry and "Items" in user_entry and len(user_entry["Items"]) == 1:
        user = user_entry["Items"][0]
    if not user:
        delay_error = await wait_after_authentication_failure(
            login_attempt.username
        )
        error = f"Unable to find user: {login_attempt.username}"
        log.error({**log_data, "message": error + delay_error})
        return AuthenticationResponse(
            authenticated=False, errors=generic_error + [delay_error]
        )
    if not user.get("password"):
        delay_error = await wait_after_authentication_failure(
            login_attempt.username
        )
        error = "User exists, but doesn't have a password stored in the database"
        log.error({**log_data, "message": error + delay_error})
        return AuthenticationResponse(
            authenticated=False, errors=generic_error + [delay_error]
        )
    # Stored hash is a DynamoDB Binary; compare via bcrypt against the
    # supplied plaintext password.
    password_hash_matches = bcrypt.checkpw(
        login_attempt.password.encode("utf-8"), user["password"].value
    )
    if not password_hash_matches:
        delay_error = await wait_after_authentication_failure(
            login_attempt.username
        )
        error = "Password does not match. "
        log.error({**log_data, "message": error + delay_error})
        return AuthenticationResponse(
            authenticated=False, errors=generic_error + [delay_error]
        )
    return AuthenticationResponse(
        authenticated=True, username=user["username"], groups=user["groups"]
    )
def create_user(
    self,
    user_email,
    password: Optional[str] = None,
    groups: Optional[List[str]] = None,
):
    """Create, sign, and persist a new user entry.

    :param user_email: username/key of the new entry
    :param password: optional plaintext password; stored bcrypt-hashed
    :param groups: optional initial group memberships
    :return: the signed user entry as written to DynamoDB
    :raises Exception: wraps and re-raises DynamoDB failures
    """
    timestamp = int(time.time())
    unsigned_user_entry = {
        "created": timestamp,
        "last_updated": timestamp,
        "username": user_email,
        "requests": [],
        "groups": groups if groups else [],
    }
    if password:
        unsigned_user_entry["password"] = bcrypt.hashpw(
            bytes(password, "utf-8"), bcrypt.gensalt()
        )
    user_entry = self.sign_request(unsigned_user_entry)
    try:
        self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
    except Exception as e:
        error = f"Unable to add user submission: {user_entry}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
    return user_entry
def update_user(
    self,
    user_email,
    password: Optional[str] = None,
    groups: Optional[List[str]] = None,
):
    """Update an existing user's password and/or groups, re-sign, persist.

    :param user_email: username of the entry to update
    :param password: new plaintext password; stored bcrypt-hashed when given
    :param groups: replacement group list, applied only when non-empty
    :return: the signed user entry as written
    :raises DataNotRetrievable: when no matching user exists
    :raises Exception: wraps and re-raises DynamoDB failures
    """
    user_ddb = self.users_table.query(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": user_email},
    )
    items = user_ddb.get("Items") if user_ddb else None
    user = items[0] if items and len(items) == 1 else None
    if not user:
        raise DataNotRetrievable(f"Unable to find user: {user_email}")
    if password:
        user["password"] = bcrypt.hashpw(
            bytes(password, "utf-8"), bcrypt.gensalt()
        )
    if groups:
        user["groups"] = groups
    user["last_updated"] = int(time.time())
    user_entry = self.sign_request(user)
    try:
        self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
    except Exception as e:
        error = f"Unable to add user submission: {user_entry}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
    return user_entry
def delete_user(self, user_email):
    """Delete the user entry keyed by ``username`` from the users table.

    :param user_email: username of the entry to delete
    :raises Exception: wraps and re-raises DynamoDB failures
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log.debug({"function": function, "user_email": user_email})
    user_entry = {"username": user_email}
    try:
        self.users_table.delete_item(Key=self._data_to_dynamo_replace(user_entry))
    except Exception as e:
        error = f"Unable to delete user: {user_entry}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
async def get_user(
    self, user_email: str
) -> Optional[Dict[str, Union[Decimal, List[str], Binary, str]]]:
    """Look up a single user entry by username.

    :param user_email: username to query
    :return: the entry when exactly one match exists, otherwise None
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log.debug({"function": function, "user_email": user_email})
    user = self.users_table.query(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": user_email},
    )
    items = user.get("Items", []) if user else []
    return items[0] if len(items) == 1 else None
def get_or_create_user(
    self, user_email: str
) -> Dict[str, Union[Decimal, List[str], Binary, str]]:
    """Return the user entry for ``user_email``, creating it when absent.

    :param user_email: username to fetch or create
    :return: the existing or newly created user entry
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log.debug({"function": function, "user_email": user_email})
    user = self.users_table.query(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": user_email},
    )
    items = user.get("Items", []) if user else []
    if items:
        return items[0]
    return self.create_user(user_email)
def resolve_request_ids(
    self, request_ids: List[str]
) -> List[Dict[str, Union[int, str]]]:
    """Resolve each request ID to its full request record.

    :param request_ids: IDs to look up
    :return: one record per ID, in the same order
    :raises NoMatchingRequest: when any ID has no matching request
    """
    resolved = []
    for request_id in request_ids:
        response = self.requests_table.query(
            KeyConditionExpression="request_id = :ri",
            ExpressionAttributeValues={":ri": request_id},
        )
        if not response["Items"]:
            raise NoMatchingRequest(
                f"No matching request for request_id: {request_id}"
            )
        resolved.append(self._data_from_dynamo_replace(response["Items"])[0])
    return resolved
def add_request_id_to_user(
    self,
    affected_user: Dict[str, Union[Decimal, List[str], Binary, str]],
    request_id: str,
) -> None:
    """Append ``request_id`` to the user's request list, re-sign, persist.

    :param affected_user: user entry to mutate and store
    :param request_id: request ID to record on the user
    """
    affected_user["requests"].append(request_id)
    signed_entry = self.sign_request(affected_user)
    self.users_table.put_item(Item=self._data_to_dynamo_replace(signed_entry))
def add_request(
    self,
    user_email: str,
    group: str,
    justification: str,
    request_time: None = None,
    status: str = "pending",
    updated_by: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
    """
    Add a user request to the dynamo table

    Sample run:
    add_request("user@example.com", "engtest", "because")

    :param user_email: Email address of user
    :param group: Name of group user is requesting access to
    :param justification: free-text reason for the request
    :param request_time: request creation epoch; defaults to now
    :param status: initial status, defaults to "pending"
    :param updated_by: user recorded as last updater, if any
    :return: the new request dict as written
    :raises PendingRequestAlreadyExists: when the user already has a
        pending request for this group
    :raises Exception: when the DynamoDB write fails
    """
    """
    Request:
        group
        justification
        role
        request_time
        approval_time
        updated_by
        approval_reason
        status

    user@example.com:
        requests: []
        last_updated: 1
        signature: xxxx
    #pending: []
    #expired: []
    # How to expire requests if soemeone maliciously deletes content
    # How to query for all approved requests for group X
    # What if we want to send email saying your request is expiring in 7 days? Maybe celery to query all
    # What about concept of request ID? Maybe base64 encoded thing?
    # Need an all-in-one page to show all pending requests, all expired/approved requests
    """
    request_time = request_time or int(time.time())
    stats.count("new_group_request", tags={"user": user_email, "group": group})
    # Refresh the cached affected_user if it belongs to a different user.
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    # Get current user. Create if they do not already exist
    # self.user = self.get_or_create_user(user_email)
    # Get current user requests, which will validate existing signature
    # existing_request_ids = self.user["requests"]
    # existing_requests = self.resolve_request_ids(existing_request_ids)
    existing_pending_requests_for_group = self.get_requests_by_user(
        user_email, group, status="pending"
    )
    # Craft the new request json
    timestamp = int(time.time())
    request_id = str(uuid.uuid4())
    new_request = {
        "request_id": request_id,
        "group": group,
        "status": status,
        "justification": justification,
        "request_time": request_time,
        "updated_by": updated_by,
        "last_updated": timestamp,
        "username": user_email,
    }
    # See if user already has an active or pending request for the group
    if existing_pending_requests_for_group:
        for request in existing_pending_requests_for_group:
            raise PendingRequestAlreadyExists(
                f"Pending request for group: {group} already exists: {request}"
            )
    try:
        self.requests_table.put_item(Item=self._data_to_dynamo_replace(new_request))
    except Exception as e:
        error = {
            "error": f"Unable to add user request: {str(e)}",
            "request": new_request,
        }
        log.error(error, exc_info=True)
        raise Exception(error)
    # Track the new request ID on the user's signed entry.
    self.add_request_id_to_user(self.affected_user, request_id)
    return new_request
async def get_all_requests(self, status=None):
    """Return all requests. If a status is specified, only requests with the specified status will be returned.

    NOTE(review): the status-filter path assumes each scanned item carries a
    "json" key holding a list of sub-requests, each with its own "status" —
    a shape that differs from the flat records written by ``add_request``.
    Confirm which shape the requests table actually holds before relying on
    the filtered path (a flat record would raise KeyError here).

    :param status:
    :return:
    """
    items = await sync_to_async(self.parallel_scan_table)(self.requests_table)
    return_value = []
    if status:
        for item in items:
            new_json = []
            for j in item["json"]:
                if j["status"] == status:
                    new_json.append(j)
            item["json"] = new_json
            # Keep the item only when at least one sub-request matched.
            if new_json:
                return_value.append(item)
    else:
        return_value = items
    return return_value
def get_requests_by_user(
    self,
    user_email: str,
    group: str = None,
    status: str = None,
    use_cache: bool = False,
) -> dict:
    """Get a user's requests, optionally filtered by group and/or status.

    :param user_email: user whose requests to fetch
    :param group: keep only requests for this group when provided
    :param status: keep only requests with this status when provided
    :param use_cache: consult/populate a short-lived (120s) Redis cache
    :return: list of matching request dicts
    """
    red_key = f"USER_REQUESTS_{user_email}-{group}-{status}"
    if use_cache:
        cached = red.get(red_key)
        if cached:
            return json.loads(cached)
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    existing_requests = self.resolve_request_ids(self.affected_user["requests"])
    requests_to_return = [
        request
        for request in (existing_requests or [])
        if not (group and request["group"] != group)
        and not (status and request["status"] != status)
    ]
    if use_cache:
        red.setex(red_key, 120, json.dumps(requests_to_return))
    return requests_to_return
def change_request_status(
    self, user_email, group, new_status, updated_by=None, reviewer_comments=None
):
    """
    Change the status of every request ``user_email`` has for ``group``.

    :param user_email: owner of the request(s)
    :param group: group whose request status is being changed
    :param new_status: one of POSSIBLE_STATUSES
    :param updated_by: required when approving
    :param reviewer_comments: optional reviewer note stored on the request
    :return: the last request modified
    :raises NoExistingRequest: when the user has no request for the group
    """
    stats.count(
        "update_group_request",
        tags={
            "user": user_email,
            "group": group,
            "new_status": new_status,
            "updated_by": updated_by,
        },
    )
    modified_request = None
    # Refresh the cached affected_user if it belongs to a different user.
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    timestamp = int(time.time())
    if new_status not in POSSIBLE_STATUSES:
        raise Exception(
            f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
        )
    if new_status == "approved" and not updated_by:
        raise Exception(
            "You must provide `updated_by` to change a request status to approved."
        )
    existing_requests = self.get_requests_by_user(user_email)
    if existing_requests:
        updated = False
        # Update every request the user has for this group (not only the
        # first match); each is written back individually.
        for request in existing_requests:
            if request["group"] == group:
                request["updated_by"] = updated_by
                request["status"] = new_status
                request["last_updated"] = timestamp
                request["reviewer_comments"] = reviewer_comments
                modified_request = request
                try:
                    self.requests_table.put_item(
                        Item=self._data_to_dynamo_replace(request)
                    )
                except Exception as e:
                    error = f"Unable to add user request: {request}: {str(e)}"
                    log.error(error, exc_info=True)
                    raise Exception(error)
                updated = True
        if not updated:
            raise NoExistingRequest(
                f"Unable to find existing request for user: {user_email} and group: {group}."
            )
    else:
        raise NoExistingRequest(
            f"Unable to find existing requests for user: {user_email}"
        )
    return modified_request
def change_request_status_by_id(
    self,
    request_id: str,
    new_status: str,
    updated_by: Optional[str] = None,
    reviewer_comments: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
    """
    Change a request's status by its request ID.

    :param request_id: ID of the request to modify
    :param new_status: one of POSSIBLE_STATUSES
    :param updated_by: required when approving
    :param reviewer_comments: optional reviewer note
    :return: the modified request (None when no request matched)
    :raises Exception: for invalid status, missing updated_by on approval,
        or DynamoDB failures
    """
    modified_request = None
    if new_status == "approved" and not updated_by:
        raise Exception(
            "You must provide `updated_by` to change a request status to approved."
        )
    requests = self.resolve_request_ids([request_id])
    if new_status not in POSSIBLE_STATUSES:
        raise Exception(
            f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
        )
    for request in requests:
        request.update(
            {
                "status": new_status,
                "updated_by": updated_by,
                "last_updated": int(time.time()),
                "reviewer_comments": reviewer_comments,
            }
        )
        modified_request = request
        try:
            self.requests_table.put_item(Item=self._data_to_dynamo_replace(request))
        except Exception as e:
            error = f"Unable to add user request: {request} : {str(e)}"
            log.error(error, exc_info=True)
            raise Exception(error)
    return modified_request
def get_all_policies(self):
    """Return every policy in the policies table, following DynamoDB pagination."""
    items = []
    response = self.policies_table.scan()
    if response and "Items" in response:
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    # Follow LastEvaluatedKey until the scan is exhausted.
    while "LastEvaluatedKey" in response:
        response = self.policies_table.scan(
            ExclusiveStartKey=response["LastEvaluatedKey"]
        )
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    return items
async def create_group_log_entry(
    self,
    group: str,
    username: str,
    updated_by: str,
    action: str,
    updated_at: None = None,
    extra: None = None,
) -> None:
    """Append an audit entry for a group-membership action to the group log.

    :param group: group the action applies to
    :param username: user the action applies to
    :param updated_by: actor performing the change
    :param action: action name being recorded
    :param updated_at: event epoch; defaults to now
    :param extra: optional extra payload stored verbatim
    """
    log_entry = {
        "uuid": str(uuid.uuid4()),
        "group": group,
        "username": username,
        "updated_by": updated_by,
        "updated_at": updated_at or int(time.time()),
        "action": action,
        "extra": extra,
    }
    self.group_log.put_item(Item=self._data_to_dynamo_replace(log_entry))
async def get_all_audit_logs(self) -> List[Dict[str, Union[int, None, str]]]:
    """Return every group-log audit entry, following DynamoDB pagination."""
    scan = sync_to_async(self.group_log.scan)
    response = await scan()
    items = []
    if response and "Items" in response:
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    while "LastEvaluatedKey" in response:
        response = await scan(ExclusiveStartKey=response["LastEvaluatedKey"])
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    return items
async def get_all_pending_requests(self):
    """Convenience wrapper: all requests whose status is "pending"."""
    pending = await self.get_all_requests(status="pending")
    return pending
def batch_write_cloudtrail_events(self, items):
    """Batch-write CloudTrail event items, overwriting on (arn, request_id).

    :param items: iterable of event dicts to persist
    :return: True on completion
    """
    writer = self.cloudtrail_table.batch_writer(
        overwrite_by_pkeys=["arn", "request_id"]
    )
    with writer as batch:
        for entry in items:
            batch.put_item(Item=self._data_to_dynamo_replace(entry))
    return True
async def get_top_cloudtrail_errors_by_arn(self, arn, n=5):
    """Return the ``n`` most frequent non-expired CloudTrail errors for an ARN.

    Errors are bucketed by "<event_call>|||<resource>"; each bucket carries
    an occurrence count and the generated policy captured with the first
    matching event.

    :param arn: ARN to query CloudTrail errors for
    :param n: number of top buckets to return
    :return: dict of the top-n buckets, highest count first
    """
    response: dict = await sync_to_async(self.cloudtrail_table.query)(
        KeyConditionExpression=Key("arn").eq(arn)
    )
    aggregated_errors = defaultdict(dict)
    for item in response.get("Items", []):
        # Skip entries whose TTL has already elapsed.
        if int(item["ttl"]) < int(time.time()):
            continue
        event_string = f"{item['event_call']}|||{item.get('resource', '')}"
        bucket = aggregated_errors[event_string]
        if not bucket:
            bucket["count"] = 0
            bucket["generated_policy"] = item.get("generated_policy", {})
        bucket["count"] += 1
    ranked = sorted(
        aggregated_errors.items(), key=lambda kv: kv[1]["count"], reverse=True
    )
    return dict(ranked[:n])
def count_arn_errors(self, error_count, items):
    """Accumulate per-ARN error counts from ``items`` into ``error_count``.

    Each item contributes its "count" (default 1) to the bucket keyed by its
    "arn". Buckets holding a falsy value are reset to 0 before adding, as in
    the original implementation. The mapping is updated in place and also
    returned.

    :param error_count: mapping of arn -> running count (mutated)
    :param items: iterable of dicts with "arn" and optional "count" keys
    :return: the same ``error_count`` mapping
    """
    for item in items:
        arn = item.get("arn")
        error_count[arn] = (error_count.get(arn) or 0) + item.get("count", 1)
    return error_count
The provided code snippet includes necessary dependencies for implementing the `get_request_by_id` function. Write a Python function `async def get_request_by_id(user, request_id)` to solve the following problem:
Get request matching id and add the group's secondary approvers
Here is the function:
async def get_request_by_id(user, request_id):
    """Get request matching id and add the group's secondary approvers.

    :param user: acting user (used to construct the Dynamo handler)
    :param request_id: ID of the request to resolve
    :return: the matching request dict with a comma-joined
        ``secondary_approvers`` field, or None when no request matches
    """
    dynamo_handler = UserDynamoHandler(user)
    try:
        requests = await sync_to_async(dynamo_handler.resolve_request_ids)([request_id])
        for req in requests:
            group = req.get("group")
            secondary_approvers = await auth.get_secondary_approvers(group)
            req["secondary_approvers"] = ",".join(secondary_approvers)
    except NoMatchingRequest:
        requests = []
    # Bug fix: stripped extraction residue ("| Get request matching ... |")
    # that had been fused onto this return statement, breaking its syntax.
    return next(iter(requests), None)
import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
async def get_all_pending_requests_api(user):
    """Get all pending requests and add the group's secondary approvers"""
    dynamo_handler = UserDynamoHandler(user)
    all_requests = await dynamo_handler.get_all_requests()
    pending_requests = [
        req for req in all_requests if req.get("status") == "pending"
    ]
    # Resolve secondary approvers for all groups concurrently; doing this
    # serially per request would be a bottleneck.
    tasks = [
        asyncio.ensure_future(
            auth.get_secondary_approvers(req.get("group"), return_dict=True)
        )
        for req in pending_requests
    ]
    secondary_approver_mapping = {}
    for mapping in await asyncio.gather(*tasks):
        for group, secondary_approvers in mapping.items():
            secondary_approver_mapping[group] = ",".join(secondary_approvers)
    for req in pending_requests:
        req["secondary_approvers"] = secondary_approver_mapping.get(
            req.get("group"), ""
        )
    return pending_requests
async def get_app_pending_requests_policies(user):
    """Return all pending policy requests for display (always a list)."""
    dynamo_handler = UserDynamoHandler(user)
    pending = await dynamo_handler.get_all_policy_requests(status="pending")
    return pending if pending else []
The provided code snippet includes necessary dependencies for implementing the `get_all_pending_requests` function. Write a Python function `async def get_all_pending_requests(user, groups)` to solve the following problem:
Get all pending requests sorted into three buckets: `all_pending_requests`, `my_pending_requests`, and `pending_requests_waiting_my_approval`. Note: this will fetch pending requests for both POLICIES and GROUPS; if you are re-writing this feature, you may only want one or the other.
Here is the function:
async def get_all_pending_requests(user, groups):
    """Get all pending requests sorted into three buckets:
    - all_pending_requests
    - my_pending_requests
    - pending_requests_waiting_my_approval
    Note: This will get pending requests for both POLICIES and GROUPS. If you are re-writing this feature, you may only
    want one or the other.

    :param user: email of the requesting user
    :param groups: groups the user belongs to (used to decide approver-ship)
    :return: dict with the three buckets described above
    """
    all_requests = await get_all_pending_requests_api(user)
    pending_requests = {
        "all_pending_requests": all_requests,
        "my_pending_requests": [],
        "pending_requests_waiting_my_approval": [],
    }
    for req in all_requests:
        # Bug fix: default to "" so a request missing secondary_approvers
        # cannot raise AttributeError on .split().
        req["secondary_approvers"] = req.get("secondary_approvers", "").split(",")
        if user == req.get("username", ""):
            pending_requests["my_pending_requests"].append(req)
        for sa in req["secondary_approvers"]:
            if sa in groups or sa == user:
                pending_requests["pending_requests_waiting_my_approval"].append(req)
                break
    all_policy_requests = await get_app_pending_requests_policies(user)
    pending_requests["all_pending_requests"].extend(all_policy_requests)
    for req in all_policy_requests:
        # Bug fix: default to [] — an unset "groups.can_admin_policies"
        # config key would return None and the loop below would raise
        # TypeError.
        req["secondary_approvers"] = config.get("groups.can_admin_policies", [])
        for sa in req["secondary_approvers"]:
            if sa in groups or sa == user:
                pending_requests["pending_requests_waiting_my_approval"].append(req)
                break
        if user == req.get("username", ""):
            pending_requests["my_pending_requests"].append(req)
    return pending_requests
import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
# Resolve the configured auth plugin (falling back to "default_auth") and
# instantiate it once at import time for module-wide use.
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
class UserDynamoHandler(BaseDynamoHandler):
def __init__(self, user_email: Optional[str] = None) -> None:
    """Bind all ConsoleMe DynamoDB tables and optionally load the user.

    :param user_email: when given, ``self.user`` / ``self.affected_user``
        are loaded from (or created in) the users table.
    :raises Exception: re-raised when DynamoDB is unreachable and the
        deployment is not in development mode.
    """
    try:
        self.requests_table = self._get_dynamo_table(
            config.get("aws.requests_dynamo_table", "consoleme_requests_global")
        )
        self.users_table = self._get_dynamo_table(
            config.get("aws.users_dynamo_table", "consoleme_users_global")
        )
        self.group_log = self._get_dynamo_table(
            config.get("aws.group_log_dynamo_table", "consoleme_audit_global")
        )
        # NOTE(review): this reuses the group-log config key with a
        # different default table name — looks like a copy/paste slip.
        # Confirm the intended key before changing it; a key change is
        # deployment-affecting.
        self.dynamic_config = self._get_dynamo_table(
            config.get("aws.group_log_dynamo_table", "consoleme_config_global")
        )
        self.policy_requests_table = self._get_dynamo_table(
            config.get(
                "aws.policy_requests_dynamo_table", "consoleme_policy_requests"
            )
        )
        self.api_health_roles_table = self._get_dynamo_table(
            config.get(
                "aws.api_health_apps_table_dynamo_table",
                "consoleme_api_health_apps",
            )
        )
        self.resource_cache_table = self._get_dynamo_table(
            config.get(
                "aws.resource_cache_dynamo_table", "consoleme_resource_cache"
            )
        )
        self.cloudtrail_table = self._get_dynamo_table(
            config.get("aws.cloudtrail_table", "consoleme_cloudtrail")
        )
        self.notifications_table = self._get_dynamo_table(
            config.get("aws.notifications_table", "consoleme_notifications")
        )
        if user_email:
            self.user = self.get_or_create_user(user_email)
            self.affected_user = self.user
    except Exception:
        if config.get("development"):
            # In development, fall back to a signed in-memory user rather
            # than failing hard when Dynamo is unreachable.
            log.error(
                "Unable to connect to Dynamo. Trying to set user via development configuration",
                exc_info=True,
            )
            self.user = self.sign_request(
                {
                    "last_updated": int(time.time()),
                    "username": user_email,
                    "requests": [],
                }
            )
            self.affected_user = self.user
        else:
            log.error("Unable to get Dynamo table.", exc_info=True)
            raise
def write_resource_cache_data(self, data):
    """Bulk-write resource cache entries keyed on (resourceId, resourceType)."""
    partition_keys = ["resourceId", "resourceType"]
    self.parallel_write_table(self.resource_cache_table, data, partition_keys)
async def get_dynamic_config_yaml(self) -> bytes:
    """Retrieve the dynamic configuration YAML without blocking the loop."""
    fetch = sync_to_async(self.get_dynamic_config_yaml_sync)
    return await fetch()
def get_dynamic_config_yaml_sync(self) -> bytes:
    """Retrieve dynamic configuration yaml synchronously.

    Returns the decompressed "master" config, or b"" when the entry is
    missing, empty, or any error occurs (errors are reported to Sentry).
    """
    raw = b""
    try:
        current_config = self.dynamic_config.get_item(Key={"id": "master"})
        if not current_config:
            return raw
        compressed_config = current_config.get("Item", {}).get("config", "")
        if not compressed_config:
            return raw
        raw = zlib.decompress(compressed_config.value)
    except Exception:  # noqa
        sentry_sdk.capture_exception()
    return raw
def get_dynamic_config_dict(self) -> dict:
    """Retrieve dynamic configuration dictionary that can be merged with primary configuration dictionary.

    This method may be called both from synchronous code and from inside a
    running event loop, so it detects the loop state before deciding how to
    fetch the YAML.
    """
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:  # if cleanup: 'RuntimeError: There is no current event loop..'
        loop = None
    if loop and loop.is_running():
        # Already inside a running loop: asyncio.run() would raise, so use
        # the synchronous fetch directly.
        current_config_yaml = self.get_dynamic_config_yaml_sync()
    else:
        current_config_yaml = asyncio.run(self.get_dynamic_config_yaml())
    config_d = yaml.safe_load(current_config_yaml)
    return config_d
async def get_all_api_health_alerts(self) -> list:
    """Return every API-health app entry, following DynamoDB pagination.

    :return: list of entries, each passed through ``_data_from_dynamo_replace``
    """
    response: dict = self.api_health_roles_table.scan()
    # Bug fix: the first scan page must also go through
    # _data_from_dynamo_replace — previously only paginated pages were
    # converted, inconsistent with this loop and with get_all_policies.
    items = self._data_from_dynamo_replace(response.get("Items", []))
    while "LastEvaluatedKey" in response:
        response = self.api_health_roles_table.scan(
            ExclusiveStartKey=response["LastEvaluatedKey"]
        )
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    return items
async def get_api_health_alert_app(self, app_name) -> dict:
    """Return the stored API-health app entry for ``app_name`` (None if missing)."""
    fetch = sync_to_async(self.api_health_roles_table.get_item)
    response: dict = await fetch(Key={"appName": app_name})
    return response.get("Item", None)
async def write_api_health_alert_info(self, request, user_email: str):
"""
Writes a health alert role to the appropriate DynamoDB table
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
request["app_create_time"]: int = int(time.time())
request["updated_by"]: str = user_email
request["last_updated"]: int = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception:
error = {
"message": "Unable to add new api_health info request",
"request": request,
"function": function,
}
log.error(error, exc_info=True)
raise
return request
async def update_api_health_alert_info(
self, request: dict, user_email=None, update_by=None, last_updated=None
):
"""
Update api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
if update_by:
request["updated_by"] = update_by
else:
request["updated_by"] = user_email
if last_updated:
request["last_updated"] = last_updated
else:
request["last_updated"] = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception as e:
error: dict = {
"function": function,
"message": "Unable to update api_health_info request",
"request": request,
"error": str(e),
}
log.error(error, exc_info=True)
raise Exception(error)
return request
async def delete_api_health_alert_info(self, app: str) -> None:
"""
Delete api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
try:
await sync_to_async(self.api_health_roles_table.delete_item)(
Key={"appName": app}
)
except Exception:
error: dict = {
"function": function,
"message": "Unable to delete api_health info",
"app": app,
}
log.error(error, exc_info=True)
raise
    async def write_policy_request(
        self,
        user_email: str,
        justification: str,
        arn: str,
        policy_name: str,
        policy_changes: dict,
        resources: List[str],
        resource_policies: List[Dict],
        request_time: int = None,
        request_uuid=None,
        policy_status="pending",
        cross_account_request: bool = False,
        dry_run: bool = False,
    ):
        """
        Writes a policy request to the appropriate DynamoDB table

        dry_run will create the request format, but won't actually write it

        Sample run:
        write_policy_request(policy_changes)

        :param user_email: requester; stored as both username and updated_by
        :param justification: free-text reason for the request
        :param arn: principal the policy change applies to
        :param policy_name: name of the policy being changed
        :param policy_changes: change document; JSON-serialized for storage
        :param resources: resources affected by the change
        :param resource_policies: resource policies affected by the change
        :param request_time: epoch seconds; defaults to now
        :param request_uuid: request id; a new uuid4 is generated when omitted
        :param policy_status: initial status, defaults to "pending"
        :param cross_account_request: whether the change crosses accounts
        :param dry_run: build and return the request without persisting it
        :return: the request dict that was (or would be) written
        """
        request_time = request_time or int(time.time())

        # Craft the new request json
        timestamp = int(time.time())
        request_id = request_uuid or str(uuid.uuid4())
        new_request = {
            "request_id": request_id,
            "arn": arn,
            "status": policy_status,
            "justification": justification,
            "request_time": request_time,
            "updated_by": user_email,
            "last_updated": timestamp,
            "username": user_email,
            "policy_name": policy_name,
            "policy_changes": json.dumps(policy_changes),
            "resources": resources,
            "resource_policies": resource_policies,
            "cross_account_request": cross_account_request,
        }
        if not dry_run:
            try:
                await sync_to_async(self.policy_requests_table.put_item)(
                    Item=self._data_to_dynamo_replace(new_request)
                )
            except Exception as e:
                error = f"Unable to add new policy request: {new_request}: {str(e)}"
                log.error(error, exc_info=True)
                raise Exception(error)
        else:
            log_data = {
                "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
                "request": new_request,
                "message": "Dry run, skipping adding request to dynamo",
            }
            log.debug(log_data)
        return new_request
    async def write_policy_request_v2(self, extended_request: ExtendedRequestModel):
        """
        Writes a policy request v2 to the appropriate DynamoDB table

        Sample run:
        write_policy_request_v2(request)

        :param extended_request: the request model to persist
        :return: the dict that was written to DynamoDB
        :raises Exception: on an unsupported principal type or a Dynamo
            write failure
        """
        new_request = {
            "request_id": extended_request.id,
            "principal": extended_request.principal.dict(),
            "status": extended_request.request_status.value,
            "justification": extended_request.justification,
            "request_time": extended_request.timestamp,
            "last_updated": int(time.time()),
            "version": "2",
            "extended_request": json.loads(extended_request.json()),
            "username": extended_request.requester_email,
        }
        # Populate "arn" from the principal; template principals get a
        # synthesized "<repository>-<resource>" identifier instead.
        if extended_request.principal.principal_type == "AwsResource":
            new_request["arn"] = extended_request.principal.principal_arn
        elif extended_request.principal.principal_type == "HoneybeeAwsResourceTemplate":
            repository_name = extended_request.principal.repository_name
            resource_identifier = extended_request.principal.resource_identifier
            new_request["arn"] = f"{repository_name}-{resource_identifier}"
        else:
            raise Exception("Invalid principal type")
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Writing policy request v2 to Dynamo",
            "request": new_request,
        }
        log.debug(log_data)
        try:
            await sync_to_async(self.policy_requests_table.put_item)(
                Item=self._data_to_dynamo_replace(new_request)
            )
            log_data[
                "message"
            ] = "Successfully finished writing policy request v2 to Dynamo"
            log.debug(log_data)
        except Exception as e:
            log_data["message"] = "Error occurred writing policy request v2 to Dynamo"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            error = f"{log_data['message']}: {str(e)}"
            raise Exception(error)
        return new_request
async def update_policy_request(self, updated_request):
"""
Update a policy request by request ID
Sample run:
update_policy_request(policy_changes)
"""
updated_request["last_updated"] = int(time.time())
try:
await sync_to_async(self.policy_requests_table.put_item)(
Item=self._data_to_dynamo_replace(updated_request)
)
except Exception as e:
error = f"Unable to add updated policy request: {updated_request}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return updated_request
async def get_policy_requests(self, arn=None, request_id=None):
"""Reads a policy request from the appropriate DynamoDB table"""
if not arn and not request_id:
raise Exception("Must pass in ARN or policy request ID")
if request_id:
requests = self.policy_requests_table.query(
KeyConditionExpression="request_id = :ri",
ExpressionAttributeValues={":ri": request_id},
)
else:
requests = self.policy_requests_table.query(
KeyConditionExpression="arn = :arn",
ExpressionAttributeValues={":arn": arn},
)
matching_requests = []
if requests["Items"]:
items = self._data_from_dynamo_replace(requests["Items"])
items = await self.convert_policy_requests_to_v3(items)
matching_requests.extend(items)
return matching_requests
async def convert_policy_requests_to_v3(self, requests):
# Remove this function and calls to this function after a grace period of
changed = False
for request in requests:
if not request.get("version") in ["2"]:
continue
if request.get("extended_request") and not request.get("principal"):
principal_arn = request.pop("arn")
request["principal"] = {
"principal_arn": principal_arn,
"principal_type": "AwsResource",
}
request["extended_request"]["principal"] = {
"principal_arn": principal_arn,
"principal_type": "AwsResource",
}
request.pop("arn", None)
changes = (
request.get("extended_request", {})
.get("changes", {})
.get("changes", [])
)
for change in changes:
if not change.get("principal_arn"):
continue
if not change.get("version") in ["2.0", "2", 2]:
continue
change["principal"] = {
"principal_arn": change["principal_arn"],
"principal_type": "AwsResource",
}
change.pop("principal_arn")
change["version"] = "3.0"
changed = True
if changed:
self.parallel_write_table(self.policy_requests_table, requests)
return requests
async def get_all_policy_requests(
self, status: Optional[str] = "pending"
) -> List[Dict[str, Union[int, List[str], str]]]:
"""Return all policy requests. If a status is specified, only requests with the specified status will be
returned.
:param status:
:return:
"""
requests = await sync_to_async(self.parallel_scan_table)(
self.policy_requests_table
)
requests = await self.convert_policy_requests_to_v3(requests)
return_value = []
if status:
for item in requests:
if status and item["status"] == status:
return_value.append(item)
else:
return_value = requests
return return_value
    async def update_dynamic_config(self, config: str, updated_by: str) -> None:
        """Take a YAML config and writes to DDB (The reason we use YAML instead of JSON is to preserve comments).

        The current "master" record is archived under its previous
        updated_at timestamp before being overwritten, preserving history.

        :param config: raw YAML text of the new dynamic configuration
        :param updated_by: identity of the user making the change
        :raises yaml.YAMLError: if `config` is not valid YAML
        """
        # Validate that config loads as yaml, raises exception if not
        yaml.safe_load(config)
        stats.count("update_dynamic_config", tags={"updated_by": updated_by})
        current_config_entry = self.dynamic_config.get_item(Key={"id": "master"})
        if current_config_entry.get("Item"):
            # Archive the outgoing config under its last update timestamp.
            old_config = {
                "id": current_config_entry["Item"]["updated_at"],
                "updated_by": current_config_entry["Item"]["updated_by"],
                "config": current_config_entry["Item"]["config"],
                "updated_at": str(int(time.time())),
            }
            self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(old_config))

        new_config = {
            "id": "master",
            "config": zlib.compress(config.encode()),
            "updated_by": updated_by,
            "updated_at": str(int(time.time())),
        }
        self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(new_config))
    def validate_signature(self, items):
        """Verify the stored signature of a Dynamo record.

        NOTE: pops "signature" from `items`, mutating the caller's dict.

        :raises Exception: if the signature does not match the content
        """
        signature = items.pop("signature")
        if isinstance(signature, Binary):
            # boto3 wraps binary attributes; unwrap to raw bytes.
            signature = signature.value
        json_request = json.dumps(items, sort_keys=True)
        if not crypto.verify(json_request, signature):
            raise Exception(f"Invalid signature for request: {json_request}")
    def sign_request(
        self, user_entry: Dict[str, Union[Decimal, List[str], Binary, str]]
    ) -> Dict[str, Union[Decimal, List[str], str, bytes]]:
        """
        Sign the user entry and return it with a "signature" field attached.

        :param user_entry: record to sign (mutated in place)
        :return: the same record, with "signature" set
        """
        # Remove old signature if it exists
        user_entry.pop("signature", None)
        user_entry = self._data_from_dynamo_replace(user_entry)
        # NOTE(review): `use_decimal=True` implies `json` here is simplejson,
        # not the stdlib json module — confirm against this file's imports.
        json_request = json.dumps(user_entry, sort_keys=True, use_decimal=True)
        sig = crypto.sign(json_request)
        user_entry["signature"] = sig
        return user_entry
    async def authenticate_user(self, login_attempt) -> AuthenticationResponse:
        """Authenticate a username/password login attempt against the users table.

        All failure responses use a generic error message plus a progressive
        delay (wait_after_authentication_failure) to slow brute-force
        attempts and avoid revealing whether the user exists.

        :param login_attempt: object with username, password and
            after_redirect_uri attributes
        :return: AuthenticationResponse; on success carries username/groups,
            on failure carries the error list
        """
        function: str = (
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
        )
        log_data = {
            "function": function,
            "user_email": login_attempt.username,
            "after_redirect_uri": login_attempt.after_redirect_uri,
        }
        user_entry = await sync_to_async(self.users_table.query)(
            KeyConditionExpression="username = :un",
            ExpressionAttributeValues={":un": login_attempt.username},
        )
        user = None
        # Generic message so responses don't reveal which check failed.
        generic_error = ["User doesn't exist, or password is incorrect. "]
        if user_entry and "Items" in user_entry and len(user_entry["Items"]) == 1:
            user = user_entry["Items"][0]

        if not user:
            delay_error = await wait_after_authentication_failure(
                login_attempt.username
            )
            error = f"Unable to find user: {login_attempt.username}"
            log.error({**log_data, "message": error + delay_error})
            return AuthenticationResponse(
                authenticated=False, errors=generic_error + [delay_error]
            )

        if not user.get("password"):
            delay_error = await wait_after_authentication_failure(
                login_attempt.username
            )
            error = "User exists, but doesn't have a password stored in the database"
            log.error({**log_data, "message": error + delay_error})
            return AuthenticationResponse(
                authenticated=False, errors=generic_error + [delay_error]
            )

        # Compare against the stored bcrypt hash (a Dynamo Binary; .value is bytes).
        password_hash_matches = bcrypt.checkpw(
            login_attempt.password.encode("utf-8"), user["password"].value
        )
        if not password_hash_matches:
            delay_error = await wait_after_authentication_failure(
                login_attempt.username
            )
            error = "Password does not match. "
            log.error({**log_data, "message": error + delay_error})
            return AuthenticationResponse(
                authenticated=False, errors=generic_error + [delay_error]
            )
        return AuthenticationResponse(
            authenticated=True, username=user["username"], groups=user["groups"]
        )
    def create_user(
        self,
        user_email,
        password: Optional[str] = None,
        groups: Optional[List[str]] = None,
    ):
        """Create and persist a new signed user record.

        :param user_email: username for the new record
        :param password: optional plaintext password; stored bcrypt-hashed
        :param groups: optional initial group membership
        :return: the signed user entry that was written
        """
        if not groups:
            groups = []
        timestamp = int(time.time())
        unsigned_user_entry = {
            "created": timestamp,
            "last_updated": timestamp,
            "username": user_email,
            "requests": [],
            "groups": groups,
        }
        if password:
            # Never store plaintext; bcrypt with a fresh salt.
            pw = bytes(password, "utf-8")
            salt = bcrypt.gensalt()
            unsigned_user_entry["password"] = bcrypt.hashpw(pw, salt)
        user_entry = self.sign_request(unsigned_user_entry)
        try:
            self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
        except Exception as e:
            error = f"Unable to add user submission: {user_entry}: {str(e)}"
            log.error(error, exc_info=True)
            raise Exception(error)
        return user_entry
def update_user(
self,
user_email,
password: Optional[str] = None,
groups: Optional[List[str]] = None,
):
if not groups:
groups = []
user_ddb = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
user = None
if user_ddb and "Items" in user_ddb and len(user_ddb["Items"]) == 1:
user = user_ddb["Items"][0]
if not user:
raise DataNotRetrievable(f"Unable to find user: {user_email}")
timestamp = int(time.time())
if password:
pw = bytes(password, "utf-8")
salt = bcrypt.gensalt()
user["password"] = bcrypt.hashpw(pw, salt)
if groups:
user["groups"] = groups
user["last_updated"] = timestamp
user_entry = self.sign_request(user)
try:
self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to add user submission: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return user_entry
def delete_user(self, user_email):
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user_entry = {"username": user_email}
try:
self.users_table.delete_item(Key=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to delete user: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
async def get_user(
self, user_email: str
) -> Optional[Dict[str, Union[Decimal, List[str], Binary, str]]]:
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
if user and "Items" in user and len(user["Items"]) == 1:
return user["Items"][0]
return None
def get_or_create_user(
self, user_email: str
) -> Dict[str, Union[Decimal, List[str], Binary, str]]:
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
items = []
if user and "Items" in user:
items = user["Items"]
if not items:
return self.create_user(user_email)
return items[0]
def resolve_request_ids(
self, request_ids: List[str]
) -> List[Dict[str, Union[int, str]]]:
requests = []
for request_id in request_ids:
request = self.requests_table.query(
KeyConditionExpression="request_id = :ri",
ExpressionAttributeValues={":ri": request_id},
)
if request["Items"]:
items = self._data_from_dynamo_replace(request["Items"])
requests.append(items[0])
else:
raise NoMatchingRequest(
f"No matching request for request_id: {request_id}"
)
return requests
    def add_request_id_to_user(
        self,
        affected_user: Dict[str, Union[Decimal, List[str], Binary, str]],
        request_id: str,
    ) -> None:
        """Append a request id to the user's request list and persist the re-signed record."""
        affected_user["requests"].append(request_id)
        self.users_table.put_item(
            Item=self._data_to_dynamo_replace(self.sign_request(affected_user))
        )
    def add_request(
        self,
        user_email: str,
        group: str,
        justification: str,
        request_time: Optional[int] = None,
        status: str = "pending",
        updated_by: Optional[str] = None,
    ) -> Dict[str, Union[int, str]]:
        """
        Add a user request to the dynamo table

        Sample run:
        add_request("user@example.com", "engtest", "because")

        :param user_email: Email address of user
        :param group: Name of group user is requesting access to
        :param justification: free-text reason for the request
        :param request_time: epoch seconds; defaults to now
        :param status: initial request status (defaults to "pending")
        :param updated_by: identity that last touched the request
        :return: the new request dict that was written
        :raises PendingRequestAlreadyExists: if a pending request for the
            same group already exists
        """
        """
        Request:
            group
            justification
            role
            request_time
            approval_time
            updated_by
            approval_reason
            status

        user@example.com:
            requests: []
            last_updated: 1
            signature: xxxx
        #pending: []
        #expired: []
        # How to expire requests if soemeone maliciously deletes content
        # How to query for all approved requests for group X
        # What if we want to send email saying your request is expiring in 7 days? Maybe celery to query all
        # What about concept of request ID? Maybe base64 encoded thing?
        # Need an all-in-one page to show all pending requests, all expired/approved requests
        """
        request_time = request_time or int(time.time())

        stats.count("new_group_request", tags={"user": user_email, "group": group})

        # Refresh self.affected_user if we're acting on a different user.
        if self.affected_user.get("username") != user_email:
            self.affected_user = self.get_or_create_user(user_email)
        # Get current user. Create if they do not already exist
        # self.user = self.get_or_create_user(user_email)
        # Get current user requests, which will validate existing signature
        # existing_request_ids = self.user["requests"]
        # existing_requests = self.resolve_request_ids(existing_request_ids)
        existing_pending_requests_for_group = self.get_requests_by_user(
            user_email, group, status="pending"
        )

        # Craft the new request json
        timestamp = int(time.time())
        request_id = str(uuid.uuid4())
        new_request = {
            "request_id": request_id,
            "group": group,
            "status": status,
            "justification": justification,
            "request_time": request_time,
            "updated_by": updated_by,
            "last_updated": timestamp,
            "username": user_email,
        }

        # See if user already has an active or pending request for the group
        if existing_pending_requests_for_group:
            for request in existing_pending_requests_for_group:
                raise PendingRequestAlreadyExists(
                    f"Pending request for group: {group} already exists: {request}"
                )
        try:
            self.requests_table.put_item(Item=self._data_to_dynamo_replace(new_request))
        except Exception as e:
            error = {
                "error": f"Unable to add user request: {str(e)}",
                "request": new_request,
            }
            log.error(error, exc_info=True)
            raise Exception(error)

        self.add_request_id_to_user(self.affected_user, request_id)

        return new_request
async def get_all_requests(self, status=None):
"""Return all requests. If a status is specified, only requests with the specified status will be returned.
:param status:
:return:
"""
items = await sync_to_async(self.parallel_scan_table)(self.requests_table)
return_value = []
if status:
for item in items:
new_json = []
for j in item["json"]:
if j["status"] == status:
new_json.append(j)
item["json"] = new_json
if new_json:
return_value.append(item)
else:
return_value = items
return return_value
    def get_requests_by_user(
        self,
        user_email: str,
        group: Optional[str] = None,
        status: Optional[str] = None,
        use_cache: bool = False,
    ) -> list:
        """Get requests by user. Group and status can also be specified to filter results.

        :param user_email: user whose requests to fetch
        :param group: optional group filter
        :param status: optional status filter
        :param use_cache: read/write a 120-second Redis cache of the result
        :return: list of matching request dicts
        """
        red_key = f"USER_REQUESTS_{user_email}-{group}-{status}"
        if use_cache:
            requests_to_return = red.get(red_key)
            if requests_to_return:
                return json.loads(requests_to_return)
        # Refresh self.affected_user if we're acting on a different user.
        if self.affected_user.get("username") != user_email:
            self.affected_user = self.get_or_create_user(user_email)
        existing_request_ids = self.affected_user["requests"]
        existing_requests = self.resolve_request_ids(existing_request_ids)
        requests_to_return = []
        if existing_requests:
            for request in existing_requests:
                if group and request["group"] != group:
                    continue
                if status and request["status"] != status:
                    continue
                requests_to_return.append(request)
        if use_cache:
            # Cache the filtered result for two minutes.
            red.setex(red_key, 120, json.dumps(requests_to_return))
        return requests_to_return
    def change_request_status(
        self, user_email, group, new_status, updated_by=None, reviewer_comments=None
    ):
        """Change the status of a user's request for a given group.

        :param user_email: owner of the request
        :param group: group whose request should be updated
        :param new_status: one of POSSIBLE_STATUSES
        :param updated_by: required when approving a request
        :param reviewer_comments: optional reviewer note
        :return: the last modified request dict
        :raises NoExistingRequest: when the user has no matching request
        """
        stats.count(
            "update_group_request",
            tags={
                "user": user_email,
                "group": group,
                "new_status": new_status,
                "updated_by": updated_by,
            },
        )
        modified_request = None
        # Refresh self.affected_user if we're acting on a different user.
        if self.affected_user.get("username") != user_email:
            self.affected_user = self.get_or_create_user(user_email)
        timestamp = int(time.time())
        if new_status not in POSSIBLE_STATUSES:
            raise Exception(
                f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
            )
        if new_status == "approved" and not updated_by:
            raise Exception(
                "You must provide `updated_by` to change a request status to approved."
            )
        existing_requests = self.get_requests_by_user(user_email)
        if existing_requests:
            updated = False
            for request in existing_requests:
                # Update every request for this group (normally just one).
                if request["group"] == group:
                    request["updated_by"] = updated_by
                    request["status"] = new_status
                    request["last_updated"] = timestamp
                    request["reviewer_comments"] = reviewer_comments
                    modified_request = request
                    try:
                        self.requests_table.put_item(
                            Item=self._data_to_dynamo_replace(request)
                        )
                    except Exception as e:
                        error = f"Unable to add user request: {request}: {str(e)}"
                        log.error(error, exc_info=True)
                        raise Exception(error)
                    updated = True

            if not updated:
                raise NoExistingRequest(
                    f"Unable to find existing request for user: {user_email} and group: {group}."
                )
        else:
            raise NoExistingRequest(
                f"Unable to find existing requests for user: {user_email}"
            )

        return modified_request
def change_request_status_by_id(
self,
request_id: str,
new_status: str,
updated_by: Optional[str] = None,
reviewer_comments: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
"""
Change request status by ID
:param request_id:
:param new_status:
:param updated_by:
:return: new requests
"""
modified_request = None
if new_status == "approved" and not updated_by:
raise Exception(
"You must provide `updated_by` to change a request status to approved."
)
requests = self.resolve_request_ids([request_id])
if new_status not in POSSIBLE_STATUSES:
raise Exception(
f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
)
for request in requests:
request["status"] = new_status
request["updated_by"] = updated_by
request["last_updated"] = int(time.time())
request["reviewer_comments"] = reviewer_comments
modified_request = request
try:
self.requests_table.put_item(Item=self._data_to_dynamo_replace(request))
except Exception as e:
error = f"Unable to add user request: {request} : {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return modified_request
def get_all_policies(self):
"""Return all policies."""
response = self.policies_table.scan()
items = []
if response and "Items" in response:
items = self._data_from_dynamo_replace(response["Items"])
while "LastEvaluatedKey" in response:
response = self.policies_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
async def create_group_log_entry(
self,
group: str,
username: str,
updated_by: str,
action: str,
updated_at: None = None,
extra: None = None,
) -> None:
updated_at = updated_at or int(time.time())
log_id = str(uuid.uuid4())
log_entry = {
"uuid": log_id,
"group": group,
"username": username,
"updated_by": updated_by,
"updated_at": updated_at,
"action": action,
"extra": extra,
}
self.group_log.put_item(Item=self._data_to_dynamo_replace(log_entry))
    async def get_all_audit_logs(self) -> List[Dict[str, Union[int, None, str]]]:
        """Return every group audit-log entry, paging through the full table scan."""
        response = await sync_to_async(self.group_log.scan)()
        items = []
        if response and "Items" in response:
            items = self._data_from_dynamo_replace(response["Items"])
        while "LastEvaluatedKey" in response:
            response = await sync_to_async(self.group_log.scan)(
                ExclusiveStartKey=response["LastEvaluatedKey"]
            )
            items.extend(self._data_from_dynamo_replace(response["Items"]))
        return items
    async def get_all_pending_requests(self):
        """Convenience wrapper: all group requests whose status is "pending"."""
        return await self.get_all_requests(status="pending")
def batch_write_cloudtrail_events(self, items):
with self.cloudtrail_table.batch_writer(
overwrite_by_pkeys=["arn", "request_id"]
) as batch:
for item in items:
batch.put_item(Item=self._data_to_dynamo_replace(item))
return True
async def get_top_cloudtrail_errors_by_arn(self, arn, n=5):
response: dict = await sync_to_async(self.cloudtrail_table.query)(
KeyConditionExpression=Key("arn").eq(arn)
)
items = response.get("Items", [])
aggregated_errors = defaultdict(dict)
for item in items:
if int(item["ttl"]) < int(time.time()):
continue
event_call = item["event_call"]
event_resource = item.get("resource", "")
event_string = f"{event_call}|||{event_resource}"
if not aggregated_errors.get(event_string):
aggregated_errors[event_string]["count"] = 0
aggregated_errors[event_string]["generated_policy"] = item.get(
"generated_policy", {}
)
aggregated_errors[event_string]["count"] += 1
top_n_errors = {
k: v
for k, v in sorted(
aggregated_errors.items(),
key=lambda item: item[1]["count"],
reverse=True,
)[:n]
}
return top_n_errors
def count_arn_errors(self, error_count, items):
for item in items:
arn = item.get("arn")
if not error_count.get(arn):
error_count[arn] = 0
error_count[arn] += item.get("count", 1)
return error_count
The code above provides the dependencies needed to implement the `get_user_requests` function. Task: write a Python function `async def get_user_requests(user, groups)` that solves the following problem:
Return the requests relevant to a user — a user sees requests they have made, as well as requests for which they are a secondary approver.
The function follows:
async def get_user_requests(user, groups):
    """Get requests relevant to a user.

    A user sees requests they have made as well as requests where they are a
    secondary approver.

    :param user: username of the requester
    :param groups: groups the user belongs to
    :return: list of relevant request dicts
    """
    dynamo_handler = UserDynamoHandler(user)
    all_requests = await dynamo_handler.get_all_requests()
    query = {
        "domains": config.get("dynamo.get_user_requests.domains", []),
        "filters": [
            {
                "field": "extendedattributes.attributeName",
                "values": ["secondary_approvers"],
                "operator": "EQUALS",
            },
            {
                "field": "extendedattributes.attributeValue",
                "values": groups + [user],
                "operator": "EQUALS",
            },
        ],
        "size": 500,
    }
    approver_groups = await auth.query_cached_groups(query=query)
    # Perf fix: build the membership set once instead of re-creating the
    # `approver_groups + [user]` list on every loop iteration.
    approver_names = {g["name"] for g in approver_groups}
    approver_names.add(user)
    requests = []
    for req in all_requests:
        if user == req.get("username", ""):
            requests.append(req)
            continue
        group = req.get("group")
        if group is None:
            continue
        if group in approver_names:
            requests.append(req)
    return requests
import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
class UserDynamoHandler(BaseDynamoHandler):
    def __init__(self, user_email: Optional[str] = None) -> None:
        """Initialize DynamoDB table handles and optionally load the acting user.

        When Dynamo is unreachable and `development` mode is configured,
        falls back to a signed in-memory user entry instead of raising.

        :param user_email: when given, self.user/self.affected_user are
            loaded (or created) for this username
        """
        try:
            self.requests_table = self._get_dynamo_table(
                config.get("aws.requests_dynamo_table", "consoleme_requests_global")
            )
            self.users_table = self._get_dynamo_table(
                config.get("aws.users_dynamo_table", "consoleme_users_global")
            )
            self.group_log = self._get_dynamo_table(
                config.get("aws.group_log_dynamo_table", "consoleme_audit_global")
            )
            # NOTE(review): this reuses the "aws.group_log_dynamo_table"
            # config key but defaults to the config table name — looks like a
            # copy-paste slip; confirm the intended key before changing it.
            self.dynamic_config = self._get_dynamo_table(
                config.get("aws.group_log_dynamo_table", "consoleme_config_global")
            )
            self.policy_requests_table = self._get_dynamo_table(
                config.get(
                    "aws.policy_requests_dynamo_table", "consoleme_policy_requests"
                )
            )
            self.api_health_roles_table = self._get_dynamo_table(
                config.get(
                    "aws.api_health_apps_table_dynamo_table",
                    "consoleme_api_health_apps",
                )
            )
            self.resource_cache_table = self._get_dynamo_table(
                config.get(
                    "aws.resource_cache_dynamo_table", "consoleme_resource_cache"
                )
            )
            self.cloudtrail_table = self._get_dynamo_table(
                config.get("aws.cloudtrail_table", "consoleme_cloudtrail")
            )
            self.notifications_table = self._get_dynamo_table(
                config.get("aws.notifications_table", "consoleme_notifications")
            )

            if user_email:
                self.user = self.get_or_create_user(user_email)
                self.affected_user = self.user
        except Exception:
            if config.get("development"):
                # Development fallback: synthesize a signed user entry so the
                # app can run without a reachable Dynamo.
                log.error(
                    "Unable to connect to Dynamo. Trying to set user via development configuration",
                    exc_info=True,
                )
                self.user = self.sign_request(
                    {
                        "last_updated": int(time.time()),
                        "username": user_email,
                        "requests": [],
                    }
                )
                self.affected_user = self.user
            else:
                log.error("Unable to get Dynamo table.", exc_info=True)
                raise
    def write_resource_cache_data(self, data):
        """Bulk-write resource cache entries to the resource cache table.

        Delegates to parallel_write_table, overwriting items on the
        (resourceId, resourceType) composite key.
        """
        self.parallel_write_table(
            self.resource_cache_table, data, ["resourceId", "resourceType"]
        )
    async def get_dynamic_config_yaml(self) -> bytes:
        """Retrieve dynamic configuration yaml.

        Async wrapper around get_dynamic_config_yaml_sync; returns b"" when
        no dynamic configuration is stored or retrieval fails.
        """
        return await sync_to_async(self.get_dynamic_config_yaml_sync)()
def get_dynamic_config_yaml_sync(self) -> bytes:
"""Retrieve dynamic configuration yaml synchronously"""
c = b""
try:
current_config = self.dynamic_config.get_item(Key={"id": "master"})
if not current_config:
return c
compressed_config = current_config.get("Item", {}).get("config", "")
if not compressed_config:
return c
c = zlib.decompress(compressed_config.value)
except Exception: # noqa
sentry_sdk.capture_exception()
return c
def get_dynamic_config_dict(self) -> dict:
"""Retrieve dynamic configuration dictionary that can be merged with primary configuration dictionary."""
try:
loop = asyncio.get_running_loop()
except RuntimeError: # if cleanup: 'RuntimeError: There is no current event loop..'
loop = None
if loop and loop.is_running():
current_config_yaml = self.get_dynamic_config_yaml_sync()
else:
current_config_yaml = asyncio.run(self.get_dynamic_config_yaml())
config_d = yaml.safe_load(current_config_yaml)
return config_d
async def get_all_api_health_alerts(self) -> list:
"""Return all requests. If a status is specified, only requests with the specified status will be returned.
:param status:
:return:
"""
response: dict = self.api_health_roles_table.scan()
items = response.get("Items", [])
while "LastEvaluatedKey" in response:
response = self.api_health_roles_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
    async def get_api_health_alert_app(self, app_name) -> Optional[dict]:
        """Fetch a single API health alert app record by appName, or None if absent."""
        resp: dict = await sync_to_async(self.api_health_roles_table.get_item)(
            Key={"appName": app_name}
        )
        return resp.get("Item", None)
async def write_api_health_alert_info(self, request, user_email: str):
"""
Writes a health alert role to the appropriate DynamoDB table
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
request["app_create_time"]: int = int(time.time())
request["updated_by"]: str = user_email
request["last_updated"]: int = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception:
error = {
"message": "Unable to add new api_health info request",
"request": request,
"function": function,
}
log.error(error, exc_info=True)
raise
return request
async def update_api_health_alert_info(
self, request: dict, user_email=None, update_by=None, last_updated=None
):
"""
Update api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
if update_by:
request["updated_by"] = update_by
else:
request["updated_by"] = user_email
if last_updated:
request["last_updated"] = last_updated
else:
request["last_updated"] = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception as e:
error: dict = {
"function": function,
"message": "Unable to update api_health_info request",
"request": request,
"error": str(e),
}
log.error(error, exc_info=True)
raise Exception(error)
return request
async def delete_api_health_alert_info(self, app: str) -> None:
"""
Delete api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
try:
await sync_to_async(self.api_health_roles_table.delete_item)(
Key={"appName": app}
)
except Exception:
error: dict = {
"function": function,
"message": "Unable to delete api_health info",
"app": app,
}
log.error(error, exc_info=True)
raise
async def write_policy_request(
    self,
    user_email: str,
    justification: str,
    arn: str,
    policy_name: str,
    policy_changes: dict,
    resources: List[str],
    resource_policies: List[Dict],
    request_time: Optional[int] = None,
    request_uuid=None,
    policy_status="pending",
    cross_account_request: bool = False,
    dry_run: bool = False,
):
    """
    Writes a policy request to the appropriate DynamoDB table.
    dry_run will create the request format, but won't actually write it.

    Sample run:
        write_policy_request(policy_changes)

    :param user_email: requester's email; also recorded as ``updated_by``
    :param justification: free-form justification text
    :param arn: principal ARN the request applies to
    :param policy_name: name of the policy being changed
    :param policy_changes: change document (stored JSON-serialized)
    :param resources: affected resource identifiers
    :param resource_policies: resource policy documents
    :param request_time: creation time (epoch seconds); defaults to now
    :param request_uuid: request id; a fresh uuid4 is generated when omitted
    :param policy_status: initial request status
    :param cross_account_request: whether the request crosses accounts
    :param dry_run: build and return the request dict without persisting it
    :return: the request dict that was (or would have been) written
    """
    request_time = request_time or int(time.time())
    # Craft the new request json
    timestamp = int(time.time())
    request_id = request_uuid or str(uuid.uuid4())
    new_request = {
        "request_id": request_id,
        "arn": arn,
        "status": policy_status,
        "justification": justification,
        "request_time": request_time,
        "updated_by": user_email,
        "last_updated": timestamp,
        "username": user_email,
        "policy_name": policy_name,
        # Stored as a JSON string, not a nested document.
        "policy_changes": json.dumps(policy_changes),
        "resources": resources,
        "resource_policies": resource_policies,
        "cross_account_request": cross_account_request,
    }
    if not dry_run:
        try:
            await sync_to_async(self.policy_requests_table.put_item)(
                Item=self._data_to_dynamo_replace(new_request)
            )
        except Exception as e:
            error = f"Unable to add new policy request: {new_request}: {str(e)}"
            log.error(error, exc_info=True)
            raise Exception(error)
    else:
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "request": new_request,
            "message": "Dry run, skipping adding request to dynamo",
        }
        log.debug(log_data)
    return new_request
async def write_policy_request_v2(self, extended_request: ExtendedRequestModel):
    """
    Writes a policy request v2 to the appropriate DynamoDB table.

    Sample run:
        write_policy_request_v2(request)

    :param extended_request: the extended request model to persist
    :return: the dict that was written to DynamoDB
    :raises Exception: on an unknown principal type, or when the put fails
    """
    new_request = {
        "request_id": extended_request.id,
        "principal": extended_request.principal.dict(),
        "status": extended_request.request_status.value,
        "justification": extended_request.justification,
        "request_time": extended_request.timestamp,
        "last_updated": int(time.time()),
        "version": "2",
        # Round-trip through the model's JSON so only serializable data lands
        # in Dynamo.
        "extended_request": json.loads(extended_request.json()),
        "username": extended_request.requester_email,
    }
    # The stored "arn" field doubles as the table's query key; templates get a
    # synthetic "<repo>-<resource>" identifier instead of a real ARN.
    if extended_request.principal.principal_type == "AwsResource":
        new_request["arn"] = extended_request.principal.principal_arn
    elif extended_request.principal.principal_type == "HoneybeeAwsResourceTemplate":
        repository_name = extended_request.principal.repository_name
        resource_identifier = extended_request.principal.resource_identifier
        new_request["arn"] = f"{repository_name}-{resource_identifier}"
    else:
        raise Exception("Invalid principal type")
    log_data = {
        "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
        "message": "Writing policy request v2 to Dynamo",
        "request": new_request,
    }
    log.debug(log_data)
    try:
        await sync_to_async(self.policy_requests_table.put_item)(
            Item=self._data_to_dynamo_replace(new_request)
        )
        log_data[
            "message"
        ] = "Successfully finished writing policy request v2 to Dynamo"
        log.debug(log_data)
    except Exception as e:
        log_data["message"] = "Error occurred writing policy request v2 to Dynamo"
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        error = f"{log_data['message']}: {str(e)}"
        raise Exception(error)
    return new_request
async def update_policy_request(self, updated_request):
    """
    Persist an updated policy request (keyed by its request ID), refreshing
    its last_updated timestamp first.
    """
    updated_request["last_updated"] = int(time.time())
    try:
        item = self._data_to_dynamo_replace(updated_request)
        await sync_to_async(self.policy_requests_table.put_item)(Item=item)
    except Exception as e:
        error = f"Unable to add updated policy request: {updated_request}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
    return updated_request
async def get_policy_requests(self, arn=None, request_id=None):
    """Read policy requests from DynamoDB by request ID or by ARN."""
    if not arn and not request_id:
        raise Exception("Must pass in ARN or policy request ID")
    # request_id takes precedence when both are supplied.
    if request_id:
        key_expression = "request_id = :ri"
        attribute_values = {":ri": request_id}
    else:
        key_expression = "arn = :arn"
        attribute_values = {":arn": arn}
    requests = self.policy_requests_table.query(
        KeyConditionExpression=key_expression,
        ExpressionAttributeValues=attribute_values,
    )
    matching_requests = []
    if requests["Items"]:
        decoded = self._data_from_dynamo_replace(requests["Items"])
        matching_requests.extend(await self.convert_policy_requests_to_v3(decoded))
    return matching_requests
async def convert_policy_requests_to_v3(self, requests):
    """
    Upgrade any version-2 policy requests in ``requests`` to the v3 shape,
    in place.

    v2 stores a bare ``arn``; v3 stores a ``principal`` dict with
    ``principal_arn``/``principal_type``. Changes nested in the extended
    request are upgraded the same way. If any change-level conversion fired,
    the converted requests are written back to DynamoDB.

    TODO: Remove this function and its callers after a migration grace period.

    :param requests: list of policy request dicts (possibly mixed versions)
    :return: the same list, with v2 entries upgraded
    """
    changed = False
    for request in requests:
        # Only version-2 requests need conversion.
        if request.get("version") not in ["2"]:
            continue
        if request.get("extended_request") and not request.get("principal"):
            principal_arn = request.pop("arn")
            request["principal"] = {
                "principal_arn": principal_arn,
                "principal_type": "AwsResource",
            }
            request["extended_request"]["principal"] = {
                "principal_arn": principal_arn,
                "principal_type": "AwsResource",
            }
            request.pop("arn", None)
            # NOTE(review): this request-level conversion does not set
            # `changed`, so it is only persisted when a change-level
            # conversion below also fires — confirm this is intended.
        changes = (
            request.get("extended_request", {})
            .get("changes", {})
            .get("changes", [])
        )
        for change in changes:
            if not change.get("principal_arn"):
                continue
            if change.get("version") not in ["2.0", "2", 2]:
                continue
            change["principal"] = {
                "principal_arn": change["principal_arn"],
                "principal_type": "AwsResource",
            }
            change.pop("principal_arn")
            change["version"] = "3.0"
            changed = True
    if changed:
        # parallel_write_table is synchronous; run it in a worker thread so
        # this coroutine does not block the event loop (it was previously
        # invoked directly).
        await sync_to_async(self.parallel_write_table)(
            self.policy_requests_table, requests
        )
    return requests
async def get_all_policy_requests(
    self, status: Optional[str] = "pending"
) -> List[Dict[str, Union[int, List[str], str]]]:
    """Return all policy requests, optionally filtered to one status.

    :param status: when truthy, only requests whose ``status`` matches are
        returned; pass None (or "") to return everything
    :return: list of policy request dicts
    """
    requests = await sync_to_async(self.parallel_scan_table)(
        self.policy_requests_table
    )
    requests = await self.convert_policy_requests_to_v3(requests)
    if not status:
        return requests
    return [item for item in requests if item["status"] == status]
async def update_dynamic_config(self, config: str, updated_by: str) -> None:
    """Take a YAML config and writes to DDB (The reason we use YAML instead of JSON is to preserve comments)."""
    # Validate that config loads as yaml, raises exception if not
    yaml.safe_load(config)
    stats.count("update_dynamic_config", tags={"updated_by": updated_by})
    current_config_entry = self.dynamic_config.get_item(Key={"id": "master"})
    if current_config_entry.get("Item"):
        # Archive the previous live config under its updated_at timestamp so
        # history is preserved before the "master" row is overwritten.
        old_config = {
            "id": current_config_entry["Item"]["updated_at"],
            "updated_by": current_config_entry["Item"]["updated_by"],
            "config": current_config_entry["Item"]["config"],
            "updated_at": str(int(time.time())),
        }
        self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(old_config))
    # The live config is stored zlib-compressed under the fixed id "master".
    new_config = {
        "id": "master",
        "config": zlib.compress(config.encode()),
        "updated_by": updated_by,
        "updated_at": str(int(time.time())),
    }
    self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(new_config))
def validate_signature(self, items):
    """Verify the embedded signature of a DynamoDB item.

    Pops "signature" from ``items`` (mutating the dict), canonicalizes the
    remainder as sorted-key JSON, and raises if verification fails.
    """
    signature = items.pop("signature")
    if isinstance(signature, Binary):
        # Items read back from DynamoDB wrap bytes in boto3's Binary type.
        signature = signature.value
    json_request = json.dumps(items, sort_keys=True)
    if not crypto.verify(json_request, signature):
        raise Exception(f"Invalid signature for request: {json_request}")
def sign_request(
    self, user_entry: Dict[str, Union[Decimal, List[str], Binary, str]]
) -> Dict[str, Union[Decimal, List[str], str, bytes]]:
    """
    Sign the request and returned request with signature

    The entry is canonicalized as sorted-key JSON (after stripping any stale
    signature and decoding Dynamo sentinel values) before signing, so it can
    later be verified by validate_signature.

    :param user_entry: the entry to sign (mutated in place)
    :return: the same entry with a fresh "signature" field
    """
    # Remove old signature if it exists
    user_entry.pop("signature", None)
    user_entry = self._data_from_dynamo_replace(user_entry)
    # NOTE(review): `use_decimal=True` is a simplejson-only kwarg — this
    # presumably relies on `json` being simplejson in this module; confirm.
    json_request = json.dumps(user_entry, sort_keys=True, use_decimal=True)
    sig = crypto.sign(json_request)
    user_entry["signature"] = sig
    return user_entry
async def authenticate_user(self, login_attempt) -> AuthenticationResponse:
    """Authenticate a username/password login attempt against the users table.

    All failure paths return the same generic error message (to avoid
    leaking which usernames exist) plus a delay notice produced by
    wait_after_authentication_failure, which throttles repeated failures.

    :param login_attempt: object with username, password, after_redirect_uri
    :return: AuthenticationResponse; on success carries username and groups
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log_data = {
        "function": function,
        "user_email": login_attempt.username,
        "after_redirect_uri": login_attempt.after_redirect_uri,
    }
    user_entry = await sync_to_async(self.users_table.query)(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": login_attempt.username},
    )
    user = None
    generic_error = ["User doesn't exist, or password is incorrect. "]
    # Require exactly one match; anything else is treated as "not found".
    if user_entry and "Items" in user_entry and len(user_entry["Items"]) == 1:
        user = user_entry["Items"][0]
    if not user:
        delay_error = await wait_after_authentication_failure(
            login_attempt.username
        )
        error = f"Unable to find user: {login_attempt.username}"
        log.error({**log_data, "message": error + delay_error})
        return AuthenticationResponse(
            authenticated=False, errors=generic_error + [delay_error]
        )
    if not user.get("password"):
        delay_error = await wait_after_authentication_failure(
            login_attempt.username
        )
        error = "User exists, but doesn't have a password stored in the database"
        log.error({**log_data, "message": error + delay_error})
        return AuthenticationResponse(
            authenticated=False, errors=generic_error + [delay_error]
        )
    # Stored hash is a boto3 Binary; compare against its raw bytes.
    password_hash_matches = bcrypt.checkpw(
        login_attempt.password.encode("utf-8"), user["password"].value
    )
    if not password_hash_matches:
        delay_error = await wait_after_authentication_failure(
            login_attempt.username
        )
        error = "Password does not match. "
        log.error({**log_data, "message": error + delay_error})
        return AuthenticationResponse(
            authenticated=False, errors=generic_error + [delay_error]
        )
    return AuthenticationResponse(
        authenticated=True, username=user["username"], groups=user["groups"]
    )
def create_user(
    self,
    user_email,
    password: Optional[str] = None,
    groups: Optional[List[str]] = None,
):
    """Create a user entry (optionally with a bcrypt-hashed password),
    sign it, and persist it to the users table."""
    groups = groups or []
    now = int(time.time())
    unsigned_user_entry = {
        "created": now,
        "last_updated": now,
        "username": user_email,
        "requests": [],
        "groups": groups,
    }
    if password:
        unsigned_user_entry["password"] = bcrypt.hashpw(
            password.encode("utf-8"), bcrypt.gensalt()
        )
    user_entry = self.sign_request(unsigned_user_entry)
    try:
        self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
    except Exception as e:
        error = f"Unable to add user submission: {user_entry}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
    return user_entry
def update_user(
    self,
    user_email,
    password: Optional[str] = None,
    groups: Optional[List[str]] = None,
):
    """Update an existing user's password and/or groups, re-sign the entry,
    and persist it.

    :raises DataNotRetrievable: when no single matching user exists
    """
    # NOTE(review): this normalization is dead — an empty list is falsy, so
    # the later `if groups:` still skips assignment either way.
    if not groups:
        groups = []
    user_ddb = self.users_table.query(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": user_email},
    )
    user = None
    if user_ddb and "Items" in user_ddb and len(user_ddb["Items"]) == 1:
        user = user_ddb["Items"][0]
    if not user:
        raise DataNotRetrievable(f"Unable to find user: {user_email}")
    timestamp = int(time.time())
    if password:
        pw = bytes(password, "utf-8")
        salt = bcrypt.gensalt()
        user["password"] = bcrypt.hashpw(pw, salt)
    if groups:
        user["groups"] = groups
    user["last_updated"] = timestamp
    user_entry = self.sign_request(user)
    try:
        self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
    except Exception as e:
        error = f"Unable to add user submission: {user_entry}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
    return user_entry
def delete_user(self, user_email):
    """Remove the given user's entry from the users table."""
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log.debug({"function": function, "user_email": user_email})
    user_entry = {"username": user_email}
    try:
        delete_key = self._data_to_dynamo_replace(user_entry)
        self.users_table.delete_item(Key=delete_key)
    except Exception as e:
        error = f"Unable to delete user: {user_entry}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
async def get_user(
    self, user_email: str
) -> Optional[Dict[str, Union[Decimal, List[str], Binary, str]]]:
    """
    Look up a single user entry by username.

    :param user_email: username (email) to query
    :return: the user item when exactly one match exists, otherwise None
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log_data = {"function": function, "user_email": user_email}
    log.debug(log_data)
    # Run the blocking boto3 query in a worker thread so this coroutine does
    # not stall the event loop (matches the pattern in authenticate_user;
    # previously the query was called synchronously from async code).
    user = await sync_to_async(self.users_table.query)(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": user_email},
    )
    if user and "Items" in user and len(user["Items"]) == 1:
        return user["Items"][0]
    return None
def get_or_create_user(
    self, user_email: str
) -> Dict[str, Union[Decimal, List[str], Binary, str]]:
    """Return the user entry for ``user_email``, creating a fresh one via
    create_user when none exists.

    :param user_email: username (email) to look up
    :return: the existing first matching item, or the newly created entry
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log_data = {"function": function, "user_email": user_email}
    log.debug(log_data)
    user = self.users_table.query(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": user_email},
    )
    items = []
    if user and "Items" in user:
        items = user["Items"]
    if not items:
        return self.create_user(user_email)
    return items[0]
def resolve_request_ids(
    self, request_ids: List[str]
) -> List[Dict[str, Union[int, str]]]:
    """Fetch the full request record for each request ID, in order.

    :param request_ids: request IDs to resolve
    :return: one decoded request dict per ID
    :raises NoMatchingRequest: if any ID has no matching row
    """
    requests = []
    for request_id in request_ids:
        request = self.requests_table.query(
            KeyConditionExpression="request_id = :ri",
            ExpressionAttributeValues={":ri": request_id},
        )
        if request["Items"]:
            items = self._data_from_dynamo_replace(request["Items"])
            requests.append(items[0])
        else:
            raise NoMatchingRequest(
                f"No matching request for request_id: {request_id}"
            )
    return requests
def add_request_id_to_user(
    self,
    affected_user: Dict[str, Union[Decimal, List[str], Binary, str]],
    request_id: str,
) -> None:
    """Append ``request_id`` to the user's request list, re-sign the entry,
    and persist it (mutates ``affected_user`` in place)."""
    affected_user["requests"].append(request_id)
    self.users_table.put_item(
        Item=self._data_to_dynamo_replace(self.sign_request(affected_user))
    )
def add_request(
    self,
    user_email: str,
    group: str,
    justification: str,
    request_time: Optional[int] = None,
    status: str = "pending",
    updated_by: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
    """
    Add a user request to the dynamo table

    Sample run:
        add_request("user@example.com", "engtest", "because")

    :param user_email: Email address of user
    :param group: Name of group user is requesting access to
    :param justification: free-form justification text
    :param request_time: creation time (epoch seconds); defaults to now
    :param status: initial status of the request
    :param updated_by: who created/last touched the request
    :return: the newly created request dict
    :raises PendingRequestAlreadyExists: if the user already has a pending
        request for the same group
    """
    """
    Request:
        group
        justification
        role
        request_time
        approval_time
        updated_by
        approval_reason
        status
    user@example.com:
        requests: []
        last_updated: 1
        signature: xxxx
    #pending: []
    #expired: []
    # How to expire requests if soemeone maliciously deletes content
    # How to query for all approved requests for group X
    # What if we want to send email saying your request is expiring in 7 days? Maybe celery to query all
    # What about concept of request ID? Maybe base64 encoded thing?
    # Need an all-in-one page to show all pending requests, all expired/approved requests
    """
    request_time = request_time or int(time.time())
    stats.count("new_group_request", tags={"user": user_email, "group": group})
    # Keep the cached affected_user in sync with the requester.
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    # Get current user. Create if they do not already exist
    # self.user = self.get_or_create_user(user_email)
    # Get current user requests, which will validate existing signature
    # existing_request_ids = self.user["requests"]
    # existing_requests = self.resolve_request_ids(existing_request_ids)
    existing_pending_requests_for_group = self.get_requests_by_user(
        user_email, group, status="pending"
    )
    # Craft the new request json
    timestamp = int(time.time())
    request_id = str(uuid.uuid4())
    new_request = {
        "request_id": request_id,
        "group": group,
        "status": status,
        "justification": justification,
        "request_time": request_time,
        "updated_by": updated_by,
        "last_updated": timestamp,
        "username": user_email,
    }
    # See if user already has an active or pending request for the group
    if existing_pending_requests_for_group:
        for request in existing_pending_requests_for_group:
            raise PendingRequestAlreadyExists(
                f"Pending request for group: {group} already exists: {request}"
            )
    try:
        self.requests_table.put_item(Item=self._data_to_dynamo_replace(new_request))
    except Exception as e:
        error = {
            "error": f"Unable to add user request: {str(e)}",
            "request": new_request,
        }
        log.error(error, exc_info=True)
        raise Exception(error)
    # Record the new request ID on the (signed) user entry as well.
    self.add_request_id_to_user(self.affected_user, request_id)
    return new_request
async def get_all_requests(self, status=None):
    """Return all requests. If a status is specified, only requests with the specified status will be returned.

    :param status: optional status to filter by
    :return: list of request items
    """
    items = await sync_to_async(self.parallel_scan_table)(self.requests_table)
    return_value = []
    if status:
        # Each scanned item appears to carry a "json" list of sub-requests,
        # each with its own status; filter those, keeping only items that
        # still have at least one match. (Assumed shape — TODO confirm.)
        for item in items:
            new_json = []
            for j in item["json"]:
                if j["status"] == status:
                    new_json.append(j)
            item["json"] = new_json
            if new_json:
                return_value.append(item)
    else:
        return_value = items
    return return_value
def get_requests_by_user(
    self,
    user_email: str,
    group: str = None,
    status: str = None,
    use_cache: bool = False,
) -> list:
    """Get requests by user. Group and status can also be specified to filter results.

    :param user_email: user whose requests to fetch
    :param group: optional group filter
    :param status: optional status filter
    :param use_cache: read/write a 120-second Redis cache of the result
    :return: list of matching request dicts
    """
    # Cache key encodes all filter parameters.
    red_key = f"USER_REQUESTS_{user_email}-{group}-{status}"
    if use_cache:
        requests_to_return = red.get(red_key)
        if requests_to_return:
            return json.loads(requests_to_return)
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    existing_request_ids = self.affected_user["requests"]
    existing_requests = self.resolve_request_ids(existing_request_ids)
    requests_to_return = []
    if existing_requests:
        for request in existing_requests:
            if group and request["group"] != group:
                continue
            if status and request["status"] != status:
                continue
            requests_to_return.append(request)
    if use_cache:
        # Cache the filtered result for 2 minutes.
        red.setex(red_key, 120, json.dumps(requests_to_return))
    return requests_to_return
def change_request_status(
    self, user_email, group, new_status, updated_by=None, reviewer_comments=None
):
    """
    Change the status of every request ``user_email`` has for ``group``.

    :param user_email: owner of the request(s)
    :param group: group whose requests are updated
    :param new_status: one of POSSIBLE_STATUSES
    :param updated_by: reviewer identity; required when approving
    :param reviewer_comments: optional reviewer note stored on the request
    :return: the last request that was modified (or None if none matched —
        though no-match currently raises instead)
    :raises NoExistingRequest: when the user has no requests, or none for
        the given group
    """
    stats.count(
        "update_group_request",
        tags={
            "user": user_email,
            "group": group,
            "new_status": new_status,
            "updated_by": updated_by,
        },
    )
    modified_request = None
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    timestamp = int(time.time())
    if new_status not in POSSIBLE_STATUSES:
        raise Exception(
            f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
        )
    if new_status == "approved" and not updated_by:
        raise Exception(
            "You must provide `updated_by` to change a request status to approved."
        )
    existing_requests = self.get_requests_by_user(user_email)
    if existing_requests:
        updated = False
        # Note: updates ALL of the user's requests for this group, not just
        # pending ones; each is persisted individually.
        for request in existing_requests:
            if request["group"] == group:
                request["updated_by"] = updated_by
                request["status"] = new_status
                request["last_updated"] = timestamp
                request["reviewer_comments"] = reviewer_comments
                modified_request = request
                try:
                    self.requests_table.put_item(
                        Item=self._data_to_dynamo_replace(request)
                    )
                except Exception as e:
                    error = f"Unable to add user request: {request}: {str(e)}"
                    log.error(error, exc_info=True)
                    raise Exception(error)
                updated = True
        if not updated:
            raise NoExistingRequest(
                f"Unable to find existing request for user: {user_email} and group: {group}."
            )
    else:
        raise NoExistingRequest(
            f"Unable to find existing requests for user: {user_email}"
        )
    return modified_request
def change_request_status_by_id(
    self,
    request_id: str,
    new_status: str,
    updated_by: Optional[str] = None,
    reviewer_comments: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
    """
    Change request status by ID

    :param request_id: id of the request to update
    :param new_status: one of POSSIBLE_STATUSES
    :param updated_by: reviewer identity; required when approving
    :param reviewer_comments: optional reviewer note stored on the request
    :return: the modified request
    :raises NoMatchingRequest: via resolve_request_ids when the ID is unknown
    """
    modified_request = None
    if new_status == "approved" and not updated_by:
        raise Exception(
            "You must provide `updated_by` to change a request status to approved."
        )
    requests = self.resolve_request_ids([request_id])
    if new_status not in POSSIBLE_STATUSES:
        raise Exception(
            f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
        )
    for request in requests:
        request["status"] = new_status
        request["updated_by"] = updated_by
        request["last_updated"] = int(time.time())
        request["reviewer_comments"] = reviewer_comments
        modified_request = request
        try:
            self.requests_table.put_item(Item=self._data_to_dynamo_replace(request))
        except Exception as e:
            error = f"Unable to add user request: {request} : {str(e)}"
            log.error(error, exc_info=True)
            raise Exception(error)
    return modified_request
def get_all_policies(self):
    """Return every item in the policies table, following scan pagination."""
    items = []
    response = self.policies_table.scan()
    if response and "Items" in response:
        items = self._data_from_dynamo_replace(response["Items"])
        # Keep scanning while DynamoDB reports more pages.
        while "LastEvaluatedKey" in response:
            response = self.policies_table.scan(
                ExclusiveStartKey=response["LastEvaluatedKey"]
            )
            items.extend(self._data_from_dynamo_replace(response["Items"]))
    return items
async def create_group_log_entry(
    self,
    group: str,
    username: str,
    updated_by: str,
    action: str,
    updated_at: Optional[int] = None,
    extra: Any = None,
) -> None:
    """Append an audit-log row for a group membership action.

    :param group: group the action applies to
    :param username: user the action applies to
    :param updated_by: who performed the action
    :param action: action name being recorded
    :param updated_at: event time (epoch seconds); defaults to now
    :param extra: optional extra payload stored verbatim
    """
    updated_at = updated_at or int(time.time())
    log_id = str(uuid.uuid4())
    log_entry = {
        "uuid": log_id,
        "group": group,
        "username": username,
        "updated_by": updated_by,
        "updated_at": updated_at,
        "action": action,
        "extra": extra,
    }
    self.group_log.put_item(Item=self._data_to_dynamo_replace(log_entry))
async def get_all_audit_logs(self) -> List[Dict[str, Union[int, None, str]]]:
    """Scan the entire group audit-log table, following pagination."""
    scan = sync_to_async(self.group_log.scan)
    response = await scan()
    if not response or "Items" not in response:
        return []
    items = self._data_from_dynamo_replace(response["Items"])
    while "LastEvaluatedKey" in response:
        response = await scan(ExclusiveStartKey=response["LastEvaluatedKey"])
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    return items
async def get_all_pending_requests(self):
    """Convenience wrapper: return all group requests whose status is pending."""
    return await self.get_all_requests(status="pending")
def batch_write_cloudtrail_events(self, items):
    """Bulk-write CloudTrail event items, deduplicating within the batch on
    the (arn, request_id) key pair.

    :param items: iterable of event dicts to persist
    :return: True on completion
    """
    with self.cloudtrail_table.batch_writer(
        overwrite_by_pkeys=["arn", "request_id"]
    ) as batch:
        for item in items:
            batch.put_item(Item=self._data_to_dynamo_replace(item))
    return True
async def get_top_cloudtrail_errors_by_arn(self, arn, n=5):
    """Return the ``n`` most frequent non-expired CloudTrail errors for an ARN.

    :param arn: principal ARN to query events for
    :param n: number of top error groups to return
    :return: dict keyed by "event_call|||resource", each value carrying a
        count and any generated policy, ordered most-frequent first
    """
    response: dict = await sync_to_async(self.cloudtrail_table.query)(
        KeyConditionExpression=Key("arn").eq(arn)
    )
    items = response.get("Items", [])
    aggregated_errors = defaultdict(dict)
    for item in items:
        # Skip rows whose TTL has already elapsed (Dynamo TTL deletion lags).
        if int(item["ttl"]) < int(time.time()):
            continue
        event_call = item["event_call"]
        event_resource = item.get("resource", "")
        # "|||" joins call and resource into a single aggregation key.
        event_string = f"{event_call}|||{event_resource}"
        if not aggregated_errors.get(event_string):
            aggregated_errors[event_string]["count"] = 0
            aggregated_errors[event_string]["generated_policy"] = item.get(
                "generated_policy", {}
            )
        aggregated_errors[event_string]["count"] += 1
    # Sort by descending count and keep the top n.
    top_n_errors = {
        k: v
        for k, v in sorted(
            aggregated_errors.items(),
            key=lambda item: item[1]["count"],
            reverse=True,
        )[:n]
    }
    return top_n_errors
def count_arn_errors(self, error_count, items):
    """Accumulate per-ARN error counts from ``items`` into ``error_count``.

    Each item contributes its "count" field (default 1) to the tally for its
    "arn". The mapping is updated in place and returned.
    """
    for entry in items:
        arn = entry.get("arn")
        error_count[arn] = (error_count.get(arn) or 0) + entry.get("count", 1)
    return error_count
async def get_existing_pending_approved_request(
    user: str, group_info: Any
) -> Optional[dict]:
    """Return the user's first pending or approved request for the given
    group, or None when no such request exists.

    :param user: username whose requests are inspected
    :param group_info: group descriptor; only its "name" key is used
    """
    dynamo_handler = UserDynamoHandler(user)
    existing_requests = await sync_to_async(dynamo_handler.get_requests_by_user)(user)
    if existing_requests:
        for request in existing_requests:
            if group_info.get("name") == request.get("group") and request.get(
                "status"
            ) in ["pending", "approved"]:
                return request
    return None
162,141 | import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
class UserDynamoHandler(BaseDynamoHandler):
def __init__(self, user_email: Optional[str] = None) -> None:
    """Bind all ConsoleMe DynamoDB tables and optionally load (or create)
    the user entry for ``user_email``.

    In development, a Dynamo connection failure falls back to a signed
    in-memory user entry instead of raising.
    """
    try:
        self.requests_table = self._get_dynamo_table(
            config.get("aws.requests_dynamo_table", "consoleme_requests_global")
        )
        self.users_table = self._get_dynamo_table(
            config.get("aws.users_dynamo_table", "consoleme_users_global")
        )
        self.group_log = self._get_dynamo_table(
            config.get("aws.group_log_dynamo_table", "consoleme_audit_global")
        )
        # NOTE(review): this reuses the group_log config key
        # ("aws.group_log_dynamo_table") for the dynamic config table —
        # looks like a copy-paste; confirm the intended key.
        self.dynamic_config = self._get_dynamo_table(
            config.get("aws.group_log_dynamo_table", "consoleme_config_global")
        )
        self.policy_requests_table = self._get_dynamo_table(
            config.get(
                "aws.policy_requests_dynamo_table", "consoleme_policy_requests"
            )
        )
        self.api_health_roles_table = self._get_dynamo_table(
            config.get(
                "aws.api_health_apps_table_dynamo_table",
                "consoleme_api_health_apps",
            )
        )
        self.resource_cache_table = self._get_dynamo_table(
            config.get(
                "aws.resource_cache_dynamo_table", "consoleme_resource_cache"
            )
        )
        self.cloudtrail_table = self._get_dynamo_table(
            config.get("aws.cloudtrail_table", "consoleme_cloudtrail")
        )
        self.notifications_table = self._get_dynamo_table(
            config.get("aws.notifications_table", "consoleme_notifications")
        )
        if user_email:
            self.user = self.get_or_create_user(user_email)
            self.affected_user = self.user
    except Exception:
        if config.get("development"):
            log.error(
                "Unable to connect to Dynamo. Trying to set user via development configuration",
                exc_info=True,
            )
            # Development fallback: fabricate a signed, empty user entry so
            # the rest of the handler still works without Dynamo.
            self.user = self.sign_request(
                {
                    "last_updated": int(time.time()),
                    "username": user_email,
                    "requests": [],
                }
            )
            self.affected_user = self.user
        else:
            log.error("Unable to get Dynamo table.", exc_info=True)
            raise
def write_resource_cache_data(self, data):
    """Bulk-write resource cache entries, overwriting rows that share the
    (resourceId, resourceType) composite key."""
    self.parallel_write_table(
        self.resource_cache_table, data, ["resourceId", "resourceType"]
    )
async def get_dynamic_config_yaml(self) -> bytes:
    """Retrieve dynamic configuration yaml."""
    # Delegate to the sync variant in a worker thread to avoid blocking.
    return await sync_to_async(self.get_dynamic_config_yaml_sync)()
def get_dynamic_config_yaml_sync(self) -> bytes:
    """Retrieve dynamic configuration yaml synchronously.

    Returns b"" when the "master" row is missing, empty, or any error occurs
    (errors are reported to Sentry, never raised to the caller).
    """
    c = b""
    try:
        current_config = self.dynamic_config.get_item(Key={"id": "master"})
        if not current_config:
            return c
        compressed_config = current_config.get("Item", {}).get("config", "")
        if not compressed_config:
            return c
        # Stored zlib-compressed as a boto3 Binary; decompress its raw bytes.
        c = zlib.decompress(compressed_config.value)
    except Exception:  # noqa
        sentry_sdk.capture_exception()
    return c
def get_dynamic_config_dict(self) -> dict:
    """Retrieve dynamic configuration dictionary that can be merged with primary configuration dictionary."""
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:  # if cleanup: 'RuntimeError: There is no current event loop..'
        loop = None
    # When already inside a running loop we cannot call asyncio.run; use the
    # synchronous code path instead.
    if loop and loop.is_running():
        current_config_yaml = self.get_dynamic_config_yaml_sync()
    else:
        current_config_yaml = asyncio.run(self.get_dynamic_config_yaml())
    config_d = yaml.safe_load(current_config_yaml)
    return config_d
async def get_all_api_health_alerts(self) -> list:
    """Return every api_health alert app entry, following scan pagination.

    :return: list of alert app items with DynamoDB sentinel values decoded
    """
    response: dict = self.api_health_roles_table.scan()
    # Decode the first page too — previously only subsequent pages went
    # through _data_from_dynamo_replace, leaving page-one items with raw
    # Dynamo placeholder values (inconsistent with get_all_policies).
    items = self._data_from_dynamo_replace(response.get("Items", []))
    while "LastEvaluatedKey" in response:
        response = self.api_health_roles_table.scan(
            ExclusiveStartKey=response["LastEvaluatedKey"]
        )
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    return items
async def get_api_health_alert_app(self, app_name) -> Optional[dict]:
    """Fetch a single api_health alert app entry by appName.

    :param app_name: appName key of the entry
    :return: the item, or None when not found
    """
    resp: dict = await sync_to_async(self.api_health_roles_table.get_item)(
        Key={"appName": app_name}
    )
    return resp.get("Item", None)
async def write_api_health_alert_info(self, request, user_email: str):
    """
    Write a health alert app entry to the API health DynamoDB table.

    :param request: dict describing the health alert app; enriched in place
        with creation/update audit metadata before being persisted
    :param user_email: email of the user performing the write (recorded as
        ``updated_by``)
    :return: the enriched request dict
    :raises Exception: re-raises any DynamoDB put failure after logging it
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    # Enrich the request with audit metadata. Plain assignments are used here:
    # the previous `request["key"]: int = value` form attached annotations to
    # subscript targets, which Python evaluates and then silently discards.
    request["app_create_time"] = int(time.time())
    request["updated_by"] = user_email
    request["last_updated"] = int(time.time())
    try:
        await sync_to_async(self.api_health_roles_table.put_item)(
            Item=self._data_to_dynamo_replace(request)
        )
    except Exception:
        error = {
            "message": "Unable to add new api_health info request",
            "request": request,
            "function": function,
        }
        log.error(error, exc_info=True)
        raise
    return request
async def update_api_health_alert_info(
    self, request: dict, user_email=None, update_by=None, last_updated=None
):
    """
    Update an api_health_alert_info entry.

    :param request: entry to persist; enriched in place with audit metadata
    :param user_email: fallback identity when ``update_by`` is not supplied
    :param update_by: who performed the update
    :param last_updated: update time (epoch seconds); defaults to now
    :return: the enriched request dict
    :raises Exception: wraps and re-raises any DynamoDB put failure
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    # enrich request
    if update_by:
        request["updated_by"] = update_by
    else:
        request["updated_by"] = user_email
    if last_updated:
        request["last_updated"] = last_updated
    else:
        request["last_updated"] = int(time.time())
    try:
        await sync_to_async(self.api_health_roles_table.put_item)(
            Item=self._data_to_dynamo_replace(request)
        )
    except Exception as e:
        error: dict = {
            "function": function,
            "message": "Unable to update api_health_info request",
            "request": request,
            "error": str(e),
        }
        log.error(error, exc_info=True)
        raise Exception(error)
    return request
async def delete_api_health_alert_info(self, app: str) -> None:
    """Delete the api_health alert entry for the given app (keyed by appName)."""
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    delete_key = {"appName": app}
    try:
        await sync_to_async(self.api_health_roles_table.delete_item)(Key=delete_key)
    except Exception:
        log.error(
            {
                "function": function,
                "message": "Unable to delete api_health info",
                "app": app,
            },
            exc_info=True,
        )
        raise
async def write_policy_request(
    self,
    user_email: str,
    justification: str,
    arn: str,
    policy_name: str,
    policy_changes: dict,
    resources: List[str],
    resource_policies: List[Dict],
    request_time: Optional[int] = None,
    request_uuid=None,
    policy_status="pending",
    cross_account_request: bool = False,
    dry_run: bool = False,
):
    """
    Writes a policy request to the appropriate DynamoDB table.
    dry_run will create the request format, but won't actually write it.

    Sample run:
        write_policy_request(policy_changes)

    :param user_email: requester's email; also recorded as ``updated_by``
    :param justification: free-form justification text
    :param arn: principal ARN the request applies to
    :param policy_name: name of the policy being changed
    :param policy_changes: change document (stored JSON-serialized)
    :param resources: affected resource identifiers
    :param resource_policies: resource policy documents
    :param request_time: creation time (epoch seconds); defaults to now
    :param request_uuid: request id; a fresh uuid4 is generated when omitted
    :param policy_status: initial request status
    :param cross_account_request: whether the request crosses accounts
    :param dry_run: build and return the request dict without persisting it
    :return: the request dict that was (or would have been) written
    """
    request_time = request_time or int(time.time())
    # Craft the new request json
    timestamp = int(time.time())
    request_id = request_uuid or str(uuid.uuid4())
    new_request = {
        "request_id": request_id,
        "arn": arn,
        "status": policy_status,
        "justification": justification,
        "request_time": request_time,
        "updated_by": user_email,
        "last_updated": timestamp,
        "username": user_email,
        "policy_name": policy_name,
        # Stored as a JSON string, not a nested document.
        "policy_changes": json.dumps(policy_changes),
        "resources": resources,
        "resource_policies": resource_policies,
        "cross_account_request": cross_account_request,
    }
    if not dry_run:
        try:
            await sync_to_async(self.policy_requests_table.put_item)(
                Item=self._data_to_dynamo_replace(new_request)
            )
        except Exception as e:
            error = f"Unable to add new policy request: {new_request}: {str(e)}"
            log.error(error, exc_info=True)
            raise Exception(error)
    else:
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "request": new_request,
            "message": "Dry run, skipping adding request to dynamo",
        }
        log.debug(log_data)
    return new_request
async def write_policy_request_v2(self, extended_request: ExtendedRequestModel):
    """
    Persist a version-2 policy request to the policy requests table.

    The stored "arn" field is the real ARN for AwsResource principals, or a
    synthetic "<repository>-<resource>" identifier for Honeybee templates.
    """
    new_request = {
        "request_id": extended_request.id,
        "principal": extended_request.principal.dict(),
        "status": extended_request.request_status.value,
        "justification": extended_request.justification,
        "request_time": extended_request.timestamp,
        "last_updated": int(time.time()),
        "version": "2",
        "extended_request": json.loads(extended_request.json()),
        "username": extended_request.requester_email,
    }
    principal = extended_request.principal
    if principal.principal_type == "AwsResource":
        new_request["arn"] = principal.principal_arn
    elif principal.principal_type == "HoneybeeAwsResourceTemplate":
        new_request["arn"] = (
            f"{principal.repository_name}-{principal.resource_identifier}"
        )
    else:
        raise Exception("Invalid principal type")
    log_data = {
        "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
        "message": "Writing policy request v2 to Dynamo",
        "request": new_request,
    }
    log.debug(log_data)
    try:
        await sync_to_async(self.policy_requests_table.put_item)(
            Item=self._data_to_dynamo_replace(new_request)
        )
        log_data["message"] = "Successfully finished writing policy request v2 to Dynamo"
        log.debug(log_data)
    except Exception as e:
        log_data["message"] = "Error occurred writing policy request v2 to Dynamo"
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        raise Exception(f"{log_data['message']}: {str(e)}")
    return new_request
async def update_policy_request(self, updated_request):
    """
    Persist an updated policy request, refreshing its last_updated timestamp.

    Sample run:
        update_policy_request(policy_changes)

    :param updated_request: full request dict to overwrite in DynamoDB
    :return: the request dict that was written
    :raises Exception: if the DynamoDB write fails
    """
    updated_request["last_updated"] = int(time.time())
    try:
        item = self._data_to_dynamo_replace(updated_request)
        await sync_to_async(self.policy_requests_table.put_item)(Item=item)
    except Exception as exc:
        message = f"Unable to add updated policy request: {updated_request}: {str(exc)}"
        log.error(message, exc_info=True)
        raise Exception(message)
    return updated_request
async def get_policy_requests(self, arn=None, request_id=None):
    """Fetch policy requests from DynamoDB by request ID (preferred) or by ARN."""
    if not arn and not request_id:
        raise Exception("Must pass in ARN or policy request ID")
    if request_id:
        key_expr, attr_values = "request_id = :ri", {":ri": request_id}
    else:
        key_expr, attr_values = "arn = :arn", {":arn": arn}
    response = self.policy_requests_table.query(
        KeyConditionExpression=key_expr,
        ExpressionAttributeValues=attr_values,
    )
    matching_requests = []
    if response["Items"]:
        converted = self._data_from_dynamo_replace(response["Items"])
        # Upgrade any legacy-format requests before returning them.
        matching_requests.extend(await self.convert_policy_requests_to_v3(converted))
    return matching_requests
async def convert_policy_requests_to_v3(self, requests):
    """
    Upgrade legacy v2 policy requests (ARN-keyed) to the v3 principal-based
    format, in place. If any request was actually converted in a way that
    must be persisted, the whole batch is written back to DynamoDB.

    :param requests: list of request dicts as read from DynamoDB
    :return: the same list, with v2 entries upgraded
    """
    # TODO: Remove this function and its call sites after a deprecation grace period.
    changed = False
    for request in requests:
        # Only v2 requests need conversion.
        if request.get("version") not in ("2",):
            continue
        if request.get("extended_request") and not request.get("principal"):
            # Fix: pop with a default so a request that somehow lacks "arn"
            # doesn't raise KeyError (the original later did pop("arn", None),
            # showing a missing key was anticipated).
            principal_arn = request.pop("arn", None)
            request["principal"] = {
                "principal_arn": principal_arn,
                "principal_type": "AwsResource",
            }
            request["extended_request"]["principal"] = {
                "principal_arn": principal_arn,
                "principal_type": "AwsResource",
            }
        changes = (
            request.get("extended_request", {})
            .get("changes", {})
            .get("changes", [])
        )
        for change in changes:
            if not change.get("principal_arn"):
                continue
            if change.get("version") not in ("2.0", "2", 2):
                continue
            change["principal"] = {
                "principal_arn": change["principal_arn"],
                "principal_type": "AwsResource",
            }
            change.pop("principal_arn")
            change["version"] = "3.0"
            changed = True
    if changed:
        # Persist the upgraded requests so conversion only happens once.
        self.parallel_write_table(self.policy_requests_table, requests)
    return requests
async def get_all_policy_requests(
    self, status: Optional[str] = "pending"
) -> List[Dict[str, Union[int, List[str], str]]]:
    """Return all policy requests, optionally filtered to a single status.

    :param status: status to filter on; pass None/"" to return everything
    :return: list of request dicts
    """
    requests = await sync_to_async(self.parallel_scan_table)(
        self.policy_requests_table
    )
    requests = await self.convert_policy_requests_to_v3(requests)
    if not status:
        return requests
    return [item for item in requests if item["status"] == status]
async def update_dynamic_config(self, config: str, updated_by: str) -> None:
    """Take a YAML config and write it to DDB.

    (YAML is used instead of JSON to preserve comments.)

    :param config: new configuration as a YAML string
    :param updated_by: identity of the person making the change
    :raises yaml.YAMLError: if the string is not valid YAML
    """
    # Validate that config loads as yaml, raises exception if not
    yaml.safe_load(config)
    stats.count("update_dynamic_config", tags={"updated_by": updated_by})
    current_config_entry = self.dynamic_config.get_item(Key={"id": "master"})
    if current_config_entry.get("Item"):
        # Archive the previous config under its own updated_at timestamp as
        # the id, so the "master" row can be overwritten while history is kept.
        old_config = {
            "id": current_config_entry["Item"]["updated_at"],
            "updated_by": current_config_entry["Item"]["updated_by"],
            "config": current_config_entry["Item"]["config"],
            "updated_at": str(int(time.time())),
        }
        self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(old_config))
    # The active config always lives at id="master", zlib-compressed.
    new_config = {
        "id": "master",
        "config": zlib.compress(config.encode()),
        "updated_by": updated_by,
        "updated_at": str(int(time.time())),
    }
    self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(new_config))
def validate_signature(self, items):
    """Verify the embedded signature over *items*.

    Note: mutates *items* by removing the "signature" key.

    :raises Exception: if the signature does not verify
    """
    signature = items.pop("signature")
    if isinstance(signature, Binary):
        # DynamoDB wraps binary attributes; unwrap to raw bytes.
        signature = signature.value
    json_request = json.dumps(items, sort_keys=True)
    verified = crypto.verify(json_request, signature)
    if not verified:
        raise Exception(f"Invalid signature for request: {json_request}")
def sign_request(
    self, user_entry: Dict[str, Union[Decimal, List[str], Binary, str]]
) -> Dict[str, Union[Decimal, List[str], str, bytes]]:
    """
    Sign *user_entry* and return it with a "signature" field attached.

    :param user_entry: user record to sign (also mutated in place)
    :return: the record with "signature" set
    """
    # Remove old signature if it exists
    user_entry.pop("signature", None)
    user_entry = self._data_from_dynamo_replace(user_entry)
    # NOTE(review): `use_decimal` is a simplejson/ujson extension, not stdlib
    # json — confirm which json module this file imports.
    json_request = json.dumps(user_entry, sort_keys=True, use_decimal=True)
    sig = crypto.sign(json_request)
    user_entry["signature"] = sig
    return user_entry
async def authenticate_user(self, login_attempt) -> AuthenticationResponse:
    """
    Validate a username/password login attempt against the users table.

    :param login_attempt: object exposing `username`, `password`, and
        `after_redirect_uri` attributes
    :return: AuthenticationResponse; on success it carries username and
        groups, on failure a generic error list (identical wording for
        "no such user" and "bad password" to avoid leaking which failed)
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log_data = {
        "function": function,
        "user_email": login_attempt.username,
        "after_redirect_uri": login_attempt.after_redirect_uri,
    }
    user_entry = await sync_to_async(self.users_table.query)(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": login_attempt.username},
    )
    user = None
    # Deliberately vague message shared by every failure path below.
    generic_error = ["User doesn't exist, or password is incorrect. "]
    if user_entry and "Items" in user_entry and len(user_entry["Items"]) == 1:
        user = user_entry["Items"][0]
    if not user:
        # Throttle repeated failures before responding.
        delay_error = await wait_after_authentication_failure(
            login_attempt.username
        )
        error = f"Unable to find user: {login_attempt.username}"
        log.error({**log_data, "message": error + delay_error})
        return AuthenticationResponse(
            authenticated=False, errors=generic_error + [delay_error]
        )
    if not user.get("password"):
        delay_error = await wait_after_authentication_failure(
            login_attempt.username
        )
        error = "User exists, but doesn't have a password stored in the database"
        log.error({**log_data, "message": error + delay_error})
        return AuthenticationResponse(
            authenticated=False, errors=generic_error + [delay_error]
        )
    # Compare against the stored bcrypt hash (a DynamoDB Binary; .value is bytes).
    password_hash_matches = bcrypt.checkpw(
        login_attempt.password.encode("utf-8"), user["password"].value
    )
    if not password_hash_matches:
        delay_error = await wait_after_authentication_failure(
            login_attempt.username
        )
        error = "Password does not match. "
        log.error({**log_data, "message": error + delay_error})
        return AuthenticationResponse(
            authenticated=False, errors=generic_error + [delay_error]
        )
    return AuthenticationResponse(
        authenticated=True, username=user["username"], groups=user["groups"]
    )
def create_user(
    self,
    user_email,
    password: Optional[str] = None,
    groups: Optional[List[str]] = None,
):
    """Create, sign, and persist a new user record.

    :param user_email: username/email for the new record
    :param password: optional plaintext password; stored bcrypt-hashed
    :param groups: optional initial group memberships
    :return: the signed user entry that was written
    :raises Exception: if the DynamoDB write fails
    """
    groups = groups or []
    now = int(time.time())
    unsigned_user_entry = {
        "created": now,
        "last_updated": now,
        "username": user_email,
        "requests": [],
        "groups": groups,
    }
    if password:
        unsigned_user_entry["password"] = bcrypt.hashpw(
            password.encode("utf-8"), bcrypt.gensalt()
        )
    user_entry = self.sign_request(unsigned_user_entry)
    try:
        self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
    except Exception as e:
        error = f"Unable to add user submission: {user_entry}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
    return user_entry
def update_user(
    self,
    user_email,
    password: Optional[str] = None,
    groups: Optional[List[str]] = None,
):
    """Update an existing user's password and/or group memberships.

    :param user_email: username of the record to update
    :param password: optional new plaintext password; stored bcrypt-hashed
    :param groups: optional replacement group list (empty/None leaves groups untouched)
    :return: the signed user entry that was written
    :raises DataNotRetrievable: if the user does not exist
    :raises Exception: if the DynamoDB write fails
    """
    # (Removed a dead `if not groups: groups = []` — groups was only ever
    # read under `if groups:` below, so the default never mattered.)
    user_ddb = self.users_table.query(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": user_email},
    )
    user = None
    if user_ddb and "Items" in user_ddb and len(user_ddb["Items"]) == 1:
        user = user_ddb["Items"][0]
    if not user:
        raise DataNotRetrievable(f"Unable to find user: {user_email}")
    if password:
        pw = bytes(password, "utf-8")
        salt = bcrypt.gensalt()
        user["password"] = bcrypt.hashpw(pw, salt)
    if groups:
        user["groups"] = groups
    user["last_updated"] = int(time.time())
    user_entry = self.sign_request(user)
    try:
        self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
    except Exception as e:
        # Fix: this is an update, not an add — the previous message was
        # copy-pasted from create_user.
        error = f"Unable to update user: {user_entry}: {str(e)}"
        log.error(error, exc_info=True)
        raise Exception(error)
    return user_entry
def delete_user(self, user_email):
    """Delete the user record keyed by *user_email* from the users table.

    :raises Exception: if the DynamoDB delete fails
    """
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log.debug({"function": function, "user_email": user_email})
    user_entry = {"username": user_email}
    try:
        self.users_table.delete_item(Key=self._data_to_dynamo_replace(user_entry))
    except Exception as exc:
        message = f"Unable to delete user: {user_entry}: {str(exc)}"
        log.error(message, exc_info=True)
        raise Exception(message)
async def get_user(
    self, user_email: str
) -> Optional[Dict[str, Union[Decimal, List[str], Binary, str]]]:
    """Fetch a user record by username; None unless exactly one match exists."""
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log.debug({"function": function, "user_email": user_email})
    result = self.users_table.query(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": user_email},
    )
    if result and "Items" in result and len(result["Items"]) == 1:
        return result["Items"][0]
    return None
def get_or_create_user(
    self, user_email: str
) -> Dict[str, Union[Decimal, List[str], Binary, str]]:
    """Return the stored record for *user_email*, creating a fresh one if absent."""
    function: str = (
        f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
    )
    log.debug({"function": function, "user_email": user_email})
    result = self.users_table.query(
        KeyConditionExpression="username = :un",
        ExpressionAttributeValues={":un": user_email},
    )
    found = result["Items"] if result and "Items" in result else []
    if found:
        return found[0]
    return self.create_user(user_email)
def resolve_request_ids(
    self, request_ids: List[str]
) -> List[Dict[str, Union[int, str]]]:
    """Look up every request ID; fail fast if any ID has no stored request.

    :raises NoMatchingRequest: if any ID is unknown
    """
    resolved = []
    for request_id in request_ids:
        response = self.requests_table.query(
            KeyConditionExpression="request_id = :ri",
            ExpressionAttributeValues={":ri": request_id},
        )
        if not response["Items"]:
            raise NoMatchingRequest(
                f"No matching request for request_id: {request_id}"
            )
        resolved.append(self._data_from_dynamo_replace(response["Items"])[0])
    return resolved
def add_request_id_to_user(
    self,
    affected_user: Dict[str, Union[Decimal, List[str], Binary, str]],
    request_id: str,
) -> None:
    """Append *request_id* to the user's request list and persist the re-signed record."""
    affected_user["requests"].append(request_id)
    signed_entry = self.sign_request(affected_user)
    self.users_table.put_item(Item=self._data_to_dynamo_replace(signed_entry))
def add_request(
    self,
    user_email: str,
    group: str,
    justification: str,
    request_time: None = None,
    status: str = "pending",
    updated_by: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
    """
    Add a group-access request to the requests table and link it to the user.

    Sample run:
        add_request("user@example.com", "engtest", "because")

    :param user_email: Email address of user
    :param group: Name of group user is requesting access to
    :param justification: free-text reason supplied by the requester
    :param request_time: epoch seconds; defaults to now
    :param status: initial request status (defaults to "pending")
    :param updated_by: who last touched the request, if anyone
    :return: the newly stored request dict
    :raises PendingRequestAlreadyExists: if the user already has a pending
        request for this group
    """
    """
    Request:
        group
        justification
        role
        request_time
        approval_time
        updated_by
        approval_reason
        status
    user@example.com:
        requests: []
        last_updated: 1
        signature: xxxx
    #pending: []
    #expired: []
    # How to expire requests if soemeone maliciously deletes content
    # How to query for all approved requests for group X
    # What if we want to send email saying your request is expiring in 7 days? Maybe celery to query all
    # What about concept of request ID? Maybe base64 encoded thing?
    # Need an all-in-one page to show all pending requests, all expired/approved requests
    """
    request_time = request_time or int(time.time())
    stats.count("new_group_request", tags={"user": user_email, "group": group})
    # Cache the affected user's record on the handler to avoid repeat lookups.
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    # Get current user. Create if they do not already exist
    # self.user = self.get_or_create_user(user_email)
    # Get current user requests, which will validate existing signature
    # existing_request_ids = self.user["requests"]
    # existing_requests = self.resolve_request_ids(existing_request_ids)
    existing_pending_requests_for_group = self.get_requests_by_user(
        user_email, group, status="pending"
    )
    # Craft the new request json
    timestamp = int(time.time())
    request_id = str(uuid.uuid4())
    new_request = {
        "request_id": request_id,
        "group": group,
        "status": status,
        "justification": justification,
        "request_time": request_time,
        "updated_by": updated_by,
        "last_updated": timestamp,
        "username": user_email,
    }
    # See if user already has an active or pending request for the group
    if existing_pending_requests_for_group:
        for request in existing_pending_requests_for_group:
            raise PendingRequestAlreadyExists(
                f"Pending request for group: {group} already exists: {request}"
            )
    try:
        self.requests_table.put_item(Item=self._data_to_dynamo_replace(new_request))
    except Exception as e:
        error = {
            "error": f"Unable to add user request: {str(e)}",
            "request": new_request,
        }
        log.error(error, exc_info=True)
        raise Exception(error)
    # Record the new request ID on the user's signed record.
    self.add_request_id_to_user(self.affected_user, request_id)
    return new_request
async def get_all_requests(self, status=None):
    """Return all group requests, optionally filtered by status.

    When *status* is given, each item's "json" entries are filtered down to
    that status and items left with no matching entries are dropped.

    :param status: optional status filter
    :return: list of request items
    """
    items = await sync_to_async(self.parallel_scan_table)(self.requests_table)
    if not status:
        return items
    filtered_items = []
    for item in items:
        matching = [entry for entry in item["json"] if entry["status"] == status]
        item["json"] = matching
        if matching:
            filtered_items.append(item)
    return filtered_items
def get_requests_by_user(
    self,
    user_email: str,
    group: str = None,
    status: str = None,
    use_cache: bool = False,
) -> dict:
    """Get requests by user. Group and status can also be specified to filter results.

    :param user_email: user whose requests to fetch
    :param group: optional group filter
    :param status: optional status filter
    :param use_cache: read/write a short-lived (120s) Redis cache entry
    :return: matching requests (note: the actual value is a list of dicts)
    """
    red_key = f"USER_REQUESTS_{user_email}-{group}-{status}"
    if use_cache:
        requests_to_return = red.get(red_key)
        if requests_to_return:
            return json.loads(requests_to_return)
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    # The user record stores request IDs; resolving them also validates data.
    existing_request_ids = self.affected_user["requests"]
    existing_requests = self.resolve_request_ids(existing_request_ids)
    requests_to_return = []
    if existing_requests:
        for request in existing_requests:
            if group and request["group"] != group:
                continue
            if status and request["status"] != status:
                continue
            requests_to_return.append(request)
    if use_cache:
        # Cache the filtered result for two minutes.
        red.setex(red_key, 120, json.dumps(requests_to_return))
    return requests_to_return
def change_request_status(
    self, user_email, group, new_status, updated_by=None, reviewer_comments=None
):
    """
    Change the status of every request *user_email* has for *group*.

    :param user_email: user whose request is being updated
    :param group: group the request refers to
    :param new_status: one of POSSIBLE_STATUSES
    :param updated_by: reviewer making the change (required when approving)
    :param reviewer_comments: optional free-text note from the reviewer
    :return: the last request dict that was modified (None only if nothing matched)
    :raises NoExistingRequest: if the user has no request for the group
    """
    stats.count(
        "update_group_request",
        tags={
            "user": user_email,
            "group": group,
            "new_status": new_status,
            "updated_by": updated_by,
        },
    )
    modified_request = None
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    timestamp = int(time.time())
    if new_status not in POSSIBLE_STATUSES:
        raise Exception(
            f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
        )
    if new_status == "approved" and not updated_by:
        raise Exception(
            "You must provide `updated_by` to change a request status to approved."
        )
    existing_requests = self.get_requests_by_user(user_email)
    if existing_requests:
        updated = False
        for request in existing_requests:
            if request["group"] == group:
                request["updated_by"] = updated_by
                request["status"] = new_status
                request["last_updated"] = timestamp
                request["reviewer_comments"] = reviewer_comments
                modified_request = request
                try:
                    self.requests_table.put_item(
                        Item=self._data_to_dynamo_replace(request)
                    )
                except Exception as e:
                    error = f"Unable to add user request: {request}: {str(e)}"
                    log.error(error, exc_info=True)
                    raise Exception(error)
                updated = True
        if not updated:
            raise NoExistingRequest(
                f"Unable to find existing request for user: {user_email} and group: {group}."
            )
    else:
        raise NoExistingRequest(
            f"Unable to find existing requests for user: {user_email}"
        )
    return modified_request
def change_request_status_by_id(
    self,
    request_id: str,
    new_status: str,
    updated_by: Optional[str] = None,
    reviewer_comments: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
    """
    Change request status by ID.

    :param request_id: ID of the request to update
    :param new_status: one of POSSIBLE_STATUSES
    :param updated_by: reviewer making the change (required when approving)
    :param reviewer_comments: optional free-text note from the reviewer
    :return: the modified request dict
    """
    modified_request = None
    if new_status == "approved" and not updated_by:
        raise Exception(
            "You must provide `updated_by` to change a request status to approved."
        )
    # NOTE: the ID is resolved before the status is validated, so an unknown
    # request ID raises (NoMatchingRequest) before an invalid status does.
    requests = self.resolve_request_ids([request_id])
    if new_status not in POSSIBLE_STATUSES:
        raise Exception(
            f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
        )
    for request in requests:
        request["status"] = new_status
        request["updated_by"] = updated_by
        request["last_updated"] = int(time.time())
        request["reviewer_comments"] = reviewer_comments
        modified_request = request
        try:
            self.requests_table.put_item(Item=self._data_to_dynamo_replace(request))
        except Exception as e:
            error = f"Unable to add user request: {request} : {str(e)}"
            log.error(error, exc_info=True)
            raise Exception(error)
    return modified_request
def get_all_policies(self):
    """Return every item in the policies table, following scan pagination."""
    items = []
    scan_kwargs = {}
    while True:
        response = self.policies_table.scan(**scan_kwargs)
        if response and "Items" in response:
            items.extend(self._data_from_dynamo_replace(response["Items"]))
        last_key = response.get("LastEvaluatedKey") if response else None
        if not last_key:
            break
        scan_kwargs = {"ExclusiveStartKey": last_key}
    return items
async def create_group_log_entry(
    self,
    group: str,
    username: str,
    updated_by: str,
    action: str,
    updated_at: None = None,
    extra: None = None,
) -> None:
    """Append one audit-log entry recording a group membership action.

    :param updated_at: epoch seconds; defaults to now
    :param extra: optional free-form context stored alongside the entry
    """
    entry = {
        "uuid": str(uuid.uuid4()),
        "group": group,
        "username": username,
        "updated_by": updated_by,
        "updated_at": updated_at or int(time.time()),
        "action": action,
        "extra": extra,
    }
    self.group_log.put_item(Item=self._data_to_dynamo_replace(entry))
async def get_all_audit_logs(self) -> List[Dict[str, Union[int, None, str]]]:
    """Return every group audit-log entry, following scan pagination."""
    items = []
    scan_kwargs = {}
    while True:
        response = await sync_to_async(self.group_log.scan)(**scan_kwargs)
        if response and "Items" in response:
            items.extend(self._data_from_dynamo_replace(response["Items"]))
        last_key = response.get("LastEvaluatedKey") if response else None
        if not last_key:
            break
        scan_kwargs = {"ExclusiveStartKey": last_key}
    return items
async def get_all_pending_requests(self):
    """Convenience wrapper: all group requests whose status is "pending"."""
    return await self.get_all_requests(status="pending")
def batch_write_cloudtrail_events(self, items):
    """Batch-write CloudTrail events, deduplicating on the (arn, request_id) key pair.

    :return: True on completion
    """
    writer = self.cloudtrail_table.batch_writer(
        overwrite_by_pkeys=["arn", "request_id"]
    )
    with writer as batch:
        for event in items:
            batch.put_item(Item=self._data_to_dynamo_replace(event))
    return True
async def get_top_cloudtrail_errors_by_arn(self, arn, n=5):
    """Return the *n* most frequent unexpired CloudTrail errors recorded for *arn*.

    Errors are bucketed by "event_call|||resource"; each bucket keeps a count
    and the first generated policy seen.
    """
    response: dict = await sync_to_async(self.cloudtrail_table.query)(
        KeyConditionExpression=Key("arn").eq(arn)
    )
    now = int(time.time())
    aggregated = {}
    for record in response.get("Items", []):
        if int(record["ttl"]) < now:
            continue  # skip expired entries
        bucket_key = f"{record['event_call']}|||{record.get('resource', '')}"
        bucket = aggregated.get(bucket_key)
        if not bucket:
            bucket = {
                "count": 0,
                "generated_policy": record.get("generated_policy", {}),
            }
            aggregated[bucket_key] = bucket
        bucket["count"] += 1
    ranked = sorted(
        aggregated.items(), key=lambda pair: pair[1]["count"], reverse=True
    )
    return dict(ranked[:n])
def count_arn_errors(self, error_count, items):
    """Accumulate per-ARN error counts from *items* into *error_count* (mutated and returned).

    Each item contributes its "count" value, defaulting to 1 when absent.
    """
    for entry in items:
        arn_key = entry.get("arn")
        current = error_count.get(arn_key)
        if not current:
            current = 0
        error_count[arn_key] = current + entry.get("count", 1)
    return error_count
async def get_existing_pending_request(user: str, group_info: Any) -> Optional[dict]:
    """
    Return the user's pending request for the given group, or None if there isn't one.

    :param user: username/email whose requests should be searched
    :param group_info: group description; only its "name" key is consulted
    :return: the matching pending request dict, or None
    """
    # Fix: the return annotation was `-> None`, but the function clearly
    # returns the request dict on a match.
    dynamo_handler = UserDynamoHandler(user)
    existing_requests = await sync_to_async(dynamo_handler.get_requests_by_user)(user)
    for request in existing_requests or []:
        if (
            request.get("group") == group_info.get("name")
            and request.get("status") == "pending"
        ):
            return request
    return None
162,142 | import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
def get_pending_requests_url():
    """Return the absolute URL of the Access UI pending-requests page."""
    base_url = config.get("url")
    return f"{base_url}/accessui/pending"
162,143 | import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
def get_request_review_url(request_id: str) -> str:
    """Return the absolute Access UI review URL for a single request."""
    base_url = config.get("url")
    return f"{base_url}/accessui/request/{request_id}"
162,144 | import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
def get_accessui_pending_requests_url():
    """Return the pending-requests page URL on the standalone Access UI host."""
    base_url = config.get("accessui_url")
    return f"{base_url}/requests"
162,145 | import asyncio
import time
from typing import Any
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.exceptions.exceptions import NoMatchingRequest
from consoleme.lib.auth import can_admin_all
from consoleme.lib.cache import store_json_results_in_redis_and_s3
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
def get_accessui_request_review_url(request_id):
    """Return the per-request review URL on the standalone Access UI host."""
    base_url = config.get("accessui_url")
    return f"{base_url}/requests/{request_id}"
162,146 | from consoleme.config import config
from consoleme.models import CloudAccountModel, CloudAccountModelArray
class CloudAccountModel(BaseModel):
    """A single cloud (AWS) account in ConsoleMe's account inventory."""

    id: Optional[str] = None  # account identifier (e.g. 12-digit AWS account number)
    name: Optional[str] = None  # human-readable account name
    status: Optional[Status1] = None  # lifecycle status, e.g. "active"
    type: Optional[Type] = None  # provider type, e.g. "aws"
    sync_enabled: Optional[bool] = None  # whether ConsoleMe should sync this account
    sensitive: Optional[bool] = False  # marks accounts requiring extra care
    environment: Optional[Environment] = None  # deployment environment — TODO confirm enum values
    aliases: Optional[List[str]] = None  # alternate names for the account
    email: Optional[str] = None  # owner/contact email
class CloudAccountModelArray(BaseModel):
    """Container model wrapping a list of CloudAccountModel entries."""

    accounts: Optional[List[CloudAccountModel]] = None  # the account list, possibly absent
async def retrieve_accounts_from_config() -> CloudAccountModelArray:
    """Build a CloudAccountModelArray from the account_ids_to_name mapping(s) in configuration.

    Static config overrides dynamic config when the same account ID appears in both.
    """
    accounts_by_id = config.get("dynamic_config.account_ids_to_name", {})
    accounts_by_id.update(config.get("account_ids_to_name", {}))
    cloud_accounts = []
    for account_id, name_or_names in accounts_by_id.items():
        display_name = name_or_names
        # Legacy format: a list of names where the first entry is canonical.
        if display_name and isinstance(display_name, list):
            display_name = display_name[0]
        cloud_accounts.append(
            CloudAccountModel(
                id=account_id,
                name=display_name,
                status="active",
                sync_enabled=True,
                type="aws",
            )
        )
    return CloudAccountModelArray(accounts=cloud_accounts)
162,147 | import sys
from typing import List
import ujson as json
from tornado.httpclient import AsyncHTTPClient, HTTPClientError
from consoleme.config import config
from consoleme.exceptions.exceptions import MissingConfigurationValue
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.models import CloudAccountModel, CloudAccountModelArray
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
class MissingConfigurationValue(BaseException):
    """Unable to find expected configuration value"""

    # NOTE(review): subclassing BaseException (not Exception) means generic
    # `except Exception` handlers will NOT catch this — confirm intentional.
    def __init__(self, msg=""):
        # Emit a metric every time this exception is constructed.
        stats.count("MissingConfigurationValue")
        super().__init__(msg)
class CloudAccountModel(BaseModel):
    """A single cloud (AWS) account in ConsoleMe's account inventory."""

    id: Optional[str] = None  # account identifier (e.g. 12-digit AWS account number)
    name: Optional[str] = None  # human-readable account name
    status: Optional[Status1] = None  # lifecycle status, e.g. "active"
    type: Optional[Type] = None  # provider type, e.g. "aws"
    sync_enabled: Optional[bool] = None  # whether ConsoleMe should sync this account
    sensitive: Optional[bool] = False  # marks accounts requiring extra care
    environment: Optional[Environment] = None  # deployment environment — TODO confirm enum values
    aliases: Optional[List[str]] = None  # alternate names for the account
    email: Optional[str] = None  # owner/contact email
class CloudAccountModelArray(BaseModel):
    """Container model wrapping a list of CloudAccountModel entries."""

    accounts: Optional[List[CloudAccountModel]] = None  # the account list, possibly absent
async def retrieve_accounts_from_swag() -> CloudAccountModelArray:
    """
    Fetch the account inventory from a SWAG endpoint and convert it into a
    CloudAccountModelArray.

    :return: accounts owned by `expected_owners` (or all accounts when no
        owners are configured)
    :raises MissingConfigurationValue: if no SWAG base URL is configured
    """
    function: str = f"{sys._getframe().f_code.co_name}"
    # When non-empty, accounts whose "owner" isn't listed are treated as third party.
    expected_owners: List = config.get(
        "retrieve_accounts_from_swag.expected_owners", []
    )
    swag_base_url = config.get("retrieve_accounts_from_swag.base_url")
    if not swag_base_url:
        raise MissingConfigurationValue("Unable to find Swag URL in configuration")
    # NOTE(review): assumes swag_base_url ends with "/" — confirm, otherwise
    # the joined URL is malformed.
    swag_url = swag_base_url + "api/1/accounts"
    try:
        http_client = AsyncHTTPClient(force_instance=True)
        resp = await http_client.fetch(
            swag_url,
            headers={"Content-Type": "application/json", "Accept": "application/json"},
        )
    except (ConnectionError, HTTPClientError) as e:
        log.error(
            {
                "message": "Unable to connect to SWAG",
                "error": str(e),
                "function": function,
            },
            exc_info=True,
        )
        stats.count(f"{function}.connectionerror")
        raise
    swag_accounts = json.loads(resp.body)
    cloud_accounts = []
    for account in swag_accounts:
        # Ignore third party accounts
        if expected_owners and account.get("owner") not in expected_owners:
            continue
        account_status = account["account_status"]
        sync_enabled = False
        # Only accounts marked "ready" in SWAG are synced; map status to "active".
        if account_status == "ready":
            account_status = "active"
            sync_enabled = True
        cloud_accounts.append(
            CloudAccountModel(
                id=account["id"],
                name=account["name"],
                email=account["email"],
                status=account_status,
                sync_enabled=sync_enabled,
                sensitive=account["sensitive"],
                environment=account["environment"],
                aliases=account["aliases"],
                type="aws",
            )
        )
    return CloudAccountModelArray(accounts=cloud_accounts)
162,148 | from typing import Any, Dict, List, Literal
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from cloudaux import CloudAux
from cloudaux.aws.decorators import paginated
from cloudaux.aws.sts import boto3_cached_conn
from consoleme.config import config
from consoleme.exceptions.exceptions import MissingConfigurationValue
from consoleme.models import (
CloudAccountModel,
CloudAccountModelArray,
ServiceControlPolicyDetailsModel,
ServiceControlPolicyModel,
ServiceControlPolicyTargetModel,
)
class MissingConfigurationValue(BaseException):
    """Unable to find expected configuration value"""

    # NOTE(review): subclassing BaseException (not Exception) means generic
    # `except Exception` handlers will NOT catch this — confirm intentional.
    def __init__(self, msg=""):
        # Emit a metric every time this exception is constructed.
        stats.count("MissingConfigurationValue")
        super().__init__(msg)
class CloudAccountModel(BaseModel):
    """A single cloud (AWS) account in ConsoleMe's account inventory."""

    id: Optional[str] = None  # account identifier (e.g. 12-digit AWS account number)
    name: Optional[str] = None  # human-readable account name
    status: Optional[Status1] = None  # lifecycle status, e.g. "active"
    type: Optional[Type] = None  # provider type, e.g. "aws"
    sync_enabled: Optional[bool] = None  # whether ConsoleMe should sync this account
    sensitive: Optional[bool] = False  # marks accounts requiring extra care
    environment: Optional[Environment] = None  # deployment environment — TODO confirm enum values
    aliases: Optional[List[str]] = None  # alternate names for the account
    email: Optional[str] = None  # owner/contact email
class CloudAccountModelArray(BaseModel):
    """Container model wrapping a list of CloudAccountModel entries."""

    accounts: Optional[List[CloudAccountModel]] = None  # the account list, possibly absent
The provided code snippet includes necessary dependencies for implementing the `retrieve_accounts_from_aws_organizations` function. Write a Python function `async def retrieve_accounts_from_aws_organizations() -> CloudAccountModelArray` to solve the following problem:
Polls AWS Organizations for our Account ID to Account Name mapping :param: null :return: CloudAccountModelArray
Here is the function:
async def retrieve_accounts_from_aws_organizations() -> CloudAccountModelArray:
    """
    Polls AWS Organizations for our Account ID to Account Name mapping

    :param: null
    :return: CloudAccountModelArray
    :raises MissingConfigurationValue: if the Organizations master account ID
        or the role to assume is not configured
    """
    cloud_accounts = []
    for organization in config.get("cache_accounts_from_aws_organizations", []):
        organizations_master_account_id = organization.get(
            "organizations_master_account_id"
        )
        # Fall back to the global policies role when no org-specific role is set.
        role_to_assume = organization.get(
            "organizations_master_role_to_assume",
            config.get("policies.role_name"),
        )
        if not organizations_master_account_id:
            raise MissingConfigurationValue(
                "Your AWS Organizations Master Account ID is not specified in configuration. "
                "Unable to sync accounts from "
                "AWS Organizations"
            )
        if not role_to_assume:
            raise MissingConfigurationValue(
                "ConsoleMe doesn't know what role to assume to retrieve account information "
                "from AWS Organizations. please set the appropriate configuration value."
            )
        client = await sync_to_async(boto3_cached_conn)(
            "organizations",
            account_number=organizations_master_account_id,
            assume_role=role_to_assume,
            session_name="ConsoleMeOrganizationsSync",
        )
        paginator = await sync_to_async(client.get_paginator)("list_accounts")
        page_iterator = await sync_to_async(paginator.paginate)()
        accounts = []
        # NOTE(review): this loop iterates the paginator synchronously inside the
        # coroutine (each page fetch blocks the event loop) — confirm acceptable.
        for page in page_iterator:
            accounts.extend(page["Accounts"])
        for account in accounts:
            status = account["Status"].lower()
            cloud_accounts.append(
                CloudAccountModel(
                    id=account["Id"],
                    name=account["Name"],
                    email=account["Email"],
                    status=status,
                    type="aws",
                    sync_enabled=True,  # TODO: Check for tag to disable sync?
                )
            )
    return CloudAccountModelArray(accounts=cloud_accounts)
162,149 | import boto3
from consoleme.config import config
from consoleme.models import CloudAccountModel, CloudAccountModelArray
class CloudAccountModel(BaseModel):
    """Stub declaration — real field definitions are provided elsewhere."""

    # Fix: the original had a class header with no body, which is a SyntaxError.
    ...


class CloudAccountModelArray(BaseModel):
    """Stub declaration — real field definitions are provided elsewhere."""

    ...
async def retrieve_current_account() -> CloudAccountModelArray:
    """Describe the single AWS account the current credentials belong to.

    The account name is the first IAM account alias when one exists,
    otherwise the numeric account ID.
    """
    boto_kwargs = config.get("boto3.client_kwargs", {})
    sts_client = boto3.client("sts", **boto_kwargs)
    iam_client = boto3.client("iam", **boto_kwargs)
    identity = sts_client.get_caller_identity()
    account_aliases = iam_client.list_account_aliases()["AccountAliases"]
    account_id = identity.get("Account") if identity else None
    account_name = account_aliases[0] if account_aliases else account_id
    cloud_account = [
        CloudAccountModel(
            id=account_id,
            name=account_name,
            status="active",
            sync_enabled=True,
            type="aws",
        )
    ]
    return CloudAccountModelArray(accounts=cloud_account)
162,150 | import sys
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
import boto3
import pytz
import ujson as json
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from cloudaux import sts_conn
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.sts import boto3_cached_conn
from consoleme.config import config
from consoleme.exceptions.exceptions import MissingConfigurationValue
from consoleme.lib.plugins import get_plugin_by_name
class MissingConfigurationValue(BaseException):
    """Unable to find expected configuration value"""

    # NOTE(review): subclassing BaseException (not Exception) means generic
    # `except Exception` handlers will NOT catch this — confirm intentional.
    def __init__(self, msg=""):
        # Emit a metric every time this exception is constructed.
        stats.count("MissingConfigurationValue")
        super().__init__(msg)
The provided code snippet includes necessary dependencies for implementing the `is_object_older_than_seconds` function. Write a Python function `async def is_object_older_than_seconds( key: str, older_than_seconds: int, bucket: Optional[str] = None, s3_client=None ) -> bool` to solve the following problem:
This function checks if an S3 object is older than the specified number of seconds. If the object doesn't exist, this function will return True.
Here is the function:
async def is_object_older_than_seconds(
    key: str, older_than_seconds: int, bucket: Optional[str] = None, s3_client=None
) -> bool:
    """
    Return True when the S3 object at bucket/key was last modified more than
    *older_than_seconds* ago, or when the object does not exist at all.

    :param key: S3 object key
    :param older_than_seconds: age threshold in seconds
    :param bucket: bucket name; defaults to the configured `consoleme_s3_bucket`
    :param s3_client: optional pre-built client (useful for testing)
    :raises MissingConfigurationValue: if no bucket can be determined
    """
    bucket = bucket or config.get("consoleme_s3_bucket")
    if not bucket:
        raise MissingConfigurationValue(
            "`bucket` not defined, and we can't find the default bucket in "
            "the configuration key `consoleme_s3_bucket`."
        )
    if not s3_client:
        s3_client = boto3.client("s3", **config.get("boto3.client_kwargs", {}))
    try:
        head = await sync_to_async(s3_client.head_object)(Bucket=bucket, Key=key)
    except ClientError as client_err:
        # A missing object counts as "older", so callers will regenerate it.
        if client_err.response.get("Error", {}).get("Code") == "404":
            return True
        raise
    now = datetime.utcnow().replace(tzinfo=pytz.utc)
    age_seconds = (now - head["LastModified"]).total_seconds()
    return age_seconds > older_than_seconds
162,151 | import sys
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
import boto3
import pytz
import ujson as json
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from cloudaux import sts_conn
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.sts import boto3_cached_conn
from consoleme.config import config
from consoleme.exceptions.exceptions import MissingConfigurationValue
from consoleme.lib.plugins import get_plugin_by_name
The provided code snippet includes necessary dependencies for implementing the `does_object_exist` function. Write a Python function `async def does_object_exist(bucket: str, key: str, s3_client=None) -> bool` to solve the following problem:
This function checks if an S3 object exists.
Here is the function:
async def does_object_exist(bucket: str, key: str, s3_client=None) -> bool:
    """
    Return True when an object exists at ``bucket``/``key`` in S3.

    A HEAD request returning 404 yields False; any other S3 error is
    re-raised to the caller.
    """
    client = s3_client or boto3.client("s3", **config.get("boto3.client_kwargs", {}))
    try:
        await sync_to_async(client.head_object)(Bucket=bucket, Key=key)
    except ClientError as e:
        if e.response.get("Error", {}).get("Code") == "404":
            return False
        raise
    return True
162,152 | import sys
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
import boto3
import pytz
import ujson as json
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from cloudaux import sts_conn
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.sts import boto3_cached_conn
from consoleme.config import config
from consoleme.exceptions.exceptions import MissingConfigurationValue
from consoleme.lib.plugins import get_plugin_by_name
async def get_object_async(**kwargs):
    """Get an S3 object Asynchronously.

    Thin async wrapper: runs the blocking ``get_object`` helper off the event
    loop via ``sync_to_async`` and forwards all keyword arguments unchanged.
    """
    return await sync_to_async(get_object)(**kwargs)
The provided code snippet includes necessary dependencies for implementing the `fetch_json_object_from_s3` function. Write a Python function `async def fetch_json_object_from_s3( bucket: str, object: str ) -> Union[Dict[str, Any], List[dict]]` to solve the following problem:
Fetch and load a JSON-formatted object from S3. Parameters: `bucket` — the S3 bucket; `object` — the S3 object key. Returns a dict (or list of dicts) parsed from the object's JSON body.
Here is the function:
async def fetch_json_object_from_s3(
    bucket: str, object: str
) -> Union[Dict[str, Any], List[dict]]:
    """
    Fetch an object from S3 and deserialize its body as JSON.

    :param bucket: S3 bucket
    :param object: S3 object key
    :return: the parsed JSON payload (dict or list of dicts)
    """
    raw = await get_object_async(Bucket=bucket, Key=object, region=config.region)
    body = raw["Body"].read()
    return json.loads(body)
162,153 | import sys
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
import boto3
import pytz
import ujson as json
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from cloudaux import sts_conn
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.sts import boto3_cached_conn
from consoleme.config import config
from consoleme.exceptions.exceptions import MissingConfigurationValue
from consoleme.lib.plugins import get_plugin_by_name
log = config.get_logger("consoleme")
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
# S3 server-access-log operation name -> IAM API action. Hoisted to module
# scope so the table is built once at import time instead of being rebuilt on
# every call. Entries that map to themselves have no known IAM equivalent yet.
_S3_OPERATION_TO_API = {
    "BATCH.DELETE.OBJECT": "s3:DeleteObject",
    "REST.COPY.OBJECT": "s3:PutObject",
    "REST.COPY.OBJECT_GET": "REST.COPY.OBJECT_GET",
    "REST.COPY.PART": "s3:PutObject",
    "REST.DELETE.BUCKET": "s3:DeleteBucket",
    "REST.DELETE.OBJECT": "s3:DeleteObject",
    "REST.DELETE.UPLOAD": "s3:DeleteObject",
    "REST.GET.ACCELERATE": "s3:GetAccelerateConfiguration",
    "REST.GET.ACL": "s3:GetObjectVersionAcl",
    "REST.GET.ANALYTICS": "s3:GetAnalyticsConfiguration",
    "REST.GET.BUCKET": "s3:GetBucket",
    "REST.GET.BUCKETPOLICY": "s3:GetBucketPolicy",
    "REST.GET.BUCKETVERSIONS": "s3:GetBucketVersioning",
    "REST.GET.CORS": "s3:GetBucketCORS",
    "REST.GET.ENCRYPTION": "s3:GetEncryptionConfiguration",
    "REST.GET.INTELLIGENT_TIERING": "REST.GET.INTELLIGENT_TIERING",
    "REST.GET.INVENTORY": "s3:GetInventoryConfiguration",
    "REST.GET.LIFECYCLE": "s3:GetLifecycleConfiguration",
    "REST.GET.LOCATION": "s3:GetBucketLocation",
    "REST.GET.LOGGING_STATUS": "s3:GetBucketLogging",
    "REST.GET.METRICS": "s3:GetMetricsConfiguration",
    "REST.GET.NOTIFICATION": "s3:GetBucketNotification",
    "REST.GET.OBJECT": "s3:GetObject",
    "REST.GET.OBJECT_LOCK_CONFIGURATION": "s3:GetObjectLockConfiguration",
    "REST.GET.OBJECT_TAGGING": "s3:GetObjectTagging",
    "REST.GET.OWNERSHIP_CONTROLS": "s3:GetBucketOwnershipControls",
    "REST.GET.POLICY_STATUS": "s3:GetBucketPolicyStatus",
    "REST.GET.PUBLIC_ACCESS_BLOCK": "s3:GetBucketPublicAccessBlock",
    "REST.GET.REPLICATION": "s3:GetReplicationConfiguration",
    "REST.GET.REQUEST_PAYMENT": "s3:GetBucketRequestPayment",
    "REST.GET.TAGGING": "s3:GetBucketTagging",
    "REST.GET.UPLOAD": "s3:GetObject",
    "REST.GET.UPLOADS": "s3:GetObject",
    "REST.GET.VERSIONING": "s3:GetObjectVersion",
    "REST.GET.WEBSITE": "s3:GetBucketWebsite",
    "REST.HEAD.BUCKET": "s3:ListBucket",
    "REST.HEAD.OBJECT": "s3:GetObject",
    "REST.COPY.PART_GET": "s3:GetObject",
    "REST.POST.BUCKET": "REST.POST.BUCKET",
    "REST.POST.MULTI_OBJECT_DELETE": "s3:DeleteObject",
    "REST.POST.RESTORE": "s3:RestoreObject",
    "REST.POST.UPLOAD": "s3:PutObject",
    "REST.POST.UPLOADS": "s3:PutObject",
    "REST.PUT.ACL": "s3:PutBucketAcl,s3:PutObjectAcl",
    "REST.PUT.BUCKET": "REST.PUT.BUCKET",
    "REST.PUT.BUCKETPOLICY": "s3:PutBucketPolicy",
    "REST.PUT.CORS": "s3:PutBucketCORS",
    "REST.PUT.LIFECYCLE": "s3:PutLifecycleConfiguration",
    "REST.PUT.LOGGING_STATUS": "REST.PUT.LOGGING_STATUS-NotBucketOwnerOrGrantee",
    "REST.PUT.NOTIFICATION": "s3:PutBucketNotification",
    "REST.PUT.OBJECT": "s3:PutObject",
    "REST.PUT.OBJECT_TAGGING": "s3:PutObjectTagging",
    "REST.PUT.PART": "s3:PutObject",
    "REST.PUT.PUBLIC_ACCESS_BLOCK": "s3:PutBucketPublicAccessBlock",
    "REST.PUT.REPLICATION": "s3:PutReplicationConfiguration",
    "REST.PUT.VERSIONING": "s3:putbucketversioning",
    "REST.PUT.WEBSITE": "s3:PutBucketWebsite",
}


def map_operation_to_api(operation, default):
    """
    Translate an S3 access-log operation string (e.g. "REST.GET.OBJECT")
    into the corresponding IAM API action (e.g. "s3:GetObject").

    :param operation: S3 server-access-log operation name
    :param default: value returned when the operation has no known mapping;
        the miss is also logged and counted as an error metric
    :return: the mapped IAM action string, or ``default`` when unmapped
    """
    function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
    api_call = _S3_OPERATION_TO_API.get(operation)
    if api_call is None:
        # Unknown operation: record it so the mapping table can be extended.
        stats.count(f"{function}.error")
        log.error(
            {
                "message": "S3 Operation Needs Mapping",
                "function": function,
                "query": operation,
            }
        )
        return default
    stats.count(f"{function}.success")
    return api_call
162,154 | import sys
from asgiref.sync import async_to_sync
from consoleme.config import config
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme.lib.plugins import get_plugin_by_name
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
log = config.get_logger()
ALL_ACCOUNTS = async_to_sync(get_account_id_to_name_mapping)(status=None)
The provided code snippet includes necessary dependencies for implementing the `format_role_name` function. Write a Python function `def format_role_name(arn: str, accounts: dict) -> str` to solve the following problem:
Given a role name, return what to display on the UI. This cleanly formats per-user roles.
Here is the function:
def format_role_name(arn: str, accounts: dict) -> str:
    """Given a role name, return what to display on the UI. This cleanly formats per-user roles."""
    role_name = arn.split("role/")[1]
    account_id = arn.split(":")[4]
    if not accounts:
        # Only fall back to ALL_ACCOUNTS if an accounts dict is not supplied
        accounts = ALL_ACCOUNTS
    if config.get("format_role_name.show_full_arn"):
        return arn
    if config.get("format_role_name.show_account_name_role_name"):
        # Prefer the friendly account name; fall back to the raw account ID.
        display_account = accounts.get(account_id) or account_id
        return f"{display_account}/{role_name}"
    if not role_name.startswith("cm_"):
        return role_name
    account_name = accounts.get(account_id)
    if not account_name:
        # This should NOT happen, but if it does, log it keep a metric of it:
        log.error(
            {
                "function": f"{__name__}.{sys._getframe().f_code.co_name}",
                "message": "Can't find account for per-user role",
                "role": role_name,
                "accounts": accounts,
            }
        )
        stats.count("index.unknown_account_role", tags={"role": role_name})
    return account_name
162,155 | import base64
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Union
import jsonschema
import jwt
import ujson as json
from cryptography.hazmat.backends.openssl.rsa import _RSAPublicKey
from consoleme.config import config
from consoleme.lib.crypto import Crypto
from consoleme.lib.generic import is_in_group
from consoleme.lib.plugins import get_plugin_by_name
crypto = Crypto()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
log = config.get_logger()
async def generate_auth_token(
    user, ip, challenge_uuid, expiration=config.get("challenge.token_expiration", 3600)
):
    """Generate a signed, base64-encoded auth token for a challenge flow.

    The token binds the user, their IP, and a challenge UUID to a validity
    window of ``expiration`` seconds starting now, and carries a signature
    produced by the Crypto plugin.

    NOTE: the ``expiration`` default is evaluated once at import time; later
    changes to the `challenge.token_expiration` config key will not be seen
    unless callers pass the value explicitly.

    :param user: user identifier the token is issued to
    :param ip: client IP the token is bound to
    :param challenge_uuid: challenge identifier being authorized
    :param expiration: token lifetime in seconds
    :return: base64-encoded JSON token bytes
    """
    stats.count("generate_auth_token")
    log_data = {
        "user": user,
        "ip": ip,
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Generating token for user",
        "challenge_uuid": challenge_uuid,
    }
    log.debug(log_data)
    current_time = int(time.time())
    valid_before = current_time + expiration
    valid_after = current_time
    auth_token = {
        "user": user,
        "ip": ip,
        "challenge_uuid": challenge_uuid,
        "valid_before": valid_before,
        "valid_after": valid_after,
    }
    # The doubled apostrophes below look like a typo, but this exact string
    # must match the verification string built in validate_auth_token —
    # "fixing" it here would invalidate every signature. Do not change.
    to_sign = (
        "{{'user': '{0}', 'ip': '{1}', 'challenge_uuid'': '{2}', "
        "'valid_before'': '{3}', 'valid_after'': '{4}'}}"
    ).format(user, ip, challenge_uuid, valid_before, valid_after)
    sig = crypto.sign(to_sign)
    auth_token["sig"] = sig
    return base64.b64encode(json.dumps(auth_token).encode())
162,156 | import base64
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Union
import jsonschema
import jwt
import ujson as json
from cryptography.hazmat.backends.openssl.rsa import _RSAPublicKey
from consoleme.config import config
from consoleme.lib.crypto import Crypto
from consoleme.lib.generic import is_in_group
from consoleme.lib.plugins import get_plugin_by_name
crypto = Crypto()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
log = config.get_logger()
async def validate_auth_token(user, ip, token):
    """Validate a token produced by ``generate_auth_token``.

    Decodes the base64 JSON token and checks, in order: that a token was
    supplied, that its embedded user and IP match the caller's, and that the
    current time falls inside the token's validity window. Each failure is
    counted as a distinct metric and raised as an Exception.

    :param user: expected user identifier
    :param ip: expected client IP
    :param token: base64-encoded token bytes/string
    :return: dict with ``valid`` (signature check result) and ``user``
    :raises Exception: on missing token, user/IP mismatch, or expiry
    """
    stats.count("validate_auth_token")
    log_data = {
        "user": user,
        "ip": ip,
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Validating token for user",
    }
    log.debug(log_data)
    if not token:
        stats.count("validate_auth_token.no_token")
        msg = f"No token passed. User: {user}. IP: {ip}."
        log.error(msg, exc_info=True)
        raise Exception(msg)
    decoded_token = base64.b64decode(token)
    auth_token = json.loads(decoded_token)
    current_time = int(time.time())
    if auth_token.get("user") != user:
        stats.count("validate_auth_token.user_mismatch")
        msg = f"Auth token has a different user: {auth_token.get('user')}. User passed to function: {user}"
        log.error(msg, exc_info=True)
        raise Exception(msg)
    if auth_token.get("ip") != ip:
        stats.count("validate_auth_token.ip_mismatch")
        msg = f"Auth token has a different IP: {auth_token.get('ip')}. IP passed to function: {ip}"
        log.error(msg, exc_info=True)
        raise Exception(msg)
    # Token is valid only while valid_after <= now <= valid_before.
    if (
        auth_token.get("valid_before") < current_time
        or auth_token.get("valid_after") > current_time
    ):
        stats.count("validate_auth_token.expiration_error")
        msg = (
            f"Auth token has expired. valid_before: {auth_token.get('valid_before')}. "
            f"valid_after: {auth_token.get('valid_after')}. Current_time: {current_time}"
        )
        log.error(msg, exc_info=True)
        raise Exception(msg)
    # This string (including the doubled apostrophes) must stay byte-identical
    # to the one signed in generate_auth_token, or verification always fails.
    to_verify = (
        "{{'user': '{0}', 'ip': '{1}', 'challenge_uuid'': '{2}', "
        "'valid_before'': '{3}', 'valid_after'': '{4}'}}"
    ).format(
        auth_token.get("user"),
        auth_token.get("ip"),
        auth_token.get("challenge_uuid"),
        auth_token.get("valid_before"),
        auth_token.get("valid_after"),
    )
    token_details = {
        "valid": crypto.verify(to_verify, auth_token.get("sig")),
        "user": auth_token.get("user"),
    }
    return token_details
162,157 | import base64
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Union
import jsonschema
import jwt
import ujson as json
from cryptography.hazmat.backends.openssl.rsa import _RSAPublicKey
from consoleme.config import config
from consoleme.lib.crypto import Crypto
from consoleme.lib.generic import is_in_group
from consoleme.lib.plugins import get_plugin_by_name
def can_admin_all(user: str, user_groups: List[str]):
    """Return True when *user* is a global administrator.

    A user qualifies if they are (or belong to) the configured
    `application_admin`, or match any group under `groups.can_admin` /
    `dynamic_config.groups.can_admin`.
    """
    application_admin = config.get("application_admin")
    if application_admin and (
        user == application_admin or application_admin in user_groups
    ):
        return True
    admin_groups = [
        *config.get("groups.can_admin", []),
        *config.get("dynamic_config.groups.can_admin", []),
    ]
    return is_in_group(user, user_groups, admin_groups)
def is_in_group(
    user: str, user_groups: List[str], required_groups: Union[List[str], str]
) -> bool:
    """Return True if *user* matches, or belongs to, any required group.

    ``required_groups`` may be a single group name or a list; a match occurs
    when a required group is in ``user_groups`` or equals ``user`` itself.
    """
    candidates = (
        [required_groups] if isinstance(required_groups, str) else required_groups
    )
    return any(group in user_groups or group == user for group in candidates)
def can_create_roles(
    user: str,
    user_groups: List[str],
) -> bool:
    """Return True when *user* may create IAM roles.

    Global admins always qualify; otherwise membership in any group under
    `groups.can_create_roles` / `dynamic_config.groups.can_create_roles`
    is required.
    """
    if can_admin_all(user, user_groups):
        return True
    allowed_groups = [
        *config.get("groups.can_create_roles", []),
        *config.get("dynamic_config.groups.can_create_roles", []),
    ]
    return is_in_group(user, user_groups, allowed_groups)
162,158 | import base64
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Union
import jsonschema
import jwt
import ujson as json
from cryptography.hazmat.backends.openssl.rsa import _RSAPublicKey
from consoleme.config import config
from consoleme.lib.crypto import Crypto
from consoleme.lib.generic import is_in_group
from consoleme.lib.plugins import get_plugin_by_name
def can_delete_iam_principals_app(app_name):
    """Return True when *app_name* is configured as allowed to delete roles.

    Checks both the static `groups.can_delete_roles_apps` list and its
    dynamic-config counterpart.
    """
    allowed_apps = [
        *config.get("groups.can_delete_roles_apps", []),
        *config.get("dynamic_config.groups.can_delete_roles_apps", []),
    ]
    return app_name in allowed_apps
162,159 | import base64
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Union
import jsonschema
import jwt
import ujson as json
from cryptography.hazmat.backends.openssl.rsa import _RSAPublicKey
from consoleme.config import config
from consoleme.lib.crypto import Crypto
from consoleme.lib.generic import is_in_group
from consoleme.lib.plugins import get_plugin_by_name
def can_admin_all(user: str, user_groups: List[str]):
    """Return True when *user* is a global administrator.

    A user qualifies if they are (or belong to) the configured
    `application_admin`, or match any group under `groups.can_admin` /
    `dynamic_config.groups.can_admin`.
    """
    application_admin = config.get("application_admin")
    if application_admin and (
        user == application_admin or application_admin in user_groups
    ):
        return True
    admin_groups = [
        *config.get("groups.can_admin", []),
        *config.get("dynamic_config.groups.can_admin", []),
    ]
    return is_in_group(user, user_groups, admin_groups)
def is_in_group(
    user: str, user_groups: List[str], required_groups: Union[List[str], str]
) -> bool:
    """Return True if *user* matches, or belongs to, any required group.

    ``required_groups`` may be a single group name or a list; a match occurs
    when a required group is in ``user_groups`` or equals ``user`` itself.
    """
    candidates = (
        [required_groups] if isinstance(required_groups, str) else required_groups
    )
    return any(group in user_groups or group == user for group in candidates)
def can_delete_iam_principals(
    user: str,
    user_groups: List[str],
) -> bool:
    """Return True when *user* may delete IAM principals.

    Global admins always qualify. Otherwise the user must belong to one of
    the configured deletion groups; the `can_delete_roles` keys are the
    legacy spelling slated for deprecation in favor of
    `can_delete_iam_principals`.
    """
    if can_admin_all(user, user_groups):
        return True
    allowed_groups = [
        # TODO: Officially deprecate groups.can_delete_roles config key
        *config.get("groups.can_delete_roles", []),
        # TODO: Officially deprecate dynamic_config.groups.can_delete_roles config key
        *config.get("dynamic_config.groups.can_delete_roles", []),
        *config.get("groups.can_delete_iam_principals", []),
        *config.get("dynamic_config.groups.can_delete_iam_principals", []),
    ]
    return is_in_group(user, user_groups, allowed_groups)
162,160 | import base64
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Union
import jsonschema
import jwt
import ujson as json
from cryptography.hazmat.backends.openssl.rsa import _RSAPublicKey
from consoleme.config import config
from consoleme.lib.crypto import Crypto
from consoleme.lib.generic import is_in_group
from consoleme.lib.plugins import get_plugin_by_name
def can_admin_all(user: str, user_groups: List[str]):
    """Return True when *user* is a global administrator.

    A user qualifies if they are (or belong to) the configured
    `application_admin`, or match any group under `groups.can_admin` /
    `dynamic_config.groups.can_admin`.
    """
    application_admin = config.get("application_admin")
    if application_admin and (
        user == application_admin or application_admin in user_groups
    ):
        return True
    admin_groups = [
        *config.get("groups.can_admin", []),
        *config.get("dynamic_config.groups.can_admin", []),
    ]
    return is_in_group(user, user_groups, admin_groups)
def is_in_group(
    user: str, user_groups: List[str], required_groups: Union[List[str], str]
) -> bool:
    """Return True if *user* matches, or belongs to, any required group.

    ``required_groups`` may be a single group name or a list; a match occurs
    when a required group is in ``user_groups`` or equals ``user`` itself.
    """
    candidates = (
        [required_groups] if isinstance(required_groups, str) else required_groups
    )
    return any(group in user_groups or group == user for group in candidates)
def can_edit_dynamic_config(
    user: str,
    user_groups: List[str],
) -> bool:
    """Return True when *user* may edit ConsoleMe's dynamic configuration.

    Global admins always qualify; otherwise membership in any group under
    `groups.can_edit_config` / `dynamic_config.groups.can_edit_config`
    is required.
    """
    if can_admin_all(user, user_groups):
        return True
    allowed_groups = [
        *config.get("groups.can_edit_config", []),
        *config.get("dynamic_config.groups.can_edit_config", []),
    ]
    return is_in_group(user, user_groups, allowed_groups)
162,161 | import base64
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Union
import jsonschema
import jwt
import ujson as json
from cryptography.hazmat.backends.openssl.rsa import _RSAPublicKey
from consoleme.config import config
from consoleme.lib.crypto import Crypto
from consoleme.lib.generic import is_in_group
from consoleme.lib.plugins import get_plugin_by_name
def can_admin_all(user: str, user_groups: List[str]):
    """Return True when *user* is a global administrator.

    A user qualifies if they are (or belong to) the configured
    `application_admin`, or match any group under `groups.can_admin` /
    `dynamic_config.groups.can_admin`.
    """
    application_admin = config.get("application_admin")
    if application_admin and (
        user == application_admin or application_admin in user_groups
    ):
        return True
    admin_groups = [
        *config.get("groups.can_admin", []),
        *config.get("dynamic_config.groups.can_admin", []),
    ]
    return is_in_group(user, user_groups, admin_groups)
def is_in_group(
    user: str, user_groups: List[str], required_groups: Union[List[str], str]
) -> bool:
    """Return True if *user* matches, or belongs to, any required group.

    ``required_groups`` may be a single group name or a list; a match occurs
    when a required group is in ``user_groups`` or equals ``user`` itself.
    """
    candidates = (
        [required_groups] if isinstance(required_groups, str) else required_groups
    )
    return any(group in user_groups or group == user for group in candidates)
def can_edit_attributes(
    user: str, user_groups: List[str], group_info: Optional[Any]
) -> bool:
    """Return True when *user* may edit group attributes.

    Global admins always qualify; otherwise membership in the restricted-admin
    or edit-attributes groups (static or dynamic config) is required.
    ``group_info`` is accepted for interface compatibility but not consulted.
    """
    if can_admin_all(user, user_groups):
        return True
    for key in ("can_admin_restricted", "can_edit_attributes"):
        allowed_groups = [
            *config.get(f"groups.{key}", []),
            *config.get(f"dynamic_config.groups.{key}", []),
        ]
        if is_in_group(user, user_groups, allowed_groups):
            return True
    return False
162,162 | import base64
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Union
import jsonschema
import jwt
import ujson as json
from cryptography.hazmat.backends.openssl.rsa import _RSAPublicKey
from consoleme.config import config
from consoleme.lib.crypto import Crypto
from consoleme.lib.generic import is_in_group
from consoleme.lib.plugins import get_plugin_by_name
def can_admin_all(user: str, user_groups: List[str]):
    """Return True when *user* is a global administrator.

    A user qualifies if they are (or belong to) the configured
    `application_admin`, or match any group under `groups.can_admin` /
    `dynamic_config.groups.can_admin`.
    """
    application_admin = config.get("application_admin")
    if application_admin and (
        user == application_admin or application_admin in user_groups
    ):
        return True
    admin_groups = [
        *config.get("groups.can_admin", []),
        *config.get("dynamic_config.groups.can_admin", []),
    ]
    return is_in_group(user, user_groups, admin_groups)
def is_in_group(
    user: str, user_groups: List[str], required_groups: Union[List[str], str]
) -> bool:
    """Return True if *user* matches, or belongs to, any required group.

    ``required_groups`` may be a single group name or a list; a match occurs
    when a required group is in ``user_groups`` or equals ``user`` itself.
    """
    candidates = (
        [required_groups] if isinstance(required_groups, str) else required_groups
    )
    return any(group in user_groups or group == user for group in candidates)
def can_edit_sensitive_attributes(
    user: str, user_groups: List[str], group_info: Optional[Any]
) -> bool:
    """Return True when *user* may edit sensitive group attributes.

    Global admins always qualify; otherwise membership in the
    `can_edit_sensitive_attributes` groups (static or dynamic config) is
    required. ``group_info`` is accepted for interface compatibility but
    not consulted.
    """
    if can_admin_all(user, user_groups):
        return True
    allowed_groups = [
        *config.get("groups.can_edit_sensitive_attributes", []),
        *config.get("dynamic_config.groups.can_edit_sensitive_attributes", []),
    ]
    return is_in_group(user, user_groups, allowed_groups)
162,163 | import base64
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Union
import jsonschema
import jwt
import ujson as json
from cryptography.hazmat.backends.openssl.rsa import _RSAPublicKey
from consoleme.config import config
from consoleme.lib.crypto import Crypto
from consoleme.lib.generic import is_in_group
from consoleme.lib.plugins import get_plugin_by_name
def is_sensitive_attr(attribute):
    """Return the configured ``sensitive`` flag for *attribute*.

    Searches the boolean attribute definitions first, then the list
    attribute definitions (static config merged with dynamic config in
    each case). Unknown attributes are treated as not sensitive.
    """
    for config_key in ("groups.attributes.boolean", "groups.attributes.list"):
        definitions = [
            *config.get(config_key, []),
            *config.get(f"dynamic_config.{config_key}", []),
        ]
        for definition in definitions:
            if definition.get("name") == attribute:
                return definition.get("sensitive", False)
    return False
162,164 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
async def needs_updating(existing_policy, new_policy):
    """Return True when *new_policy* differs from *existing_policy*.

    Uses DeepDiff with ordering ignored and repeated items reported, so two
    policies that differ only in list ordering are considered equal.
    """
    return bool(
        DeepDiff(existing_policy, new_policy, ignore_order=True, report_repetition=True)
    )
162,165 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
ALL_IAM_MANAGED_POLICIES: dict = {}
ALL_IAM_MANAGED_POLICIES_LAST_UPDATE: int = 0
red = RedisHandler().redis_sync()
async def retrieve_json_data_from_redis_or_s3(
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    cache_to_redis_if_data_in_s3: bool = True,
    max_age: Optional[int] = None,
    default: Optional = None,
    json_object_hook: Optional = None,
    json_encoder: Optional = None,
):
    """
    Retrieve data from Redis as a priority. If data is unavailable in Redis, fall back to S3 and attempt to store
    data in Redis for quicker retrieval later.
    :param redis_data_type: "str" or "hash", depending on how the data is stored in Redis
    :param redis_key: Redis Key to retrieve data from
    :param s3_bucket: S3 bucket to retrieve data from
    :param s3_key: S3 key to retrieve data from
    :param cache_to_redis_if_data_in_s3: Cache the data in Redis if the data is in S3 but not Redis
    :param max_age: maximum acceptable age of the cached data, in seconds;
        stale Redis data triggers an S3 fallback, stale S3 data raises ExpiredData
    :param default: value returned instead of raising when no data is found
    :param json_object_hook: passed through to json.loads for deserialization
    :param json_encoder: passed through when re-caching S3 data into Redis
    :raises UnsupportedRedisDataType: if redis_data_type is neither "str" nor "hash"
    :raises ExpiredData: if data exists but is older than max_age
    :raises DataNotRetrievable: if no data is found and no default is given
    :return:
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    data = None
    if redis_key:
        if redis_data_type == "str":
            data_s = red.get(redis_key)
            if data_s:
                data = json.loads(data_s, object_hook=json_object_hook)
        elif redis_data_type == "hash":
            data = red.hgetall(redis_key)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        if data and max_age:
            # Freshness timestamps live in a side hash keyed by redis_key.
            current_time = int(time.time())
            last_updated = int(red.hget(last_updated_redis_key, redis_key))
            if current_time - last_updated > max_age:
                data = None
                # Fall back to S3 if expired.
                if not s3_bucket or not s3_key:
                    raise ExpiredData(f"Data in Redis is older than {max_age} seconds.")
    # Fall back to S3 if there's no data
    if not data and s3_bucket and s3_key:
        try:
            s3_object = get_object(Bucket=s3_bucket, Key=s3_key)
        except ClientError as e:
            # Missing S3 key is only tolerated when a default was supplied.
            if str(e) == (
                "An error occurred (NoSuchKey) when calling the GetObject operation: "
                "The specified key does not exist."
            ):
                if default is not None:
                    return default
            raise
        s3_object_content = await sync_to_async(s3_object["Body"].read)()
        if s3_key.endswith(".gz"):
            s3_object_content = gzip.decompress(s3_object_content)
        # S3 payloads are envelopes: {"last_updated": <epoch>, "data": <payload>}.
        data_object = json.loads(s3_object_content, object_hook=json_object_hook)
        data = data_object["data"]
        if data and max_age:
            current_time = int(time.time())
            last_updated = data_object["last_updated"]
            if current_time - last_updated > max_age:
                raise ExpiredData(f"Data in S3 is older than {max_age} seconds.")
        # Warm the Redis cache so the next read avoids the S3 round-trip.
        if redis_key and cache_to_redis_if_data_in_s3:
            await store_json_results_in_redis_and_s3(
                data,
                redis_key=redis_key,
                redis_data_type=redis_data_type,
                json_encoder=json_encoder,
            )
    if data is not None:
        return data
    if default is not None:
        return default
    raise DataNotRetrievable("Unable to retrieve expected data.")
class RedisHandler:
    """Factory for ConsoleMe Redis connections (async and sync).

    Connection parameters default to configuration values resolved once at
    class-definition time (region-specific host, falling back to the global
    host, then localhost).
    """
    def __init__(
        self,
        host: str = config.get(
            "redis.host.{}".format(region), config.get("redis.host.global", "localhost")
        ),
        port: int = config.get("redis.port", 6379),
        db: int = config.get("redis.db", 0),
    ) -> None:
        # self.red holds the most recently created connection (None until
        # redis()/redis_sync() is called).
        self.red = None
        self.host = host
        self.port = port
        self.db = db
        # Disabled when any connection parameter is missing entirely.
        self.enabled = True
        if self.host is None or self.port is None or self.db is None:
            self.enabled = False
    async def redis(self, db: int = 0) -> Redis:
        # NOTE(review): the `db` parameter is ignored — the connection always
        # uses self.db. Confirm whether callers rely on passing db here.
        self.red = await sync_to_async(ConsoleMeRedis)(
            host=self.host,
            port=self.port,
            db=self.db,
            charset="utf-8",
            decode_responses=True,
        )
        return self.red
    def redis_sync(self, db: int = 0) -> Redis:
        # Synchronous variant of redis(); the `db` parameter is likewise
        # ignored in favor of self.db.
        self.red = ConsoleMeRedis(
            host=self.host,
            port=self.port,
            db=self.db,
            charset="utf-8",
            decode_responses=True,
        )
        return self.red
async def get_all_iam_managed_policies_for_account(account_id):
    """Return the cached list of IAM managed policies for *account_id*.

    Reads from a module-global in-memory cache that is refreshed from Redis
    when it is more than 500 seconds old. When the Redis hash is empty,
    falls back to the per-account resource cache in S3 (empty list default).
    """
    global ALL_IAM_MANAGED_POLICIES_LAST_UPDATE
    global ALL_IAM_MANAGED_POLICIES
    policy_key: str = config.get(
        "redis.iam_managed_policies_key", "IAM_MANAGED_POLICIES"
    )
    current_time = time.time()
    # Refresh the module-global cache from Redis at most every 500 seconds.
    if current_time - ALL_IAM_MANAGED_POLICIES_LAST_UPDATE > 500:
        red = await RedisHandler().redis()
        ALL_IAM_MANAGED_POLICIES = await sync_to_async(red.hgetall)(policy_key)
        ALL_IAM_MANAGED_POLICIES_LAST_UPDATE = current_time
    if ALL_IAM_MANAGED_POLICIES:
        # Hash values are JSON-encoded lists keyed by account ID.
        return json.loads(ALL_IAM_MANAGED_POLICIES.get(account_id, "[]"))
    else:
        # Redis had nothing: fall back to the account resource cache in S3.
        s3_bucket = config.get("account_resource_cache.s3.bucket")
        s3_key = config.get(
            "account_resource_cache.s3.file",
            "account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
        ).format(resource_type="managed_policies", account_id=account_id)
        return await retrieve_json_data_from_redis_or_s3(
            s3_bucket=s3_bucket, s3_key=s3_key, default=[]
        )
162,166 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
async def get_resource_policy(account: str, resource_type: str, name: str, region: str):
async def generate_updated_resource_policy(
existing: Dict,
principal_arn: str,
resource_arns: List[str],
actions: List[str],
include_resources: bool = True,
) -> Dict:
async def get_resource_policies(
    principal_arn: str, resource_actions: Dict[str, Dict[str, Any]], account: str
) -> Tuple[List[Dict], bool]:
    """Propose resource-policy updates for resources outside the principal's account.

    For every entry in ``resource_actions`` whose owning account differs from
    ``account``, fetch the resource's current policy and generate an updated
    one granting ``principal_arn`` the requested actions.

    :param principal_arn: ARN of the principal requesting access
    :param resource_actions: mapping of resource name -> metadata
        (account, type, region, arns, actions)
    :param account: account ID of the principal
    :return: (list of proposed policy-change dicts, whether any resource is
        cross-account)
    """
    proposed_policies: List[Dict] = []
    is_cross_account: bool = False
    for name, info in resource_actions.items():
        owner_account: str = info.get("account", "")
        if not owner_account or owner_account == account:
            # Same-account (or unknown-account) resource: no resource policy needed.
            continue
        # This is a cross-account request. Might need a resource policy.
        is_cross_account = True
        r_type: str = info.get("type", "")
        r_region: str = info.get("region", "")
        current_policy = await get_resource_policy(owner_account, r_type, name, r_region)
        proposed_policy = await generate_updated_resource_policy(
            current_policy,
            principal_arn,
            info.get("arns", []),
            info.get("actions", []),
        )
        proposed_policies.append(
            {
                "resource": name,
                "account": owner_account,
                "type": r_type,
                "region": r_region,
                "policy_document": proposed_policy,
            }
        )
    return proposed_policies, is_cross_account
162,167 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
async def fetch_role_details(account_id, role_name):
    """Fetch and fully load a boto3 IAM Role resource.

    :param account_id: AWS account ID that owns the role
    :param role_name: name of the IAM role to fetch
    :return: loaded boto3 ``iam.Role`` resource
    :raises ClientError: re-raised on any failure; "NoSuchEntity" is logged first
    """
    ctx = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Attempting to fetch role details",
        "account": account_id,
        "role": role_name,
    }
    log.info(ctx)
    iam = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="resource",
        account_number=account_id,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name="fetch_role_details",
        retry_max_attempts=2,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    try:
        role = await sync_to_async(iam.Role)(role_name)
    except ClientError as err:
        if err.response["Error"]["Code"] == "NoSuchEntity":
            ctx["message"] = "Requested role doesn't exist"
            log.error(ctx)
        raise
    # load() populates the role's attributes (and surfaces access errors early).
    await sync_to_async(role.load)()
    return role
async def fetch_assume_role_policy(role_arn: str) -> Optional[Dict]:
    """Return the assume-role (trust) policy document for ``role_arn``.

    Returns ``None`` when the role cannot be fetched — typically because the
    owning account is not accessible. The failure is reported to Sentry.
    """
    account_id, role_name = role_arn.split(":")[4], role_arn.split("/")[-1]
    try:
        iam_role = await fetch_role_details(account_id, role_name)
    except ClientError:
        # Role is most likely on an account that we do not have access to
        sentry_sdk.capture_exception()
        return None
    return iam_role.assume_role_policy_document
162,168 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
# Module-level singletons: structured logger plus pluggable auth and metrics backends.
log = config.get_logger(__name__)
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
class BackgroundCheckNotPassedException(BaseException):
    """Raised when a user without a passed background check requests a role
    on a compliance account.

    NOTE(review): the original ``__init__`` had no body (a syntax error as
    written); restored the minimal behavior of passing the message through.
    """

    def __init__(self, msg=""):
        super().__init__(msg)
async def raise_if_background_check_required_and_no_background_check(role, user):
    """Deny access to roles on compliance accounts when the user has not
    passed a background check.

    :param role: role ARN being requested
    :param user: username requesting the role
    :raises BackgroundCheckNotPassedException: if the role lives on a
        configured compliance account and the user lacks a passed check
    """
    role_account = role.split(":")[4]
    for compliance_account_id in config.get("aws.compliance_account_ids", []):
        if compliance_account_id != role_account:
            continue
        user_info = await auth.get_user_info(user, object=True)
        if user_info.passed_background_check:
            continue
        function = f"{__name__}.{sys._getframe().f_code.co_name}"
        log_data: dict = {
            "function": function,
            "user": user,
            "role": role,
            "message": "User trying to access SEG role without background check",
        }
        log.error(log_data)
        stats.count(
            f"{function}.access_denied_background_check_not_passed",
            tags={"function": function, "user": user, "role": role},
        )
        raise BackgroundCheckNotPassedException(
            config.get(
                "aws.background_check_not_passed",
                "You must have passed a background check to access role "
                "{role}.",
            ).format(role=role)
        )
162,169 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
# Module-level singletons: structured logger and pluggable metrics backend.
log = config.get_logger(__name__)
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
The provided code snippet includes the necessary dependencies for implementing the `apply_managed_policy_to_role` function. Write a Python function `def apply_managed_policy_to_role(role: Dict, policy_name: str, session_name: str) -> bool` to solve the following problem:
Apply a managed policy to a role. :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call) :param policy_name: Name of the managed policy to add to the role :param session_name: Name of the session to assume the role with. This is an identifier that will be logged in CloudTrail :return: True on success
Here is the function:
def apply_managed_policy_to_role(
    role: Dict, policy_name: str, session_name: str
) -> bool:
    """
    Attach a customer-managed policy (by name, same account as the role) to a role.

    :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
    :param policy_name: Name of managed policy to add to role
    :param session_name: Name of session to assume role with. This is an identifier that will be logged in CloudTrail
    :return: True on success
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    # The managed policy is assumed to live in the same account as the role.
    account_id = role.get("Arn").split(":")[4]
    policy_arn = f"arn:aws:iam::{account_id}:policy/{policy_name}"
    log_data = {
        "function": function,
        "role": role,
        "policy_name": policy_name,
        "session_name": session_name,
    }
    iam_client = boto3_cached_conn(
        "iam",
        account_number=account_id,
        assume_role=config.get("policies.role_name"),
        session_name=session_name,
        retry_max_attempts=2,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    iam_client.attach_role_policy(RoleName=role.get("RoleName"), PolicyArn=policy_arn)
    log_data["message"] = "Applied managed policy to role"
    log.debug(log_data)
    stats.count(
        f"{function}.attach_role_policy",
        tags={"role": role.get("Arn"), "policy": policy_arn},
    )
    return True
162,170 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
# Module-level singletons: structured logger and pluggable metrics backend.
log = config.get_logger(__name__)
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
async def fetch_iam_user_details(account_id, iam_user_name):
    """
    Fetches details about an IAM user from AWS. If `policies.role_name` configuration
    is set, the hub (central) account ConsoleMeInstanceProfile role will assume the
    configured role to perform the action.
    :param account_id: account ID
    :param iam_user_name: IAM user name
    :return: iam_user resource
    :raises ClientError: re-raised on any failure; "NoSuchEntity" is logged first
    """
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Attempting to fetch role details",
        "account": account_id,
        "iam_user_name": iam_user_name,
    }
    log.info(log_data)
    iam_resource = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="resource",
        account_number=account_id,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name="fetch_iam_user_details",
        retry_max_attempts=2,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    try:
        iam_user = await sync_to_async(iam_resource.User)(iam_user_name)
    except ClientError as ce:
        # Log a clearer message for the common "user not found" case, then
        # re-raise every ClientError to the caller.
        if ce.response["Error"]["Code"] == "NoSuchEntity":
            log_data["message"] = "Requested user doesn't exist"
            log.error(log_data)
        raise
    # load() populates the user's attributes (and surfaces access errors early).
    await sync_to_async(iam_user.load)()
    return iam_user
The provided code snippet includes the necessary dependencies for implementing the `delete_iam_user` function. Write a Python function `async def delete_iam_user(account_id, iam_user_name, username) -> bool` to solve the following problem:
This function assumes the caller has already been pre-authorized to delete an IAM user. It will detach all managed policies, delete all inline policies, delete all access keys, and finally delete the IAM user. :param account_id: Account ID that the IAM user is on :param iam_user_name: name of the IAM user to delete :param username: actor's username :return: True on success
Here is the function:
async def delete_iam_user(account_id, iam_user_name, username) -> bool:
    """
    This function assumes the user has already been pre-authorized to delete an IAM user. It will detach all managed
    policies, delete all inline policies, delete all access keys, and finally delete the IAM user.
    :param account_id: Account ID that the IAM user is on
    :param iam_user_name: name of IAM user to delete
    :param username: actor's username
    :return: True on success
    """
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        # Fix: this log line previously said "delete role" for a user deletion.
        "message": "Attempting to delete user",
        "account_id": account_id,
        "iam_user_name": iam_user_name,
        "user": username,
    }
    log.info(log_data)
    iam_user = await fetch_iam_user_details(account_id, iam_user_name)
    # Detach managed policies
    for policy in await sync_to_async(iam_user.attached_policies.all)():
        await sync_to_async(policy.load)()
        log.info(
            {
                **log_data,
                "message": "Detaching managed policy from user",
                "policy_arn": policy.arn,
            }
        )
        # Fix: detach_user expects the user *name* string, not the User
        # resource object (which previously failed parameter validation).
        await sync_to_async(policy.detach_user)(UserName=iam_user_name)
    # Delete Inline policies
    for policy in await sync_to_async(iam_user.policies.all)():
        await sync_to_async(policy.load)()
        log.info(
            {
                **log_data,
                "message": "Deleting inline policy on user",
                "policy_name": policy.name,
            }
        )
        await sync_to_async(policy.delete)()
    log.info({**log_data, "message": "Performing access key deletion"})
    # Fix: run the blocking boto3 enumeration/deletion in a thread so the
    # event loop is not blocked (the original called these synchronously).
    for access_key in await sync_to_async(list)(iam_user.access_keys.all()):
        await sync_to_async(access_key.delete)()
    log.info({**log_data, "message": "Performing user deletion"})
    await sync_to_async(iam_user.delete)()
    stats.count(
        f"{log_data['function']}.success", tags={"iam_user_name": iam_user_name}
    )
    return True
162,171 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
# Module-level singletons: structured logger and pluggable metrics backend.
log = config.get_logger(__name__)
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
async def fetch_role_details(account_id, role_name):
    """
    Fetch and load a boto3 IAM Role resource.
    :param account_id: AWS account ID that owns the role
    :param role_name: name of the IAM role to fetch
    :return: loaded boto3 iam.Role resource
    :raises ClientError: re-raised on any failure; "NoSuchEntity" is logged first
    """
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Attempting to fetch role details",
        "account": account_id,
        "role": role_name,
    }
    log.info(log_data)
    iam_resource = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="resource",
        account_number=account_id,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name="fetch_role_details",
        retry_max_attempts=2,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    try:
        iam_role = await sync_to_async(iam_resource.Role)(role_name)
    except ClientError as ce:
        # Log a clearer message for the common "role not found" case, then
        # re-raise every ClientError to the caller.
        if ce.response["Error"]["Code"] == "NoSuchEntity":
            log_data["message"] = "Requested role doesn't exist"
            log.error(log_data)
        raise
    # load() populates the role's attributes (and surfaces access errors early).
    await sync_to_async(iam_role.load)()
    return iam_role
async def delete_iam_role(account_id, role_name, username) -> bool:
    """
    Delete an IAM role, first removing/deleting its instance profiles, detaching
    its managed policies, and deleting its inline policies.
    :param account_id: Account ID that the IAM role is on
    :param role_name: name of IAM role to delete
    :param username: actor's username
    :return: True on success
    """
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Attempting to delete role",
        "account_id": account_id,
        "role_name": role_name,
        "user": username,
    }
    log.info(log_data)
    role = await fetch_role_details(account_id, role_name)
    # Instance profiles must have the role removed and be deleted before the
    # role itself can be deleted.
    for instance_profile in await sync_to_async(role.instance_profiles.all)():
        await sync_to_async(instance_profile.load)()
        log.info(
            {
                **log_data,
                "message": "Removing and deleting instance profile from role",
                "instance_profile": instance_profile.name,
            }
        )
        await sync_to_async(instance_profile.remove_role)(RoleName=role.name)
        await sync_to_async(instance_profile.delete)()
    # Detach managed policies
    for policy in await sync_to_async(role.attached_policies.all)():
        await sync_to_async(policy.load)()
        log.info(
            {
                **log_data,
                "message": "Detaching managed policy from role",
                "policy_arn": policy.arn,
            }
        )
        await sync_to_async(policy.detach_role)(RoleName=role_name)
    # Delete Inline policies
    for policy in await sync_to_async(role.policies.all)():
        await sync_to_async(policy.load)()
        log.info(
            {
                **log_data,
                "message": "Deleting inline policy on role",
                "policy_name": policy.name,
            }
        )
        await sync_to_async(policy.delete)()
    log.info({**log_data, "message": "Performing role deletion"})
    await sync_to_async(role.delete)()
    stats.count(f"{log_data['function']}.success", tags={"role_name": role_name})
    # Fix: the signature declares `-> bool` but the original fell through and
    # returned None; return True for consistency with delete_iam_user.
    return True
162,172 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
# Module-level singletons: structured logger and pluggable metrics backend.
log = config.get_logger(__name__)
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def sanitize_session_name(unsanitized_session_name):
    """
    Sanitize a session name for use in an STS assume_role call.

    AWS requires RoleSessionName to match ``[\\w+=,.@-]*`` and be at most 64
    characters long. Invalid characters are dropped and the result is
    truncated to 64 characters. (The original docstring was cut off
    mid-sentence; this completes it.)

    :param unsanitized_session_name: proposed session name
    :return: sanitized session name, safe to pass as RoleSessionName
    """
    # Drop every character AWS does not allow, then enforce the 64-char limit.
    return re.sub(r"[^\w+=,.@-]", "", unsanitized_session_name)[:64]
class MissingConfigurationValue(BaseException):
    """Unable to find expected configuration value"""

    def __init__(self, msg=""):
        # Count every occurrence so missing configuration shows up in metrics.
        stats.count("MissingConfigurationValue")
        super().__init__(msg)
def get_plugin_by_name(plugin_name: str) -> Any:
    """Resolve a ConsoleMe plugin entry point by name, caching loaded plugins.

    :param plugin_name: name of the `consoleme.plugins` entry point to load
    :return: the loaded plugin object
    :raises Exception: listing discovered plugins when the name is not found
    """
    cached = global_plugins.get(plugin_name)
    if cached:
        return cached
    discovered = []
    for entry_point in pkg_resources.iter_entry_points("consoleme.plugins"):
        discovered.append(entry_point.name)
        if entry_point.name == plugin_name:
            global_plugins[entry_point.name] = entry_point.load()
            return global_plugins[entry_point.name]
    initial_exception_message = f"Could not find the specified plugin: {plugin_name}. "
    if plugin_name == "default_config":
        # The default config plugin ships separately; give install instructions.
        initial_exception_message = (
            f"Could not find the specified plugin: {plugin_name}. "
            "Please install it with `pip install -e consoleme/default_plugins` "
            "from the ConsoleMe base directory. "
        )
    exception_message = (
        initial_exception_message + f"Plugins found: {', '.join(discovered)}. "
        f"Make sure you've set the environment variable CONSOLEME_CONFIG_ENTRYPOINT to the name of your configuration "
        f"entrypoint, otherwise it will default to `default_config`."
    )
    raise Exception(exception_message)
class RoleCreationRequestModel(BaseModel):
    """Request payload for creating a new IAM role via ConsoleMe."""

    account_id: constr(min_length=12, max_length=12)  # destination AWS account ID
    role_name: str  # name of the role to create
    description: Optional[str] = None  # None -> a default description is generated
    instance_profile: Optional[bool] = True  # create/attach a matching instance profile
The provided code snippet includes the necessary dependencies for implementing the `create_iam_role` function. Write a Python function `async def create_iam_role(create_model: RoleCreationRequestModel, username)` to solve the following problem:
Creates an IAM role. :param create_model: RoleCreationRequestModel, which has the following attributes: account_id: destination account's ID; role_name: destination role name; description: optional string — description of the role (default: "Role created by {username} through ConsoleMe"); instance_profile: optional boolean — whether to create an instance profile and attach it to the role (default: True) :param username: username of the user requesting the action :return: results dict indicating the results of each action
Here is the function:
async def create_iam_role(create_model: RoleCreationRequestModel, username):
    """
    Creates IAM role.
    :param create_model: RoleCreationRequestModel, which has the following attributes:
        account_id: destination account's ID
        role_name: destination role name
        description: optional string - description of the role
            default: Role created by {username} through ConsoleMe
        instance_profile: optional boolean - whether to create an instance profile and attach it to the role or not
            default: True
    :param username: username of user requesting action
    :return: results: dict indicating the results of each action
    """
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Attempting to create role",
        "account_id": create_model.account_id,
        "role_name": create_model.role_name,
        "user": username,
    }
    log.info(log_data)
    # Trust policy comes from configuration; the shipped default allows EC2 to
    # assume the role (pairs with the instance profile created below).
    default_trust_policy = config.get(
        "user_role_creator.default_trust_policy",
        {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {"Service": "ec2.amazonaws.com"},
                    "Action": "sts:AssumeRole",
                }
            ],
        },
    )
    # Only possible when the config key is explicitly set to null.
    if default_trust_policy is None:
        raise MissingConfigurationValue(
            "Missing Default Assume Role Policy Configuration"
        )
    default_max_session_duration = config.get(
        "user_role_creator.default_max_session_duration", 3600
    )
    if create_model.description:
        description = create_model.description
    else:
        description = f"Role created by {username} through ConsoleMe"
    iam_client = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="client",
        account_number=create_model.account_id,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name=sanitize_session_name("create_role_" + username),
        retry_max_attempts=2,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    results = {"errors": 0, "role_created": "false", "action_results": []}
    try:
        await sync_to_async(iam_client.create_role)(
            RoleName=create_model.role_name,
            AssumeRolePolicyDocument=json.dumps(default_trust_policy),
            Description=description,
            MaxSessionDuration=default_max_session_duration,
            Tags=[],
        )
        results["action_results"].append(
            {
                "status": "success",
                "message": f"Role arn:aws:iam::{create_model.account_id}:role/{create_model.role_name} "
                f"successfully created",
            }
        )
        results["role_created"] = "true"
    except Exception as e:
        log_data["message"] = "Exception occurred creating role"
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        results["action_results"].append(
            {
                "status": "error",
                "message": f"Error creating role {create_model.role_name} in account {create_model.account_id}:"
                + str(e),
            }
        )
        results["errors"] += 1
        sentry_sdk.capture_exception()
        # Since we were unable to create the role, no point continuing, just return
        return results
    # If here, role has been successfully created, add status updates for each action
    results["action_results"].append(
        {
            "status": "success",
            "message": "Successfully added default Assume Role Policy Document",
        }
    )
    results["action_results"].append(
        {
            "status": "success",
            "message": "Successfully added description: " + description,
        }
    )
    # Create instance profile and attach if specified
    if create_model.instance_profile:
        try:
            await sync_to_async(iam_client.create_instance_profile)(
                InstanceProfileName=create_model.role_name
            )
            await sync_to_async(iam_client.add_role_to_instance_profile)(
                InstanceProfileName=create_model.role_name,
                RoleName=create_model.role_name,
            )
            results["action_results"].append(
                {
                    "status": "success",
                    "message": f"Successfully added instance profile {create_model.role_name} to role "
                    f"{create_model.role_name}",
                }
            )
        except Exception as e:
            # Instance profile failure is non-fatal: the role itself exists.
            log_data[
                "message"
            ] = "Exception occurred creating/attaching instance profile"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception()
            results["action_results"].append(
                {
                    "status": "error",
                    "message": f"Error creating/attaching instance profile {create_model.role_name} to role: "
                    + str(e),
                }
            )
            results["errors"] += 1
    stats.count(
        f"{log_data['function']}.success", tags={"role_name": create_model.role_name}
    )
    log_data["message"] = "Successfully created role"
    log.info(log_data)
    # Force caching of role
    # Best-effort: warm ConsoleMe's role cache so the new role is visible
    # immediately; failures here do not affect the creation result.
    try:
        aws = get_plugin_by_name(config.get("plugins.aws", "default_aws"))()
        role_arn = (
            f"arn:aws:iam::{create_model.account_id}:role/{create_model.role_name}"
        )
        await aws.fetch_iam_role(create_model.account_id, role_arn, force_refresh=True)
    except Exception as e:
        log.error({**log_data, "message": "Unable to cache role", "error": str(e)})
        sentry_sdk.capture_exception()
    return results
162,173 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
# Module-level singletons: structured logger and pluggable metrics backend.
log = config.get_logger(__name__)
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
async def fetch_role_details(account_id, role_name):
    """Load and return a boto3 IAM Role resource from the given account.

    :param account_id: AWS account ID that owns the role
    :param role_name: name of the role to fetch
    :raises ClientError: re-raised when the role does not exist (NoSuchEntity)
    :return: a loaded boto3 ``iam.Role`` resource
    """
    log_entry = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Attempting to fetch role details",
        "account": account_id,
        "role": role_name,
    }
    log.info(log_entry)
    # Assume the configured policies role in the target account to read IAM.
    resource = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="resource",
        account_number=account_id,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name="fetch_role_details",
        retry_max_attempts=2,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    try:
        role = await sync_to_async(resource.Role)(role_name)
    except ClientError as ce:
        if ce.response["Error"]["Code"] == "NoSuchEntity":
            log_entry["message"] = "Requested role doesn't exist"
            log.error(log_entry)
            raise
    await sync_to_async(role.load)()
    return role
def sanitize_session_name(unsanitized_session_name):
    """
    Sanitize a name for use as an STS assume-role session name.

    AWS restricts role-session names to word characters plus ``+=,.@-`` and a
    maximum length of 64, so every other character is dropped and the result
    is truncated.

    :param unsanitized_session_name: candidate session name
    :return: a string containing only characters valid in a session name,
        at most 64 characters long
    """
    # Single C-level pass instead of a per-character Python loop; filtering
    # first and then truncating yields the same first 64 valid characters.
    sanitized_session_name = re.sub(r"[^\w+=,.@-]", "", unsanitized_session_name)
    return sanitized_session_name[:64]  # Session names have a length limit of 64 characters
class MissingConfigurationValue(Exception):
    """Unable to find expected configuration value.

    Derives from ``Exception`` rather than ``BaseException`` so generic
    ``except Exception`` handlers treat it as an ordinary application error
    (PEP 352 reserves ``BaseException`` for system-exiting exceptions).

    NOTE(review): this local definition shadows the exception of the same
    name imported from ``consoleme.exceptions.exceptions`` above — confirm
    which one callers are expected to catch.
    """

    def __init__(self, msg=""):
        # Emit a metric every time this error is constructed.
        stats.count("MissingConfigurationValue")
        super().__init__(msg)
class CloneRoleRequestModel(BaseModel):
    """Request payload describing a role-clone operation."""

    # Source role's 12-digit AWS account ID.
    account_id: constr(min_length=12, max_length=12)
    # Source role's name.
    role_name: str
    # Destination 12-digit AWS account ID (may equal account_id).
    dest_account_id: constr(min_length=12, max_length=12)
    # Name to give the cloned role in the destination account.
    dest_role_name: str
    # Flags controlling which attributes are copied during the clone.
    options: Options
The provided code snippet includes necessary dependencies for implementing the `clone_iam_role` function. Write a Python function `async def clone_iam_role(clone_model: CloneRoleRequestModel, username)` to solve the following problem:
Clones IAM role within same account or across account, always creating and attaching instance profile if one exists on the source role. ;param username: username of user requesting action ;:param clone_model: CloneRoleRequestModel, which has the following attributes: account_id: source role's account ID role_name: source role's name dest_account_id: destination role's account ID (may be same as account_id) dest_role_name: destination role's name clone_options: dict to indicate what to copy when cloning: assume_role_policy: bool default: False - uses default ConsoleMe AssumeRolePolicy tags: bool default: False - defaults to no tags copy_description: bool default: False - defaults to copying provided description or default description description: string default: "Role cloned via ConsoleMe by `username` from `arn:aws:iam::<account_id>:role/<role_name>` if copy_description is True, then description is ignored inline_policies: bool default: False - defaults to no inline policies managed_policies: bool default: False - defaults to no managed policies :return: results: - indicating the results of each action
Here is the function:
async def clone_iam_role(clone_model: CloneRoleRequestModel, username):
    """
    Clones IAM role within same account or across account, always creating and attaching instance profile if one exists
    on the source role.
    :param username: username of user requesting action
    :param clone_model: CloneRoleRequestModel, which has the following attributes:
        account_id: source role's account ID
        role_name: source role's name
        dest_account_id: destination role's account ID (may be same as account_id)
        dest_role_name: destination role's name
        clone_options: dict to indicate what to copy when cloning:
            assume_role_policy: bool
                default: False - uses default ConsoleMe AssumeRolePolicy
            tags: bool
                default: False - defaults to no tags
            copy_description: bool
                default: False - defaults to copying provided description or default description
            description: string
                default: "Role cloned via ConsoleMe by `username` from `arn:aws:iam::<account_id>:role/<role_name>`
                if copy_description is True, then description is ignored
            inline_policies: bool
                default: False - defaults to no inline policies
            managed_policies: bool
                default: False - defaults to no managed policies
    :return: results: - indicating the results of each action
    """
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Attempting to clone role",
        "account_id": clone_model.account_id,
        "role_name": clone_model.role_name,
        "dest_account_id": clone_model.dest_account_id,
        "dest_role_name": clone_model.dest_role_name,
        "user": username,
    }
    log.info(log_data)
    # Load the source role so its attributes (trust policy, tags, policies,
    # instance profiles) can be selectively copied below.
    role = await fetch_role_details(clone_model.account_id, clone_model.role_name)
    # Trust policy: copy from the source role, or fall back to the configured
    # default ConsoleMe AssumeRolePolicy.
    default_trust_policy = config.get("user_role_creator.default_trust_policy")
    trust_policy = (
        role.assume_role_policy_document
        if clone_model.options.assume_role_policy
        else default_trust_policy
    )
    if trust_policy is None:
        raise MissingConfigurationValue(
            "Missing Default Assume Role Policy Configuration"
        )
    default_max_session_duration = config.get(
        "user_role_creator.default_max_session_duration", 3600
    )
    max_session_duration = (
        role.max_session_duration
        if clone_model.options.max_session_duration
        else default_max_session_duration
    )
    # Description precedence: copied source description, then caller-provided
    # description, then a generated default.
    if (
        clone_model.options.copy_description
        and role.description is not None
        and role.description != ""
    ):
        description = role.description
    elif (
        clone_model.options.description is not None
        and clone_model.options.description != ""
    ):
        description = clone_model.options.description
    else:
        description = f"Role cloned via ConsoleMe by {username} from {role.arn}"
    tags = role.tags if clone_model.options.tags and role.tags else []
    # IAM client in the destination account, via the configured policies role.
    iam_client = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="client",
        account_number=clone_model.dest_account_id,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name=sanitize_session_name("clone_role_" + username),
        retry_max_attempts=2,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    # Accumulates per-step outcomes returned to the caller.
    results = {"errors": 0, "role_created": "false", "action_results": []}
    try:
        await sync_to_async(iam_client.create_role)(
            RoleName=clone_model.dest_role_name,
            AssumeRolePolicyDocument=json.dumps(trust_policy),
            Description=description,
            MaxSessionDuration=max_session_duration,
            Tags=tags,
        )
        results["action_results"].append(
            {
                "status": "success",
                "message": f"Role arn:aws:iam::{clone_model.dest_account_id}:role/{clone_model.dest_role_name} "
                f"successfully created",
            }
        )
        results["role_created"] = "true"
    except Exception as e:
        log_data["message"] = "Exception occurred creating cloned role"
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        results["action_results"].append(
            {
                "status": "error",
                "message": f"Error creating role {clone_model.dest_role_name} in account {clone_model.dest_account_id}:"
                + str(e),
            }
        )
        results["errors"] += 1
        sentry_sdk.capture_exception()
        # Since we were unable to create the role, no point continuing, just return
        return results
    if clone_model.options.tags:
        results["action_results"].append(
            {"status": "success", "message": "Successfully copied tags"}
        )
    if clone_model.options.assume_role_policy:
        results["action_results"].append(
            {
                "status": "success",
                "message": "Successfully copied Assume Role Policy Document",
            }
        )
    else:
        results["action_results"].append(
            {
                "status": "success",
                "message": "Successfully added default Assume Role Policy Document",
            }
        )
    if (
        clone_model.options.copy_description
        and role.description is not None
        and role.description != ""
    ):
        results["action_results"].append(
            {"status": "success", "message": "Successfully copied description"}
        )
    elif clone_model.options.copy_description:
        results["action_results"].append(
            {
                "status": "error",
                "message": "Failed to copy description, so added default description: "
                + description,
            }
        )
    else:
        results["action_results"].append(
            {
                "status": "success",
                "message": "Successfully added description: " + description,
            }
        )
    # Create instance profile and attach if it exists in source role
    if len(list(await sync_to_async(role.instance_profiles.all)())) > 0:
        try:
            await sync_to_async(iam_client.create_instance_profile)(
                InstanceProfileName=clone_model.dest_role_name
            )
            await sync_to_async(iam_client.add_role_to_instance_profile)(
                InstanceProfileName=clone_model.dest_role_name,
                RoleName=clone_model.dest_role_name,
            )
            results["action_results"].append(
                {
                    "status": "success",
                    "message": f"Successfully added instance profile {clone_model.dest_role_name} to role "
                    f"{clone_model.dest_role_name}",
                }
            )
        except Exception as e:
            log_data[
                "message"
            ] = "Exception occurred creating/attaching instance profile"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception()
            results["action_results"].append(
                {
                    "status": "error",
                    "message": f"Error creating/attaching instance profile {clone_model.dest_role_name} to role: "
                    + str(e),
                }
            )
            results["errors"] += 1
    # other optional attributes to copy over after role has been successfully created
    cloned_role = await fetch_role_details(
        clone_model.dest_account_id, clone_model.dest_role_name
    )
    # Copy inline policies
    if clone_model.options.inline_policies:
        for src_policy in await sync_to_async(role.policies.all)():
            await sync_to_async(src_policy.load)()
            try:
                dest_policy = await sync_to_async(cloned_role.Policy)(src_policy.name)
                await sync_to_async(dest_policy.put)(
                    PolicyDocument=json.dumps(src_policy.policy_document)
                )
                results["action_results"].append(
                    {
                        "status": "success",
                        "message": f"Successfully copied inline policy {src_policy.name}",
                    }
                )
            except Exception as e:
                log_data["message"] = "Exception occurred copying inline policy"
                log_data["error"] = str(e)
                log.error(log_data, exc_info=True)
                sentry_sdk.capture_exception()
                results["action_results"].append(
                    {
                        "status": "error",
                        "message": f"Error copying inline policy {src_policy.name}: "
                        + str(e),
                    }
                )
                results["errors"] += 1
    # Copy managed policies
    if clone_model.options.managed_policies:
        for src_policy in await sync_to_async(role.attached_policies.all)():
            await sync_to_async(src_policy.load)()
            # Rewrite customer-managed policy ARNs to point at the destination
            # account; AWS-managed policy ARNs contain no account ID and pass
            # through unchanged.
            dest_policy_arn = src_policy.arn.replace(
                clone_model.account_id, clone_model.dest_account_id
            )
            try:
                await sync_to_async(cloned_role.attach_policy)(
                    PolicyArn=dest_policy_arn
                )
                results["action_results"].append(
                    {
                        "status": "success",
                        "message": f"Successfully attached managed policy {src_policy.arn} as {dest_policy_arn}",
                    }
                )
            except Exception as e:
                log_data["message"] = "Exception occurred copying managed policy"
                log_data["error"] = str(e)
                log.error(log_data, exc_info=True)
                sentry_sdk.capture_exception()
                results["action_results"].append(
                    {
                        "status": "error",
                        "message": f"Error attaching managed policy {dest_policy_arn}: "
                        + str(e),
                    }
                )
                results["errors"] += 1
    stats.count(
        f"{log_data['function']}.success", tags={"role_name": clone_model.role_name}
    )
    log_data["message"] = "Successfully cloned role"
    log.info(log_data)
    return results
162,174 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
The provided code snippet includes necessary dependencies for implementing the `role_has_tag` function. Write a Python function `def role_has_tag(role: Dict, key: str, value: Optional[str] = None) -> bool` to solve the following problem:
Checks a role dictionary and determines whether the role has the specified tag. If `value` is passed, this function will only return true if the tag's value matches the `value` variable. :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call) :param key: key of the tag :param value: optional value of the tag :return:
Here is the function:
def role_has_tag(role: Dict, key: str, value: Optional[str] = None) -> bool:
    """
    Determine whether a role dictionary carries the given tag. When `value` is
    passed (and truthy), the tag must also have that exact value.
    :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
    :param key: key of the tag
    :param value: optional value of the tag
    :return: True if a matching tag is found, otherwise False
    """
    matching_tags = (tag for tag in role.get("Tags", []) if tag.get("Key") == key)
    return any(not value or tag.get("Value") == value for tag in matching_tags)
162,175 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
The provided code snippet includes necessary dependencies for implementing the `role_has_managed_policy` function. Write a Python function `def role_has_managed_policy(role: Dict, managed_policy_name: str) -> bool` to solve the following problem:
Checks a role dictionary to determine if a managed policy is attached :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call) :param managed_policy_name: the name of the managed policy :return:
Here is the function:
def role_has_managed_policy(role: Dict, managed_policy_name: str) -> bool:
    """
    Determine whether the named managed policy is attached to the role.
    :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
    :param managed_policy_name: the name of the managed policy
    :return: True if a policy with that name is attached, otherwise False
    """
    attached = role.get("AttachedManagedPolicies", [])
    return any(policy.get("PolicyName") == managed_policy_name for policy in attached)
162,176 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
The provided code snippet includes necessary dependencies for implementing the `role_newer_than_x_days` function. Write a Python function `def role_newer_than_x_days(role: Dict, days: int) -> bool` to solve the following problem:
Checks a role dictionary to determine if it is newer than the specified number of days :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call) :param days: number of days :return:
Here is the function:
def role_newer_than_x_days(role: Dict, days: int) -> bool:
    """
    Determine whether a role was created within the last ``days`` days.
    :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
    :param days: age threshold in days
    :return: True if the role is strictly younger than ``days`` days
    """
    create_date = role.get("CreateDate")
    # Accept either a datetime or its string form. Parse into a local variable
    # instead of writing the parsed value back into the caller's dictionary
    # (the previous version mutated its input as a side effect).
    if isinstance(create_date, str):
        create_date = parse(create_date)
    # timezone.utc (stdlib) is interchangeable with pytz.utc for
    # aware-datetime arithmetic, avoiding the third-party dependency here.
    role_age = datetime.now(tz=timezone.utc) - create_date
    return role_age.days < days
162,177 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
The provided code snippet includes necessary dependencies for implementing the `is_role_instance_profile` function. Write a Python function `def is_role_instance_profile(role: Dict) -> bool` to solve the following problem:
Checks a role naively to determine if it is associated with an instance profile. We only check by name, and not the actual attached instance profiles. :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call) :return:
Here is the function:
def is_role_instance_profile(role: Dict) -> bool:
    """
    Naive check for whether a role is associated with an instance profile.
    Only the role name is inspected (a trailing "InstanceProfile" suffix),
    not the actually attached instance profiles.
    :param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
    :return: True if the role name ends with "InstanceProfile"
    """
    role_name = role.get("RoleName")
    return role_name.endswith("InstanceProfile")
162,178 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
async def access_analyzer_validate_policy(
    policy: str, log_data, policy_type: str = "IDENTITY_POLICY"
) -> List[Dict[str, Any]]:
    """Validate an IAM policy document with AWS IAM Access Analyzer.

    :param policy: policy document as a JSON string
    :param log_data: base logging context merged into any error log
    :param policy_type: Access Analyzer policy type to validate as
    :return: a list of finding dictionaries (one per finding location); an
        empty list when the AWS call fails, after logging and Sentry capture
    """
    try:
        analyzer = await sync_to_async(boto3.client)(
            "accessanalyzer",
            region_name=config.region,
            **config.get("boto3.client_kwargs", {}),
        )
        response = await sync_to_async(analyzer.validate_policy)(
            policyDocument=policy,
            policyType=policy_type,  # ConsoleMe only supports identity policy analysis currently
        )
        findings = []
        for finding in response.get("findings", []):
            issue_code = finding.get("issueCode")
            for location in finding.get("locations", []):
                start = location.get("span", {}).get("start", {})
                findings.append(
                    {
                        "issue": issue_code,
                        "detail": "",
                        "location": {
                            "line": start.get("line"),
                            "column": start.get("column"),
                            "filepath": None,
                        },
                        "severity": finding.get("findingType"),
                        "title": issue_code,
                        "description": finding.get("findingDetails"),
                    }
                )
        return findings
    except (ParamValidationError, ClientError) as e:
        log.error(
            {
                **log_data,
                "function": f"{__name__}.{sys._getframe().f_code.co_name}",
                "message": "Error retrieving Access Analyzer data",
                "policy": policy,
                "error": str(e),
            }
        )
        sentry_sdk.capture_exception()
        return []
async def parliament_validate_iam_policy(policy: str) -> List[Dict[str, Any]]:
    """Lint an IAM policy string with parliament and return enhanced findings.

    :param policy: policy document as a JSON string
    :return: a list of dictionaries, one per parliament finding
    """
    analyzed = await sync_to_async(analyze_policy_string)(policy)
    results = []
    for raw_finding in analyzed.findings:
        detail = await sync_to_async(enhance_finding)(raw_finding)
        results.append(
            {
                "issue": detail.issue,
                "detail": json.dumps(detail.detail),
                "location": detail.location,
                "severity": detail.severity,
                "title": detail.title,
                "description": detail.description,
            }
        )
    return results
async def validate_iam_policy(policy: str, log_data: Dict):
    """Run both parliament and Access Analyzer checks and merge the findings.

    :param policy: policy document as a JSON string
    :param log_data: base logging context passed to the Access Analyzer check
    :return: combined list of findings from both validators
    """
    findings: List = await parliament_validate_iam_policy(policy)
    findings += await access_analyzer_validate_policy(
        policy, log_data, policy_type="IDENTITY_POLICY"
    )
    return findings
162,179 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
async def get_all_scps(force_sync=False) -> Dict[str, List[ServiceControlPolicyModel]]:
    """Retrieve a dictionary containing all Service Control Policies across organizations

    Args:
        force_sync: force a cache update
    """
    cache_key = config.get(
        "cache_scps_across_organizations.redis.key.all_scps_key", "ALL_AWS_SCPS"
    )
    raw_scps = await retrieve_json_data_from_redis_or_s3(
        cache_key,
        s3_bucket=config.get("cache_scps_across_organizations.s3.bucket"),
        s3_key=config.get(
            "cache_scps_across_organizations.s3.file", "scps/cache_scps_v1.json.gz"
        ),
        default={},
        max_age=86400,
    )
    # Rebuild the cache when forced or when nothing usable was cached.
    if force_sync or not raw_scps:
        raw_scps = await cache_all_scps()
    # Hydrate the cached JSON into model instances, keyed by org account.
    return {
        account: [ServiceControlPolicyModel(**scp) for scp in org_scps]
        for account, org_scps in raw_scps.items()
    }
async def get_organizational_units_for_account(identifier: str) -> Set[str]:
    """Return a set of Organizational Unit IDs for a given account or OU ID

    Args:
        identifier: AWS account or OU ID
    """
    org_structures = await get_org_structure()
    ou_ids: Set[str] = set()
    # Search each organization's tree until the identifier is located.
    for org_structure in org_structures.values():
        found, ou_ids = await _is_member_of_ou(identifier, org_structure)
        if found:
            break
    if not ou_ids:
        log.warning("could not find account in organization")
    return ou_ids
async def _scp_targets_account_or_ou(
scp: ServiceControlPolicyModel, identifier: str, organizational_units: Set[str]
) -> bool:
"""Return True if the provided SCP targets the account or OU identifier provided
Args:
scp: Service Control Policy whose targets we check
identifier: AWS account or OU ID
organizational_units: set of IDs for OUs of which the identifier is a member
"""
for target in scp.targets:
if target.target_id == identifier or target.target_id in organizational_units:
return True
return False
class ServiceControlPolicyArrayModel(BaseModel):
    """Pydantic root model wrapping a list of Service Control Policies.

    NOTE(review): this shadows the model of the same name imported from
    ``consoleme.models`` above — confirm the local redefinition is intentional.
    """

    __root__: List[ServiceControlPolicyModel]
The provided code snippet includes necessary dependencies for implementing the `get_scps_for_account_or_ou` function. Write a Python function `async def get_scps_for_account_or_ou(identifier: str) -> ServiceControlPolicyArrayModel` to solve the following problem:
Retrieve a list of Service Control Policies for the account or OU specified by the identifier Args: identifier: AWS account or OU ID
Here is the function:
async def get_scps_for_account_or_ou(identifier: str) -> ServiceControlPolicyArrayModel:
    """Retrieve a list of Service Control Policies for the account or OU specified by the identifier

    Args:
        identifier: AWS account or OU ID
    """
    all_scps = await get_all_scps()
    ou_memberships = await get_organizational_units_for_account(identifier)
    matching_scps = []
    # Walk every org's SCPs and keep those whose targets include the identifier
    # itself or any OU the identifier belongs to.
    for org_scps in all_scps.values():
        for scp in org_scps:
            if await _scp_targets_account_or_ou(scp, identifier, ou_memberships):
                matching_scps.append(scp)
    return ServiceControlPolicyArrayModel(__root__=matching_scps)
162,180 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
red = RedisHandler().redis_sync()
def query(
    query: str, use_aggregator: bool = True, account_id: Optional[str] = None
) -> List:
    """Run an AWS Config advanced (SQL-like) query and return all matching resources.

    :param query: AWS Config advanced query expression
    :param use_aggregator: When True, query the organization-wide Config
        aggregator; otherwise query a single account across all configured regions.
    :param account_id: Account to query when not using the aggregator
    :return: List of resource dictionaries parsed from the Config query results
    :raises MissingConfigurationValue: if the aggregator name is not configured
    """
    resources = []
    if use_aggregator:
        config_client = boto3.client(
            "config", region_name=config.region, **config.get("boto3.client_kwargs", {})
        )
        configuration_aggregator_name = config.get(
            "aws_config.configuration_aggregator.name"
        )
        # Bug fix: validate the configuration value *before* calling .format() on
        # it. Previously a missing value raised AttributeError (None.format)
        # instead of the intended MissingConfigurationValue.
        if not configuration_aggregator_name:
            raise MissingConfigurationValue("Invalid configuration for aws_config")
        configuration_aggregator_name = configuration_aggregator_name.format(
            region=config.region
        )
        # Paginate through all results via NextToken.
        next_token = None
        while True:
            kwargs = {"NextToken": next_token} if next_token else {}
            response = config_client.select_aggregate_resource_config(
                Expression=query,
                ConfigurationAggregatorName=configuration_aggregator_name,
                Limit=100,
                **kwargs,
            )
            resources.extend(json.loads(r) for r in response.get("Results", []))
            next_token = response.get("NextToken")
            if not next_token:
                break
        return resources
    else:  # Don't use Config aggregator and instead query all the regions on an account
        session = boto3.Session()
        available_regions = config.get("aws_config.available_regions", [])
        if not available_regions:
            available_regions = session.get_available_regions("config")
        excluded_regions = config.get(
            "api_protect.exclude_regions",
            ["af-south-1", "ap-east-1", "ap-northeast-3", "eu-south-1", "me-south-1"],
        )
        regions = [x for x in available_regions if x not in excluded_regions]
        for region in regions:
            config_client = boto3_cached_conn(
                "config",
                account_number=account_id,
                assume_role=config.get("policies.role_name"),
                region=region,
                sts_client_kwargs=dict(
                    region_name=config.region,
                    endpoint_url=config.get(
                        "aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
                    ).format(region=config.region),
                ),
                client_kwargs=config.get("boto3.client_kwargs", {}),
            )
            try:
                # Query Config for a specific account in this region, paginating
                # through all results via NextToken.
                next_token = None
                while True:
                    kwargs = {"NextToken": next_token} if next_token else {}
                    response = config_client.select_resource_config(
                        Expression=query, Limit=100, **kwargs
                    )
                    resources.extend(
                        json.loads(r) for r in response.get("Results", [])
                    )
                    next_token = response.get("NextToken")
                    if not next_token:
                        break
            except ClientError as e:
                # Log and continue: a failure in one region should not abort the
                # remaining regions.
                log.error(
                    {
                        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
                        "message": "Failed to query AWS Config",
                        "query": query,
                        "use_aggregator": use_aggregator,
                        "account_id": account_id,
                        "region": region,
                        "error": str(e),
                    },
                    exc_info=True,
                )
                sentry_sdk.capture_exception()
        return resources
async def redis_hsetex(name: str, key: str, value: Any, expiration_seconds: int):
    """
    Lazy way to set Redis hash keys with an expiration. Warning: Entries set here only get deleted when redis_hgetex
    is called on an expired key.

    :param name: Redis key
    :param key: Hash key
    :param value: Hash value
    :param expiration_seconds: Number of seconds to consider entry expired
    :return:
    """
    red = await RedisHandler().redis()
    # The TTL travels with the value; redis_hgetex enforces it at read time.
    payload = json.dumps({"value": value, "ttl": int(time.time()) + expiration_seconds})
    return await sync_to_async(red.hset)(name, key, payload)
async def redis_hgetex(name: str, key: str, default=None):
    """
    Lazy way to retrieve an entry from a Redis Hash, and delete it if it's due to expire.

    :param name: Redis key (hash name)
    :param key: Hash field to read
    :param default: Value returned when the entry is missing or expired
    :return: The stored value, or ``default``
    """
    red = await RedisHandler().redis()
    # Bug fix: red.exists()/red.hdel() are blocking network calls; run them via
    # sync_to_async so they don't stall the event loop (hget was already wrapped).
    if not await sync_to_async(red.exists)(name):
        return default
    result_j = await sync_to_async(red.hget)(name, key)
    if not result_j:
        return default
    result = json.loads(result_j)
    if int(time.time()) > result["ttl"]:
        # Entry expired: lazily delete it on read.
        await sync_to_async(red.hdel)(name, key)
        return default
    return result["value"]
The provided code snippet includes necessary dependencies for implementing the `resource_arn_known_in_aws_config` function. Write a Python function `async def resource_arn_known_in_aws_config( resource_arn: str, run_query: bool = True, run_query_with_aggregator: bool = True, expiration_seconds: int = config.get( "aws.resource_arn_known_in_aws_config.expiration_seconds", 3600 ), ) -> bool` to solve the following problem:
Determines if the resource ARN is known in AWS Config. AWS config does not store all resource types, nor will it account for cross-organizational resources, so the result of this function shouldn't be used to determine if a resource "exists" or not. A more robust approach is determining the resource type and querying AWS API directly to see if it exists, but this requires a lot of code. Note: This data may be stale by ~ 1 hour and 15 minutes (local results caching + typical AWS config delay) :param expiration_seconds: Number of seconds to consider stored result expired :param resource_arn: ARN of the resource we want to look up :param run_query: Should we run an AWS config query if we're not able to find the resource in our AWS Config cache? :param run_query_with_aggregator: Should we run the AWS Config query on our AWS Config aggregator? :return:
Here is the function:
async def resource_arn_known_in_aws_config(
    resource_arn: str,
    run_query: bool = True,
    run_query_with_aggregator: bool = True,
    expiration_seconds: int = config.get(
        "aws.resource_arn_known_in_aws_config.expiration_seconds", 3600
    ),
) -> bool:
    """
    Determines whether the resource ARN is known to AWS Config. AWS Config does
    not track every resource type, nor cross-organizational resources, so a
    False result here does not prove the resource doesn't exist. A more robust
    approach is to determine the resource type and query the AWS API directly,
    which requires substantially more code.

    Note: This data may be stale by ~ 1 hour and 15 minutes (local results
    caching + typical AWS Config delay).

    :param resource_arn: ARN of the resource we want to look up
    :param run_query: Run an AWS Config query if the ARN isn't in our cache?
    :param run_query_with_aggregator: Run that query on the Config aggregator?
    :param expiration_seconds: Number of seconds to consider stored result expired
    :return: True if AWS Config knows about the ARN
    """
    if not resource_arn.startswith("arn:aws:"):
        return False
    aws_config_cache_key: str = config.get(
        "aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
    )
    # Fast path: the ARN is already present in the bulk AWS Config cache.
    if red.exists(aws_config_cache_key) and red.hget(
        aws_config_cache_key, resource_arn
    ):
        return True
    temp_matches_key: str = config.get(
        "resource_arn_known_in_aws_config.redis.temp_matches_key",
        "TEMP_QUERIED_RESOURCE_ARN_CACHE",
    )
    # To prevent repetitive queries against AWS Config, first see if we already
    # ran a query for this ARN recently.
    cached = await redis_hgetex(temp_matches_key, resource_arn)
    if cached:
        return cached["known"]
    if not run_query:
        return False
    matches = await sync_to_async(query)(
        f"select arn where arn = '{resource_arn}'",
        use_aggregator=run_query_with_aggregator,
    )
    known_arn = bool(matches)
    # Cache the outcome (with expiration) to avoid future repetitive queries.
    await redis_hsetex(
        temp_matches_key,
        resource_arn,
        {"known": known_arn},
        expiration_seconds=expiration_seconds,
    )
    return known_arn
162,181 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
async def redis_hsetex(name: str, key: str, value: Any, expiration_seconds: int):
    """Set a Redis hash field whose value carries its own expiration timestamp.

    Warning: entries written here are only purged when ``redis_hgetex`` reads
    an expired key.

    :param name: Redis key (hash name)
    :param key: Hash field
    :param value: Value to store
    :param expiration_seconds: Seconds until the entry is considered expired
    :return: Result of the underlying HSET call
    """
    expires_at = int(time.time()) + expiration_seconds
    redis_client = await RedisHandler().redis()
    return await sync_to_async(redis_client.hset)(
        name, key, json.dumps({"value": value, "ttl": expires_at})
    )
async def redis_hgetex(name: str, key: str, default=None):
    """
    Lazy way to retrieve an entry from a Redis Hash, and delete it if it's due to expire.

    :param name: Redis key (hash name)
    :param key: Hash field to read
    :param default: Value returned when the entry is missing or expired
    :return: The stored value, or ``default``
    """
    red = await RedisHandler().redis()
    # Bug fix: red.exists()/red.hdel() are blocking network calls; run them via
    # sync_to_async so they don't stall the event loop (hget was already wrapped).
    if not await sync_to_async(red.exists)(name):
        return default
    result_j = await sync_to_async(red.hget)(name, key)
    if not result_j:
        return default
    result = json.loads(result_j)
    if int(time.time()) > result["ttl"]:
        # Entry expired: lazily delete it on read.
        await sync_to_async(red.hdel)(name, key)
        return default
    return result["value"]
The provided code snippet includes necessary dependencies for implementing the `simulate_iam_principal_action` function. Write a Python function `async def simulate_iam_principal_action( principal_arn, action, resource_arn, source_ip, expiration_seconds: int = config.get( "aws.simulate_iam_principal_action.expiration_seconds", 3600 ), )` to solve the following problem:
Simulates an IAM principal action affecting a resource :return:
Here is the function:
async def simulate_iam_principal_action(
    principal_arn,
    action,
    resource_arn,
    source_ip,
    expiration_seconds: int = config.get(
        "aws.simulate_iam_principal_action.expiration_seconds", 3600
    ),
):
    """
    Simulates an IAM principal action affecting a resource

    :param principal_arn: ARN of the IAM principal (role/user) to simulate
    :param action: IAM action name (e.g. ``s3:GetObject``)
    :param resource_arn: ARN of the resource the action targets
    :param source_ip: Caller source IP; included as an ``aws:SourceIp`` context
        key when it is a valid IPv4 address
    :param expiration_seconds: How long to cache simulation results
    :return: ``EvaluationResults`` from simulate_principal_policy, or None on error
    """
    # simulating IAM principal policies is expensive.
    # Temporarily cache and return results by principal_arn, action, and resource_arn. We don't consider source_ip
    # when caching because it could vary greatly for application roles running on multiple instances/containers.
    policy_simulation_cache_redis_key: str = config.get(
        "resource_arn_known_in_aws_config.redis.temp_matches_key",
        "TEMP_POLICY_SIMULATION_CACHE",
    )
    cache_key = f"{principal_arn}-{action}-{resource_arn}"
    result = await redis_hgetex(policy_simulation_cache_redis_key, cache_key)
    if result:
        return result
    ip_regex = r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
    context_entries = []
    if source_ip and re.match(ip_regex, source_ip):
        context_entries.append(
            {
                "ContextKeyName": "aws:SourceIp",
                "ContextKeyValues": [source_ip],
                "ContextKeyType": "ip",
            }
        )
    account_id = principal_arn.split(":")[4]
    client = await sync_to_async(boto3_cached_conn)(
        "iam",
        account_number=account_id,
        assume_role=config.get("policies.role_name"),
        sts_client_kwargs=dict(
            region_name=config.region,
            endpoint_url=config.get(
                "aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
            ).format(region=config.region),
        ),
        retry_max_attempts=2,
    )
    try:
        response = await sync_to_async(client.simulate_principal_policy)(
            PolicySourceArn=principal_arn,
            ActionNames=[
                action,
            ],
            ResourceArns=[
                resource_arn,
            ],
            # TODO: Attach resource policy when discoverable
            # ResourcePolicy='string',
            # TODO: Attach Account ID of resource
            # ResourceOwner='string',
            ContextEntries=context_entries,
            MaxItems=100,
        )
        # Bug fix: cache under the composite cache_key used by the lookup above.
        # Previously results were stored under resource_arn only, so the cache
        # never hit and every call re-ran the (expensive) simulation.
        await redis_hsetex(
            policy_simulation_cache_redis_key,
            cache_key,
            response["EvaluationResults"],
            expiration_seconds=expiration_seconds,
        )
    except Exception:
        sentry_sdk.capture_exception()
        return None
    return response["EvaluationResults"]
162,182 | import fnmatch
import json
import re
import sys
import time
from copy import deepcopy
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
import boto3
import pytz
import sentry_sdk
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError, ParamValidationError
from cloudaux import CloudAux
from cloudaux.aws.decorators import rate_limited
from cloudaux.aws.iam import get_managed_policy_document, get_policy
from cloudaux.aws.s3 import (
get_bucket_location,
get_bucket_policy,
get_bucket_resource,
get_bucket_tagging,
)
from cloudaux.aws.sns import get_topic_attributes
from cloudaux.aws.sqs import get_queue_attributes, get_queue_url, list_queue_tags
from cloudaux.aws.sts import boto3_cached_conn
from dateutil.parser import parse
from deepdiff import DeepDiff
from parliament import analyze_policy_string, enhance_finding
from policy_sentry.util.arns import get_account_from_arn, parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
BackgroundCheckNotPassedException,
InvalidInvocationArgument,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers.aws_organizations import (
retrieve_org_structure,
retrieve_scps_for_organization,
)
from consoleme.lib.aws_config.aws_config import query
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.generic import sort_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler, redis_hget, redis_hgetex, redis_hsetex
from consoleme.models import (
CloneRoleRequestModel,
RoleCreationRequestModel,
ServiceControlPolicyArrayModel,
ServiceControlPolicyModel,
)
async def get_iam_principal_owner(arn: str, aws: Any) -> Optional[str]:
    """Look up the configured owner of an IAM role or user.

    :param arn: ARN of the IAM principal
    :param aws: Factory returning the AWS plugin; the instance must expose async
        ``fetch_iam_role`` / ``fetch_iam_user`` methods
    :return: The principal's ``owner`` attribute, or None when unavailable
    """
    account_id = arn.split(":")[4]
    # e.g. "role/myrole" -> "role"; "user/bob" -> "user"
    principal_type = arn.split(":")[-1].split("/")[0]
    details = {}
    if principal_type == "role":
        details = await aws().fetch_iam_role(account_id, arn)
    elif principal_type == "user":
        details = await aws().fetch_iam_user(account_id, arn)
    return details.get("owner")
162,183 | import sys
import uuid
import tornado.escape
import ujson as json
from tornado.httpclient import AsyncHTTPClient, HTTPClientError, HTTPRequest
from tornado.httputil import HTTPHeaders
from consoleme.config import config
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_policy_request_uri_v2
from consoleme.models import ExtendedRequestModel
def slack_preflight_check(func):
    """Decorator that short-circuits Slack notification coroutines when Slack
    notifications are disabled in configuration.

    When ``slack.notifications_enabled`` is falsy, the wrapped coroutine
    function is never invoked; an awaitable resolving to None is returned
    instead.
    """
    from functools import wraps

    async def shortcircuit():
        return None

    # Bug fix: apply functools.wraps so the decorated function keeps its
    # original __name__/__doc__ (useful for logging and debugging).
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not config.get("slack.notifications_enabled", False):
            return shortcircuit()
        return func(*args, **kwargs)

    return wrapper
162,184 | import sys
import uuid
import tornado.escape
import ujson as json
from tornado.httpclient import AsyncHTTPClient, HTTPClientError, HTTPRequest
from tornado.httputil import HTTPHeaders
from consoleme.config import config
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_policy_request_uri_v2
from consoleme.models import ExtendedRequestModel
# Module-level logger and metrics plugin used by the Slack notification helpers.
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
async def send_slack_notification(payload, payload_id):
    """
    Sends a notification using specified webhook URL about a new request created
    """
    webhook_url = config.get("slack.webhook_url")
    if not webhook_url:
        log.error(
            f"Missing webhook URL for slack notification. Not sending payload: {payload_id}"
        )
        return
    request = HTTPRequest(
        url=webhook_url,
        method="POST",
        headers=HTTPHeaders({"Content-Type": "application/json"}),
        body=json.dumps(payload),
    )
    client = AsyncHTTPClient(force_instance=True)
    try:
        await client.fetch(request=request)
    except (ConnectionError, HTTPClientError) as e:
        log.error(
            f"Slack notifications could not be sent for payload: {payload_id} due to {str(e)}"
        )
    else:
        log.debug(f"Slack notifications sent for payload: {payload_id}")
async def _build_policy_payload(
    extended_request: ExtendedRequestModel,
    requester: str,
    arn: str,
    admin_approved: bool,
    approval_probe_approved: bool,
):
    """Assemble the Slack Block Kit payload describing a new policy change request."""
    request_uri = await get_policy_request_uri_v2(extended_request)

    def mrkdwn_section(text):
        # Helper for the repeated single-text "section" block shape.
        return {"type": "section", "text": {"type": "mrkdwn", "text": text}}

    if admin_approved:
        pre_text = "A new request has been created and auto-approved by admin"
    elif approval_probe_approved:
        pre_text = "A new request has been created and auto-approved by auto-approval probe"
    else:
        pre_text = "A new request has been created"
    justification_section = {
        "type": "section",
        "fields": [
            {"text": "*Justification*", "type": "mrkdwn"},
            {"type": "plain_text", "text": "\n"},
            {
                "type": "plain_text",
                "text": f"{tornado.escape.xhtml_escape(extended_request.justification)}",
            },
        ],
    }
    blocks = [
        mrkdwn_section(f"*<{request_uri}|ConsoleMe Policy Change Request>*"),
        mrkdwn_section(f"*User* \n {requester}"),
        mrkdwn_section(f"*Resource* \n {arn}"),
        justification_section,
        {"type": "divider"},
        mrkdwn_section(f"{pre_text}. Click *<{request_uri}|here>* to view it."),
    ]
    return {"blocks": blocks}
class ExtendedRequestModel(RequestModel):
    """Request model extended with change details, requester info, and review state."""

    changes: ChangeModelArray
    requester_info: UserModel
    # reviewer and comments default to None until supplied.
    reviewer: Optional[str] = None
    comments: Optional[List[CommentModel]] = None
The provided code snippet includes necessary dependencies for implementing the `send_slack_notification_new_policy_request` function. Write a Python function `async def send_slack_notification_new_policy_request( extended_request: ExtendedRequestModel, admin_approved, approval_probe_approved )` to solve the following problem:
Sends a notification using specified webhook URL about a new request created
Here is the function:
async def send_slack_notification_new_policy_request(
    extended_request: ExtendedRequestModel, admin_approved, approval_probe_approved
):
    """
    Sends a notification using specified webhook URL about a new request created
    """
    if admin_approved and config.get("slack.ignore_auto_admin_policies", False):
        # Don't send slack notifications for policies that were auto approved due to admin status
        return None
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    requester = extended_request.requester_email
    arn = extended_request.principal.principal_arn
    stats.count(function, tags={"user": requester, "arn": arn})
    payload_id = uuid.uuid4()
    log.debug(
        {
            "function": function,
            "user": requester,
            "arn": arn,
            "message": "Incoming request for slack notification",
            "request": extended_request.dict(),
            "admin_approved": admin_approved,
            "approval_probe_approved": approval_probe_approved,
            "payload_id": payload_id,
        }
    )
    payload = await _build_policy_payload(
        extended_request, requester, arn, admin_approved, approval_probe_approved
    )
    return await send_slack_notification(payload, payload_id)
162,185 | import asyncio
from typing import Dict, List, Optional, Union
from password_strength import PasswordPolicy
from consoleme.config import config
from consoleme.lib.redis import RedisHandler
red = RedisHandler().redis_sync()
import asyncio
async def wait_after_authentication_failure(user) -> str:
    """Apply a quadratic back-off sleep after a failed authentication attempt.

    Tracks the number of recent failures for ``user`` in Redis (with an
    expiring key) and sleeps ``failures ** 2`` seconds, then returns a message
    describing the next delay.
    """
    expiration_seconds = config.get(
        "wait_after_authentication_failure.expiration", 60
    )
    redis_key = f"wait_after_authentication_failure_{user}"
    failures = int(red.get(redis_key) or 0)  # Redis values are strings
    red.setex(redis_key, expiration_seconds, failures + 1)
    await asyncio.sleep(failures**2)
    next_delay = (failures + 1) ** 2
    return (
        f"Your next authentication failure will result in a {next_delay} second wait. "
        f"This wait time will expire after {expiration_seconds} seconds of no authentication failures."
    )
162,186 | import asyncio
from typing import Dict, List, Optional, Union
from password_strength import PasswordPolicy
from consoleme.config import config
from consoleme.lib.redis import RedisHandler
async def check_password_strength(
    password,
) -> Optional[Union[Dict[str, str], Dict[str, List[str]]]]:
    """Validate a password against the configured password policy.

    :param password: Candidate password
    :return: None when the password passes; otherwise a dict containing a
        ``message`` and the list of policy ``errors`` (stringified so they can
        be JSON-encoded later).
    """
    # NOTE(review): "auth.password_policy.entry_bits" feeding entropy_bits looks
    # like a typo for "entropy_bits" — confirm against deployed configuration
    # before renaming the key.
    policy_kwargs = {
        "strength": config.get("auth.password_policy.strength", 0.5),
        "entropy_bits": config.get("auth.password_policy.entry_bits"),
        "length": config.get("auth.password_policy.length"),
        "uppercase": config.get("auth.password_policy.uppercase"),
        "numbers": config.get("auth.password_policy.numbers"),
        "special": config.get("auth.password_policy.special"),
        "nonletters": config.get("auth.password_policy.nonletters"),
    }
    # password_strength doesn't accept null values, so drop unset entries.
    policy_kwargs = {k: v for k, v in policy_kwargs.items() if v}
    policy = PasswordPolicy.from_names(**policy_kwargs)
    failures = [str(e) for e in policy.password(password).test()]
    if failures:
        return {"message": "Password doesn't have enough entropy.", "errors": failures}
162,187 | import sys
import threading
import time
from typing import Any, Optional
import boto3
import redis
import ujson as json
from asgiref.sync import sync_to_async
from redis.client import Redis
from consoleme.config import config
from consoleme.lib.plugins import get_plugin_by_name
class RedisHandler:
    """Factory for ConsoleMe Redis clients configured from application settings.

    Constructor defaults are resolved once, at class-definition time, from
    configuration; ``region`` is expected to be a module-level value defined
    elsewhere in this file.
    """

    def __init__(
        self,
        host: str = config.get(
            "redis.host.{}".format(region), config.get("redis.host.global", "localhost")
        ),
        port: int = config.get("redis.port", 6379),
        db: int = config.get("redis.db", 0),
    ) -> None:
        self.red = None
        self.host = host
        self.port = port
        self.db = db
        self.enabled = True
        # Treat any missing connection setting as "Redis disabled".
        if self.host is None or self.port is None or self.db is None:
            self.enabled = False

    async def redis(self, db: int = 0) -> Redis:
        # NOTE(review): the `db` parameter is accepted but ignored in favor of
        # self.db — confirm whether callers rely on it before changing.
        self.red = await sync_to_async(ConsoleMeRedis)(
            host=self.host,
            port=self.port,
            db=self.db,
            charset="utf-8",
            decode_responses=True,
        )
        return self.red

    def redis_sync(self, db: int = 0) -> Redis:
        # Synchronous variant of redis(); same NOTE about the ignored `db` arg.
        self.red = ConsoleMeRedis(
            host=self.host,
            port=self.port,
            db=self.db,
            charset="utf-8",
            decode_responses=True,
        )
        return self.red
import redis
from redis.client import Redis
def redis_get_sync(key: str, default: None = None) -> Optional[str]:
    """Synchronously fetch ``key`` from Redis.

    Returns ``default`` when the key is missing, empty, or Redis is unreachable.
    """
    red = RedisHandler().redis_sync()
    try:
        value = red.get(key)
    except redis.exceptions.ConnectionError:
        value = None
    return value if value else default
162,188 | import time
from hashlib import sha256
from typing import Dict, List, Optional
import ujson as json
from policy_sentry.querying.actions import get_actions_with_access_level
from consoleme.config import config
from consoleme.exceptions.exceptions import (
InvalidRequestParameter,
MissingConfigurationValue,
)
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme.lib.aws import minimize_iam_policy_statements
from consoleme.lib.defaults import SELF_SERVICE_IAM_DEFAULTS
from consoleme.lib.generic import generate_random_string, iterate_and_format_dict
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.models import (
ChangeGeneratorModel,
ChangeGeneratorModelArray,
ChangeModelArray,
CrudChangeGeneratorModel,
InlinePolicyChangeModel,
PolicyModel,
ResourceModel,
Status,
)
async def _generate_inline_policy_change_model(
    principal: str,
    resources: List[ResourceModel],
    statements: List[Dict],
    user: str,
    is_new: bool = True,
    policy_name: Optional[str] = None,
) -> InlinePolicyChangeModel:
    """
    Generates an inline policy change model.

    :param principal: principal associated with the InlinePolicyChangeModel
    :param resources: Resource ARNs (or wildcards) of the resources associated with the InlinePolicyChangeModel
    :param statements: A list of AWS IAM policy statement dictionaries
    :param user: User e-mail address
    :param is_new: Boolean representing if we're creating a new policy or updating an existing policy
    :param policy_name: Optional policy name. If not provided, one will be generated
    :return: InlinePolicyChangeModel
    """
    return InlinePolicyChangeModel(
        change_type="inline_policy",
        principal=principal,
        resources=resources,
        policy_name=await generate_policy_name(policy_name, user),
        new=is_new,
        policy=await _generate_inline_policy_model_from_statements(statements),
        status=Status.not_applied,
    )
async def _generate_inline_iam_policy_statement_from_change_generator(
    change: ChangeGeneratorModel,
) -> Dict:
    """
    Generates an inline policy statement from a ChangeGeneratorModel.

    :param change: ChangeGeneratorModel
    :return: A dictionary representing an inline policy statement.
    """
    # generator_type may be a plain string or an enum member.
    generator_type = change.generator_type
    if not isinstance(generator_type, str):
        generator_type = change.generator_type.value
    if generator_type == "s3":
        statement = await _generate_s3_inline_policy_statement_from_mapping(change)
    elif generator_type == "crud_lookup":
        statement = await _generate_inline_policy_statement_from_policy_sentry(change)
    else:
        statement = await _generate_inline_policy_statement_from_mapping(change)
    # Honeybee supports restricting policies to certain accounts.
    if change.include_accounts:
        statement["IncludeAccounts"] = change.include_accounts
    if change.exclude_accounts:
        statement["ExcludeAccounts"] = change.exclude_accounts
    return statement
async def _attach_sids_to_policy_statements(
    inline_iam_policy_statements: List[Dict], user: str
) -> List[Dict]:
    """
    Generates and attaches Sids to each policy statement if the statement does not already have a Sid.

    :param inline_iam_policy_statements: A list of IAM policy statement dictionaries
    :param user: The acting user's email address
    :return: The same list, with a Sid entry on every statement
    """
    for stmt in inline_iam_policy_statements:
        if stmt.get("Sid"):
            continue
        stmt["Sid"] = await _generate_policy_sid(user)
    return inline_iam_policy_statements
async def _generate_resource_model_from_arn(arn: str) -> Optional[ResourceModel]:
    """
    Generates a ResourceModel from a Resource ARN

    :param arn: AWS resource identifier
    :return: ResourceModel, or None when the ARN is a wildcard or unparsable
    """
    global ALL_ACCOUNTS
    try:
        parts = arn.split(":")
        account_id = parts[4]
        resource_type = parts[2]
        region = parts[3] or "global"
        name = parts[5].split("/")[-1]
        # Lazily populate the module-level account-id -> name mapping.
        if not ALL_ACCOUNTS:
            ALL_ACCOUNTS = await get_account_id_to_name_mapping()
        return ResourceModel(
            arn=arn,
            name=name,
            account_id=account_id,
            account_name=ALL_ACCOUNTS.get(account_id, ""),
            resource_type=resource_type,
            region=region,
        )
    except IndexError:
        # Resource is not parsable or a wildcard.
        return
# NOTE(review): inherits from BaseException, so a bare `except Exception` will
# NOT catch it — confirm this is intentional before changing the base class.
class InvalidRequestParameter(BaseException):
    """Invalid Request Parameter passed to function"""

    def __init__(self, msg=""):
        # Emit a metric every time this exception is constructed.
        stats.count("InvalidRequestParameter")
        super().__init__(msg)
async def minimize_iam_policy_statements(
    inline_iam_policy_statements: List[Dict], disregard_sid=True
) -> List[Dict]:
    """
    Minimizes a list of inline IAM policy statements.

    1. Policies that are identical except for the resources will have the resources merged into a single statement
    with the same actions, effects, conditions, etc.
    2. Policies that have an identical resource, but different actions, will be combined if the rest of the policy
    is identical.

    Statements are normalized (via ``normalize_policies``) before and after
    minimization, and surviving statements are key-sorted with ``sort_dict``.
    Input dictionaries are mutated in place.

    :param inline_iam_policy_statements: A list of IAM policy statement dictionaries
    :param disregard_sid: If True, strip ``Sid`` keys so they don't block merging
    :return: A potentially more compact list of IAM policy statement dictionaries
    """
    # Indices of statements that were merged into an earlier statement.
    exclude_ids = []
    minimized_statements = []
    inline_iam_policy_statements = await normalize_policies(
        inline_iam_policy_statements
    )
    # O(n^2) pairwise comparison; acceptable for the small statement lists
    # produced by a single policy request.
    for i in range(len(inline_iam_policy_statements)):
        inline_iam_policy_statement = inline_iam_policy_statements[i]
        if disregard_sid:
            inline_iam_policy_statement.pop("Sid", None)
        if i in exclude_ids:
            # We've already combined this policy with another. Ignore it.
            continue
        for j in range(i + 1, len(inline_iam_policy_statements)):
            if j in exclude_ids:
                # We've already combined this policy with another. Ignore it.
                continue
            inline_iam_policy_statement_to_compare = inline_iam_policy_statements[j]
            if disregard_sid:
                inline_iam_policy_statement_to_compare.pop("Sid", None)
            # Check to see if policy statements are identical except for a given element. Merge the policies
            # if possible.
            for element in [
                "Resource",
                "Action",
                "NotAction",
                "NotResource",
                "NotPrincipal",
            ]:
                if not (
                    inline_iam_policy_statement.get(element)
                    or inline_iam_policy_statement_to_compare.get(element)
                ):
                    # This function won't handle `Condition`.
                    continue
                # Compare the two statements while ignoring the candidate
                # element; an empty diff means they differ only in `element`.
                diff = DeepDiff(
                    inline_iam_policy_statement,
                    inline_iam_policy_statement_to_compare,
                    ignore_order=True,
                    exclude_paths=[f"root['{element}']"],
                )
                if not diff:
                    exclude_ids.append(j)
                    # Policy can be minimized
                    # NOTE(review): if only ONE of the two statements defines
                    # `element`, the guard above still passes and the excluded
                    # diff is empty, so the `+` below would raise KeyError on
                    # the statement missing the key -- confirm inputs always
                    # carry the element symmetrically.
                    inline_iam_policy_statement[element] = sorted(
                        list(
                            set(
                                inline_iam_policy_statement[element]
                                + inline_iam_policy_statement_to_compare[element]
                            )
                        )
                    )
                    # Merge on the first differing element only.
                    break
    # Keep only statements that were not merged away, in stable key order.
    for i in range(len(inline_iam_policy_statements)):
        if i not in exclude_ids:
            inline_iam_policy_statements[i] = sort_dict(inline_iam_policy_statements[i])
            minimized_statements.append(inline_iam_policy_statements[i])
    # TODO(cccastrapel): Intelligently combine actions and/or resources if they include wildcards
    minimized_statements = await normalize_policies(minimized_statements)
    return minimized_statements
class ChangeModelArray(BaseModel):
    """Container for the concrete change models produced for a policy request.

    NOTE: pydantic attempts Union members left-to-right when parsing, so the
    member order below affects which model an input dict is coerced into --
    do not reorder casually.
    """

    changes: List[
        Union[
            InlinePolicyChangeModel,
            ManagedPolicyChangeModel,
            PermissionsBoundaryChangeModel,
            ResourcePolicyChangeModel,
            AssumeRolePolicyChangeModel,
            ResourceTagChangeModel,
            GenericFileChangeModel,
            ManagedPolicyResourceChangeModel,
        ]
    ]
class ChangeGeneratorModelArray(BaseModel):
    """Container for the incoming change-generator requests of a policy request.

    NOTE: pydantic attempts Union members left-to-right when parsing; the
    generic/custom generators are last so the service-specific models win --
    do not reorder casually.
    """

    changes: List[
        Union[
            S3ChangeGeneratorModel,
            SQSChangeGeneratorModel,
            SNSChangeGeneratorModel,
            SESChangeGeneratorModel,
            CrudChangeGeneratorModel,
            GenericChangeGeneratorModel,
            CustomIamChangeGeneratorModel,
        ]
    ]
The provided code snippet includes necessary dependencies for implementing the `generate_change_model_array` function. Write a Python function `async def generate_change_model_array( changes: ChangeGeneratorModelArray, ) -> ChangeModelArray` to solve the following problem:
Compiles a ChangeModelArray which includes all of the AWS policies required to satisfy the ChangeGeneratorModelArray request. :param changes: ChangeGeneratorModelArray :return: ChangeModelArray
Here is the function:
async def generate_change_model_array(
    changes: ChangeGeneratorModelArray,
) -> ChangeModelArray:
    """
    Compiles a ChangeModelArray which includes all of the AWS policies required to satisfy the
    ChangeGeneratorModelArray request.

    :param changes: ChangeGeneratorModelArray
    :return: ChangeModelArray
    :raises InvalidRequestParameter: if the changes span more than one user or
        principal ARN, or if a generated inline policy lacks
        Action/NotAction, Resource/NotResource, or Effect.
    """
    change_models = []
    inline_iam_policy_statements: List[Dict] = []
    primary_principal = None
    primary_user = None
    resources = []
    for change in changes.changes:
        # Enforce a maximum of one user per ChangeGeneratorModelArray (aka Policy Request)
        if not primary_user:
            primary_user = change.user
        if primary_user != change.user:
            raise InvalidRequestParameter(
                "All changes associated with request must be associated with the same user."
            )
        # Enforce a maximum of one principal ARN per ChangeGeneratorModelArray (aka Policy Request)
        if not primary_principal:
            primary_principal = change.principal
        if primary_principal != change.principal:
            raise InvalidRequestParameter(
                "We only support making changes to a single principal ARN per request."
            )
        if change.generator_type == "custom_iam":
            # Custom IAM changes carry a caller-supplied policy document; a
            # single statement dict is normalized to a one-element list.
            inline_policies = change.policy["Statement"]
            if isinstance(inline_policies, dict):
                inline_policies = [inline_policies]
        else:
            # Generate inline policy for the change, if applicable
            inline_policies = [
                await _generate_inline_iam_policy_statement_from_change_generator(
                    change
                )
            ]
        for inline_policy in inline_policies:
            # Inline policies must have Action|NotAction, Resource|NotResource, and an Effect
            if inline_policy and (
                (not inline_policy.get("Action") and not inline_policy.get("NotAction"))
                or (
                    not inline_policy.get("Resource")
                    and not inline_policy.get("NotResource")
                )
                or not inline_policy.get("Effect")
            ):
                raise InvalidRequestParameter(
                    f"Generated inline policy is invalid. Double-check request parameter: {inline_policy}"
                )
            if inline_policy and change.resource_arn:
                # TODO(ccastrapel): Add more details to the ResourceModel when we determine we can use it for something.
                if isinstance(change.resource_arn, str):
                    change.resource_arn = [change.resource_arn]
                for arn in change.resource_arn:
                    resource_model = await _generate_resource_model_from_arn(arn)
                    # If the resource arn is actually a wildcard, we might not have a valid resource model
                    if resource_model:
                        resources.append(resource_model)
            if inline_policy:
                inline_iam_policy_statements.append(inline_policy)
        # TODO(ccastrapel): V2: Generate resource policies for the change, if applicable
    # Minimize the policy statements to remove redundancy
    inline_iam_policy_statements = await minimize_iam_policy_statements(
        inline_iam_policy_statements
    )
    # Attach Sids to each of the statements that will help with identifying who made the request and when.
    inline_iam_policy_statements = await _attach_sids_to_policy_statements(
        inline_iam_policy_statements, primary_user
    )
    # TODO(ccastrapel): Check if the inline policy statements would be auto-approved and supply that context
    inline_iam_policy_change_model = await _generate_inline_policy_change_model(
        primary_principal, resources, inline_iam_policy_statements, primary_user
    )
    change_models.append(inline_iam_policy_change_model)
    return ChangeModelArray.parse_obj({"changes": change_models})
162,189 | import asyncio
import logging
import os
import tornado.autoreload
import tornado.httpserver
import tornado.ioloop
import uvloop
from tornado.platform.asyncio import AsyncIOMainLoop
from consoleme.config import config
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.routes import make_app
# Module-level singletons: metrics plugin instance and structured logger.
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
log = config.get_logger()
# Swap in uvloop as the asyncio event loop implementation unless disabled.
if config.get("tornado.uvloop", True):
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
# NOTE(review): `main` is not defined anywhere in this excerpt even though
# `make_app` is imported above -- presumably this should build the Tornado
# application (e.g. `make_app(...)`); confirm against the full module.
app = main()
def init():
    """Bind the Tornado HTTP server, set up autoreload, and run the IO loop.

    Reads the listen port/address from configuration, forks one server
    process per CPU, and -- in debug mode -- watches template files so the
    autoreloader restarts on edits. Blocks forever running the event loop.
    """
    port = config.get("tornado.port")
    stats.count("start")
    server = tornado.httpserver.HTTPServer(app)
    if port:
        server.bind(port, address=config.get("tornado.address"))
    server.start()  # forks one process per cpu
    if config.get("tornado.debug", False):
        # Use a plain loop rather than a list comprehension: we only want the
        # watch() side effect, not a throwaway list of return values.
        for directory, _, files in os.walk("consoleme/templates"):
            for f in files:
                # Skip editor/VCS dotfiles.
                if not f.startswith("."):
                    tornado.autoreload.watch(directory + "/" + f)
    log.debug({"message": "Server started"})
    asyncio.get_event_loop().run_forever()
162,190 | from typing import List
import sentry_sdk
from consoleme.config import config
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.models import AppDetailsArray, AppDetailsModel, AwsPrincipalModel
class Policies:
    """
    Policies internal plugin
    """

    def __init__(
        self,
    ) -> None:
        # DynamoDB handler used for CloudTrail error lookups.
        self.dynamo = UserDynamoHandler()

    async def get_errors_by_role(self, arn, n=5):
        """Return the top ``n`` CloudTrail errors for ``arn``.

        Failures are reported to Sentry and swallowed; an empty dict is
        returned so callers degrade gracefully.
        """
        try:
            return await self.dynamo.get_top_cloudtrail_errors_by_arn(arn, n)
        except Exception:
            sentry_sdk.capture_exception()
        return {}

    async def get_applications_associated_with_role(self, arn: str) -> AppDetailsArray:
        """
        This function returns applications associated with a role from configuration. You may want to override this
        function to pull this information from an authoritative source.

        :param arn: Role ARN
        :return: AppDetailsArray
        """
        configured_apps = config.get("application_details", {})
        formatted = [
            AppDetailsModel(
                name=app_name,
                owner=details.get("owner"),
                owner_url=details.get("owner_url"),
                app_url=details.get("app_url"),
            )
            for app_name, details in configured_apps.items()
        ]
        return AppDetailsArray(app_details=formatted)

    async def get_roles_associated_with_app(
        self, app_name: str
    ) -> List[AwsPrincipalModel]:
        """
        This function returns roles associated with an app from configuration. You may want to override this
        function to pull this information from an authoritative source.

        :param app_name: Name of application
        :return: List[AwsPrincipalModel]
        """
        return []
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init()` to solve the following problem:
Initialize Policies plugin.
Here is the function:
def init():
    """Initialize Policies plugin and return a plugin instance."""
    return Policies()
162,191 | from consoleme.config import config
from consoleme.lib.plugins import import_class_by_name
try:
    # Resolve the configured metrics plugin class by its dotted path.
    # NOTE(review): `desired_metric_plugin` is defined earlier in the module,
    # outside this excerpt -- presumably read from configuration; confirm.
    Metric = import_class_by_name(desired_metric_plugin)
except ImportError:
    # Surface a bad plugin path at import time rather than failing later.
    raise
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init()` to solve the following problem:
Initialize metrics plugin.
Here is the function:
def init():
    """Initialize metrics plugin."""
    # Returns the plugin *class* (not an instance); callers instantiate it,
    # e.g. `get_plugin_by_name(...)()` at module scope elsewhere.
    return Metric
162,192 | import asyncio
import copy
import ssl
import sys
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
import bleach
import boto3
import requests as requests_sync
import tenacity
import ujson as json
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from cloudaux.aws.iam import (
get_role_inline_policies,
get_role_managed_policies,
get_user_inline_policies,
get_user_managed_policies,
list_role_tags,
)
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import url_concat
from consoleme.config import config
from consoleme.exceptions.exceptions import UserRoleNotAssumableYet
from consoleme.lib.aws import raise_if_background_check_required_and_no_background_check
from consoleme.lib.dynamo import IAMRoleDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import send_communications_policy_change_request_v2
from consoleme.lib.redis import RedisHandler
class Aws:
    """The AWS class handles interactions with AWS."""

    def __init__(self) -> None:
        # Synchronous Redis client used for the IAM role cache.
        self.red = RedisHandler().redis_sync()
        # Redis hash mapping role ARN -> serialized role cache entry.
        self.redis_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
        # DynamoDB handler backing the durable IAM role cache.
        self.dynamo = IAMRoleDynamoHandler()
stop_max_attempt_number=3,
wait_exponential_multiplier=1000,
wait_exponential_max=1000,
)
def _add_role_to_redis(self, role_entry: dict):
"""Add the role to redis with a retry.
:param role_entry:
:return:
"""
self.red.hset(self.redis_key, role_entry["arn"], json.dumps(role_entry))
stop_max_attempt_number=3,
wait_exponential_multiplier=1000,
wait_exponential_max=1000,
)
def _fetch_role_from_redis(self, role_arn: str):
"""Fetch the role from redis with a retry.
:param role_arn:
:return:
"""
return self.red.hget(self.redis_key, role_arn)
    async def cloudaux_to_aws(self, principal):
        """Convert the cloudaux get_role/get_user into the get_account_authorization_details equivalent.

        Mutates ``principal`` in place and also returns it.

        :param principal: cloudaux-shaped role/user dict; must contain "Arn"
        :return: the same dict, reshaped to the IAM authorization-details format
        """
        # Pop out the fields that are not required:
        # Arn and RoleName/UserName will be popped off later:
        unrequired_fields = ["_version", "MaxSessionDuration"]
        # "role" or "user", derived from the resource segment of the ARN.
        principal_type = principal["Arn"].split(":")[-1].split("/")[0]
        for uf in unrequired_fields:
            principal.pop(uf, None)
        # Fix the Managed Policies:
        principal["AttachedManagedPolicies"] = list(
            map(
                lambda x: {"PolicyName": x["name"], "PolicyArn": x["arn"]},
                principal.get("ManagedPolicies", []),
            )
        )
        principal.pop("ManagedPolicies", None)
        # Fix the tags: cloudaux may return a {key: value} dict; IAM's format
        # is a list of {"Key": ..., "Value": ...} dicts.
        if isinstance(principal.get("Tags", {}), dict):
            principal["Tags"] = list(
                map(
                    lambda key: {"Key": key, "Value": principal["Tags"][key]},
                    principal.get("Tags", {}),
                )
            )
        # Note: the instance profile list is verbose -- not transforming it (outside of renaming the field)!
        principal["InstanceProfileList"] = principal.pop("InstanceProfiles", [])
        # Inline Policies:
        if principal_type == "role":
            principal["RolePolicyList"] = list(
                map(
                    lambda name: {
                        "PolicyName": name,
                        "PolicyDocument": principal["InlinePolicies"][name],
                    },
                    principal.get("InlinePolicies", {}),
                )
            )
        else:
            # Users already carry a list shape; deep-copy to detach from input.
            principal["UserPolicyList"] = copy.deepcopy(
                principal.pop("InlinePolicies", [])
            )
        principal.pop("InlinePolicies", None)
        return principal
def _get_iam_user_sync(account_id, user_name, conn) -> Optional[Dict[str, Any]]:
client = boto3_cached_conn(
"iam",
account_number=account_id,
assume_role=config.get("policies.role_name"),
read_only=True,
retry_max_attempts=2,
client_kwargs=config.get("boto3.client_kwargs", {}),
)
user = client.get_user(UserName=user_name)["User"]
user["ManagedPolicies"] = get_user_managed_policies(
{"UserName": user_name}, **conn
)
user["InlinePolicies"] = get_user_inline_policies(
{"UserName": user_name}, **conn
)
user["Tags"] = client.list_user_tags(UserName=user_name)
user["Groups"] = client.list_groups_for_user(UserName=user_name)
return user
async def _get_iam_user_async(
account_id, user_name, conn
) -> Optional[Dict[str, Any]]:
tasks = []
client = await sync_to_async(boto3_cached_conn)(
"iam",
account_number=account_id,
assume_role=config.get("policies.role_name"),
read_only=True,
retry_max_attempts=2,
client_kwargs=config.get("boto3.client_kwargs", {}),
)
user_details = asyncio.ensure_future(
sync_to_async(client.get_user)(UserName=user_name)
)
tasks.append(user_details)
all_tasks = [
get_user_managed_policies,
get_user_inline_policies,
]
for t in all_tasks:
tasks.append(
asyncio.ensure_future(sync_to_async(t)({"UserName": user_name}, **conn))
)
user_tag_details = asyncio.ensure_future(
sync_to_async(client.list_user_tags)(UserName=user_name)
)
tasks.append(user_tag_details)
user_group_details = asyncio.ensure_future(
sync_to_async(client.list_groups_for_user)(UserName=user_name)
)
tasks.append(user_group_details)
responses = asyncio.gather(*tasks)
async_task_result = await responses
user = async_task_result[0]["User"]
user["ManagedPolicies"] = async_task_result[1]
inline_policies = []
for name, policy in async_task_result[2].items():
inline_policies.append({"PolicyName": name, "PolicyDocument": policy})
user["InlinePolicies"] = inline_policies
user["Tags"] = async_task_result[3].get("Tags", [])
user["Groups"] = async_task_result[4].get("Groups", [])
return user
def get_iam_role_sync(account_id, role_name, conn) -> Optional[Dict[str, Any]]:
client = boto3_cached_conn(
"iam",
account_number=account_id,
assume_role=config.get("policies.role_name"),
read_only=True,
retry_max_attempts=2,
client_kwargs=config.get("boto3.client_kwargs", {}),
)
role = client.get_role(RoleName=role_name)["Role"]
role["ManagedPolicies"] = get_role_managed_policies(
{"RoleName": role_name}, **conn
)
role["InlinePolicies"] = get_role_inline_policies(
{"RoleName": role_name}, **conn
)
role["Tags"] = list_role_tags({"RoleName": role_name}, **conn)
return role
async def _get_iam_role_async(
account_id, role_name, conn
) -> Optional[Dict[str, Any]]:
tasks = []
client = await sync_to_async(boto3_cached_conn)(
"iam",
account_number=account_id,
assume_role=config.get("policies.role_name"),
read_only=True,
retry_max_attempts=2,
client_kwargs=config.get("boto3.client_kwargs", {}),
)
role_details = asyncio.ensure_future(
sync_to_async(client.get_role)(RoleName=role_name)
)
tasks.append(role_details)
all_tasks = [
get_role_managed_policies,
get_role_inline_policies,
list_role_tags,
]
for t in all_tasks:
tasks.append(
asyncio.ensure_future(sync_to_async(t)({"RoleName": role_name}, **conn))
)
responses = asyncio.gather(*tasks)
async_task_result = await responses
role = async_task_result[0]["Role"]
role["ManagedPolicies"] = async_task_result[1]
role["InlinePolicies"] = async_task_result[2]
role["Tags"] = async_task_result[3]
return role
    async def fetch_iam_user(
        self,
        account_id: str,
        user_arn: str,
        run_sync=False,
    ) -> Optional[Dict[str, Any]]:
        """Fetch the IAM User from AWS in threadpool if run_sync=False, otherwise synchronously.

        The result is reshaped in place via cloudaux_to_aws before returning.

        :param account_id: AWS account that owns the user
        :param user_arn: full ARN of the user
        :param run_sync: if True, call AWS synchronously instead of via threadpool
        :return: user dict, or None if the user does not exist
        """
        log_data: dict = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user_arn": user_arn,
            "account_id": account_id,
        }

        try:
            # IAM user name is the last path segment of the ARN.
            user_name = user_arn.split("/")[-1]
            conn = {
                "account_number": account_id,
                "assume_role": config.get("policies.role_name"),
                "region": config.region,
                "client_kwargs": config.get("boto3.client_kwargs", {}),
            }
            if run_sync:
                user = self._get_iam_user_sync(account_id, user_name, conn)
            else:
                user = await self._get_iam_user_async(account_id, user_name, conn)
        except ClientError as ce:
            if ce.response["Error"]["Code"] == "NoSuchEntity":
                # The user does not exist:
                log_data["message"] = "User does not exist in AWS."
                log.error(log_data)
                stats.count(
                    "aws.fetch_iam_user.missing_in_aws",
                    tags={"account_id": account_id, "user_arn": user_arn},
                )
                return None
            else:
                # Any other AWS error is logged, counted, and re-raised.
                log_data["message"] = f"Some other error: {ce.response}"
                log.error(log_data)
                stats.count(
                    "aws.fetch_iam_user.aws_connection_problem",
                    tags={"account_id": account_id, "user_arn": user_arn},
                )
                raise
        # Reshape to the get_account_authorization_details format (mutates user).
        await self.cloudaux_to_aws(user)
        return user
async def fetch_iam_role(
self,
account_id: str,
role_arn: str,
force_refresh: bool = False,
run_sync=False,
) -> Optional[Dict[str, Any]]:
"""Fetch the IAM Role template from Redis and/or Dynamo.
:param account_id:
:param role_arn:
:return:
"""
log_data: dict = {
"function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
"role_arn": role_arn,
"account_id": account_id,
"force_refresh": force_refresh,
}
result: dict = {}
if not force_refresh:
# First check redis:
result: str = await sync_to_async(self._fetch_role_from_redis)(role_arn)
if result:
result: dict = json.loads(result)
# If this item is less than an hour old, then return it from Redis.
if result["ttl"] > int(
(datetime.utcnow() - timedelta(hours=1)).timestamp()
):
log_data["message"] = "Role not in Redis -- fetching from DDB."
log.debug(log_data)
stats.count(
"aws.fetch_iam_role.in_redis",
tags={"account_id": account_id, "role_arn": role_arn},
)
result["policy"] = json.loads(result["policy"])
return result
# If not in Redis or it's older than an hour, proceed to DynamoDB:
result = await sync_to_async(self.dynamo.fetch_iam_role)(
role_arn, account_id
)
# If it's NOT in dynamo, or if we're forcing a refresh, we need to reach out to AWS and fetch:
if force_refresh or not result.get("Item"):
if force_refresh:
log_data["message"] = "Force refresh is enabled. Going out to AWS."
stats.count(
"aws.fetch_iam_role.force_refresh",
tags={"account_id": account_id, "role_arn": role_arn},
)
else:
log_data["message"] = "Role is missing in DDB. Going out to AWS."
stats.count(
"aws.fetch_iam_role.missing_dynamo",
tags={"account_id": account_id, "role_arn": role_arn},
)
log.debug(log_data)
try:
role_name = role_arn.split("/")[-1]
conn = {
"account_number": account_id,
"assume_role": config.get("policies.role_name"),
"region": config.region,
"client_kwargs": config.get("boto3.client_kwargs", {}),
}
if run_sync:
role = self.get_iam_role_sync(account_id, role_name, conn)
else:
role = await self._get_iam_role_async(account_id, role_name, conn)
except ClientError as ce:
if ce.response["Error"]["Code"] == "NoSuchEntity":
# The role does not exist:
log_data["message"] = "Role does not exist in AWS."
log.error(log_data)
stats.count(
"aws.fetch_iam_role.missing_in_aws",
tags={"account_id": account_id, "role_arn": role_arn},
)
return None
else:
log_data["message"] = f"Some other error: {ce.response}"
log.error(log_data)
stats.count(
"aws.fetch_iam_role.aws_connection_problem",
tags={"account_id": account_id, "role_arn": role_arn},
)
raise
# Format the role for DynamoDB and Redis:
await self.cloudaux_to_aws(role)
result = {
"arn": role.get("Arn"),
"name": role.pop("RoleName"),
"resourceId": role.pop("RoleId"),
"accountId": account_id,
"ttl": int((datetime.utcnow() + timedelta(hours=36)).timestamp()),
"policy": self.dynamo.convert_iam_resource_to_json(role),
"permissions_boundary": role.get("PermissionsBoundary", {}),
"templated": self.red.hget(
config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
role.get("Arn").lower(),
),
}
# Sync with DDB:
await sync_to_async(self.dynamo.sync_iam_role_for_account)(result)
log_data["message"] = "Role fetched from AWS, and synced with DDB."
stats.count(
"aws.fetch_iam_role.fetched_from_aws",
tags={"account_id": account_id, "role_arn": role_arn},
)
else:
log_data["message"] = "Role fetched from DDB."
stats.count(
"aws.fetch_iam_role.in_dynamo",
tags={"account_id": account_id, "role_arn": role_arn},
)
# Fix the TTL:
result["Item"]["ttl"] = int(result["Item"]["ttl"])
result = result["Item"]
# Update the redis cache:
stats.count(
"aws.fetch_iam_role.in_dynamo",
tags={"account_id": account_id, "role_arn": role_arn},
)
await sync_to_async(self._add_role_to_redis)(result)
log_data["message"] += " Updated Redis."
log.debug(log_data)
result["policy"] = json.loads(result["policy"])
return result
    async def call_user_lambda(
        self, role: str, user_email: str, account_id: str, user_role_name: str = "user"
    ) -> str:
        """Call out to the lambda function to provision the per-user role for the account.

        Not implemented in the OSS distribution; deployments that support
        per-user roles must override this method.
        """
        raise NotImplementedError("This feature isn't enabled in ConsoleMe OSS")
wait=tenacity.wait_fixed(2),
stop=tenacity.stop_after_attempt(10),
retry=tenacity.retry_if_exception_type(UserRoleNotAssumableYet),
)
async def get_credentials(
self,
user: str,
role: str,
enforce_ip_restrictions: bool = True,
user_role: bool = False,
account_id: str = None,
custom_ip_restrictions: list = None,
) -> dict:
"""Get Credentials will return the list of temporary credentials from AWS."""
log_data = {
"function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
"user": user,
"role": role,
"enforce_ip_restrictions": enforce_ip_restrictions,
"custom_ip_restrictions": custom_ip_restrictions,
"message": "Generating credentials",
}
session = boto3.Session()
client = session.client(
"sts",
region_name=config.region,
endpoint_url=config.get(
"aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
).format(region=config.region),
)
ip_restrictions = config.get("aws.ip_restrictions")
stats.count("aws.get_credentials", tags={"role": role, "user": user})
# If this is a dynamic request, then we need to fetch the role details, call out to the lambda
# wait for it to complete, assume the role, and then return the assumed credentials back.
if user_role:
stats.count("aws.call_user_lambda", tags={"role": role, "user": user})
try:
role = await self.call_user_lambda(role, user, account_id)
except Exception as e:
raise e
await raise_if_background_check_required_and_no_background_check(role, user)
try:
if enforce_ip_restrictions and ip_restrictions:
policy = json.dumps(
dict(
Version="2012-10-17",
Statement=[
dict(
Effect="Deny",
Action="*",
Resource="*",
Condition=dict(
NotIpAddress={"aws:SourceIP": ip_restrictions},
Null={
"aws:ViaAWSService": "true",
"aws:PrincipalTag/AWSServiceTrust": "true",
},
StringNotLike={
"aws:PrincipalArn": [
"arn:aws:iam::*:role/aws:*"
]
},
),
),
dict(Effect="Allow", Action="*", Resource="*"),
],
)
)
credentials = await sync_to_async(client.assume_role)(
RoleArn=role,
RoleSessionName=user.lower(),
Policy=policy,
DurationSeconds=config.get("aws.session_duration", 3600),
)
credentials["Credentials"]["Expiration"] = int(
credentials["Credentials"]["Expiration"].timestamp()
)
log.debug(
{
**log_data,
"access_key_id": credentials["Credentials"]["AccessKeyId"],
}
)
return credentials
if custom_ip_restrictions:
policy = json.dumps(
dict(
Version="2012-10-17",
Statement=[
dict(
Effect="Deny",
Action="*",
Resource="*",
Condition=dict(
NotIpAddress={
"aws:SourceIP": custom_ip_restrictions
},
Null={
"aws:ViaAWSService": "true",
"aws:PrincipalTag/AWSServiceTrust": "true",
},
StringNotLike={
"aws:PrincipalArn": [
"arn:aws:iam::*:role/aws:*"
]
},
),
),
dict(Effect="Allow", Action="*", Resource="*"),
],
)
)
credentials = await sync_to_async(client.assume_role)(
RoleArn=role,
RoleSessionName=user.lower(),
Policy=policy,
DurationSeconds=config.get("aws.session_duration", 3600),
)
credentials["Credentials"]["Expiration"] = int(
credentials["Credentials"]["Expiration"].timestamp()
)
log.debug(
{
**log_data,
"access_key_id": credentials["Credentials"]["AccessKeyId"],
}
)
return credentials
credentials = await sync_to_async(client.assume_role)(
RoleArn=role,
RoleSessionName=user.lower(),
DurationSeconds=config.get("aws.session_duration", 3600),
)
credentials["Credentials"]["Expiration"] = int(
credentials["Credentials"]["Expiration"].timestamp()
)
log.debug(
{**log_data, "access_key_id": credentials["Credentials"]["AccessKeyId"]}
)
return credentials
except ClientError as e:
# TODO(ccastrapel): Determine if user role was really just created, or if this is an older role.
if user_role:
raise UserRoleNotAssumableYet(e.response["Error"])
raise
    async def generate_url(
        self,
        user: str,
        role: str,
        region: str = "us-east-1",
        user_role: bool = False,
        account_id: str = None,
    ) -> str:
        """Generate URL will get temporary credentials and craft a URL with those credentials.

        Implements the AWS console federation flow: assume the role, exchange
        the credentials for a SigninToken, then build a login URL targeting
        the console in the requested region.

        :param user: acting user's email address
        :param role: role ARN to federate into
        :param region: console region to land in
        :param user_role: whether this is a dynamically provisioned per-user role
        :param account_id: account for per-user role provisioning
        :return: authenticated AWS console sign-in URL
        """
        function = (
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
        )
        log_data = {
            "function": function,
            "user": user,
            "role": role,
            "message": "Generating authenticated AWS console URL",
        }
        log.debug(log_data)
        # IP restrictions are intentionally not enforced for console sessions.
        credentials = await self.get_credentials(
            user,
            role,
            user_role=user_role,
            account_id=account_id,
            enforce_ip_restrictions=False,
        )

        credentials_d = {
            "sessionId": credentials.get("Credentials", {}).get("AccessKeyId"),
            "sessionKey": credentials.get("Credentials", {}).get("SecretAccessKey"),
            "sessionToken": credentials.get("Credentials", {}).get("SessionToken"),
        }

        # Exchange the temporary credentials for a federation SigninToken.
        req_params = {
            "Action": "getSigninToken",
            "Session": bleach.clean(json.dumps(credentials_d)),
            "DurationSeconds": config.get("aws.session_duration", 3600),
        }

        http_client = AsyncHTTPClient(force_instance=True)

        url_with_params: str = url_concat(
            config.get(
                "aws.federation_url", "https://signin.aws.amazon.com/federation"
            ),
            req_params,
        )
        r = await http_client.fetch(url_with_params, ssl_options=ssl.SSLContext())
        token = json.loads(r.body)

        # Build the console login URL carrying the SigninToken.
        login_req_params = {
            "Action": "login",
            "Issuer": config.get("aws.issuer"),
            "Destination": (
                "{}".format(
                    config.get(
                        "aws.console_url", "https://{}.console.aws.amazon.com"
                    ).format(region)
                )
            ),
            "SigninToken": bleach.clean(token.get("SigninToken")),
            "SessionDuration": config.get("aws.session_duration", 3600),
        }

        # requests is used only to assemble the query string; nothing is sent.
        r2 = requests_sync.Request(
            "GET",
            config.get(
                "aws.federation_url", "https://signin.aws.amazon.com/federation"
            ),
            params=login_req_params,
        )
        url = r2.prepare().url
        return url
    async def sns_publisher_group_requests(
        self, user, group, justification, request_id, bg_check_passed
    ):
        """Publish a group-access request to SNS; not implemented in OSS."""
        raise NotImplementedError()
    async def sns_publish_policy_requests(self, request, request_uri):
        """Publish a policy request to SNS; not implemented in OSS."""
        raise NotImplementedError()
    async def send_communications_policy_change_request(self, request, send_sns=False):
        """
        Optionally send a notification when there's a new policy change request.

        No-op in the OSS distribution: logs a warning and returns.

        :param request: the policy change request
        :param send_sns: whether to also publish to SNS (unused here)
        :return:
        """
        log_data: dict = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Function is not configured.",
        }
        log.warning(log_data)
        return
    async def send_communications_new_policy_request(
        self, extended_request, admin_approved, approval_probe_approved
    ):
        """
        Optionally send a notification when there's a new policy change request.

        Delegates to the shared v2 notification helper; the approval flags are
        accepted for interface compatibility but not used here.

        :param approval_probe_approved: whether an approval probe auto-approved
        :param admin_approved: whether an admin approved
        :param extended_request: the extended policy request
        :return:
        """
        await send_communications_policy_change_request_v2(extended_request)
        return
def handle_detected_role(role):
pass
    async def should_auto_approve_policy_v2(self, extended_request, user, user_groups):
        """Decide whether a v2 policy request can be auto-approved.

        The OSS distribution never auto-approves; always returns
        ``{"approved": False}``.
        """
        return {"approved": False}
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init()` to solve the following problem:
Initialize the AWS plugin.
Here is the function:
def init():
    """Initialize the AWS plugin and return a plugin instance."""
    return Aws()
162,193 | import sys
import time
from typing import List
import ujson as json
from consoleme.config import config
from consoleme.exceptions.exceptions import (
InvalidCertificateException,
MissingConfigurationValue,
NoUserException,
)
from consoleme.lib.generic import str2bool
from consoleme.lib.plugins import get_plugin_by_name
class Auth:
"""The Auth class authenticates the user and provides the user's groups."""
def __init__(self, headers: dict = None):
"""Initialize the auth class."""
self.headers = headers
async def get_user(self, headers: dict = None):
"""Get the user identity."""
if config.get("auth.get_user_by_header"):
return await self.get_user_by_header(headers)
else:
raise Exception("auth.get_user not configured")
async def get_user_by_header(self, headers: dict):
"""Get the user identity via plaintext header."""
if not headers:
raise Exception(
"auth.get_user_by_header enabled, but no headers were passed in"
)
user_header_name = config.get("auth.user_header_name")
if not user_header_name:
raise Exception(
"auth.user_header_name configuration not set, but auth.get_user_by_header is enabled."
)
user = headers.get(user_header_name)
if not user:
raise NoUserException("User header '{}' is empty.".format(user_header_name))
return user
async def get_groups(
self, user: str, headers=None, get_header_groups=False, only_direct=True
):
"""Get the user's groups."""
groups_to_add_for_all_users = config.get("auth.groups_to_add_for_all_users", [])
groups = []
if get_header_groups or config.get("auth.get_groups_by_header"):
header_groups = await self.get_groups_by_header(headers)
if header_groups:
groups.extend(header_groups)
elif config.get("auth.get_groups_from_google"):
from consoleme.lib.google import get_group_memberships
google_groups = await get_group_memberships(user)
if google_groups:
groups.extend(google_groups)
if groups_to_add_for_all_users:
# Optionally consider ConsoleMe users a member of these additional groups
groups.extend(groups_to_add_for_all_users)
if not groups:
log.error(
{
"message": "auth.get_groups not configured properly or no groups were obtained."
},
exc_info=True,
)
if config.get("auth.force_groups_lowercase", False):
groups = [x.lower() for x in groups]
return list(set(groups))
async def get_groups_by_header(self, headers: dict):
"""Get the user's groups by plaintext header."""
groups = []
if not headers:
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "No headers present.",
}
log.debug(log_data, exc_info=True)
return groups
groups_header_name = config.get("auth.groups_header_name", None)
if not groups_header_name:
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Group header name not configured.",
}
log.debug(log_data, exc_info=True)
raise Exception(
"auth.groups_header_name configuration not set, but auth.get_groups_by_header is enabled."
)
if headers.get(groups_header_name):
groups = headers[groups_header_name].split(",")
return groups
async def extract_certificate(self, headers: dict):
    """Extract mTLS certificate details from request headers.

    Must be implemented by an internal plugin; the default plugin raises.
    """
    raise NotImplementedError()

async def extract_user_from_certificate(self, headers: dict):
    """Return the certificate details identifying the requesting user."""
    return await self.extract_certificate(headers)

async def get_cert_age_seconds(self, headers: dict):
    """Retrieve age of mtls certificate."""
    current_time = int(time.time())
    cert = await self.extract_certificate(headers)
    # Assumes the cert dict's "notBefore" field is an epoch timestamp --
    # TODO confirm against the internal extract_certificate implementation.
    return current_time - int(cert.get("notBefore"))
async def validate_certificate(self, headers: dict):
    """Validate that the request carries the headers required for mTLS auth.

    Each configured entry in ``cli_auth.required_headers`` is a dict of
    header-name -> expected-value pairs; every pair must match.

    :param headers: request headers to validate
    :return: True when every required header matches
    :raises MissingConfigurationValue: if ``cli_auth.required_headers`` is unset
    :raises InvalidCertificateException: if any required header mismatches
    """
    cli_auth_required_headers = config.get("cli_auth.required_headers")
    if not cli_auth_required_headers:
        # Fixed grammar in the error message ("must specified" -> "must specify").
        raise MissingConfigurationValue(
            "You must specify the header key and expected value in order to validate a certificate for mutual "
            "TLS authentication. Refer to the `cli_auth.required_headers` configuration"
        )
    for header in cli_auth_required_headers:
        for k, v in header.items():
            if headers.get(k) != v:
                stats.count("auth.validate_certificate.error")
                # Same message is logged and raised so both channels agree.
                error = "Header {} is supposed to equal {}, but it equals {}.".format(
                    k, v, headers.get(k)
                )
                log.error(
                    {
                        "function": "auth.validate_certificate",
                        "message": error,
                    }
                )
                raise InvalidCertificateException(error)
    return True
async def is_user_contractor(self, user):
    """Default plugin: no user is ever classified as a contractor."""
    return False
async def validate_and_return_api_caller(self, headers: dict):
    """Authenticate an mTLS API caller and return its certificate identity.

    :param headers: request headers to validate
    :return: the certificate "name" of the caller
    :raises MissingConfigurationValue: if ``cli_auth.required_headers`` is unset
    :raises Exception: if a required header mismatches, or the caller is not
        listed in ``api_auth.valid_entities``
    """
    cli_auth_required_headers = config.get("cli_auth.required_headers")
    if not cli_auth_required_headers:
        # Fixed grammar in the error message ("must specified" -> "must specify").
        raise MissingConfigurationValue(
            "You must specify the header key and expected value in order to validate a certificate for mutual "
            "TLS authentication. Refer to the `cli_auth.required_headers` configuration"
        )
    for header in cli_auth_required_headers:
        for k, v in header.items():
            if headers.get(k) != v:
                raise Exception(
                    f"Header {k} is supposed to equal {v}, but it equals {headers.get(k)}."
                )
    cert = await self.extract_user_from_certificate(headers)
    user = cert.get("name")
    if not user or user not in config.get("api_auth.valid_entities", []):
        raise Exception("Not authorized to call this API with that certificate.")
    return user
async def get_user_info(self, user: str, object: bool = False):
    """Retrieve details about a user from an authoritative source.

    The default plugin has no directory backend, so it synthesizes a minimal
    profile from the username alone.

    :param user: username / email address of the user
    :param object: unused by the default plugin; kept for interface compatibility
    :return: dict with domain, userName, name fields, and primaryEmail
    """
    empty_name = {
        "givenName": "",
        "familyName": "",
        "fullName": "",
    }
    return {
        "domain": "",
        "userName": user,
        "name": empty_name,
        "primaryEmail": user,
    }
async def get_group_info(self, group, members=True):
    """Return details about *group*; implement in an internal plugin."""
    raise NotImplementedError()

async def put_group_attribute(self, group, attribute_name, attribute_value):
    """Set a single attribute on *group*; implement in an internal plugin."""
    raise NotImplementedError()

async def put_group_attributes(self, group, attributes):
    """Set multiple attributes on *group*; implement in an internal plugin."""
    raise NotImplementedError()

async def is_user_in_group(self, user, group, only_direct=True):
    """Return whether *user* belongs to *group*; implement in an internal plugin."""
    raise NotImplementedError()

async def is_requestable(self, group):
    """Return whether *group* can be requested; implement in an internal plugin."""
    raise NotImplementedError()

async def does_user_exist(self, user):
    """Return whether *user* exists; implement in an internal plugin."""
    raise NotImplementedError()

async def get_group_attribute(self, group, attribute_name):
    """Return one attribute of *group*; implement in an internal plugin."""
    raise NotImplementedError()

async def get_secondary_approvers(self, group):
    """Return a list of secondary approvers for a group."""
    raise NotImplementedError()

async def get_groups_with_attribute_name_value(
    self, attribute_name, attribute_value
):
    """Return groups whose attribute matches; implement in an internal plugin."""
    raise NotImplementedError()

async def get_users_with_attribute_name_value(
    self, attribute_name, attribute_value
):
    """Return users whose attribute matches; implement in an internal plugin."""
    raise NotImplementedError()

async def is_group_requestable(self, group):
    """Return whether *group* is requestable; implement in an internal plugin."""
    raise NotImplementedError()

async def get_all_requestable_groups(self):
    """Return every requestable group; implement in an internal plugin."""
    raise NotImplementedError()
async def get_group_memberships(self, user, scopes=None, only_direct=True) -> list:
    """Return group memberships for *user*; the default plugin returns none.

    :param user: username to look up
    :param scopes: optional list of scopes restricting the lookup (unused here).
        Defaults to an empty list; using ``None`` as the sentinel avoids the
        shared mutable-default-argument pitfall of the previous ``scopes=[]``.
    :param only_direct: whether to include only direct memberships (unused here)
    :return: list of group names (always empty in the default plugin)
    """
    if scopes is None:
        scopes = []
    return []
async def get_group_members(self, group):
    """Return the members of *group*; implement in an internal plugin."""
    raise NotImplementedError()

async def get_user_attribute(self, user, attribute_name):
    """Return a single attribute object for *user*; implement in an internal plugin."""
    raise NotImplementedError()
async def get_or_create_user_role_name(self, user):
    """Return the stored user-role name for *user*, generating one if absent."""
    existing = await self.get_user_attribute(user, "user_role_name")
    if existing:
        return existing.value
    return await self.generate_and_store_user_role_name(user)
async def generate_and_store_user_role_name(self, user):
    """Derive a user-role name of the form ``<username>-<domain>``.

    Assumes *user* is an email address with exactly one ``@``; anything else
    raises ValueError from the unpacking, matching the original behavior.
    """
    (username, domain) = user.split("@")
    return "{}-{}".format(username, domain)
# Expose the supporting model classes as attributes of the Auth plugin class
# so consumers can reference Auth.Group, Auth.User, and Auth.ExtendedAttribute.
Auth.Group = Group
Auth.User = User
Auth.ExtendedAttribute = ExtendedAttribute
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init()` to solve the following problem:
Initialize the auth plugin.
Here is the function:
def init():
    """Initialize the auth plugin."""
    # The return line previously had dataset residue ("| Initialize the auth
    # plugin.") fused onto it, which is a syntax error; removed.
    return Auth()
import json
from datetime import timedelta
from asgiref.sync import async_to_sync
from celery import Celery
from consoleme.config import config
from consoleme.lib.json_encoder import SetEncoder
from consoleme.lib.redis import RedisHandler
from consoleme.lib.timeout import Timeout
# Synchronous Redis client built by unwrapping the async RedisHandler.
red = async_to_sync(RedisHandler().redis)()
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    # NOTE(review): `app` (the Celery instance) is not defined in this snippet;
    # presumably created earlier in the module -- confirm.
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
class SetEncoder(json.JSONEncoder):
    """JSON encoder handling sets, Decimals, datetimes, and Exceptions.

    NOTE(review): PrettyOrderedSet, Decimal, and datetime are not among this
    snippet's visible imports -- presumably imported elsewhere in the file.
    """

    def default(self, obj):
        # Sets (including ordered-set variants) serialize as lists.
        if isinstance(obj, (frozenset, set, PrettyOrderedSet)):
            return list(obj)
        if isinstance(obj, Decimal):
            # Decimal is not natively JSON-serializable; degrade to float.
            return float(obj)
        if isinstance(obj, datetime):
            # Serialize datetimes as POSIX timestamps.
            return obj.timestamp()
        if isinstance(obj, Exception):
            return str(obj)
        return json.JSONEncoder.default(self, obj)
The provided code snippet includes necessary dependencies for implementing the `cache_application_information` function. Write a Python function `def cache_application_information()` to solve the following problem:
This task retrieves application information from configuration. You may want to override this function to utilize your organization's CI/CD pipeline for this information.
Here is the function:
def cache_application_information():
    """
    This task retrieves application information from configuration. You may want to override this function to
    utilize your organization's CI/CD pipeline for this information.

    :return: None; the mapping is written to Redis.
    """
    # Map each configured application name to the roles it is allowed to use.
    apps_to_roles = {}
    for app_name, app_config in config.get("application_settings", {}).items():
        apps_to_roles[app_name] = app_config.get("roles", [])
    # The final line previously had dataset residue ("| This task retrieves…")
    # fused onto the closing paren, which is a syntax error; removed.
    red.set(
        config.get("celery.apps_to_roles.redis_key", "APPS_TO_ROLES"),
        json.dumps(apps_to_roles, cls=SetEncoder),
    )
import json
from datetime import timedelta
from asgiref.sync import async_to_sync
from celery import Celery
from consoleme.config import config
from consoleme.lib.json_encoder import SetEncoder
from consoleme.lib.redis import RedisHandler
from consoleme.lib.timeout import Timeout
The provided code snippet includes necessary dependencies for implementing the `task_1` function. Write a Python function `def task_1()` to solve the following problem:
This task demonstrates how you can implement your own internal celery tasks to run on schedule or on demand.
Here is the function:
def task_1():
    """
    This task demonstrates how you can implement your own internal celery tasks to run on schedule or on demand.

    :return: None
    """
    # The `pass` line previously had dataset residue ("| This task
    # demonstrates…") fused onto it, which is a syntax error; removed.
    pass
import json
from datetime import timedelta
from asgiref.sync import async_to_sync
from celery import Celery
from consoleme.config import config
from consoleme.lib.json_encoder import SetEncoder
from consoleme.lib.redis import RedisHandler
from consoleme.lib.timeout import Timeout
# Celery beat schedule for the default internal plugin tasks.
# NOTE(review): `schedule` is referenced but not defined in this snippet --
# presumably a timedelta/crontab defined earlier in the module; confirm.
internal_schedule = {
    "task1": {
        "task": "consoleme.default_plugins.plugins.celery_tasks.celery_tasks.task_1",
        "options": {"expires": 4000},
        "schedule": schedule,
    },
    "cache_application_information": {
        "task": "consoleme.default_plugins.plugins.celery_tasks.celery_tasks.cache_application_information",
        "options": {"expires": 4000},
        "schedule": schedule,
    },
}
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init()` to solve the following problem:
Initialize the Celery Tasks plugin.
Here is the function:
def init():
    """Initialize the Celery Tasks plugin."""
    # The return line previously had dataset residue ("| Initialize the Celery
    # Tasks plugin.") fused onto it, which is a syntax error; removed.
    return internal_schedule
import hashlib
import os
import urllib.parse
from typing import List
from consoleme.lib.aws_secret_manager import get_aws_secret
def split_s3_path(s3_path):
    """Split an ``s3://bucket/key...`` URI into a (bucket, key) tuple.

    :param s3_path: S3 URI such as ``s3://my-bucket/some/key.yaml``
    :return: tuple of (bucket, key); key is "" when the URI has no key part
    """
    # The return line previously had dataset residue ("| null") fused onto it,
    # which is a syntax error; removed. Also renamed b/k for readability.
    path_parts = s3_path.replace("s3://", "").split("/")
    bucket = path_parts.pop(0)
    key = "/".join(path_parts)
    return bucket, key
import hashlib
import os
import urllib.parse
from typing import List
from consoleme.lib.aws_secret_manager import get_aws_secret
class Config:
    """Default configuration-retrieval plugin."""

    # NOTE(review): defined without `self` (and without @staticmethod) -- as
    # written it is only safe to call as Config.get_config_location(); confirm
    # whether @staticmethod was intended in the original source.
    def get_config_location():
        """Locate ConsoleMe's YAML configuration and return its filesystem path.

        Resolution order:
          1. the CONFIG_LOCATION env var -- s3:// URIs and
             AWS_SECRETS_MANAGER:<name> values are downloaded to
             ./consoleme.yaml first; plain paths are returned as-is;
          2. a list of well-known local candidate paths.

        :return: path to a configuration file
        :raises Exception: if no configuration can be found
        """
        config_location = os.environ.get("CONFIG_LOCATION")
        default_save_location = f"{os.curdir}/consoleme.yaml"
        if config_location:
            if config_location.startswith("s3://"):
                # Deferred import so boto3 is only required when config lives in S3.
                import boto3

                client = boto3.client("s3")
                bucket, key = split_s3_path(config_location)
                obj = client.get_object(Bucket=bucket, Key=key)
                config_data = obj["Body"].read()
                with open(default_save_location, "w") as f:
                    f.write(config_data.decode())
            elif config_location.startswith("AWS_SECRETS_MANAGER:"):
                # Everything after the prefix is the secret name.
                secret_name = "".join(config_location.split("AWS_SECRETS_MANAGER:")[1:])
                config_data = get_aws_secret(
                    secret_name, os.environ.get("EC2_REGION", "us-east-1")
                )
                with open(default_save_location, "w") as f:
                    f.write(config_data)
            else:
                return config_location
        config_locations: List[str] = [
            default_save_location,
            os.path.expanduser("~/.config/consoleme/config.yaml"),
            "/etc/consoleme/config/config.yaml",
            "example_config/example_config_development.yaml",
        ]
        # First existing candidate wins.
        for loc in config_locations:
            if os.path.exists(loc):
                return loc
        raise Exception(
            "Unable to find ConsoleMe's configuration. It either doesn't exist, or "
            "ConsoleMe doesn't have permission to access it. Please set the CONFIG_LOCATION environment variable "
            "to the path of the configuration, or to an s3 location with your configuration"
            "(i.e: s3://YOUR_BUCKET/path/to/config.yaml). Otherwise, ConsoleMe will automatically search for the"
            f"configuration in these locations: {', '.join(config_locations)}"
        )
def internal_functions(cfg=None):
    """Hook for company-internal configuration logic; a no-op by default."""
    cfg = cfg or {}
def is_contractor(user):
    """Default plugin: no user is ever classified as a contractor."""
    return False
def get_employee_photo_url(user):
    """Return a photo URL for *user*: a configured URL template, else Gravatar."""
    from consoleme.config import config

    # Try to get a custom employee photo url by formatting a string provided through configuration
    templated_url = config.get(
        "get_employee_photo_url.custom_employee_url", ""
    ).format(user=user)
    if templated_url:
        return templated_url
    # Fall back to Gravatar. md5 is what the Gravatar API requires; it is not
    # used here for any security purpose.
    digest = hashlib.md5(user.lower().encode("utf-8")).hexdigest()  # nosec
    query = urllib.parse.urlencode({"d": "mp"})
    return "https://www.gravatar.com/avatar/" + digest + "?" + query
def get_employee_info_url(user):
    """Default plugin: no employee-info page exists, so always return None."""
    return None
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init()` to solve the following problem:
Initialize the Config plugin.
Here is the function:
def init():
    """Initialize the Config plugin."""
    # The return line previously had dataset residue ("| Initialize the Config
    # plugin.") fused onto it, which is a syntax error; removed.
    return Config()
from consoleme.default_plugins.plugins.internal_routes.handlers.internal_demo_route import (
InternalDemoRouteHandler,
)
class InternalRoutes:
    """Default internal-routes plugin: contributes ConsoleMe-internal handlers."""

    # Tornado UI modules contributed by this plugin (none by default).
    ui_modules = {}

    def get_internal_routes(self, make_jwt_validator, jwt_validator=None):
        """Return (url pattern, handler) tuples for the internal routes.

        :param make_jwt_validator: JWT-validator factory (unused by the default plugin)
        :param jwt_validator: optional pre-built validator (unused by the default plugin)
        :return: list of Tornado route tuples
        """
        # The below code can be used with your ConsoleMe Internal package name to generate a path to your internal
        # JavaScript and HTML files, if you wish to render these for the handler.
        # path = pkg_resources.resource_filename("consoleme_internal", "templates")
        internal_routes = [
            (r"/internal_demo_route/?", InternalDemoRouteHandler),
            # An example of serving static internal content is below, which would make use of the template path variable
            # You defined above.
            # (
            #     r"/static_internal/(.*)",
            #     NoCacheStaticFileHandler,
            #     dict(path=os.path.join(path, "static")),
            # ),
        ]
        return internal_routes
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init()` to solve the following problem:
Initialize the internal routes plugin.
Here is the function:
def init():
    """Initialize the internal routes plugin."""
    # The return line previously had dataset residue ("| Initialize the
    # internal routes plugin.") fused onto it, which is a syntax error; removed.
    return InternalRoutes()
import sys
import time
from typing import List
import sentry_sdk
import simplejson as json
from redis.exceptions import ConnectionError
from consoleme.config import config
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme.lib.cloud_credential_authorization_mapping import (
CredentialAuthorizationMapping,
)
from consoleme.lib.crypto import Crypto
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler
class GroupMapping:
    """Group mapping handles mapping groups to eligible roles and accounts."""

    def __init__(self):
        # Stateless by default. NOTE(review): the cache methods below read
        # `self.red`, which is never initialized here -- confirm it is set
        # elsewhere before those methods run.
        pass

    async def get_eligible_roles(
        self, username: str, groups: list, user_role: str, console_only: bool, **kwargs
    ) -> list:
        """Get eligible roles for user."""
        roles: list = []
        # Legacy cruft, we should rename the parameter here.
        include_cli: bool = not console_only
        roles.extend(
            await credential_authz_mapping.determine_users_authorized_roles(
                username, groups, include_cli
            )
        )
        # Deduplicate; ordering of the result is not guaranteed.
        return list(set(roles))
async def filter_eligible_roles(self, query: str, obj: object) -> List:
    """Filter ``obj.eligible_roles`` by *query*.

    A case-insensitive exact match returns just that role; otherwise every role
    containing *query* as a substring is returned (deduplicated, unordered).

    Fix: the signature was missing ``self`` -- the caller in this file invokes
    ``group_mapping.filter_eligible_roles(requested_role, self)`` as an
    instance method with two arguments, so the previous two-parameter
    signature misbound the arguments.

    :param query: role ARN or fragment to search for
    :param obj: any object exposing an ``eligible_roles`` list
    :return: list of matching role ARNs
    """
    selected_roles: List = []
    for r in obj.eligible_roles:
        if query.lower() == r.lower():
            # Exact match. Only return the specific role
            return [r]
        if query.lower() in r.lower():
            selected_roles.append(r)
    return list(set(selected_roles))
async def set_recent_user(self, user):
    """Record *user* as recently seen; a no-op in the default plugin."""
async def set_console_roles_in_cache(
    self,
    user,
    roles,
    # NOTE: this default is evaluated once at import time from config.
    expiration=config.get("group_mapping_config.role_cache_expiration", 21600),
):
    """Set roles in cache with a nominal expiration.

    Stores a signed JSON blob of {user, roles, expiration} under per-user
    Redis keys; the signature is kept in a separate key.
    """
    stats.count("set_console_roles_in_cache")
    # NOTE(review): assumes `self.red` already exists as an attribute;
    # __init__ does not define it -- confirm it is set elsewhere.
    if not self.red:
        self.red = await RedisHandler().redis()
    # Convert the relative TTL into an absolute epoch timestamp for the blob.
    expiration = int(time.time()) + expiration
    role_blob = json.dumps({"user": user, "roles": roles, "expiration": expiration})
    crypto = Crypto()
    sig = crypto.sign(role_blob)
    key = config.get(
        "group_mapping_config.role_cache_redis_key", "ROLE_CACHE_{}"
    ).format(user)
    sig_key = config.get(
        "group_mapping_config.role_cache_redis_sig_key", "ROLE_CACHE_SIG_{}"
    ).format(user)
    try:
        # NOTE(review): `expiration` here is an absolute epoch time, but Redis
        # SETEX expects a TTL in seconds -- confirm this is intended.
        self.red.setex(key, expiration, role_blob)
        self.red.setex(sig_key, expiration, sig)
    except ConnectionError:
        # Best-effort cache write: log and continue if Redis is unavailable.
        log.error("Error connecting to Redis.", exc_info=True)
async def get_roles_from_cache(self, user):
    """Get roles from cache.

    Fails closed: returns [] when the cache entry is missing, unsigned,
    expired, or was stored for a different user.
    """
    stats.count("get_roles_from_cache")
    key = config.get(
        "group_mapping_config.role_cache_redis_key", "ROLE_CACHE_{}"
    ).format(user)
    sig_key = config.get(
        "group_mapping_config.role_cache_redis_sig_key", "ROLE_CACHE_SIG_{}"
    ).format(user)
    # NOTE(review): assumes `self.red` already exists as an attribute;
    # __init__ does not define it -- confirm it is set elsewhere.
    if not self.red:
        self.red = await RedisHandler().redis()
    role_r = self.red.get(key)
    if not role_r:
        return []
    role_sig = self.red.get(sig_key)
    # NOTE(review): only the *presence* of the signature is checked here; it
    # is not cryptographically verified against the blob -- confirm intent.
    if not role_sig:
        stats.count("get_roles_from_cache.no_role_sig")
        log.error("Role data is in redis, but no signature is present.")
        return []
    role_blob = json.loads(role_r)
    # Reject entries past their embedded expiration timestamp.
    if int(time.time()) > role_blob.get("expiration", 0):
        stats.count("get_roles_from_cache.role_cache_expired")
        log.error("Role cache for {} has expired.".format(user))
        return []
    # Reject entries cached for a different user.
    if role_blob.get("user") != user:
        stats.count("get_roles_from_cache.role_cache_user_invalid")
        log.error(
            "Role cache user mismatch. Cache has: {}. User requested is {}".format(
                role_blob.get("user"), user
            )
        )
        return []
    return role_blob.get("roles")
async def generate_credential_authorization_mapping(self, authorization_mapping):
    """Hook for company-specific mapping mutation; identity in the default plugin."""
    # Override this with company-specific logic
    return authorization_mapping
async def get_eligible_accounts(self, role_arns):
    """Get eligible accounts for user.

    :param role_arns: iterable of role ARNs the user may assume
    :return: dict of account_id -> friendly account name
    """
    stats.count("get_eligible_accounts")
    account_ids = {}
    friendly_names = await get_account_id_to_name_mapping()
    for r in role_arns:
        try:
            # The account id is the fifth colon-delimited field of an ARN.
            account_id = r.split(":")[4]
            account_friendlyname = friendly_names.get(account_id, "")
            # The mapping may hold either a list of names or a single string.
            if account_friendlyname and isinstance(account_friendlyname, list):
                account_ids[account_id] = account_friendlyname[0]
            elif account_friendlyname and isinstance(account_friendlyname, str):
                account_ids[account_id] = account_friendlyname
        except Exception as e:
            # One malformed ARN shouldn't abort the listing; log and continue.
            log.error(
                {
                    "function": f"{__name__}.{sys._getframe().f_code.co_name}",
                    "message": "Unable to parse role ARN",
                    "role": r,
                    "error": str(e),
                }
            )
            sentry_sdk.capture_exception()
    return account_ids
async def get_account_mappings(self) -> dict:
    """Get a dictionary with all of the account mappings (friendly names -> ID and ID -> names)."""
    return {}

async def get_secondary_approvers(self, group, return_default=False):
    """Return the configured default approver(s), regardless of *group*."""
    return config.get("access_requests.default_approver")
def get_account_names_to_ids(self, force_refresh: bool = False) -> dict:
    """Get account name to id mapping"""
    # force_refresh is unused by the default (empty) implementation.
    stats.count("get_account_names_to_ids")
    return {}
def get_account_ids_to_names(self, force_refresh: bool = False) -> dict:
    """Get account id to name mapping.

    Fix: the return annotation said ``-> str`` although the method returns a
    dict (matching its sibling ``get_account_names_to_ids``).

    :param force_refresh: unused by the default (empty) implementation
    :return: mapping of account id -> account name (empty by default)
    """
    stats.count("get_account_ids_to_names")
    return {}
async def get_max_cert_age_for_role(self, role_name: str):
    """Retrieve the maximum allowed certificate age (in days) allowable to
    retrieve a particular role. 360 will be returned if there is no max age
    defined. (The previous docstring said 30, contradicting the code.)
    """
    return 360
async def get_all_account_data(self):
    """Return per-account data; the default plugin has none."""
    return {}
async def get_all_accounts(self):
    """Return details for every known account; empty in the default plugin."""
    return {}
async def get_all_user_groups(self, user, groups):
    """Return additional (indirect) groups for *user*; none by default."""
    return []
def is_role_valid(self, entry):
    """Default plugin: every role entry is considered valid."""
    return True
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init()` to solve the following problem:
Initialize group_mapping plugin.
Here is the function:
def init():
    """Initialize group_mapping plugin."""
    # The return line previously had dataset residue ("| Initialize
    # group_mapping plugin.") fused onto it, which is a syntax error; removed.
    return GroupMapping()
import os
import pkg_resources
import requests
import sentry_sdk
import tornado.autoreload
import tornado.web
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
import consoleme
from consoleme.config import config
from consoleme.handlers.auth import AuthHandler
from consoleme.handlers.v1.credentials import GetCredentialsHandler
from consoleme.handlers.v1.headers import ApiHeaderHandler, HeaderHandler
from consoleme.handlers.v1.health import HealthHandler
from consoleme.handlers.v1.policies import (
ApiResourceTypeAheadHandler,
AutocompleteHandler,
ResourceTypeAheadHandler,
)
from consoleme.handlers.v1.roles import GetRolesHandler
from consoleme.handlers.v1.saml import SamlHandler
from consoleme.handlers.v2.audit import AuditRolesAccessHandler, AuditRolesHandler
from consoleme.handlers.v2.aws_iam_users import UserDetailHandler
from consoleme.handlers.v2.challenge import (
ChallengeGeneratorHandler,
ChallengePollerHandler,
ChallengeValidatorHandler,
)
from consoleme.handlers.v2.dynamic_config import DynamicConfigApiHandler
from consoleme.handlers.v2.errors import NotFoundHandler as V2NotFoundHandler
from consoleme.handlers.v2.generate_changes import GenerateChangesHandler
from consoleme.handlers.v2.generate_policy import GeneratePolicyHandler
from consoleme.handlers.v2.index import (
EligibleRoleHandler,
EligibleRolePageConfigHandler,
FrontendHandler,
)
from consoleme.handlers.v2.logout import LogOutHandler
from consoleme.handlers.v2.managed_policies import (
ManagedPoliciesForAccountHandler,
ManagedPoliciesHandler,
ManagedPoliciesOnPrincipalHandler,
)
from consoleme.handlers.v2.notifications import NotificationsHandler
from consoleme.handlers.v2.policies import (
CheckPoliciesHandler,
PoliciesHandler,
PoliciesPageConfigHandler,
)
from consoleme.handlers.v2.requests import (
RequestDetailHandler,
RequestHandler,
RequestsHandler,
RequestsPageConfigHandler,
)
from consoleme.handlers.v2.resources import GetResourceURLHandler, ResourceDetailHandler
from consoleme.handlers.v2.roles import (
AccountRolesHandler,
GetRolesMTLSHandler,
RoleCloneHandler,
RoleConsoleLoginHandler,
RoleDetailAppHandler,
RoleDetailHandler,
RolesHandler,
)
from consoleme.handlers.v2.self_service import (
PermissionTemplatesHandler,
SelfServiceConfigHandler,
)
from consoleme.handlers.v2.service_control_policy import ServiceControlPolicyHandler
from consoleme.handlers.v2.templated_resources import TemplatedResourceDetailHandler
from consoleme.handlers.v2.typeahead import (
ResourceTypeAheadHandlerV2,
SelfServiceStep1ResourceTypeahead,
)
from consoleme.handlers.v2.user import (
LoginConfigurationHandler,
LoginHandler,
UserManagementHandler,
UserRegistrationHandler,
)
from consoleme.handlers.v2.user_profile import UserProfileHandler
from consoleme.lib.auth import mk_jwks_validator
from consoleme.lib.plugins import get_plugin_by_name
# Instantiate the configured internal-routes plugin (the default plugin is
# used when `plugins.internal_routes` is unset).
internal_routes = get_plugin_by_name(
    config.get("plugins.internal_routes", "default_internal_routes")
)()
def make_jwt_validator():
    """Build a JWT validator from the configured JWKS endpoint and schema.

    :raises Exception: if `sso.jwk_url` or `sso.jwk_schema` is not configured
    """
    jwk_url = config.get("sso.jwk_url")
    if not jwk_url:
        raise Exception("Config 'sso.jwk_url' is not defined")
    jwk_set = requests.get(jwk_url).json()
    # Only RSA keys are usable for validation here.
    rsa_keys = [key for key in jwk_set["keys"] if key["kty"] == "RSA"]
    jwk_schema = config.get("sso.jwk_schema")
    if not jwk_schema:
        raise Exception("Config 'sso.jwk_schema' is not defined")
    return mk_jwks_validator(rsa_keys, jwk_schema["header"], jwk_schema["payload"])
class AuthHandler(BaseHandler):
    """Reports auth-cookie expiration and current server time to clients."""

    async def prepare(self):
        """Run the standard auth preparation except for OPTIONS/POST requests."""
        try:
            if self.request.method.lower() in ["options", "post"]:
                # OPTIONS/POST requests skip the normal auth preparation.
                return
            await super(AuthHandler, self).prepare()
        except: # noqa
            # NoUserException
            raise

    async def get(self):
        """Return auth cookie expiration and the current server time."""
        self.write(
            {
                "authCookieExpiration": self.auth_cookie_expiration,
                "currentServerTime": int(time.time()),
            }
        )

    async def post(self):
        """Return the same payload as GET."""
        self.write(
            {
                "authCookieExpiration": self.auth_cookie_expiration,
                "currentServerTime": int(time.time()),
            }
        )
class GetCredentialsHandler(BaseMtlsHandler):
"""Main consoleme api handler."""
def check_xsrf_cookie(self):
    """Intentionally a no-op: skip Tornado's XSRF check for this endpoint."""
def initialize(self):
    """Reset per-request state: no user and no eligible roles yet."""
    self.eligible_roles = []
    self.user = None
async def raise_if_certificate_too_old(self, role, log_data=None):
    """Raise CertTooOldException if the caller's mTLS cert exceeds the role's max age.

    Writes a 403 error response before re-raising.

    :param role: role being requested; its max cert age is looked up per-role
    :param log_data: optional dict of log context to enrich
    :raises CertTooOldException: when self.current_cert_age exceeds the limit
    """
    log_data = {} if not log_data else log_data
    # Default message; previously this name was only bound inside the try
    # block, so a lookup failure caused a NameError when the message was used
    # in the exception/error payload below.
    max_cert_age_message = "Please refresh your certificate."
    try:
        max_cert_age = await group_mapping.get_max_cert_age_for_role(role)
        max_cert_age_message = config.get(
            "errors.custom_max_cert_age_message", "Please refresh your certificate."
        )
    except Exception as e:
        sentry_sdk.capture_exception()
        log_data["error"] = e
        log_data[
            "message"
        ] = "Failed to get max MTLS certificate age. Returning default value of 1 day"
        max_cert_age = 1  # Default to one day expiration if we fail to get max certificate age info
    max_cert_age_seconds = max_cert_age * (24 * 60 * 60)  # Seconds in a day
    try:
        if self.current_cert_age > max_cert_age_seconds:
            raise CertTooOldException(
                f"MTLS certificate is too old. The role you selected requires a max cert "
                f"age of {max_cert_age} days. "
                f"{max_cert_age_message}"
            )
    except CertTooOldException as e:
        log_data["message"] = "Unable to get credentials for user"
        log_data["eligible_roles"] = self.eligible_roles
        log.warning(log_data, exc_info=True)
        stats.count(
            "GetCredentialsHandler.post.exception",
            tags={"user": self.user, "requested_role": role, "authorized": False},
        )
        error = {
            "code": "905",
            "message": (
                f"MTLS certificate is too old. {max_cert_age_message}. "
                f"Max cert age for {role} is {max_cert_age} days."
            ),
            "requested_role": role,
            "exception": str(e),
            "request_id": self.request_uuid,
        }
        self.set_status(403)
        self.write(error)
        await self.finish()
        raise
async def _get_the_requested_role(self, request: dict, log_data: dict) -> str:
    """Get the requested role to complete the credentials fetching.

    Resolution order: an explicit ``requested_role``; else a role derived from
    ``app_name`` (narrowed by account); else a per-user role built from the
    requested account and ``self.user_role_name``. On failure, writes a 4xx
    error response and returns "".
    """
    if request.get("requested_role"):
        return request["requested_role"]
    elif request.get("app_name"):
        role_models = await internal_policies.get_roles_associated_with_app(
            request["app_name"]
        )
        if not role_models:
            stats.count(
                "GetCredentialsHandler.post",
                tags={
                    "user": self.user,
                    "user_role": False,
                    "app_name": request["app_name"],
                },
            )
            log_data["message"] = "No matching roles for provided app name."
            log.warning(log_data)
            error = {
                "code": "900",
                "message": "No matching roles for provided app name.",
                "request_id": self.request_uuid,
            }
            self.set_status(400)
            self.write(error)
            await self.finish()
            return ""
        account_ids = set()
        # If an account was passed into this function, we can use that
        if "account" in request:
            am = await group_mapping.get_account_mappings()
            if request["account"] in am["ids_to_names"].keys():
                # Account ID was passed in directly
                account_ids.add(request["account"])
            else:
                # Might be a friendly name, have to check
                if not am["names_to_ids"].get(request["account"]):
                    stats.count(
                        "GetCredentialsHandler.post.error",
                        tags={
                            "user": self.user,
                            "user_role": False,
                            "account": request["account"],
                        },
                    )
                    log_data["message"] = "Can't find the passed in account."
                    log.warning(log_data)
                    error = {
                        "code": "906",
                        "message": "No matching account for provided account.",
                        "request_id": self.request_uuid,
                    }
                    self.set_status(400)
                    self.write(error)
                    await self.finish()
                    return ""
                account_ids.add(am["names_to_ids"][request["account"]])
        if not account_ids:
            # no account id was passed in, check to see if we can "smartly" determine the account.
            # Preference will be given to test accounts
            filtered_accounts = await get_cloud_account_model_array(
                environment=Environment.test.value
            )
            # convert to set for O(1) lookup
            for account in filtered_accounts.accounts:
                account_ids.add(account.id)
        potential_arns = []
        # for all roles associated with app, find the one that is also an account in potential account ids
        for role in role_models:
            if role.account_id in account_ids:
                potential_arns.append(role.arn)
        if len(potential_arns) != 1:
            # if length isn't exactly 1, then it's an error either way (0 or more than 1)
            if len(potential_arns) == 0:
                code = "900"
                message = "No matching roles"
            else:
                code = "901"
                message = "More than one matching role"
            stats.count(
                "GetCredentialsHandler.post.error",
                tags={
                    "user": self.user,
                    "user_role": False,
                },
            )
            log_data["message"] = message
            log.warning(log_data)
            error = {
                "code": code,
                "message": message,
                "request_id": self.request_uuid,
            }
            self.set_status(400)
            self.write(error)
            await self.finish()
            return ""
        # if here, then success, we found exactly 1 ARN
        return potential_arns[0]
    else:
        # Neither requested_role nor app_name: resolve a per-user role.
        # NOTE(review): this branch assumes request["account"] is present --
        # a request with none of the three keys would raise KeyError; confirm
        # the schema guarantees one of them.
        # Check that the account exists:
        am = await group_mapping.get_account_mappings()
        # First, check if an account ID was passed in:
        if request["account"] in am["ids_to_names"].keys():
            account_id = request["account"]
        # If it was a "friendly" name, then get the account ID for it.
        else:
            # Was this a bogus name?
            if not am["names_to_ids"].get(request["account"]):
                stats.count(
                    "GetCredentialsHandler.post",
                    tags={
                        "user": self.user,
                        "user_role": True,
                        "account": request["account"],
                    },
                )
                log_data["message"] = "Can't find the passed in account."
                log.warning(log_data)
                error = {
                    "code": "906",
                    "message": "No matching account.",
                    "account": request["account"],
                    "request_id": self.request_uuid,
                }
                self.set_status(400)
                self.write(error)
                await self.finish()
                return ""
            account_id = am["names_to_ids"][request["account"]]
        # Shove the account ID into the request:
        request["account_id"] = account_id
        return f"arn:aws:iam::{account_id}:role/{self.user_role_name}"
async def post(self):
    """/api/v1/get_credentials - Endpoint used to get credentials via mtls. Used by newt and weep.
    ---
    get:
        description: Credentials endpoint. Authenticates user via MTLS and returns requested credentials.
        responses:
            200:
                description: Returns credentials or list of matching roles
            403:
                description: No matching roles found, or user has failed authn/authz.
    """
    log_data = {
        "function": "GetCredentialsHandler.post",
        "user-agent": self.request.headers.get("User-Agent"),
        "request_id": self.request_uuid,
    }
    # Validate the input:
    data = tornado.escape.json_decode(self.request.body)
    try:
        request = await sync_to_async(credentials_schema.load)(data)
    except ValidationError as ve:
        stats.count(
            "GetCredentialsHandler.post",
            tags={"user": self.user, "validation_error": str(ve)},
        )
        # Fix: this was `log_data["validation_error"]: ve.messages` -- an
        # annotation statement that evaluated both sides and discarded the
        # result, so the validation messages were never added to the log.
        log_data["validation_error"] = ve.messages
        log.error(log_data)
        error = {
            "code": "904",
            "message": f"Invalid JSON sent to the server:\n{json.dumps(ve.messages, indent=2)}",
            "request_id": self.request_uuid,
        }
        self.set_status(400)
        self.write(error)
        await self.finish()
        return
    # Dispatch by requester identity (set during mTLS auth).
    requester_type = self.requester.get("type")
    if requester_type == "application":
        app_name = self.requester.get("name")
        await self.get_credentials_app_flow(
            app_name, self.requester, request, log_data
        )
    elif requester_type == "user":
        user_email = self.requester.get("email")
        await self.get_credentials_user_flow(user_email, request, log_data)
    else:
        raise tornado.web.HTTPError(403, "Unauthorized entity.")
    return
async def get_credentials_app_flow(self, app_name, app, request, log_data):
    """Issue credentials to an authenticated application for an explicit role ARN.

    Validates the ARN shape, checks the app's authorization for the role,
    then writes the credentials (or a 403 error payload) to the response.
    """
    requested_role = request["requested_role"]
    log_data["requested_role"] = requested_role
    log_data["app"] = app_name
    log_data["message"] = "App is requesting role"
    log_data["custom_ip_restrictions"] = request.get("custom_ip_restrictions")
    log_data["request"] = json.dumps(request)
    log.debug(log_data)
    # Apps must pass a full role ARN: arn:aws:iam::<account>:role/<name>.
    arn_parts = requested_role.split(":")
    if (
        len(arn_parts) != 6
        or arn_parts[0] != "arn"
        or arn_parts[1] != "aws"
        or arn_parts[2] != "iam"
    ):
        log_data["message"] = "Invalid Role ARN"
        log.warning(log_data)
        error = {
            "code": "899",
            "message": "Invalid Role ARN. Applications must pass the full role ARN when requesting credentials",
            "requested_role": requested_role,
        }
        self.set_status(403)
        self.write(error)
        await self.finish()
        return
    # Check if role is valid ARN
    authorized = await internal_config.is_context_authorized(app, requested_role)
    stats.count(
        "GetCredentialsHandler.post",
        tags={
            "user": app_name,
            "requested_role": requested_role,
            "authorized": authorized,
        },
    )
    if not authorized:
        log_data["message"] = "Unauthorized"
        log.warning(log_data)
        error = {
            "code": "900",
            "message": "Unauthorized",
            "requested_role": requested_role,
        }
        self.set_status(403)
        self.write(error)
        await self.finish()
        return
    credentials = await aws.get_credentials(
        app_name,
        requested_role,
        enforce_ip_restrictions=False,
        user_role=False,
        account_id=None,
        custom_ip_restrictions=request.get("custom_ip_restrictions"),
    )
    self.set_header("Content-Type", "application/json")
    # Strip STS response metadata before returning the payload to the app.
    credentials.pop("ResponseMetadata", None)
    credentials.pop("AssumedRoleUser", None)
    credentials.pop("PackedPolicySize", None)
    # Need to use ujson here because the credentials contain a datetime element
    self.write(json.dumps(credentials))
    await self.finish()
    return
    async def get_credentials_user_flow(self, user_email, request, log_data):
        """Issue STS credentials to a human user after authn/authz and role matching.

        Flow: run the authorization flow, resolve the requested role, require exactly
        one eligible matching role, optionally Duo-MFA the user for non-IP-restricted
        credentials, then return the credentials as JSON. All failure paths respond
        with a JSON error body and a numeric "code" string.

        :param user_email: Email of the authenticated user
        :param request: Parsed request body ("console_only", "no_ip_restrictions", ...)
        :param log_data: Mutable dict of structured-log fields, enriched in place
        """
        log_data["user"] = user_email
        await self.authorization_flow(
            user=user_email, console_only=request["console_only"]
        )
        # Get the role to request:
        requested_role = await self._get_the_requested_role(request, log_data)
        if not requested_role:
            raise tornado.web.HTTPError(403, "No requested role detected.")
        log_data["requested_role"] = requested_role
        log_data["message"] = "User is requesting role"
        log.debug(log_data)
        # The requested role must resolve to exactly one of the user's eligible roles.
        matching_roles = await group_mapping.filter_eligible_roles(requested_role, self)
        log_data["matching_roles"] = matching_roles
        if len(matching_roles) == 0:
            stats.count(
                "GetCredentialsHandler.post",
                tags={"user": self.user, "requested_role": None, "authorized": False},
            )
            log_data["message"] = "No matching roles"
            log.warning(log_data)
            error = {
                "code": "900",
                "message": "No matching roles",
                "requested_role": requested_role,
                "request_id": self.request_uuid,
            }
            self.set_status(403)
            self.write(error)
            return
        if len(matching_roles) > 1:
            stats.count(
                "GetCredentialsHandler.post",
                tags={"user": self.user, "requested_role": None, "authorized": False},
            )
            log_data["message"] = "More than one matching role"
            log.warning(log_data)
            error = {
                "code": "901",
                "message": log_data["message"],
                "requested_role": requested_role,
                "matching roles": matching_roles,
                "request_id": self.request_uuid,
            }
            self.set_status(403)
            self.write(error)
            return
        if len(matching_roles) == 1:
            await self.raise_if_certificate_too_old(matching_roles[0], log_data)
        try:
            enforce_ip_restrictions = True
            if request["no_ip_restrictions"]:
                # Duo prompt the user in order to get non IP-restricted credentials
                mfa_success = await duo_mfa_user(
                    self.user.split("@")[0],
                    message="ConsoleMe Non-IP Restricted Credential Request",
                )
                if mfa_success:
                    enforce_ip_restrictions = False
                    stats.count(
                        "GetCredentialsHandler.post.no_ip_restriction.success",
                        tags={"user": self.user, "requested_role": requested_role},
                    )
                    log_data[
                        "message"
                    ] = "User requested non-IP-restricted credentials"
                    log.debug(log_data)
                else:
                    # Log and emit a metric
                    log_data["message"] = "MFA Denied or Timeout"
                    log.warning(log_data)
                    stats.count(
                        "GetCredentialsHandler.post.no_ip_restriction.failure",
                        tags={"user": self.user, "requested_role": requested_role},
                    )
                    error = {
                        "code": "902",
                        "message": "MFA Not Successful",
                        "requested_role": requested_role,
                        "request_id": self.request_uuid,
                    }
                    self.set_status(403)
                    self.write(error)
                    await self.finish()
                    return
            log_data["enforce_ip_restrictions"] = enforce_ip_restrictions
            log_data["message"] = "Retrieving credentials"
            log.debug(log_data)
            # User-role logic:
            # User-role should come in as cm-[username or truncated username]_[N or NC]
            user_role = False
            account_id = None
            # User role must be in user's attributes
            if (
                self.user_role_name
                and matching_roles[0].split("role/")[1] == self.user_role_name
            ):
                user_role = True
                account_id = (
                    matching_roles[0].split("arn:aws:iam::")[1].split(":role")[0]
                )
            credentials = await aws.get_credentials(
                self.user,
                matching_roles[0],
                enforce_ip_restrictions=enforce_ip_restrictions,
                user_role=user_role,
                account_id=account_id,
            )
        except Exception as e:
            log_data["message"] = "Unable to get credentials for user"
            log_data["eligible_roles"] = self.eligible_roles
            log.error(log_data, exc_info=True)
            stats.count(
                "GetCredentialsHandler.post.exception",
                tags={
                    "user": self.user,
                    "requested_role": requested_role,
                    "authorized": False,
                },
            )
            error = {
                "code": "902",
                "message": "Unable to get credentials.",
                "requested_role": requested_role,
                "matching_role": matching_roles[0],
                "exception": str(e),
                "request_id": self.request_uuid,
            }
            self.set_status(403)
            self.write(error)
            await self.finish()
            return
        if not credentials:
            # NOTE: this failure path does not set a non-200 status code, unlike the
            # other error branches above — confirm whether that is intentional.
            log_data["message"] = "Unauthorized or invalid role"
            log.warning(log_data)
            stats.count(
                "GetCredentialsHandler.post.unauthorized",
                tags={
                    "user": self.user,
                    "requested_role": requested_role,
                    "authorized": False,
                },
            )
            error = {
                "code": "903",
                "message": "Requested role not found in eligible roles",
                "requested_role": requested_role,
                "eligible roles": self.eligible_roles,
                "request_id": self.request_uuid,
            }
            self.write(error)
            await self.finish()
            return
        else:
            log_data["message"] = "Success. Returning credentials"
            log.debug(log_data)
            stats.count(
                "GetCredentialsHandler.post.success",
                tags={
                    "user": self.user,
                    "requested_role": requested_role,
                    "authorized": True,
                },
            )
            # Strip boto3 response bookkeeping before returning credentials.
            credentials.pop("ResponseMetadata", None)
            credentials.pop("AssumedRoleUser", None)
            credentials.pop("PackedPolicySize", None)
            self.write(json.dumps(credentials))
            self.set_header("Content-Type", "application/json")
            await self.finish()
            return
class HeaderHandler(BaseHandler):
    async def get(self):
        """
        Show request headers for API requests. AuthZ is required.
        ---
        description: Shows all headers received by server
        responses:
            200:
                description: Pretty-formatted list of headers.
        """
        if not self.user:
            return
        log.debug(
            {
                "user": self.user,
                "function": "myheaders.get",
                "message": "Incoming request",
                "user-agent": self.request.headers.get("User-Agent"),
                "request_id": self.request_uuid,
            }
        )
        stats.count("myheaders.get", tags={"user": self.user})
        # Headers named in configuration as sensitive are never echoed back.
        hidden = {h.lower() for h in config.get("headers.sensitive_headers", [])}
        rendered = [
            f"<p><strong>{xhtml_escape(name)}</strong>: {xhtml_escape(value)}</p>"
            for name, value in dict(self.request.headers).items()
            if name.lower() not in hidden
        ]
        self.write("\n".join(rendered))
class ApiHeaderHandler(BaseMtlsHandler):
    async def get(self):
        """
        Show request headers for API requests. No AuthZ required.
        ---
        description: Shows all headers received by server
        responses:
            200:
                description: Pretty-formatted list of headers.
        """
        log.debug(
            {
                "function": "apimyheaders.get",
                "message": "Incoming request",
                "user-agent": self.request.headers.get("User-Agent"),
                "request_id": self.request_uuid,
            }
        )
        stats.count("apimyheaders.get")
        # Headers named in configuration as sensitive are never echoed back.
        hidden = {h.lower() for h in config.get("headers.sensitive_headers", [])}
        self.write(
            {
                name: value
                for name, value in dict(self.request.headers).items()
                if name.lower() not in hidden
            }
        )
class HealthHandler(TornadoRequestHandler):
    """Health handler."""

    async def get(self):
        """Healthcheck endpoint
        ---
        get:
            description: Healtcheck endpoint
            responses:
                200:
                    description: Simple endpoint that returns 200 and a string to signify that the server is up.
        """
        # Unauthenticated by design: load balancers poll this endpoint.
        self.write("OK")
class AutocompleteHandler(BaseAPIV1Handler):
    async def get(self):
        """
        /api/v1/policyuniverse/autocomplete/?prefix=
        ---
        get:
            description: Supplies autocompleted permissions for the ace code editor.
            responses:
                200:
                    description: Returns a list of the matching permissions.
                400:
                    description: The required "prefix" query argument is missing.
        """
        if config.get("policy_editor.disallow_contractors", True) and self.contractor:
            if self.user not in config.get(
                "groups.can_bypass_contractor_restrictions", []
            ):
                raise MustBeFte("Only FTEs are authorized to view this page.")

        only_filter_services = False
        if (
            self.request.arguments.get("only_filter_services")
            and self.request.arguments.get("only_filter_services")[0].decode("utf-8")
            == "true"
        ):
            only_filter_services = True

        # Bug fix: a missing "prefix" argument previously raised a TypeError
        # (subscripting None) and surfaced as an unhandled HTTP 500. Reject it
        # explicitly with a 400 instead.
        prefix_args = self.request.arguments.get("prefix")
        if not prefix_args:
            self.set_status(400)
            self.write({"message": "The 'prefix' query argument is required."})
            await self.finish()
            return
        prefix = prefix_args[0].decode("utf-8") + "*"
        results = _expand_wildcard_action(prefix)
        if only_filter_services:
            # We return known matching services in a format that the frontend expects to see them. We omit the wildcard
            # character returned by policyuniverse.
            services = sorted(list({r.split(":")[0].replace("*", "") for r in results}))
            results = [{"title": service} for service in services]
        else:
            results = [dict(permission=r) for r in results]
        self.write(json.dumps(results))
        await self.finish()
class ApiResourceTypeAheadHandler(BaseMtlsHandler):
    async def get(self):
        """Resource type-ahead endpoint for mTLS (CLI/API) callers."""
        caller_type = self.requester.get("type", "")
        if caller_type == "user":
            # TODO: do we need to block contractor access?
            pass
        elif caller_type != "application" or self.requester["name"] not in config.get(
            "api_auth.valid_entities", []
        ):
            # Rejects both unknown caller types and applications that are not
            # on the configured allow-list.
            raise Exception("Call does not originate from a valid API caller")
        matches = await handle_resource_type_ahead_request(self)
        self.write(json.dumps(matches))
class ResourceTypeAheadHandler(BaseHandler):
    async def get(self):
        """Resource type-ahead endpoint for authenticated web users."""
        contractors_blocked = config.get("policy_editor.disallow_contractors", True)
        if contractors_blocked and self.contractor:
            bypass_groups = config.get("groups.can_bypass_contractor_restrictions", [])
            if self.user not in bypass_groups:
                raise MustBeFte("Only FTEs are authorized to view this page.")
        matches = await handle_resource_type_ahead_request(self)
        self.write(json.dumps(matches))
class GetRolesHandler(BaseMtlsHandler):
    """consoleme CLI role handler. Pass ?all=true to URL query to return all roles."""

    def check_xsrf_cookie(self):
        # mTLS clients are not browsers, so XSRF protection is disabled here.
        pass

    def initialize(self):
        self.user: str = None
        self.eligible_roles: list = []

    async def get(self):
        """
        /api/v1/get_roles - Endpoint used to get list of roles. Used by weep and newt.
        ---
        get:
            description: Presents json-encoded list of eligible roles for the user.
            responses:
                200:
                    description: Present user with list of eligible roles.
                403:
                    description: User has failed authn/authz.
        """
        self.user: str = self.requester["email"]
        # "?all=true" widens the result set beyond console-capable roles.
        console_only = self.get_arguments("all") != ["true"]
        log.debug(
            {
                "function": "GetRolesHandler.get",
                "user": self.user,
                "console_only": console_only,
                "message": "Writing all eligible user roles",
                "user-agent": self.request.headers.get("User-Agent"),
                "request_id": self.request_uuid,
            }
        )
        stats.count("GetRolesHandler.get", tags={"user": self.user})
        await self.authorization_flow(user=self.user, console_only=console_only)
        self.write(json.dumps(sorted(self.eligible_roles)))
        self.set_header("Content-Type", "application/json")
        await self.finish()
class SamlHandler(BaseHandler):
    """SAML SSO handler for the /saml/{endpoint} routes ("sso" and "acs")."""

    def check_xsrf_cookie(self):
        # The IdP POSTs the SAML response cross-origin, so XSRF checks must be off.
        pass

    async def post(self, endpoint):
        """Handle SAML login initiation ("sso") and assertion consumption ("acs").

        :param endpoint: Route suffix; "sso" redirects to the IdP, "acs" processes
            the IdP's response, optionally sets the auth JWT cookie, and follows
            RelayState.
        """
        req = await prepare_tornado_request_for_saml(self.request)
        auth = await init_saml_auth(req)
        if "sso" in endpoint:
            return self.redirect(auth.login())
        elif "acs" in endpoint:
            auth.process_response()
            errors = auth.get_errors()
            not_auth_warn = not await sync_to_async(auth.is_authenticated)()
            if not_auth_warn:
                self.write("User is not authenticated")
                await self.finish()
                return
            # NOTE(review): when get_errors() returns a non-empty list, this method
            # falls through without writing any response — confirm whether an error
            # page/status was intended here.
            if len(errors) == 0:
                saml_attributes = await sync_to_async(auth.get_attributes)()
                # The email attribute may arrive as a single-element list.
                email = saml_attributes[
                    config.get("get_user_by_saml_settings.attributes.email")
                ]
                if isinstance(email, list) and len(email) > 0:
                    email = email[0]
                groups = saml_attributes.get(
                    config.get("get_user_by_saml_settings.attributes.groups"), []
                )
                self_url = await sync_to_async(OneLogin_Saml2_Utils.get_self_url)(req)
                if config.get("auth.set_auth_cookie"):
                    expiration = datetime.utcnow().replace(tzinfo=pytz.UTC) + timedelta(
                        minutes=config.get("jwt.expiration_minutes", 60)
                    )
                    encoded_cookie = await generate_jwt_token(
                        email, groups, exp=expiration
                    )
                    self.set_cookie(
                        config.get("auth_cookie_name", "consoleme_auth"),
                        encoded_cookie,
                        expires=expiration,
                        secure=config.get(
                            "auth.cookie.secure",
                            "https://" in config.get("url"),
                        ),
                        httponly=config.get("auth.cookie.httponly", True),
                        samesite=config.get("auth.cookie.samesite", True),
                    )
                # Only redirect to RelayState when it points somewhere other than
                # this ACS URL itself (open-redirect guard is delegated to
                # auth.redirect_to). If RelayState is absent, no response is
                # written — presumably the cookie alone suffices; verify.
                if (
                    "RelayState" in self.request.arguments
                    and self_url
                    != self.request.arguments["RelayState"][0].decode("utf-8")
                ):
                    return self.redirect(
                        auth.redirect_to(
                            self.request.arguments["RelayState"][0].decode("utf-8")
                        )
                    )
class AuditRolesHandler(BaseMtlsHandler):
    """Handler for /api/v2/audit/roles
    Returns a list of all roles known to ConsoleMe
    """

    allowed_methods = ["GET"]

    def check_xsrf_cookie(self) -> None:
        # mTLS clients are not browsers, so XSRF protection is disabled here.
        pass

    async def get(self):
        """
        GET /api/v2/audit/roles

        Query arguments:
            page: zero-based page index; invalid or negative values fall back to 0
            count: page size; invalid or non-positive values fall back to 1000
        """
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
        }
        # Pagination arguments are parsed defensively: bad input is logged and
        # replaced with defaults rather than rejected with a 400.
        page = self.get_argument("page", "0")
        try:
            page = int(page)
        except ValueError:
            log_data["message"] = f"invalid value for page: {page}"
            log.warning(log_data)
            page = 0
        count = self.get_argument("count", "1000")
        try:
            count = int(count)
        except ValueError:
            log_data["message"] = f"invalid value for count: {count}"
            log.warning(log_data)
            count = 1000
        if page < 0:
            page = 0
        if count <= 0:
            count = 1000
        app_name = self.requester.get("name") or self.requester.get("username")
        stats.count(
            "AuditRoleHandler.get",
            tags={
                "requester": app_name,
            },
        )
        # NOTE(review): all_roles() is asked to paginate (paginate=True, page, count)
        # AND the result is sliced again with [start:end] below. If all_roles()
        # already returns just the requested page, this double-paginates and every
        # page > 0 would come back empty — confirm the contract of
        # credential_mapping.all_roles before relying on this endpoint's paging.
        roles = await credential_mapping.all_roles(
            paginate=True, page=page, count=count
        )
        total_roles = await credential_mapping.number_roles()
        start = page * count
        end = start + count
        end = min(end, total_roles)
        roles = roles[start:end]
        self.write(
            WebResponse(
                status=Status2.success,
                status_code=200,
                data=roles,
                page=page,
                total=total_roles,
                count=len(roles),
                last_page=_get_last_page(total_roles, count),
            ).json(exclude_unset=True)
        )
class AuditRolesAccessHandler(BaseMtlsHandler):
    """Handler for /api/v2/audit/roles/{accountNumber}/{roleName}/access
    Returns a list of groups with access to the requested role
    """

    allowed_methods = ["GET"]

    def check_xsrf_cookie(self) -> None:
        pass

    async def get(self, account_id, role_name):
        """
        GET /api/v2/audit/roles/{accountNumber}/{roleName}/access
        """
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "account_id": account_id,
            "role_name": role_name,
        }
        requester_name = self.requester.get("name") or self.requester.get("username")
        stats.count(
            "RoleAccessHandler.get",
            tags={
                "requester": requester_name,
                "account_id": account_id,
                "role_name": role_name,
            },
        )
        authorized_groups = await credential_mapping.determine_role_authorized_groups(
            account_id, role_name
        )
        if authorized_groups:
            self.write(
                WebResponse(
                    status=Status2.success, status_code=200, data=authorized_groups
                ).json(exclude_unset=True)
            )
            return
        # No groups grant access to this role: report a 404.
        log_data[
            "message"
        ] = f"No authorized groups found for {role_name} in {account_id}"
        log.warning(log_data)
        self.set_status(404)
        self.write(
            WebResponse(
                status=Status2.error,
                status_code=404,
                message="No groups found for requested role",
            ).json(exclude_unset=True)
        )
class UserDetailHandler(BaseAPIV2Handler):
    """Handler for /api/v2/users/{accountNumber}/{userName}
    Allows read and delete access to a specific user in an account.
    """

    allowed_methods = ["GET", "DELETE"]

    def initialize(self):
        self.user: Optional[str] = None
        self.eligible_roles: list = []

    async def get(self, account_id, user_name):
        """
        GET /api/v2/users/{account_number}/{user_name}

        Returns extended details for the given IAM user, optionally bypassing the
        cache when ?force_refresh=true is supplied. Responds 404 when the user
        cannot be retrieved.
        """
        log_data = {
            "function": "UsersDetailHandler.get",
            "user": self.user,
            "ip": self.ip,
            "message": "Retrieving user details",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "account_id": account_id,
            "user_name": user_name,
        }
        stats.count(
            "UsersDetailHandler.get",
            tags={"user": self.user, "account_id": account_id, "user_name": user_name},
        )
        log.debug(log_data)
        force_refresh = str2bool(
            self.request.arguments.get("force_refresh", [False])[0]
        )
        error = ""
        try:
            user_details = await get_user_details(
                account_id, user_name, extended=True, force_refresh=force_refresh
            )
        except Exception as e:
            # Retrieval errors are reported to Sentry and surfaced in the 404 body
            # below rather than as a 500.
            sentry_sdk.capture_exception()
            log.error({**log_data, "error": e}, exc_info=True)
            user_details = None
            error = str(e)
        if not user_details:
            self.send_error(
                404,
                message=f"Unable to retrieve the specified user: {account_id}/{user_name}. {error}",
            )
            return
        self.write(user_details.json())

    async def delete(self, account_id, iam_user_name):
        """
        DELETE /api/v2/users/{account_id}/{iam_user_name}

        Deletes the IAM user after verifying the caller is authorized to delete
        IAM principals, then refreshes ConsoleMe's cached view of the principal.
        """
        # Escape path parameters before they are echoed into logs/responses.
        account_id = tornado.escape.xhtml_escape(account_id)
        iam_user_name = tornado.escape.xhtml_escape(iam_user_name)
        if not self.user:
            self.write_error(403, message="No user detected")
            return
        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "ip": self.ip,
            "account": account_id,
            "iam_user_name": iam_user_name,
        }
        can_delete_principal = can_delete_iam_principals(self.user, self.groups)
        if not can_delete_principal:
            stats.count(
                f"{log_data['function']}.unauthorized",
                tags={
                    "user": self.user,
                    "account": account_id,
                    "iam_user_name": iam_user_name,
                    "authorized": can_delete_principal,
                    "ip": self.ip,
                },
            )
            log_data["message"] = "User is unauthorized to delete an AWS IAM User"
            log.error(log_data)
            self.write_error(
                403, message="User is unauthorized to delete an AWS IAM user"
            )
            return
        try:
            await delete_iam_user(account_id, iam_user_name, self.user)
        except Exception as e:
            log_data["message"] = "Exception deleting AWS IAM User"
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.exception",
                tags={
                    "user": self.user,
                    "account": account_id,
                    "iam_user_name": iam_user_name,
                    "authorized": can_delete_principal,
                    "ip": self.ip,
                },
            )
            self.write_error(500, message="Error occurred deleting IAM user: " + str(e))
            return
        # if here, user has been successfully deleted
        arn = f"arn:aws:iam::{account_id}:user/{iam_user_name}"
        # Refresh the cached principal so the deletion is reflected immediately.
        await aws.fetch_iam_user(account_id, arn)
        response_json = {
            "status": "success",
            "message": "Successfully deleted AWS IAM user from account",
            "iam_user_name": iam_user_name,
            "account": account_id,
        }
        self.write(response_json)
class ChallengeGeneratorHandler(TornadoRequestHandler):
    """
    Challenge URLs are an alternative to mutual TLS for authenticating CLI clients of ConsoleMe.
    If Challenge Token auth is enabled, this will generate time-sensitive challenge token urls that end-users
    will be required to authenticate to. One authentication is verified, clients will be able to retrieve a
    signed jwt that clients will be able to pass to ConsoleMe for authn/authz.
    The ChallengeUrlGenerator endpoint must be unauthenticated because the CLI client will be requesting URLs
    """

    def get_request_ip(self):
        """Return the requester IP, preferring a trusted remote-IP header if configured."""
        trusted_remote_ip_header = config.get("auth.remote_ip.trusted_remote_ip_header")
        if trusted_remote_ip_header:
            return self.request.headers[trusted_remote_ip_header].split(",")[0]
        return self.request.remote_ip

    async def get(self, user):
        """Create a pending challenge for `user` and return its validator/poller URLs.

        :param user: The username the CLI client wants credentials for. The challenge
            records the requester IP so the poller can verify it later.
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        ip = self.get_request_ip()
        token = str(uuid.uuid4())
        # Generalized: the challenge lifetime was previously hard-coded to 2 minutes.
        # It is now configurable, with the same default, so deployments can tune it.
        ttl_minutes = config.get("challenge_url.ttl_minutes", 2)
        entry = {
            "ttl": int(
                (
                    datetime.utcnow().replace(tzinfo=pytz.UTC)
                    + timedelta(minutes=ttl_minutes)
                ).timestamp()
            ),
            "ip": ip,
            "status": "pending",
            "user": user,
        }
        red.hset(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            token,
            json.dumps(entry),
        )
        challenge_url = "{url}/challenge_validator/{token}".format(
            url=config.get("url"), token=token
        )
        polling_url = "{url}/noauth/v1/challenge_poller/{token}".format(
            url=config.get("url"), token=token
        )
        self.write({"challenge_url": challenge_url, "polling_url": polling_url})
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "challenge_url": challenge_url,
            "polling_url": polling_url,
            "message": "Incoming request",
            "ip": ip,
            "user": user,
        }
        log.debug(log_data)
class ChallengeValidatorHandler(BaseHandler):
    """
    This is the challenge authentication endpoint.
    Once the user has authenticated successfully, we validate their information and mark the challenge as successful.
    """

    async def get(self, requested_challenge_token):
        """Render the approval prompt for a pending challenge.

        Marks the challenge as visited (single view) and attaches a nonce that the
        subsequent POST must echo back before the challenge can be approved.
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "requested_challenge_token": requested_challenge_token,
            "message": "Incoming request",
            "ip": self.ip,
        }
        log.debug(log_data)
        all_challenges = red.hgetall(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP")
        )
        if not all_challenges:
            message = (
                "Unable to find a matching challenge URL. This usually means that it has expired. "
                "Please try requesting a new challenge URL."
            )
            self.write({"message": message})
            return
        # Opportunistically purge expired challenges before looking ours up.
        await delete_expired_challenges(all_challenges)
        valid_user_challenge = await retrieve_user_challenge(
            self, requested_challenge_token, log_data
        )
        if not valid_user_challenge:
            return
        # Each challenge URL may only be viewed once.
        if valid_user_challenge.get("visited"):
            message = (
                "This unique challenge URL has already been viewed. "
                "Please try requesting a new challenge URL."
            )
            self.write({"message": message})
            return
        request_ip = self.get_request_ip()
        # By default, the challenge URL requester IP must match the URL the challenge was created with. In some cases
        # (i.e. IPv4 vs IPv6), the challenge may have been created with an IPv4 address, and the authenticated browser
        # verification request may originate from an IPv6 one, or visa versa, in which case this configuration may
        # need to be explicitly set to False.
        if config.get(
            "challenge_url.request_ip_must_match_challenge_creation_ip", True
        ):
            if request_ip != valid_user_challenge.get("ip"):
                log.error(
                    {
                        **log_data,
                        "request_ip": request_ip,
                        "challenge_ip": valid_user_challenge.get("ip"),
                        "message": "Request IP doesn't match challenge IP",
                    }
                )
                self.write(
                    {
                        "message": (
                            "Your originating IP doesn't match the IP the challenge was created with. "
                            "If you are developing locally, this is probably because your CLI (Weep) made an IPv6 "
                            "request, and your web browser made an IPv4 request. Or visa-versa. If this is the case, "
                            "set the local configuration for "
                            "**challenge_url.request_ip_must_match_challenge_creation_ip** to **false**."
                        )
                    }
                )
                return
        # Mark the challenge as viewed and bind a fresh nonce for the approval POST.
        valid_user_challenge["visited"] = True
        valid_user_challenge["nonce"] = str(uuid.uuid4())
        red.hset(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            requested_challenge_token,
            json.dumps(valid_user_challenge),
        )
        request_ip = valid_user_challenge["ip"]
        request_user = valid_user_challenge["user"]
        message = (
            f"A user at **{request_ip}** has requested ConsoleMe credentials for **{request_user}**.\n\n"
            f"You must approve this request for credentials to be provided. "
            f"You will not be able to refresh or revisit this page after closing it.\n\n"
            f"If you did not create this request, please report it to your security team."
        )
        self.write(
            {
                "message": message,
                "nonce": valid_user_challenge["nonce"],
                "show_approve_button": True,
            }
        )

    async def post(self, requested_challenge_token):
        """Approve a previously-viewed challenge.

        Requires the nonce issued by the GET above; on success the challenge is
        marked successful and bound to the authenticated user and their groups.
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        data = tornado.escape.json_decode(self.request.body)
        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "requested_challenge_token": requested_challenge_token,
            "message": "Incoming request",
            "ip": self.ip,
        }
        log.debug(log_data)
        all_challenges = red.hgetall(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP")
        )
        if not all_challenges:
            message = (
                "Unable to find a matching challenge URL. This usually means that it has expired. "
                "Please try requesting a new challenge URL."
            )
            self.write({"message": message})
            return
        await delete_expired_challenges(all_challenges)
        valid_user_challenge = await retrieve_user_challenge(
            self, requested_challenge_token, log_data
        )
        if not valid_user_challenge:
            message = (
                "Unable to find a matching challenge URL. This usually means that it has expired. "
                "Please try requesting a new challenge URL."
            )
            self.write({"message": message})
            return
        # The nonce proves the approver actually viewed the challenge page.
        if data.get("nonce") != valid_user_challenge["nonce"]:
            message = "Unable to validate challenge URL. The Nonce you've submitted is invalid."
            log.error({**log_data, "message": message})
            self.write({"message": message})
            return
        request_ip = self.get_request_ip()
        # By default, the challenge URL requester IP must match the URL the challenge was created with. In some cases
        # (i.e. IPv4 vs IPv6), the challenge may have been created with an IPv4 address, and the authenticated browser
        # verification request may originate from an IPv6 one, or visa versa, in which case this configuration may
        # need to be explicitly set to False.
        if config.get(
            "challenge_url.request_ip_must_match_challenge_creation_ip", True
        ):
            if request_ip != valid_user_challenge.get("ip"):
                log.error(
                    {
                        **log_data,
                        "request_ip": request_ip,
                        "challenge_ip": valid_user_challenge.get("ip"),
                        "message": "Request IP doesn't match challenge IP",
                    }
                )
                self.write(
                    {
                        "message": "Your originating IP doesn't match the IP the challenge was created with."
                    }
                )
                return
        # Bind the challenge to the authenticated approver; the poller endpoint will
        # mint the JWT from these fields.
        valid_user_challenge["status"] = "success"
        valid_user_challenge["user"] = self.user
        valid_user_challenge["groups"] = self.groups
        red.hset(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            requested_challenge_token,
            json.dumps(valid_user_challenge),
        )
        message = "You've successfully authenticated to ConsoleMe and may now close this page."
        self.write({"message": message})
class ChallengePollerHandler(TornadoRequestHandler):
    """
    This endpoint is an unauthenticated endpoint that the client uses to poll for successful challenge completion.
    If the challenge has been completed successfully, and the IP of the endpoint matches the IP used to generate the
    challenge URL, we return a signed jwt. It is expected that the client will poll this endpoint continuously until
    the challenge url has been validated by a client, or until it has expired.
    """

    async def get(self, requested_challenge_token):
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        redis_key = config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP")
        raw_challenge = red.hget(redis_key, requested_challenge_token)
        if not raw_challenge:
            self.write({"status": "unknown"})
            return
        challenge = json.loads(raw_challenge)

        # Expired tokens are deleted eagerly on poll.
        now = int(datetime.utcnow().replace(tzinfo=pytz.UTC).timestamp())
        if challenge.get("ttl", 0) < now:
            red.hdel(redis_key, requested_challenge_token)
            self.write({"status": "expired"})
            return

        # Only the IP that created the challenge may poll it.
        if self.get_request_ip() != challenge.get("ip"):
            self.write({"status": "unauthorized"})
            return

        # Anything but success is simply reported back so the client keeps polling.
        if challenge.get("status") != "success":
            self.write({"status": challenge.get("status")})
            return

        # Authentication succeeded: mint a JWT and retire the single-use token.
        jwt_expiration = datetime.utcnow().replace(tzinfo=pytz.UTC) + timedelta(
            minutes=config.get("jwt.expiration_minutes", 60)
        )
        encoded_jwt = await generate_jwt_token(
            challenge.get("user"), challenge.get("groups"), exp=jwt_expiration
        )
        self.write(
            {
                "status": challenge["status"],
                "cookie_name": config.get("auth_cookie_name", "consoleme_auth"),
                "expiration": int(jwt_expiration.timestamp()),
                "encoded_jwt": encoded_jwt,
                "user": challenge["user"],
            }
        )
        red.hdel(redis_key, requested_challenge_token)
        return
class DynamicConfigApiHandler(BaseHandler):
    """Admin-only API for viewing and updating ConsoleMe's dynamic (DynamoDB-backed) configuration."""

    def on_finish(self) -> None:
        # After a successful POST, schedule a refresh of the credential
        # authorization mapping so config changes take effect in this region.
        if self.request.method != "POST":
            return
        # Force a refresh of credential authorization mapping in current region
        # TODO: Trigger this to run cross-region
        # TODO: Delete server-side user-role cache intelligently so users get immediate access
        celery_app.send_task(
            "consoleme.celery_tasks.celery_tasks.cache_credential_authorization_mapping",
            countdown=config.get("dynamic_config.dynamo_load_interval"),
        )

    async def get(self) -> None:
        """
        Get the dynamic configuration endpoint.
        ---
        description: Presents a YAML-configured editor to allow viewing and modification of dynamic config
        responses:
            200:
                description: View of dynamic configuration
            403:
                description: Unauthorized to access this page
        """
        if not self.user:
            return
        if not can_edit_dynamic_config(self.user, self.groups):
            raise tornado.web.HTTPError(
                403, "Only application admins are authorized to view this page."
            )
        dynamic_config = await ddb.get_dynamic_config_yaml()
        # The sha256 is returned so the client can detect concurrent edits on POST.
        self.write(
            {
                "dynamicConfig": dynamic_config.decode("utf-8"),
                "sha256": sha256(dynamic_config).hexdigest(),
            }
        )

    async def post(self):
        """
        Post an update to the dynamic configuration endpoint.
        ---
        description: Update dynamic configuration
        responses:
            200:
                description: Update successful.
            403:
                description: Unauthorized to access this page
        """
        if not self.user:
            return
        if not can_edit_dynamic_config(self.user, self.groups):
            raise tornado.web.HTTPError(
                403, "Only application admins are authorized to view this page."
            )
        existing_dynamic_config = await ddb.get_dynamic_config_yaml()
        if existing_dynamic_config:
            existing_dynamic_config_sha256 = sha256(existing_dynamic_config).hexdigest()
        else:
            existing_dynamic_config_sha256 = None
        result = {"status": "success"}
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user": self.user,
            "existing_dynamic_config_sha256": existing_dynamic_config_sha256,
        }
        log.debug(log_data)
        data = tornado.escape.json_decode(self.request.body)
        try:
            # Optimistic-concurrency check: the client must submit the sha256 of the
            # config version it edited; a mismatch means someone else changed it.
            existing_sha256 = data.get("existing_sha256")
            new_sha256 = sha256(data["new_config"].encode("utf-8")).hexdigest()
            if existing_sha256 == new_sha256:
                raise Exception(
                    "You didn't change the dynamic configuration. Try again!"
                )
            if (
                existing_dynamic_config_sha256
                and not existing_dynamic_config_sha256 == existing_sha256
            ):
                raise Exception(
                    "Dynamic configuration was updated by another user before your changes were processed. "
                    "Please refresh your page and try again."
                )
            await ddb.update_dynamic_config(data["new_config"], self.user)
        except Exception as e:
            result["status"] = "error"
            result["error"] = f"There was an error processing your request: {e}"
            sentry_sdk.capture_exception()
            self.write(json.dumps(result, cls=SetEncoder))
            await self.finish()
            return
        result["newConfig"] = data["new_config"]
        # NOTE(review): "newsha56" looks like a typo of "newSha256", but the
        # frontend may depend on this key — confirm before renaming.
        result["newsha56"] = new_sha256
        self.write(result)
        await self.finish()
        # Force a refresh of dynamic configuration in the current region. Other regions will need to wait until the
        # next background thread refreshes it automatically. By default, this happens every 60 seconds.
        config.CONFIG.load_config_from_dynamo(ddb=ddb, red=red)
        return
class GenerateChangesHandler(BaseAPIV2Handler):
    """Handler for /api/v2/generate_changes
    Generates a ChangeModelArray from ChangeGeneratorModelArray
    """

    allowed_methods = ["POST"]

    async def post(self):
        """
        POST /api/v2/generate_changes

        Parses the request body as a ChangeGeneratorModelArray (e.g. a list of
        change generators such as {"generator_type": "s3", "principal": {...},
        "resource_arn": [...], "action_groups": [...]}) and responds with the
        generated ChangeModelArray JSON (inline-policy changes with the computed
        policy documents). Responds 400 on validation failures and 500 on any
        other error while generating changes.
        """
        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "ip": self.ip,
            "request_id": self.request_uuid,
        }
        try:
            # Validate the incoming payload against the model, then stamp every
            # change with the authenticated user before generating changes.
            generator_array = ChangeGeneratorModelArray.parse_raw(self.request.body)
            for generator in generator_array.changes:
                generator.user = self.user
            change_model_array = await generate_change_model_array(generator_array)
        except (InvalidRequestParameter, ValidationError) as e:
            log_data["message"] = "Validation Exception"
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.validation_exception", tags={"user": self.user}
            )
            self.write_error(400, message="Error validating input: " + str(e))
            return
        except Exception as e:
            log_data["message"] = "Unknown Exception occurred while generating changes"
            log.error(log_data, exc_info=True)
            stats.count(f"{log_data['function']}.exception", tags={"user": self.user})
            sentry_sdk.capture_exception(tags={"user": self.user})
            self.write_error(500, message="Error generating changes: " + str(e))
            return
        log_data["message"] = "Successfully generated changes requested"
        log.info(log_data)
        stats.count(f"{log_data['function']}.success", tags={"user": self.user})
        self.write(change_model_array.json())
class GeneratePolicyHandler(BaseAPIV2Handler):
    """Handler for /api/v2/generate_policy

    Generates an AWS role / resource policy given a set of CRUD permissions
    """

    allowed_methods = ["GET", "POST"]

    async def get(self):
        # Serve the base inline policy template used by the self-service wizard.
        self.write(BASE_INLINE_POLICY)

    async def post(self):
        """
        POST /api/v2/generate_policy

        Determine which user is requesting access to which resource, and the type of access based on their
        selections in self-service
        """
        self.write(BASE_INLINE_POLICY)
        tags = {"user": self.user}
        # Fix: metrics/logs were previously emitted under "RequestsHandler.post",
        # colliding with the real RequestsHandler and corrupting its counters.
        stats.count("GeneratePolicyHandler.post", tags=tags)
        log_data = {
            "function": "GeneratePolicyHandler.post",
            "user": self.user,
            "message": "Creating request",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
        }
        log.debug(log_data)
        # NOTE(review): this writes the base policy AND then a 501 — policy
        # generation appears unimplemented; confirm the intended response shape.
        self.write_error(501, message="Create request")
class EligibleRoleHandler(BaseHandler):
    async def post(self):
        """
        Post to the index endpoint. This will generate a list of roles the user is eligible to access on the console
        ---
        description: Retrieves a user's eligible roles for AWS console access.
        responses:
            200:
                description: json list of roles
        """
        eligible = []
        for role_arn in self.eligible_roles:
            role_name = role_arn.split("/")[-1]
            acct_id = role_arn.split(":")[4]
            acct_name = self.eligible_accounts.get(acct_id, "")
            # The display name is rendered from a configurable template.
            display_name = config.get(
                "role_select_page.formatted_account_name", "{account_name}"
            ).format(account_name=acct_name, account_id=acct_id)
            eligible.append(
                {
                    "arn": role_arn,
                    "account_name": display_name,
                    "account_id": acct_id,
                    "role_name": f"[{role_name}](/policies/edit/{acct_id}/iamrole/{role_name})",
                    "redirect_uri": f"/role/{role_arn}",
                }
            )
        # Default sort by account name
        eligible.sort(key=lambda entry: entry.get("account_name", 0))
        row_count = len(eligible)
        self.set_header("Content-Type", "application/json")
        table = DataTableResponse(
            totalCount=row_count, filteredCount=row_count, data=eligible
        )
        self.write(table.json())
        await self.finish()
class EligibleRolePageConfigHandler(BaseHandler):
    async def get(self):
        """
        /eligible_roles_page_config
        ---
        get:
            description: Retrieve Role Page Configuration
            responses:
                200:
                    description: Returns Role Page Configuration
        """
        # Column layout for the role-selection data table.
        columns = [
            {
                "placeholder": "AWS Console Sign-In",
                "key": "redirect_uri",
                "type": "button",
                "icon": "sign-in",
                "content": "Sign-In",
                "onClick": {"action": "redirect"},
                "style": {"maxWidth": "300px"},
            },
            {
                "placeholder": "Account Name",
                "key": "account_name",
                "type": "input",
            },
            {"placeholder": "Role Name", "key": "role_name", "type": "link"},
            {"placeholder": "Account ID", "key": "account_id", "type": "input"},
        ]
        default_page_configuration = {
            "pageName": "Select a Role",
            "pageDescription": config.get(
                "role_select_page.table_description",
                "Select a role to login to the AWS console.",
            ),
            "tableConfig": {
                "expandableRows": True,
                "dataEndpoint": "/api/v2/eligible_roles",
                "sortable": False,
                "totalRows": 1000,
                "rowsPerPage": 50,
                "serverSideFiltering": False,
                "allowCsvExport": False,
                "allowJsonExport": False,
                "columns": columns,
            },
        }
        # Deployments may override the entire table configuration.
        self.write(
            config.get(
                "role_table_config.table_configuration_override",
                default_page_configuration,
            )
        )
class FrontendHandler(tornado.web.StaticFileHandler):
    """Static file handler that falls back to the default file (SPA entry point)
    when the requested path does not exist."""

    def validate_absolute_path(self, root, absolute_path):
        # Run the stock validation first; only intervene on a 404 when a
        # default file has been configured.
        try:
            return super().validate_absolute_path(root, absolute_path)
        except tornado.web.HTTPError as exc:
            can_fall_back = (
                exc.status_code == 404 and self.default_filename is not None
            )
            if not can_fall_back:
                raise exc
            fallback_path = self.get_absolute_path(self.root, self.default_filename)
            return super().validate_absolute_path(root, fallback_path)
class LogOutHandler(BaseHandler):
    """Handler for logging a user out by clearing ConsoleMe's auth cookies."""

    async def get(self):
        """
        Clear the configured auth cookie (plus any extra auth cookies) and
        return a redirect response to the configured post-logout URL.

        Returns a 400 error response when cookie-based auth is not configured,
        since there is nothing to clear in that case.
        """
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Attempting to log out user",
            "user-agent": self.request.headers.get("User-Agent"),
            "ip": self.ip,
        }
        # Logout is only meaningful when ConsoleMe itself sets auth cookies.
        if not config.get("auth.set_auth_cookie"):
            await handle_generic_error_response(
                self,
                "Unable to log out",
                [
                    (
                        "Configuration value `auth.set_auth_cookie` is not enabled. "
                        "ConsoleMe isn't able to delete an auth cookie if setting auth "
                        "cookies is not enabled."
                    )
                ],
                400,
                "logout_failure",
                log_data,
            )
            return
        cookie_name: str = config.get("auth_cookie_name", "consoleme_auth")
        # Defensive: an explicit empty/None override of the cookie name also
        # makes logout impossible.
        if not cookie_name:
            await handle_generic_error_response(
                self,
                "Unable to log out",
                [
                    (
                        "Configuration value `auth_cookie_name` is not set. "
                        "ConsoleMe isn't able to delete an auth cookie if the auth cookie name "
                        "is not known."
                    )
                ],
                400,
                "logout_failure",
                log_data,
            )
            return
        self.clear_cookie(cookie_name)
        # Some deployments set additional auth cookies; clear those as well.
        extra_auth_cookies: list = config.get("auth.extra_auth_cookies", [])
        for cookie in extra_auth_cookies:
            self.clear_cookie(cookie)
        redirect_url: str = config.get("auth.logout_redirect_url", "/")
        res = WebResponse(
            status="redirect",
            redirect_url=redirect_url,
            status_code=200,
            reason="logout_redirect",
            message="User has successfully logged out. Redirecting to landing page",
        )
        log.debug({**log_data, "message": "Successfully logged out user."})
        self.write(res.json())
class ManagedPoliciesOnPrincipalHandler(BaseAPIV2Handler):
    """
    Handler for /api/v2/managed_policies_on_principal/{arn}

    Returns managed policy and latest policy version information for a principal
    """

    async def get(self, arn):
        # Contractors may not view policies unless explicitly allowed to bypass.
        if config.get("policy_editor.disallow_contractors", True) and self.contractor:
            if self.user not in config.get(
                "groups.can_bypass_contractor_restrictions", []
            ):
                raise MustBeFte("Only FTEs are authorized to view this page.")
        errors = []
        if not arn.startswith("arn:aws:iam::"):
            errors.append("ARN must start with 'arn:aws:iam::'")
        # Parse the ARN defensively. Previously, an ARN with fewer than six
        # colon-separated sections raised an unhandled IndexError (HTTP 500)
        # here, before the validation errors collected above could be returned
        # to the client as a 400.
        principal_name = tornado.escape.xhtml_escape(arn.split("/")[-1])
        arn_parts = arn.split(":")
        if len(arn_parts) >= 6:
            principal_type = tornado.escape.xhtml_escape(arn_parts[5].split("/")[0])
            account_id = tornado.escape.xhtml_escape(arn_parts[4])
        else:
            principal_type = ""
            account_id = ""
            errors.append("ARN is malformed: expected 6 colon-separated sections")
        if principal_type not in ["role", "user"]:
            errors.append(f"Principal type must be role or user. not {principal_type}")
        log_data = {
            # Fix: was mislabeled "ManagedPoliciesOnRoleHandler.get" (stale class name).
            "function": "ManagedPoliciesOnPrincipalHandler.get",
            "user": self.user,
            "ip": self.ip,
            "message": "Retrieving managed policies for role",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "account_id": account_id,
            "principal_name": principal_name,
            "principal_type": principal_type,
        }
        log.debug(log_data)
        if errors:
            log.error(
                {**log_data, "errors": errors, "message": "Unable to process request"}
            )
            res = WebResponse(
                status=Status2.error,
                reason="bad_request",
                status_code=400,
                errors=errors,
            )
            self.write(res.json())
            return
        if principal_type == "role":
            managed_policy_details = await sync_to_async(
                get_role_managed_policy_documents
            )(
                {"RoleName": principal_name},
                account_number=account_id,
                assume_role=config.get("policies.role_name"),
                region=config.region,
                retry_max_attempts=2,
                client_kwargs=config.get("boto3.client_kwargs", {}),
            )
        elif principal_type == "user":
            managed_policy_details = await sync_to_async(
                get_user_managed_policy_documents
            )(
                {"UserName": principal_name},
                account_number=account_id,
                assume_role=config.get("policies.role_name"),
                region=config.region,
                retry_max_attempts=2,
                client_kwargs=config.get("boto3.client_kwargs", {}),
            )
        else:
            # Unreachable after the validation above; kept as a defensive guard.
            raise Exception("Invalid principal type")
        res = WebResponse(
            status=Status2.success,
            status_code=200,
            data=managed_policy_details,
        )
        self.write(res.json())
class ManagedPoliciesHandler(BaseAPIV2Handler):
    """
    Handler for /api/v2/managed_policies/{policyArn}

    Returns details about the specified managed policy
    """

    async def get(self, policy_arn: str):
        # Contractors may not view policies unless explicitly allowed to bypass.
        if config.get("policy_editor.disallow_contractors", True) and self.contractor:
            if self.user not in config.get(
                "groups.can_bypass_contractor_restrictions", []
            ):
                raise MustBeFte("Only FTEs are authorized to view this page.")
        account_id = policy_arn.split(":")[4]
        policy_name = policy_arn.split("/")[-1]
        log.debug(
            {
                "function": "ManagedPoliciesHandler.get",
                "user": self.user,
                "ip": self.ip,
                "message": "Retrieving managed policy",
                "user-agent": self.request.headers.get("User-Agent"),
                "request_id": self.request_uuid,
                "account_id": account_id,
                "policy_name": policy_name,
                "policy_arn": policy_arn,
            }
        )
        # get_managed_policy_document is synchronous boto work; run it off-loop.
        policy_document = await sync_to_async(get_managed_policy_document)(
            policy_arn=policy_arn,
            account_number=account_id,
            assume_role=config.get("policies.role_name"),
            region=config.region,
            retry_max_attempts=2,
            client_kwargs=config.get("boto3.client_kwargs", {}),
        )
        result = WebResponse(
            status=Status2.success,
            status_code=200,
            data=policy_document,
        )
        self.write(result.json())
class ManagedPoliciesForAccountHandler(BaseAPIV2Handler):
    async def get(self, account_id):
        """
        Retrieve a list of managed policies for an account.
        """
        # Contractors may not view policies unless explicitly allowed to bypass.
        if config.get("policy_editor.disallow_contractors", True) and self.contractor:
            if self.user not in config.get(
                "groups.can_bypass_contractor_restrictions", []
            ):
                raise MustBeFte("Only FTEs are authorized to view this page.")
        # Best effort: on any lookup failure, report to Sentry and return an
        # empty list rather than failing the request.
        managed_policies = []
        try:
            managed_policies = await get_all_iam_managed_policies_for_account(
                account_id
            )
        except Exception:
            sentry_sdk.capture_exception()
        self.write(json.dumps(managed_policies))
class NotificationsHandler(BaseAPIV2Handler):
    """
    A web handler for serving, updating, and (in the future) creating notifications. Current notifications are based
    around policy generation from CloudTrail errors.
    """

    async def get(self):
        """
        GET /api/v2/notifications

        Return the caller's notifications and unread count in a WebResponse.
        Any failure is reported to Sentry and surfaced as a 500 WebResponse.
        """
        try:
            notification_response: GetNotificationsForUserResponse = (
                await get_notifications_for_user(self.user, self.groups)
            )
            notifications: List[
                ConsoleMeUserNotification
            ] = notification_response.notifications
            response = WebResponse(
                status="success",
                status_code=200,
                data={
                    "unreadNotificationCount": notification_response.unread_count,
                    "notifications": notifications,
                },
            )
            self.write(response.json())
        except Exception as e:
            sentry_sdk.capture_exception()
            self.set_status(500)
            response = WebResponse(
                status=Status2.error, status_code=500, errors=[str(e)], data=[]
            )
            self.write(response.json())
            return

    async def post(self):
        # Create a notification
        raise NotImplementedError()

    async def put(self):
        """
        Allows an "authorized user" (Any user the notification is intended for) to mark the notification as read/unread
        or hidden/unhidden for themselves or all other notification recipients

        :return:
        """
        change = ConsoleMeNotificationUpdateRequest.parse_raw(self.request.body)
        # NOTE(review): errors collected here are never returned to the client;
        # the handler responds with the refreshed notification list regardless.
        # Confirm whether per-notification errors should be surfaced.
        errors = []
        for untrusted_notification in change.notifications:
            # Look up the stored notification by its predictable ID; never trust
            # the client-supplied copy of the notification body.
            notification = await fetch_notification(
                untrusted_notification.predictable_id
            )
            if not notification:
                errors.append("Unable to find matching notification")
                continue
            # Only users/groups the notification targets may modify it.
            authorized = is_in_group(
                self.user, self.groups, notification.users_or_groups
            )
            if not authorized:
                errors.append(
                    f"Unauthorized because user is not associated with notification: {notification.predictable_id}"
                )
                continue
            if (
                change.action
                == ConsoleMeNotificationUpdateAction.toggle_read_for_current_user
            ):
                if self.user in notification.read_by_users:
                    # Mark as unread
                    notification.read_by_users.remove(self.user)
                else:
                    # Mark as read
                    notification.read_by_users.append(self.user)
            elif (
                change.action
                == ConsoleMeNotificationUpdateAction.toggle_read_for_all_users
            ):
                # Mark or unmark notification as `read_by_all`. If unmarked,
                # ConsoleMe will fall back to `notification.read_by_user` to determine if
                # a given user has read the notification
                notification.read_by_all = not notification.read_by_all
            elif (
                change.action
                == ConsoleMeNotificationUpdateAction.toggle_hidden_for_current_user
            ):
                if self.user in notification.hidden_for_users:
                    # Unmark as hidden
                    notification.hidden_for_users.remove(self.user)
                else:
                    # Mark as hidden
                    notification.hidden_for_users.append(self.user)
            elif (
                change.action
                == ConsoleMeNotificationUpdateAction.toggle_hidden_for_all_users
            ):
                # Mark or unmark as "Hidden for all users". If unmarked, falls back to `hidden_for_users.read_by_user`
                # to determine whether to show the notification to a given user
                notification.hidden_for_all = not notification.hidden_for_all
            else:
                raise Exception("Unknown or unsupported change action.")
            # Persist the mutated notification before processing the next one.
            await write_notification(notification)
        try:
            # Retrieve and return updated notifications for user
            notification_response: GetNotificationsForUserResponse = (
                await get_notifications_for_user(
                    self.user, self.groups, force_refresh=True
                )
            )
            notifications: List[
                ConsoleMeUserNotification
            ] = notification_response.notifications
            response = WebResponse(
                status="success",
                status_code=200,
                data={
                    "unreadNotificationCount": notification_response.unread_count,
                    "notifications": notifications,
                },
            )
            self.write(response.json())
        except Exception as e:
            sentry_sdk.capture_exception()
            self.set_status(500)
            response = WebResponse(
                status=Status2.error, status_code=500, errors=[str(e)], data=[]
            )
            self.write(response.json())
            return
class PoliciesPageConfigHandler(BaseHandler):
    async def get(self):
        """
        /api/v2/policies_page_config
        ---
        get:
            description: Retrieve Policies Page Configuration
            responses:
                200:
                    description: Returns Policies Page Configuration
        """
        # Column layout for the policies data table.
        columns = [
            {
                "placeholder": "Account ID",
                "key": "account_id",
                "type": "input",
                "style": {"width": "110px"},
            },
            {
                "placeholder": "Account",
                "key": "account_name",
                "type": "input",
                "style": {"width": "90px"},
            },
            {
                "placeholder": "Resource",
                "key": "arn",
                "type": "link",
                "width": 6,
                "style": {"whiteSpace": "normal", "wordBreak": "break-all"},
            },
            {
                "placeholder": "Tech",
                "key": "technology",
                "type": "input",
                "style": {"width": "70px"},
            },
        ]
        default_configuration = {
            "pageName": "Policies",
            "pageDescription": "View all of the AWS Resources we know about.",
            "tableConfig": {
                "expandableRows": True,
                "dataEndpoint": "/api/v2/policies?markdown=true",
                "sortable": False,
                "totalRows": 1000,
                "rowsPerPage": 50,
                "serverSideFiltering": True,
                "allowCsvExport": True,
                "allowJsonExport": True,
                "columns": columns,
            },
        }
        # Deployments may override the entire table configuration.
        self.write(
            config.get(
                "PoliciesTableConfigHandler.configuration", default_configuration
            )
        )
class PoliciesHandler(BaseAPIV2Handler):
    """Handler for /api/v2/policies

    Api endpoint to list and filter policy requests.
    """

    allowed_methods = ["POST"]

    async def post(self):
        """
        POST /api/v2/policies

        Serve the policies table: load the cached list of all known policies
        from Redis (falling back to S3), apply the client-supplied filters from
        the JSON body, and return a DataTableResponse. When the `markdown`
        query argument is set, ARN and template fields are rewritten as
        markdown links for the UI.
        """
        arguments = {k: self.get_argument(k) for k in self.request.arguments}
        markdown = arguments.get("markdown")
        # The query-string arguments are replaced by the JSON body here; only
        # `markdown` survives from the query string.
        arguments = json.loads(self.request.body)
        filters = arguments.get("filters")
        limit = arguments.get("limit", 1000)
        tags = {"user": self.user}
        stats.count("PoliciesHandler.post", tags=tags)
        log_data = {
            "function": "PoliciesHandler.post",
            "user": self.user,
            "message": "Writing policies",
            "limit": limit,
            "filters": filters,
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
        }
        log.debug(log_data)
        policies = await retrieve_json_data_from_redis_or_s3(
            redis_key=config.get("policies.redis_policies_key", "ALL_POLICIES"),
            s3_bucket=config.get("cache_policies_table_details.s3.bucket"),
            s3_key=config.get(
                "cache_policies_table_details.s3.file",
                "policies_table/cache_policies_table_details_v1.json.gz",
            ),
            default=[],
        )
        total_count = len(policies)
        if filters:
            try:
                # Bound filtering time so a pathological filter cannot hang the handler.
                with Timeout(seconds=5):
                    for filter_key, filter_value in filters.items():
                        policies = await filter_table(
                            filter_key, filter_value, policies
                        )
            except TimeoutError:
                self.write("Query took too long to run. Check your filter.")
                await self.finish()
                raise
        if markdown:
            policies_to_write = []
            for policy in policies[0:limit]:
                # Resource name is the last path segment of the ARN's resource part.
                resource_name = policy.get("arn").split(":")[5]
                if "/" in resource_name:
                    resource_name = resource_name.split("/")[-1]
                region = policy["arn"].split(":")[3]
                try:
                    url = await get_url_for_resource(
                        policy["arn"],
                        policy["technology"],
                        policy["account_id"],
                        region,
                        resource_name,
                    )
                except ResourceNotFound:
                    url = ""
                if url:
                    # Render the ARN as a markdown link to the resource's policy editor.
                    policy["arn"] = f"[{policy['arn']}]({url})"
                if not policy.get("templated"):
                    policy["templated"] = "N/A"
                else:
                    if "/" in policy["templated"]:
                        link_name = policy["templated"].split("/")[-1]
                        policy["templated"] = f"[{link_name}]({policy['templated']})"
                policies_to_write.append(policy)
        else:
            policies_to_write = policies[0:limit]
        filtered_count = len(policies_to_write)
        res = DataTableResponse(
            totalCount=total_count, filteredCount=filtered_count, data=policies_to_write
        )
        self.write(res.json())
        return
class CheckPoliciesHandler(BaseAPIV2Handler):
    """Handler for /api/v2/policies/check — lint a candidate IAM policy and
    return the validation findings."""

    async def post(self):
        """
        POST /api/v2/policies/check
        """
        supplied_policy = tornado.escape.json_decode(self.request.body)
        # The validator wants a JSON string, so re-serialize decoded objects.
        if isinstance(supplied_policy, dict):
            supplied_policy = json.dumps(supplied_policy)
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user": self.user,
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "policy": supplied_policy,
        }
        findings = await validate_iam_policy(supplied_policy, log_data)
        self.write(json.dumps(findings))
        return
class RequestHandler(BaseAPIV2Handler):
    """Handler for /api/v2/request

    Allows for creation of a request.
    """

    allowed_methods = ["POST"]

    def on_finish(self) -> None:
        if self.request.method != "POST":
            return
        # A new request may invalidate cached request/credential data; refresh it.
        celery_app.send_task(
            "consoleme.celery_tasks.celery_tasks.cache_policy_requests"
        )
        celery_app.send_task(
            "consoleme.celery_tasks.celery_tasks.cache_credential_authorization_mapping"
        )

    async def post(self):
        """
        POST /api/v2/request

        Create a policy request. The request body must validate against
        ``RequestCreationModel`` (models.py) — a set of changes (inline_policy,
        assume_role_policy, managed_policy, ...) against a principal, plus a
        justification, e.g.:

        {
            "changes": {
                "changes": [
                    {
                        "principal": {
                            "principal_arn": "arn:aws:iam::123456789012:role/aRole",
                            "principal_type": "AwsResource"
                        },
                        "change_type": "inline_policy",
                        "action": "attach",
                        "policy": {"policy_document": {"Version": "2012-10-17", "Statement": [...]}}
                    }
                ]
            },
            "justification": "testing this out.",
            "admin_auto_approve": false
        }

        The response body is a ``RequestCreationResponse`` (models.py):

        {
            "errors": 1,
            "request_created": true,
            "request_id": "0c9fb298-c8ea-4d50-917c-3212da07b3ad",
            "action_results": [
                {"status": "success", "message": "Success description"},
                {"status": "error", "message": "Error description"}
            ]
        }
        """
        if config.get("policy_editor.disallow_contractors", True) and self.contractor:
            if self.user not in config.get(
                "groups.can_bypass_contractor_restrictions", []
            ):
                raise MustBeFte("Only FTEs are authorized to view this page.")
        tags = {"user": self.user}
        stats.count("RequestHandler.post", tags=tags)
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user": self.user,
            "message": "Create request initialization",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "ip": self.ip,
            "admin_auto_approved": False,
            "probe_auto_approved": False,
        }
        log.debug(log_data)
        try:
            # Validate the model
            changes = RequestCreationModel.parse_raw(self.request.body)
            extended_request = await generate_request_from_change_model_array(
                changes, self.user
            )
            log_data["request"] = extended_request.dict()
            log.debug(log_data)
            # Dry runs return the generated request without persisting anything.
            if changes.dry_run:
                response = RequestCreationResponse(
                    errors=0, request_created=False, extended_request=extended_request
                )
                self.write(response.json())
                await self.finish()
                return
            admin_approved = False
            approval_probe_approved = False
            if extended_request.principal.principal_type == "AwsResource":
                # TODO: Provide a note to the requester that admin_auto_approve will apply the requested policies only.
                # It will not automatically apply generated policies. The administrative user will need to visit
                # the policy request page to do this manually.
                if changes.admin_auto_approve:
                    # make sure user is allowed to use admin_auto_approve
                    # Bug fix: this was previously assigned as a one-element tuple
                    # (trailing comma), which is always truthy — the authorization
                    # check below could never fail, letting any user self-approve.
                    can_manage_policy_request = can_admin_policies(
                        self.user, self.groups
                    )
                    if can_manage_policy_request:
                        extended_request.request_status = RequestStatus.approved
                        admin_approved = True
                        extended_request.reviewer = self.user
                        self_approval_comment = CommentModel(
                            id=str(uuid.uuid4()),
                            timestamp=int(time.time()),
                            user_email=self.user,
                            user=extended_request.requester_info,
                            last_modified=int(time.time()),
                            text=f"Self-approved by admin: {self.user}",
                        )
                        extended_request.comments.append(self_approval_comment)
                        log_data["admin_auto_approved"] = True
                        log_data["request"] = extended_request.dict()
                        log.debug(log_data)
                        stats.count(
                            f"{log_data['function']}.post.admin_auto_approved",
                            tags={"user": self.user},
                        )
                    else:
                        # someone is trying to use admin bypass without being an admin, don't allow request to proceed
                        stats.count(
                            f"{log_data['function']}.post.unauthorized_admin_bypass",
                            tags={"user": self.user},
                        )
                        log_data[
                            "message"
                        ] = "Unauthorized user trying to use admin bypass"
                        log.error(log_data)
                        await write_json_error("Unauthorized", obj=self)
                        return
                else:
                    # If admin auto approve is false, check for auto-approve probe eligibility
                    is_eligible_for_auto_approve_probe = (
                        await is_request_eligible_for_auto_approval(
                            extended_request, self.user
                        )
                    )
                    # If we have only made requests that are eligible for auto-approval probe, check against them
                    if is_eligible_for_auto_approve_probe:
                        should_auto_approve_request = (
                            await should_auto_approve_policy_v2(
                                extended_request, self.user, self.groups
                            )
                        )
                        if should_auto_approve_request["approved"]:
                            extended_request.request_status = RequestStatus.approved
                            approval_probe_approved = True
                            stats.count(
                                f"{log_data['function']}.probe_auto_approved",
                                tags={"user": self.user},
                            )
                            approving_probes = []
                            for approving_probe in should_auto_approve_request[
                                "approving_probes"
                            ]:
                                approving_probe_comment = CommentModel(
                                    id=str(uuid.uuid4()),
                                    timestamp=int(time.time()),
                                    user_email=f"Auto-Approve Probe: {approving_probe['name']}",
                                    last_modified=int(time.time()),
                                    text=(
                                        f"Policy {approving_probe['policy']} auto-approved by probe: "
                                        f"{approving_probe['name']}"
                                    ),
                                )
                                extended_request.comments.append(
                                    approving_probe_comment
                                )
                                approving_probes.append(approving_probe["name"])
                            extended_request.reviewer = (
                                f"Auto-Approve Probe: {','.join(approving_probes)}"
                            )
                            log_data["probe_auto_approved"] = True
                            log_data["request"] = extended_request.dict()
                            log.debug(log_data)
            # Persist the new (possibly auto-approved) request.
            dynamo = UserDynamoHandler(self.user)
            request = await dynamo.write_policy_request_v2(extended_request)
            log_data["message"] = "New request created in Dynamo"
            log_data["request"] = extended_request.dict()
            log_data["dynamo_request"] = request
            log.debug(log_data)
        except (InvalidRequestParameter, ValidationError) as e:
            log_data["message"] = "Validation Exception"
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.validation_exception", tags={"user": self.user}
            )
            self.write_error(400, message="Error validating input: " + str(e))
            if config.get("development"):
                raise
            return
        except Exception as e:
            log_data["message"] = "Unknown Exception occurred while parsing request"
            log.error(log_data, exc_info=True)
            stats.count(f"{log_data['function']}.exception", tags={"user": self.user})
            sentry_sdk.capture_exception(tags={"user": self.user})
            self.write_error(500, message="Error parsing request: " + str(e))
            if config.get("development"):
                raise
            return
        request_url = await get_request_url(extended_request)
        # If here, request has been successfully created
        response = RequestCreationResponse(
            errors=0,
            request_created=True,
            request_id=extended_request.id,
            request_url=request_url,
            action_results=[],
            extended_request=extended_request,
        )
        # If approved is true due to an auto-approval probe or admin auto-approval, apply the non-autogenerated changes
        if extended_request.request_status == RequestStatus.approved:
            for change in extended_request.changes.changes:
                if change.autogenerated:
                    continue
                policy_request_modification_model = (
                    PolicyRequestModificationRequestModel.parse_obj(
                        {
                            "modification_model": {
                                "command": "apply_change",
                                "change_id": change.id,
                            }
                        }
                    )
                )
                policy_apply_response = (
                    await parse_and_apply_policy_request_modification(
                        extended_request,
                        policy_request_modification_model,
                        self.user,
                        self.groups,
                        int(time.time()),
                        approval_probe_approved,
                    )
                )
                response.errors = policy_apply_response.errors
                response.action_results = policy_apply_response.action_results
            # Update in dynamo
            await dynamo.write_policy_request_v2(extended_request)
            account_id = await get_resource_account(
                extended_request.principal.principal_arn
            )
            # Force a refresh of the role in Redis/DDB
            arn_parsed = parse_arn(extended_request.principal.principal_arn)
            if arn_parsed["service"] == "iam" and arn_parsed["resource"] == "role":
                await aws.fetch_iam_role(
                    account_id,
                    extended_request.principal.principal_arn,
                    force_refresh=True,
                )
            log_data["request"] = extended_request.dict()
            log_data["message"] = "Applied changes based on approved request"
            log_data["response"] = response.dict()
            log.debug(log_data)
        await aws.send_communications_new_policy_request(
            extended_request, admin_approved, approval_probe_approved
        )
        await send_slack_notification_new_policy_request(
            extended_request, admin_approved, approval_probe_approved
        )
        self.write(response.json())
        await self.finish()
        return
class RequestsHandler(BaseAPIV2Handler):
    """Handler for /api/v2/requests

    Api endpoint to list and filter policy requests.
    """

    allowed_methods = ["POST"]

    async def post(self):
        """
        POST /api/v2/requests

        Serve the policy-requests table: load all cached requests from Redis
        (falling back to S3), apply the client-supplied filters from the JSON
        body, and return a DataTableResponse. When the `markdown` query
        argument is set, request IDs and principal ARNs are rewritten as
        markdown links for the UI.
        """
        arguments = {k: self.get_argument(k) for k in self.request.arguments}
        markdown = arguments.get("markdown")
        cache_key = config.get(
            "cache_all_policy_requests.redis_key", "ALL_POLICY_REQUESTS"
        )
        s3_bucket = config.get("cache_policy_requests.s3.bucket")
        s3_key = config.get(
            "cache_policy_requests.s3.file",
            "policy_requests/all_policy_requests_v1.json.gz",
        )
        # The query-string arguments are replaced by the JSON body here; only
        # `markdown` survives from the query string.
        arguments = json.loads(self.request.body)
        filters = arguments.get("filters")
        # TODO: Add server-side sorting
        # sort = arguments.get("sort")
        limit = arguments.get("limit", 1000)
        tags = {"user": self.user}
        stats.count("RequestsHandler.post", tags=tags)
        log_data = {
            "function": "RequestsHandler.post",
            "user": self.user,
            "message": "Writing requests",
            "limit": limit,
            "filters": filters,
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
        }
        log.debug(log_data)
        requests = await retrieve_json_data_from_redis_or_s3(
            cache_key, s3_bucket=s3_bucket, s3_key=s3_key
        )
        total_count = len(requests)
        if filters:
            try:
                # Bound filtering time so a pathological filter cannot hang the handler.
                with Timeout(seconds=5):
                    for filter_key, filter_value in filters.items():
                        requests = await filter_table(
                            filter_key, filter_value, requests
                        )
            except TimeoutError:
                self.write("Query took too long to run. Check your filter.")
                await self.finish()
                raise
        if markdown:
            requests_to_write = []
            for request in requests[0:limit]:
                principal_arn = request.get("principal", {}).get("principal_arn", "")
                url = request.get("principal", {}).get("resource_url", "")
                resource_name = principal_arn
                if "/" in resource_name:
                    resource_name = resource_name.split("/")[-1]
                if not resource_name:
                    # Non-ARN principals (e.g. templates) carry a resource_identifier instead.
                    resource_name = request.get("principal", {}).get(
                        "resource_identifier"
                    )
                # Only attempt URL resolution for well-formed 6-section ARNs
                # that don't already have a resource_url.
                if principal_arn and principal_arn.count(":") == 5 and not url:
                    region = principal_arn.split(":")[3]
                    service_type = principal_arn.split(":")[2]
                    account_id = principal_arn.split(":")[4]
                    if request.get("principal", {}).get("principal_arn"):
                        try:
                            url = await get_url_for_resource(
                                principal_arn,
                                service_type,
                                account_id,
                                region,
                                resource_name,
                            )
                        except ResourceNotFound:
                            pass
                # Convert request_id and role ARN to link
                request_url = request.get("extended_request", {}).get("request_url")
                if not request_url:
                    request_url = f"/policies/request/{request['request_id']}"
                request["request_id"] = f"[{request['request_id']}]({request_url})"
                if url:
                    request["arn"] = f"[{principal_arn or resource_name}]({url})"
                requests_to_write.append(request)
        else:
            requests_to_write = requests[0:limit]
        filtered_count = len(requests_to_write)
        res = DataTableResponse(
            totalCount=total_count, filteredCount=filtered_count, data=requests_to_write
        )
        self.write(res.json())
        return
class RequestDetailHandler(BaseAPIV2Handler):
"""Handler for /api/v2/requests/{request_id}
Allows read and update access to a specific request.
"""
allowed_methods = ["GET", "PUT"]
def on_finish(self) -> None:
if self.request.method != "PUT":
return
celery_app.send_task(
"consoleme.celery_tasks.celery_tasks.cache_policy_requests"
)
celery_app.send_task(
"consoleme.celery_tasks.celery_tasks.cache_credential_authorization_mapping"
)
async def _get_extended_request(self, request_id, log_data):
dynamo = UserDynamoHandler(self.user)
requests = await dynamo.get_policy_requests(request_id=request_id)
if len(requests) == 0:
log_data["message"] = "Request with that ID not found"
log.warning(log_data)
stats.count(f"{log_data['function']}.not_found", tags={"user": self.user})
raise NoMatchingRequest(log_data["message"])
if len(requests) > 1:
log_data["message"] = "Multiple requests with that ID found"
log.error(log_data)
stats.count(
f"{log_data['function']}.multiple_requests_found",
tags={"user": self.user},
)
raise InvalidRequestParameter(log_data["message"])
request = requests[0]
if request.get("version") != "2":
# Request format is not compatible with this endpoint version
raise InvalidRequestParameter("Request with that ID is not a v2 request")
extended_request = ExtendedRequestModel.parse_obj(
request.get("extended_request")
)
return extended_request, request.get("last_updated")
async def get(self, request_id):
"""
GET /api/v2/requests/{request_id}
"""
tags = {"user": self.user}
stats.count("RequestDetailHandler.get", tags=tags)
log_data = {
"function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
"user": self.user,
"message": "Get request details",
"user-agent": self.request.headers.get("User-Agent"),
"request_id": self.request_uuid,
"policy_request_id": request_id,
}
log.debug(log_data)
if config.get("policy_editor.disallow_contractors", True) and self.contractor:
if self.user not in config.get(
"groups.can_bypass_contractor_restrictions", []
):
self.write_error(
403, message="Only FTEs are authorized to view this page."
)
return
try:
extended_request, last_updated = await self._get_extended_request(
request_id, log_data
)
except InvalidRequestParameter as e:
sentry_sdk.capture_exception(tags={"user": self.user})
self.write_error(400, message="Error validating input: " + str(e))
return
except NoMatchingRequest as e:
sentry_sdk.capture_exception(tags={"user": self.user})
self.write_error(404, message="Error getting request:" + str(e))
return
# Run these tasks concurrently.
concurrent_results = await asyncio.gather(
populate_old_policies(extended_request, self.user),
populate_cross_account_resource_policies(extended_request, self.user),
populate_old_managed_policies(extended_request, self.user),
)
extended_request = concurrent_results[0]
populate_cross_account_resource_policies_result = concurrent_results[1]
if populate_cross_account_resource_policies_result["changed"]:
extended_request = populate_cross_account_resource_policies_result[
"extended_request"
]
# Update in dynamo with the latest resource policy changes
dynamo = UserDynamoHandler(self.user)
updated_request = await dynamo.write_policy_request_v2(extended_request)
last_updated = updated_request.get("last_updated")
populate_old_managed_policies_result = concurrent_results[2]
if populate_old_managed_policies_result["changed"]:
extended_request = populate_old_managed_policies_result["extended_request"]
# Update in dynamo with the latest resource policy changes
dynamo = UserDynamoHandler(self.user)
updated_request = await dynamo.write_policy_request_v2(extended_request)
last_updated = updated_request.get("last_updated")
can_approve_reject = can_admin_policies(self.user, self.groups)
can_update_cancel = await can_update_cancel_requests_v2(
extended_request.requester_email, self.user, self.groups
)
can_move_back_to_pending = await can_move_back_to_pending_v2(
extended_request, last_updated, self.user, self.groups
)
# In the future request_specific_config will have specific approvers for specific changes based on ABAC
request_specific_config = {
"can_approve_reject": can_approve_reject,
"can_update_cancel": can_update_cancel,
"can_move_back_to_pending": can_move_back_to_pending,
}
template = None
# Force a refresh of the role in Redis/DDB
arn_parsed = parse_arn(extended_request.principal.principal_arn)
if arn_parsed["service"] == "iam" and arn_parsed["resource"] == "role":
iam_role = await aws.fetch_iam_role(
arn_parsed["account"], extended_request.principal.principal_arn
)
template = iam_role.get("templated")
response = {
"request": extended_request.json(),
"last_updated": last_updated,
"request_config": request_specific_config,
"template": template,
}
self.write(response)
    async def put(self, request_id):
        """
        PUT /api/v2/requests/{request_id}

        Apply a modification (e.g. approve/reject/cancel/update a change) to an
        existing policy request.

        :param request_id: ID of the policy request to modify (path parameter)
        """
        tags = {"user": self.user}
        stats.count("RequestDetailHandler.put", tags=tags)
        log_data = {
            "function": "RequestDetailHandler.put",
            "user": self.user,
            "message": "Incoming request",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "policy_request_id": request_id,
        }
        log.debug(log_data)
        # Contractors may be barred from the policy editor unless they belong to
        # an explicit bypass group.
        if config.get("policy_editor.disallow_contractors", True) and self.contractor:
            if self.user not in config.get(
                "groups.can_bypass_contractor_restrictions", []
            ):
                raise MustBeFte("Only FTEs are authorized to view this page.")
        try:
            # Validate the request body
            request_changes = PolicyRequestModificationRequestModel.parse_raw(
                self.request.body
            )
            log_data["message"] = "Parsed request body"
            log_data["request"] = request_changes.dict()
            log.debug(log_data)
            extended_request, last_updated = await self._get_extended_request(
                request_id, log_data
            )
            response = await parse_and_apply_policy_request_modification(
                extended_request, request_changes, self.user, self.groups, last_updated
            )
        except (NoMatchingRequest, InvalidRequestParameter, ValidationError) as e:
            log_data["message"] = "Validation Exception"
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception(tags={"user": self.user})
            stats.count(
                f"{log_data['function']}.validation_exception", tags={"user": self.user}
            )
            self.write_error(400, message="Error validating input: " + str(e))
            # In development, re-raise after the error response has been written
            # so the full traceback surfaces for debugging.
            if config.get("development"):
                raise
            return
        except Unauthorized as e:
            log_data["message"] = "Unauthorized"
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception(tags={"user": self.user})
            stats.count(
                f"{log_data['function']}.unauthorized", tags={"user": self.user}
            )
            self.write_error(403, message=str(e))
            if config.get("development"):
                raise
            return
        self.write(response.json())
        await self.finish()
        return
class RequestsPageConfigHandler(BaseHandler):
    async def get(self):
        """
        /requests_page_config
        ---
        get:
            description: Retrieve Requests Page Configuration
        responses:
            200:
                description: Returns Requests Page Configuration
        """
        # Column layout of the requests table shown in the UI.
        columns = [
            {
                "placeholder": "Username",
                "key": "username",
                "type": "input",
                "style": {"width": "100px"},
            },
            {
                "placeholder": "Arn",
                "key": "arn",
                "type": "link",
                "style": {"whiteSpace": "normal", "wordBreak": "break-all"},
                "width": 3,
            },
            {
                "placeholder": "Request Time",
                "key": "request_time",
                "type": "daterange",
            },
            {
                "placeholder": "Status",
                "key": "status",
                "type": "dropdown",
                "style": {"width": "90px"},
            },
            {
                "placeholder": "Request ID",
                "key": "request_id",
                "type": "link",
                "style": {"whiteSpace": "normal", "wordBreak": "break-all"},
                "width": 2,
            },
        ]
        default_configuration = {
            "pageName": "Requests",
            "pageDescription": "View all IAM policy requests created through ConsoleMe",
            "tableConfig": {
                "expandableRows": True,
                "dataEndpoint": "/api/v2/requests?markdown=true",
                "sortable": False,
                "totalRows": 200,
                "rowsPerPage": 50,
                "serverSideFiltering": True,
                "allowCsvExport": True,
                "allowJsonExport": True,
                "columns": columns,
            },
        }
        # Operators can override the default table layout via configuration.
        self.write(
            config.get("RequestsTableConfigHandler.configuration", default_configuration)
        )
class ResourceDetailHandler(BaseAPIV2Handler):
    async def get(self, account_id, resource_type, region=None, resource_name=None):
        """
        Return details about a single cloud resource for the policy editor UI.

        :param account_id: AWS account the resource lives in
        :param resource_type: service name (e.g. "s3", "sqs", "managed_policy")
        :param region: AWS region; for managed policies this carries the policy path
        :param resource_name: name of the resource
        """
        if not self.user:
            return
        if config.get("policy_editor.disallow_contractors", True) and self.contractor:
            if self.user not in config.get(
                "groups.can_bypass_contractor_restrictions", []
            ):
                raise MustBeFte("Only FTEs are authorized to view this page.")
        read_only = False
        # Bug fix: a stray trailing comma previously made this a 1-tuple
        # ((bool,)), which is always truthy regardless of the permission check
        # and serialized to the client as a list instead of a boolean.
        can_save_delete = can_admin_policies(self.user, self.groups)
        account_id_for_arn: str = account_id
        if resource_type == "s3":
            # S3 bucket ARNs carry no account ID component
            account_id_for_arn = ""
        arn = f"arn:aws:{resource_type}:{region or ''}:{account_id_for_arn}:{resource_name}"
        path = ""
        if resource_type == "managed_policy":
            # special case for managed policies: "region" carries the policy path
            path = region or ""
            if path:
                arn = f"arn:aws:iam::{account_id}:policy/{path}/{resource_name}"
            else:
                arn = f"arn:aws:iam::{account_id}:policy/{resource_name}"
        stats.count(
            "ResourcePolicyEditHandler.get", tags={"user": self.user, "arn": arn}
        )
        log_data = {
            "user": self.user,
            "ip": self.ip,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Incoming request",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "arn": arn,
        }
        log.debug(log_data)
        error = ""
        try:
            resource_details = await fetch_resource_details(
                account_id, resource_type, resource_name, region, path
            )
        except Exception as e:
            sentry_sdk.capture_exception()
            log.error({**log_data, "error": e}, exc_info=True)
            resource_details = None
            error = str(e)
        if not resource_details:
            # Bug fix: the message was previously wrapped in a 1-tuple due to a
            # trailing comma, producing a malformed error payload.
            self.send_error(
                404,
                message=(
                    f"Unable to retrieve the specified {resource_type} resource: "
                    f"{account_id}/{resource_name}/{region}. {error}"
                ),
            )
            return
        # TODO: Get S3 errors for s3 buckets only, else CT errors
        yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y%m%d")
        s3_query_url = None
        if resource_type == "s3":
            s3_query_url = config.get("s3.bucket_query_url")
        all_s3_errors = None
        if s3_query_url:
            s3_query_url = s3_query_url.format(
                yesterday=yesterday, bucket_name=f"'{resource_name}'"
            )
            s3_error_topic = config.get("redis.s3_errors", "S3_ERRORS")
            all_s3_errors = self.red.get(s3_error_topic)
        s3_errors = []
        if all_s3_errors:
            s3_errors = json.loads(all_s3_errors).get(arn, [])
        account_ids_to_name = await get_account_id_to_name_mapping()
        # TODO(ccastrapel/psanders): Make a Swagger spec for this
        self.write(
            dict(
                arn=arn,
                resource_details=resource_details,
                account_id=account_id,
                account_name=account_ids_to_name.get(account_id, None),
                read_only=read_only,
                can_save_delete=can_save_delete,
                s3_errors=s3_errors,
                error_url=s3_query_url,
                config_timeline_url=resource_details.get("config_timeline_url"),
            )
        )
class GetResourceURLHandler(BaseMtlsHandler):
    """consoleme CLI resource URL handler. Parameters accepted: arn."""
    def initialize(self):
        # Populated per-request; mTLS callers authenticate via certificate.
        self.user: str = None
        self.eligible_roles: list = []
    async def get(self):
        """
        /api/v2/get_resource_url - Endpoint used to get an URL from an ARN
        ---
        get:
            description: Get the resource URL for ConsoleMe, given an ARN
            responses:
                200:
                    description: Returns a URL generated from the ARN in JSON form
                400:
                    description: Malformed Request
                403:
                    description: Forbidden
        """
        self.user: str = self.requester["email"]
        arn: str = self.get_argument("arn", None)
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user": self.user,
            "arn": arn,
            "message": "Generating URL for resource",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
        }
        log.debug(log_data)
        stats.count("GetResourceURL.get", tags={"user": self.user})
        if not arn:
            # NOTE(review): responds 404 for a missing parameter where 400 might
            # be expected (docstring lists 400 for malformed requests) — confirm.
            generic_error_message: str = "Missing required parameter"
            errors = ["arn is a required parameter"]
            await handle_generic_error_response(
                self, generic_error_message, errors, 404, "missing_data", log_data
            )
            return
        try:
            # parse_arn will raise an exception on invalid arns
            parse_arn(arn)
            resources_from_aws_config_redis_key = config.get(
                "aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
            )
            if not red.exists(resources_from_aws_config_redis_key):
                # This will force a refresh of our redis cache if the data exists in S3
                await retrieve_json_data_from_redis_or_s3(
                    redis_key=resources_from_aws_config_redis_key,
                    s3_bucket=config.get("aws_config_cache_combined.s3.bucket"),
                    s3_key=config.get(
                        "aws_config_cache_combined.s3.file",
                        "aws_config_cache_combined/aws_config_resource_cache_combined_v1.json.gz",
                    ),
                    redis_data_type="hash",
                )
            # The resource must be known to the organization-wide cache before
            # we attempt to build a URL for it.
            resource_info = await redis_hget(resources_from_aws_config_redis_key, arn)
            if not resource_info:
                raise ValueError("Resource not found in organization cache")
            resource_url = await get_url_for_resource(arn)
            if not resource_url:
                raise ValueError("This resource type is currently not supported")
        except (ResourceNotFound, ValueError) as e:
            generic_error_message: str = "Unsupported data"
            errors = [str(e)]
            await handle_generic_error_response(
                self, generic_error_message, errors, 404, "invalid_data", log_data
            )
            return
        except Exception as e:
            # Catch-all: parse_arn failures and unexpected errors surface here.
            generic_error_message: str = "Malformed data"
            errors = [str(e)]
            await handle_generic_error_response(
                self, generic_error_message, errors, 404, "malformed_data", log_data
            )
            return
        res = WebResponse(
            status="success",
            status_code=200,
            message="Successfully generated URL for ARN",
            data={"url": resource_url},
        )
        self.write(res.json())
        await self.finish()
class RoleConsoleLoginHandler(BaseAPIV2Handler):
    async def get(self, role=None):
        """
        Attempt to retrieve credentials and redirect the user to the AWS Console
        ---
        description: Retrieves credentials and redirects user to AWS console.
        responses:
            302:
                description: Redirects to AWS console
        """
        arguments = {k: self.get_argument(k) for k in self.request.arguments}
        role = role.lower()
        # Roles the requesting user is eligible for that match the search string.
        selected_roles = await group_mapping.filter_eligible_roles(role, self)
        region = arguments.get("r", config.get("aws.region", "us-east-1"))
        redirect = arguments.get("redirect")
        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "role": role,
            "region": region,
            "redirect": redirect,
            "ip": self.ip,
        }
        log_data["role"] = role
        if not selected_roles:
            # Not authorized
            stats.count(
                "RoleConsoleLoginHandler.post",
                tags={
                    "user": self.user,
                    "role": role,
                    "authorized": False,
                    "redirect": bool(redirect),
                },
            )
            log_data[
                "message"
            ] = "You do not have any roles matching your search criteria. "
            log.debug(log_data)
            self.set_status(404)
            self.write({"type": "error", "message": log_data["message"]})
            return
        stats.count(
            "RoleConsoleLoginHandler.post",
            tags={
                "user": self.user,
                "role": role,
                "authorized": True,
                "redirect": bool(redirect),
            },
        )
        if len(selected_roles) > 1:
            # Not sure which role the user wants. Redirect them to main page to select one.
            stats.count(
                "RoleConsoleLoginHandler.post",
                tags={
                    "user": self.user,
                    "role": role,
                    "authorized": False,
                    "redirect": bool(redirect),
                },
            )
            log_data[
                "message"
            ] = "You have more than one role matching your query. Please select one."
            log.debug(log_data)
            # Base64-encode the warning so it survives URL query encoding.
            warning_message_arg = {
                "warningMessage": base64.b64encode(log_data["message"].encode()).decode(
                    "utf-8"
                )
            }
            redirect_url = furl(f"/?arn={role}")
            redirect_url.args = {
                **redirect_url.args,
                **arguments,
                **warning_message_arg,
            }
            self.write(
                {
                    "type": "redirect",
                    "message": log_data["message"],
                    "reason": "multiple_roles",
                    "redirect_url": redirect_url.url,
                }
            )
            return
        log_data["message"] = "Incoming request"
        log.debug(log_data)
        # User is authorized
        try:
            # User-role logic:
            # User-role should come in as cm-[username or truncated username]_[N or NC]
            user_role = False
            account_id = None
            selected_role = selected_roles[0]
            # User role must be defined as a user attribute
            if (
                self.user_role_name
                and "role/" in selected_role
                and selected_role.split("role/")[1] == self.user_role_name
            ):
                user_role = True
                account_id = selected_role.split("arn:aws:iam::")[1].split(":role")[0]
            url = await aws.generate_url(
                self.user,
                selected_role,
                region,
                user_role=user_role,
                account_id=account_id,
            )
        except Exception as e:
            log_data["message"] = f"Exception generating AWS console URL: {str(e)}"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            stats.count("index.post.exception")
            # Escape exception text before echoing it back to the client.
            self.write(
                {
                    "type": "console_url",
                    "message": tornado.escape.xhtml_escape(log_data["message"]),
                    "error": tornado.escape.xhtml_escape(str(log_data["error"])),
                }
            )
            return
        if redirect:
            # Append the requested post-login destination to the federation URL.
            parsed_url = urlparse(url)
            parsed_url_query = parse_qs(parsed_url.query)
            parsed_url_query["Destination"] = redirect
            updated_query = urlencode(parsed_url_query, doseq=True)
            url = parsed_url._replace(query=updated_query).geturl()
        self.write(
            {
                "type": "redirect",
                "redirect_url": url,
                "reason": "console_login",
                "role": selected_role,
            }
        )
        return
class RolesHandler(BaseAPIV2Handler):
    """Handler for /api/v2/roles

    GET - Allows read access to a list of roles across all accounts. Returned roles are
    limited to what the requesting user has access to.
    POST - Allows (authorized) users to create a role
    """

    allowed_methods = ["GET", "POST"]

    def on_finish(self) -> None:
        if self.request.method != "POST":
            return
        # Force refresh of the policies table and the credential authorization
        # mapping after the dynamic config sync period to ensure all workers
        # have the updated configuration. (Fixed typo: "crednetial".)
        celery_app.send_task(
            "consoleme.celery_tasks.celery_tasks.cache_policies_table_details",
        )
        celery_app.send_task(
            "consoleme.celery_tasks.celery_tasks.cache_credential_authorization_mapping",
        )

    async def get(self):
        """GET /api/v2/roles - Return the requesting user's eligible roles."""
        payload = {"eligible_roles": self.eligible_roles}
        self.set_header("Content-Type", "application/json")
        # NOTE(review): escape_forward_slashes is a ujson extension; this
        # assumes the module-level `json` name is ujson — confirm.
        self.write(json.dumps(payload, escape_forward_slashes=False))
        await self.finish()

    async def post(self):
        """
        POST /api/v2/roles - Create a new IAM role.

        Requires role-creation authorization; responds 403 otherwise.
        """
        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "ip": self.ip,
        }
        can_create_role = can_create_roles(self.user, self.groups)
        if not can_create_role:
            stats.count(
                f"{log_data['function']}.unauthorized",
                tags={"user": self.user, "authorized": can_create_role},
            )
            log_data["message"] = "User is unauthorized to create a role"
            log.error(log_data)
            self.write_error(403, message="User is unauthorized to create a role")
            return
        try:
            create_model = RoleCreationRequestModel.parse_raw(self.request.body)
        except ValidationError as e:
            log_data["message"] = f"Validation Exception: {str(e)}"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.validation_exception", tags={"user": self.user}
            )
            sentry_sdk.capture_exception()
            self.write_error(400, message="Error validating input: " + str(e))
            return
        try:
            results = await create_iam_role(create_model, self.user)
        except Exception as e:
            log_data["message"] = f"Exception creating role: {str(e)}"
            log_data["error"] = str(e)
            log_data["account_id"] = create_model.account_id
            log_data["role_name"] = create_model.role_name
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.exception",
                tags={
                    "user": self.user,
                    "account_id": create_model.account_id,
                    "role_name": create_model.role_name,
                },
            )
            sentry_sdk.capture_exception()
            # Bug fix: this endpoint creates roles; the error message previously
            # said "cloning" (copy-paste from RoleCloneHandler).
            self.write_error(500, message="Exception occurred creating role: " + str(e))
            return
        # if here, role has been successfully created
        stats.count(
            f"{log_data['function']}.success",
            tags={
                "user": self.user,
                "account_id": create_model.account_id,
                "role_name": create_model.role_name,
            },
        )
        self.write(results)
class AccountRolesHandler(BaseAPIV2Handler):
    """Handler for /api/v2/roles/{account_number}

    Allows read access to a list of roles by account. Roles are limited to what
    the requesting user has access to.
    """

    allowed_methods = ["GET"]

    async def get(self, account_id):
        """
        GET /api/v2/roles/{account_id}
        """
        # Endpoint not implemented yet: log the attempt and answer 501.
        log.debug(
            {
                "function": "AccountRolesHandler.get",
                "user": self.user,
                "message": "Writing all eligible user roles",
                "user-agent": self.request.headers.get("User-Agent"),
                "request_id": self.request_uuid,
            }
        )
        self.write_error(501, message="Get roles by account")
class RoleDetailHandler(BaseAPIV2Handler):
    """Handler for /api/v2/roles/{accountNumber}/{roleName}
    Allows read and update access to a specific role in an account.
    """
    allowed_methods = ["GET", "PUT", "DELETE"]
    def initialize(self):
        # Populated per-request by the base handler's auth flow.
        self.user: str = None
        self.eligible_roles: list = []
    async def get(self, account_id, role_name):
        """
        GET /api/v2/roles/{account_number}/{role_name}

        Return extended details for a single IAM role, or 404 if it cannot be
        retrieved (or is excluded from syncing).
        """
        log_data = {
            "function": "RoleDetailHandler.get",
            "user": self.user,
            "ip": self.ip,
            "message": "Retrieving role details",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "account_id": account_id,
            "role_name": role_name,
        }
        stats.count(
            "RoleDetailHandler.get",
            tags={"user": self.user, "account_id": account_id, "role_name": role_name},
        )
        log.debug(log_data)
        # ?force_refresh=true bypasses the cache when fetching role details.
        force_refresh = str2bool(
            self.request.arguments.get("force_refresh", [False])[0]
        )
        error = ""
        try:
            role_details = await get_role_details(
                account_id, role_name, extended=True, force_refresh=force_refresh
            )
        except Exception as e:
            sentry_sdk.capture_exception()
            log.error({**log_data, "error": e}, exc_info=True)
            role_details = None
            error = str(e)
        if role_details:
            # Hide roles that ConsoleMe is not allowed to sync/manage.
            if not allowed_to_sync_role(role_details.arn, role_details.tags):
                role_details = None
        if not role_details:
            self.send_error(
                404,
                message=f"Unable to retrieve the specified role: {account_id}/{role_name}. {error}",
            )
            return
        self.write(role_details.json())
    async def put(self, account_id, role_name):
        """
        PUT /api/v2/roles/{account_number}/{role_name}
        """
        # Not implemented yet; always responds with 501.
        log_data = {
            "function": "RoleDetailHandler.put",
            "user": self.user,
            "message": "Writing all eligible user roles",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
        }
        log.debug(log_data)
        self.write_error(501, message="Update role details")
    async def delete(self, account_id, role_name):
        """
        DELETE /api/v2/roles/{account_id}/{role_name}

        Delete an IAM role, if the requesting user is authorized to do so.
        """
        if not self.user:
            self.write_error(403, message="No user detected")
            return
        # Escape path parameters before echoing them in logs/responses.
        account_id = tornado.escape.xhtml_escape(account_id)
        role_name = tornado.escape.xhtml_escape(role_name)
        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "ip": self.ip,
            "account": account_id,
            "role": role_name,
        }
        can_delete_role = can_delete_iam_principals(self.user, self.groups)
        if not can_delete_role:
            stats.count(
                f"{log_data['function']}.unauthorized",
                tags={
                    "user": self.user,
                    "account": account_id,
                    "role": role_name,
                    "authorized": can_delete_role,
                    "ip": self.ip,
                },
            )
            log_data["message"] = "User is unauthorized to delete a role"
            log.error(log_data)
            self.write_error(403, message="User is unauthorized to delete a role")
            return
        try:
            await delete_iam_role(account_id, role_name, self.user)
        except Exception as e:
            log_data["message"] = "Exception deleting role"
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.exception",
                tags={
                    "user": self.user,
                    "account": account_id,
                    "role": role_name,
                    "authorized": can_delete_role,
                    "ip": self.ip,
                },
            )
            self.write_error(500, message="Error occurred deleting role: " + str(e))
            return
        # if here, role has been successfully deleted
        arn = f"arn:aws:iam::{account_id}:role/{role_name}"
        # Force-refresh the cached role so the deletion is visible immediately.
        await aws.fetch_iam_role(account_id, arn, force_refresh=True)
        response_json = {
            "status": "success",
            "message": "Successfully deleted role from account",
            "role": role_name,
            "account": account_id,
        }
        self.write(response_json)
class RoleDetailAppHandler(BaseMtlsHandler):
    """Handler for /api/v2/mtls/roles/{accountNumber}/{roleName}
    Allows apps to view or delete a role
    """
    allowed_methods = ["DELETE", "GET"]
    def check_xsrf_cookie(self):
        # mTLS clients authenticate via certificate; no XSRF cookie is present.
        pass
    async def delete(self, account_id, role_name):
        """
        DELETE /api/v2/mtls/roles/{account_id}/{role_name}

        Allow an authorized application (not a human user) to delete a role.
        """
        # Escape path parameters before echoing them in logs/responses.
        account_id = tornado.escape.xhtml_escape(account_id)
        role_name = tornado.escape.xhtml_escape(role_name)
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "account_id": account_id,
            "role_name": role_name,
        }
        # Only mTLS requesters of type "application" may use this endpoint.
        requester_type = self.requester.get("type")
        if requester_type != "application":
            log_data[
                "message"
            ] = "Non-application trying to access application only endpoint"
            log.error(log_data)
            self.write_error(406, message="Endpoint not supported for non-applications")
            return
        app_name = self.requester.get("name")
        can_delete_role = can_delete_iam_principals_app(app_name)
        if not can_delete_role:
            stats.count(
                f"{log_data['function']}.unauthorized",
                tags={
                    "app_name": app_name,
                    "account_id": account_id,
                    "role_name": role_name,
                    "authorized": can_delete_role,
                },
            )
            log_data["message"] = "App is unauthorized to delete a role"
            log.error(log_data)
            self.write_error(403, message="App is unauthorized to delete a role")
            return
        try:
            await delete_iam_role(account_id, role_name, app_name)
        except Exception as e:
            log_data["message"] = "Exception deleting role"
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.exception",
                tags={
                    "app_name": app_name,
                    "account_id": account_id,
                    "role_name": role_name,
                    "authorized": can_delete_role,
                },
            )
            self.write_error(500, message="Error occurred deleting role: " + str(e))
            return
        # if here, role has been successfully deleted
        arn = f"arn:aws:iam::{account_id}:role/{role_name}"
        # Force-refresh the cached role so the deletion is visible immediately.
        await aws.fetch_iam_role(account_id, arn, force_refresh=True)
        response_json = {
            "status": "success",
            "message": "Successfully deleted role from account",
            "role": role_name,
            "account": account_id,
        }
        self.write(response_json)
    async def get(self, account_id, role_name):
        """
        GET /api/v2/mtls/roles/{account_id}/{role_name}

        Return extended details for a single IAM role to an mTLS client.
        """
        account_id = tornado.escape.xhtml_escape(account_id)
        role_name = tornado.escape.xhtml_escape(role_name)
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "ip": self.ip,
            "message": "Retrieving role details",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "account_id": account_id,
            "role_name": role_name,
        }
        app_name = self.requester.get("name") or self.requester.get("username")
        stats.count(
            "RoleDetailAppHandler.get",
            tags={
                "requester": app_name,
                "account_id": account_id,
                "role_name": role_name,
            },
        )
        log.debug(log_data)
        # ?force_refresh=true bypasses the cache when fetching role details.
        force_refresh = str2bool(
            self.request.arguments.get("force_refresh", [False])[0]
        )
        error = ""
        try:
            role_details = await get_role_details(
                account_id, role_name, extended=True, force_refresh=force_refresh
            )
        except Exception as e:
            sentry_sdk.capture_exception()
            log.error({**log_data, "error": e}, exc_info=True)
            role_details = None
            error = str(e)
        if not role_details:
            self.send_error(
                404,
                message=f"Unable to retrieve the specified role: {account_id}/{role_name}. {error}",
            )
            return
        self.write(role_details.json())
class RoleCloneHandler(BaseAPIV2Handler):
    """Handler for /api/v2/clone/role

    Allows cloning an existing IAM role into a new role.
    """

    allowed_methods = ["POST"]

    async def post(self):
        """Clone an IAM role, if the requesting user may create roles."""
        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "ip": self.ip,
        }
        # Cloning requires the same permission as creating a role.
        authorized = can_create_roles(self.user, self.groups)
        if not authorized:
            stats.count(
                f"{log_data['function']}.unauthorized",
                tags={"user": self.user, "authorized": authorized},
            )
            log_data["message"] = "User is unauthorized to clone a role"
            log.error(log_data)
            self.write_error(403, message="User is unauthorized to clone a role")
            return
        try:
            clone_request = CloneRoleRequestModel.parse_raw(self.request.body)
        except ValidationError as validation_error:
            log_data["message"] = "Validation Exception"
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.validation_exception", tags={"user": self.user}
            )
            sentry_sdk.capture_exception()
            self.write_error(
                400, message="Error validating input: " + str(validation_error)
            )
            return
        try:
            results = await clone_iam_role(clone_request, self.user)
        except Exception as clone_error:
            log_data.update(
                {
                    "message": "Exception cloning role",
                    "error": str(clone_error),
                    "account_id": clone_request.account_id,
                    "role_name": clone_request.role_name,
                }
            )
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.exception",
                tags={
                    "user": self.user,
                    "account_id": clone_request.account_id,
                    "role_name": clone_request.role_name,
                },
            )
            sentry_sdk.capture_exception()
            self.write_error(
                500, message="Exception occurred cloning role: " + str(clone_error)
            )
            return
        # Role was cloned successfully.
        self.write(results)
class GetRolesMTLSHandler(BaseMtlsHandler):
    """
    Handler for /api/v2/get_roles
    Consoleme MTLS role handler - returns User's eligible roles and other details about eligible roles
    Pass ?all=true to URL query to return all roles.
    """
    def check_xsrf_cookie(self):
        # mTLS clients authenticate via certificate; no XSRF cookie is present.
        pass
    def initialize(self):
        # Populated per-request from the mTLS requester information.
        self.user: str = None
        self.eligible_roles: list = []
    async def get(self):
        """
        GET /api/v2/get_roles - Endpoint used to get details of eligible roles. Used by weep and newt.
        ---
        get:
            description: Returns a json-encoded list of objects of eligible roles for the user.
            response format: WebResponse. The "data" field within WebResponse is of format EligibleRolesModelArray
            Example response:
                {
                    "status": "success",
                    "status_code": 200,
                    "data": {
                        "roles": [
                                    {
                                        "arn": "arn:aws:iam::123456789012:role/role_name",
                                        "account_id": "123456789012",
                                        "account_friendly_name": "prod",
                                        "role_name": "role_name",
                                        "apps": {
                                            "app_details": [
                                                {
                                                    "name": "consoleme",
                                                    "owner": "owner@example.com",
                                                    "owner_url": null,
                                                    "app_url": "https://example.com"
                                                }
                                            ]
                                        }
                                    },
                                    ...
                                ]
                    }
                }
        """
        self.user: str = self.requester["email"]
        # ?all=true returns every role, not only console-capable ones.
        include_all_roles = self.get_arguments("all")
        console_only = True
        if include_all_roles == ["true"]:
            console_only = False
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user": self.user,
            "console_only": console_only,
            "message": "Getting all eligible user roles",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
        }
        log.debug(log_data)
        stats.count("GetRolesMTLSHandler.get", tags={"user": self.user})
        # Populates self.eligible_roles for the requesting user.
        await self.authorization_flow(user=self.user, console_only=console_only)
        eligible_roles_details_array = await get_eligible_role_details(
            sorted(self.eligible_roles)
        )
        res = WebResponse(
            status=Status2.success,
            status_code=200,
            data=eligible_roles_details_array.dict(),
        )
        self.write(res.json(exclude_unset=True))
        await self.finish()
class SelfServiceConfigHandler(BaseAPIV2Handler):

    allowed_methods = ["GET"]

    async def get(self):
        """Return the self-service wizard configuration for the current user."""
        self_service_iam_config: dict = config.get(
            "self_service_iam", SELF_SERVICE_IAM_DEFAULTS
        )
        payload = {
            # Admins may bypass the normal approval flow.
            "admin_bypass_approval_enabled": can_admin_policies(
                self.user, self.groups
            ),
            "export_to_terraform_enabled": config.get(
                "export_to_terraform_enabled", False
            ),
            # Help message can be configured with Markdown for link handling
            "help_message": config.get("self_service_iam_help_message"),
            **self_service_iam_config,
        }
        self.write(payload)
class PermissionTemplatesHandler(BaseAPIV2Handler):

    allowed_methods = ["GET"]

    async def get(self):
        """
        Returns permission templates.
        Combines permission templates from dynamic configuration to the ones discovered in static configuration, with a
        priority to the templates defined in dynamic configuration.
        If no permission_templates are defined in static configuration, this function will substitute the static
        configuration templates with PERMISSION_TEMPLATE_DEFAULTS.
        """
        dynamic_templates: List[Dict[str, Any]] = config.get(
            "dynamic_config.permission_templates", []
        )
        static_templates: List[Dict[str, Any]] = config.get(
            "permission_templates", PERMISSION_TEMPLATE_DEFAULTS
        )
        # Deduplicate by "key": dynamic templates come first and therefore win;
        # dict insertion order preserves the original template ordering.
        merged: Dict[str, Dict[str, Any]] = {}
        for template in [*dynamic_templates, *static_templates]:
            merged.setdefault(template["key"], template)
        self.write({"permission_templates": list(merged.values())})
class ServiceControlPolicyHandler(BaseAPIV2Handler):
    """
    Handler for /api/v2/service_control_policies/{accountNumberOrOuId}
    Returns Service Control Policies targeting specified account or OU
    """

    allowed_methods = ["GET"]

    async def get(self, identifier):
        """Return the SCPs targeting the given account ID or organizational unit."""
        # Contractors may be barred unless they belong to a bypass group.
        if (
            config.get("policy_editor.disallow_contractors", True)
            and self.contractor
            and self.user
            not in config.get("groups.can_bypass_contractor_restrictions", [])
        ):
            raise MustBeFte("Only FTEs are authorized to view this page.")
        log.debug(
            {
                "function": "ServiceControlPolicyHandler.get",
                "user": self.user,
                "message": "Retrieving service control policies for identifier",
                "identifier": identifier,
                "user-agent": self.request.headers.get("User-Agent"),
                "request_id": self.request_uuid,
            }
        )
        try:
            scps = await get_scps_for_account_or_ou(identifier)
        except Exception as lookup_error:
            sentry_sdk.capture_exception()
            failure = WebResponse(
                status=Status2.error,
                status_code=403,
                errors=[str(lookup_error)],
                data=[],
            )
            self.write(failure.json())
            return
        success = WebResponse(
            status=Status2.success, status_code=200, data=scps.__root__
        )
        self.write(success.json())
class TemplatedResourceDetailHandler(BaseAPIV2Handler):
    async def get(self, repository_name, resource):
        """Return the first cached template for a resource, or {} if none exists."""
        template = await retrieve_cached_resource_templates(
            repository_name=repository_name,
            resource=resource,
            return_first_result=True,
        )
        if template:
            self.write(template.json())
            return
        # TODO: Log here
        # Return 404
        self.write({})
class ResourceTypeAheadHandlerV2(BaseAPIV2Handler):
    async def get(self):
        """
        Typeahead endpoint backed by the AWS Config resource cache in Redis.

        Optional query arguments:
          - typeahead: case-insensitive substring to match against resource ARNs
          - account_id / resource_type / region: exact-match ARN component filters
          - limit: maximum number of results (default 20)
          - ui_formatted: if "true", return [{"title": arn}, ...] for the UI
        """
        # self.request.arguments.get(...) returns None for a missing argument,
        # so subscripting raises TypeError — that is the "absent" signal here.
        try:
            type_ahead: Optional[str] = (
                self.request.arguments.get("typeahead")[0].decode("utf-8").lower()
            )
        except TypeError:
            type_ahead = None
        try:
            account_id: Optional[str] = self.request.arguments.get("account_id")[
                0
            ].decode("utf-8")
        except TypeError:
            account_id = None
        try:
            resource_type: Optional[str] = self.request.arguments.get("resource_type")[
                0
            ].decode("utf-8")
        except TypeError:
            resource_type = None
        try:
            region: Optional[str] = self.request.arguments.get("region")[0].decode(
                "utf-8"
            )
        except TypeError:
            region = None
        # Bug fix: previously an empty "limit" argument left `limit` as the
        # empty string, breaking the `len(matching) >= limit` comparison below.
        limit: int = 20
        try:
            limit_raw = self.request.arguments.get("limit")[0].decode("utf-8")
            if limit_raw:
                limit = int(limit_raw)
        except TypeError:
            pass
        try:
            ui_formatted_arg: Optional[str] = (
                self.request.arguments.get("ui_formatted")[0].decode("utf-8").lower()
            )
        except TypeError:
            ui_formatted_arg = None
        # Bug fix: the raw string was previously used directly in a boolean
        # context, so any provided value — including "false" — was truthy.
        ui_formatted: bool = ui_formatted_arg in ("true", "1", "yes")
        resource_redis_cache_key = config.get(
            "aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
        )
        all_resource_arns = await sync_to_async(red.hkeys)(resource_redis_cache_key)
        # Fall back to S3 if the Redis cache is empty, and repopulate Redis.
        if not all_resource_arns:
            s3_bucket = config.get("aws_config_cache_combined.s3.bucket")
            s3_key = config.get(
                "aws_config_cache_combined.s3.file",
                "aws_config_cache_combined/aws_config_resource_cache_combined_v1.json.gz",
            )
            try:
                all_resources = await retrieve_json_data_from_redis_or_s3(
                    s3_bucket=s3_bucket, s3_key=s3_key
                )
                all_resource_arns = all_resources.keys()
                await sync_to_async(red.hmset)(resource_redis_cache_key, all_resources)
            except DataNotRetrievable:
                sentry_sdk.capture_exception()
                all_resource_arns = []
        matching = set()
        for arn in all_resource_arns:
            if len(matching) >= limit:
                break
            # ARN format: 'arn:aws:sqs:us-east-1:123456789012:resource_name'
            arn_parts = arn.split(":")  # hoisted: previously split per filter
            if resource_type and resource_type != arn_parts[2]:
                continue
            if region and region != arn_parts[3]:
                continue
            if account_id and account_id != arn_parts[4]:
                continue
            # With no typeahead string, every ARN passing the filters matches.
            if not type_ahead or type_ahead in arn.lower():
                matching.add(arn)
        arn_array = ArnArray.parse_obj(list(matching))
        if ui_formatted:
            self.write(json.dumps([{"title": arn} for arn in arn_array.__root__]))
        else:
            self.write(arn_array.json())
class SelfServiceStep1ResourceTypeahead(BaseAPIV2Handler):
    async def get(self):
        """Return cached self-service typeahead entries matching the query string."""
        # Get type ahead request arg; .get() returns None when absent, making
        # the subscript raise TypeError.
        try:
            search: Optional[str] = (
                self.request.arguments.get("typeahead")[0].decode("utf-8").lower()
            )
        except TypeError:
            search = None
        if not search:
            self.write(json.dumps([]))
            return
        max_limit: int = config.get(
            "self_service_step_1_resource_typeahead.max_limit", 10000
        )
        limit: int = 20
        # Get limit request arg, capped at the configured maximum.
        try:
            raw_limit: str = self.request.arguments.get("limit")[0].decode("utf-8")
            if raw_limit:
                limit = min(int(raw_limit), max_limit)
        except TypeError:
            pass
        typeahead_data = await retrieve_json_data_from_redis_or_s3(
            redis_key=config.get(
                "cache_self_service_typeahead.redis.key",
                "cache_self_service_typeahead_v1",
            ),
            s3_bucket=config.get("cache_self_service_typeahead.s3.bucket"),
            s3_key=config.get(
                "cache_self_service_typeahead.s3.file",
                "cache_self_service_typeahead/cache_self_service_typeahead_v1.json.gz",
            ),
        )
        matching = []
        needle = search.lower()
        for entry in typeahead_data.get("typeahead_entries", []):
            if len(matching) >= limit:
                break
            principal = entry.get("principal", {})
            # An entry matches if the query appears in any of these fields.
            candidates = (
                entry.get("display_text"),
                principal.get("resource_identifier"),
                principal.get("principal_arn"),
                entry.get("application_name"),
            )
            if any(needle in field.lower() for field in candidates if field):
                matching.append(entry)
        self.write(json.dumps(matching))
class UserRegistrationHandler(TornadoRequestHandler):
    """
    Allows user registration if it is configured.
    """
    def initialize(self):
        # DynamoDB-backed user store used to look up and create users.
        self.ddb = UserDynamoHandler()
    async def post(self):
        """Register a new user from a JSON ``RegistrationAttemptModel`` body.

        Rejects the request with HTTP 403 unless password authentication and
        self-registration are both enabled in configuration, the username is
        a valid e-mail address, the user does not already exist, and the
        password passes the strength check. On success, writes a success
        ``WebResponse``.
        """
        # TODO: Send verification e-mail to proposed user
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Attempting to register user",
            "user-agent": self.request.headers.get("User-Agent"),
        }
        generic_error_message: str = "User registration failed"
        # Fail if getting users by password is not enabled
        if not config.get("auth.get_user_by_password"):
            errors = [
                "Expected configuration `auth.get_user_by_password`, but it is not enabled."
            ]
            await handle_generic_error_response(
                self, generic_error_message, errors, 403, "not_configured", log_data
            )
            return
        # Fail if user registration not allowed
        if not config.get("auth.allow_user_registration"):
            errors = [
                "Expected configuration `auth.allow_user_registration`, but it is not enabled."
            ]
            await handle_generic_error_response(
                self, generic_error_message, errors, 403, "not_configured", log_data
            )
            return
        registration_attempt = RegistrationAttemptModel.parse_raw(self.request.body)
        log_data["username"] = registration_attempt.username
        # Fail if username not valid email address
        try:
            if not validate_email(registration_attempt.username):
                errors = ["Username must be a valid e-mail address."]
                await handle_generic_error_response(
                    self,
                    generic_error_message,
                    errors,
                    403,
                    "invalid_request",
                    log_data,
                )
                return
        except Exception as e:
            # validate_email may raise on malformed input; report the error
            # instead of letting an unhandled exception produce a 500.
            sentry_sdk.capture_exception()
            await handle_generic_error_response(
                self, generic_error_message, [str(e)], 403, "invalid_request", log_data
            )
            return
        # Fail if user already exists
        if await self.ddb.get_user(registration_attempt.username):
            errors = ["User already exists"]
            await handle_generic_error_response(
                self, generic_error_message, errors, 403, "invalid_request", log_data
            )
            return
        # Fails if password is not strong enough.
        password_strength_errors = await check_password_strength(
            registration_attempt.password
        )
        if password_strength_errors:
            await handle_generic_error_response(
                self,
                password_strength_errors["message"],
                password_strength_errors["errors"],
                403,
                "weak_password",
                log_data,
            )
            return
        self.ddb.create_user(
            registration_attempt.username, registration_attempt.password
        )
        res = WebResponse(
            status="success",
            status_code=200,
            message=f"Successfully created user {registration_attempt.username}.",
        )
        self.write(res.json(exclude_unset=True))
class LoginConfigurationHandler(TornadoRequestHandler):
    """Serves the login-page configuration consumed by the frontend."""

    def get(self):
        """Write the login UI configuration as JSON.

        Uses ``LoginConfigurationHandler.login_configuration`` from config
        when set, otherwise falls back to a default built from the auth
        settings.
        """
        default_configuration = {
            "enabled": config.get("auth.get_user_by_password"),
            "page_title": config.get(
                "LoginConfigurationHandler.page_title",
                "Welcome to ConsoleMe - Please Sign-In",
            ),
            "allow_password_login": config.get("auth.get_user_by_password", True),
            "allow_sso_login": config.get(
                "LoginConfigurationHandler.allow_sso_login", True
            ),
            # Bug fix: this key was previously misspelled as
            # `auth.allow_user_registrationp`, so the sign-up option was
            # never advertised even when registration was enabled. The
            # registration handler gates on `auth.allow_user_registration`.
            "allow_sign_up": config.get("auth.allow_user_registration", False),
            "custom_message": "",
        }
        login_configuration = config.get(
            "LoginConfigurationHandler.login_configuration", default_configuration
        )
        self.write(login_configuration)
class LoginHandler(TornadoRequestHandler):
    """
    Handles user log-in flow if password authentication is enabled.
    """
    def initialize(self):
        # DynamoDB-backed user store used to verify credentials.
        self.ddb = UserDynamoHandler()
    def check_xsrf_cookie(self):
        # XSRF checking is intentionally disabled for the login endpoint:
        # the client submits credentials before it has an authenticated
        # session (and the endpoint rate-limits failures below).
        pass
    def set_default_headers(self) -> None:
        self.set_header("Content-Type", "application/json")
    async def post(self):
        """Authenticate a user and, on success, set the auth JWT cookie.

        Expects a JSON ``LoginAttemptModel`` body. Responds with HTTP 403
        when password auth / auth cookies are not configured or when
        authentication fails; otherwise sets the JWT cookie and writes a
        ``redirect`` WebResponse pointing at the user's intended destination.
        """
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Attempting to authenticate User",
            "user-agent": self.request.headers.get("User-Agent"),
        }
        generic_error_message = "Authentication failed"
        if not config.get("auth.get_user_by_password"):
            errors = [
                "Expected configuration `auth.get_user_by_password`, but it is not enabled."
            ]
            await handle_generic_error_response(
                self, generic_error_message, errors, 403, "not_configured", log_data
            )
            return
        # Auth cookie must be set to use password authentication.
        if not config.get("auth.set_auth_cookie"):
            errors = [
                "Expected configuration `auth.set_auth_cookie`, but it is not enabled."
            ]
            await handle_generic_error_response(
                self, generic_error_message, errors, 403, "not_configured", log_data
            )
            return
        login_attempt = LoginAttemptModel.parse_raw(self.request.body)
        log_data["username"] = login_attempt.username
        log_data["after_redirect_uri"] = login_attempt.after_redirect_uri
        authenticated_response: AuthenticationResponse = (
            await self.ddb.authenticate_user(login_attempt)
        )
        if not authenticated_response.authenticated:
            # Wait 1 second to protect from single-host brute-force
            await asyncio.sleep(1)
            await handle_generic_error_response(
                self,
                generic_error_message,
                authenticated_response.errors,
                403,
                "authentication_failure",
                log_data,
            )
            return
        # Make and set jwt for user
        expiration = datetime.utcnow().replace(tzinfo=pytz.UTC) + timedelta(
            minutes=config.get("jwt.expiration_minutes", 60)
        )
        encoded_cookie = await generate_jwt_token(
            authenticated_response.username,
            authenticated_response.groups,
            exp=expiration,
        )
        # The `secure` flag defaults to on when the configured site URL is
        # served over HTTPS.
        # NOTE(review): tornado's set_cookie expects `samesite` to be a
        # string such as "Lax"/"Strict"; the default here is True — confirm
        # this produces the intended SameSite attribute.
        self.set_cookie(
            config.get("auth_cookie_name", "consoleme_auth"),
            encoded_cookie,
            expires=expiration,
            secure=config.get(
                "auth.cookie.secure",
                True if "https://" in config.get("url") else False,
            ),
            httponly=config.get("auth.cookie.httponly", True),
            samesite=config.get("auth.cookie.samesite", True),
        )
        res = WebResponse(
            status="redirect",
            redirect_url=login_attempt.after_redirect_uri,
            status_code=200,
            reason="authenticated_redirect",
            message="User has successfully authenticated. Redirecting to their intended destination.",
        )
        self.write(res.json(exclude_unset=True, exclude_none=True))
class UserManagementHandler(BaseAPIV2Handler):
    """
    Handles creating and updating users. Only authorized users are allowed to access this endpoint.
    """
    def initialize(self):
        # DynamoDB-backed user store used for create/update/delete.
        self.ddb = UserDynamoHandler()
    async def post(self):
        """Create, update, or delete a user per a ``UserManagementModel`` body.

        Requires admin authorization (``can_admin_all``); otherwise responds
        403. The action is selected by ``user_management_action``
        ("create", "update", or "delete"); create/update also enforce the
        password strength policy.
        """
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user": self.user,
            "message": "Create/Update User",
            "user-agent": self.request.headers.get("User-Agent"),
            "request_id": self.request_uuid,
            "ip": self.ip,
        }
        generic_error_message = "Unable to create/update user"
        log.debug(log_data)
        # Checks authz levels of current user
        if not can_admin_all(self.user, self.groups):
            errors = ["User is not authorized to access this endpoint."]
            await handle_generic_error_response(
                self, generic_error_message, errors, 403, "unauthorized", log_data
            )
            return
        request = UserManagementModel.parse_raw(self.request.body)
        log_data["requested_user"] = request.username
        if request.user_management_action.value == "create":
            log.debug(
                {
                    **log_data,
                    "message": "Creating user",
                    "requested_user": request.username,
                    "requested_groups": request.groups,
                }
            )
            # Fails if password is not strong enough.
            password_strength_errors = await check_password_strength(request.password)
            if password_strength_errors:
                await handle_generic_error_response(
                    self,
                    password_strength_errors["message"],
                    password_strength_errors["errors"],
                    403,
                    "weak_password",
                    log_data,
                )
                return
            self.ddb.create_user(
                request.username,
                request.password,
                request.groups,
            )
            res = WebResponse(
                status="success",
                status_code=200,
                message=f"Successfully created user {request.username}.",
            )
            self.write(res.json(exclude_unset=True, exclude_none=True))
            return
        elif request.user_management_action.value == "update":
            log.debug(
                {
                    **log_data,
                    "message": "Updating user",
                    "requested_user": request.username,
                    "requested_groups": request.groups,
                }
            )
            # Password is optional on update; only validate it when supplied.
            if request.password:
                # Fails if password is not strong enough.
                password_strength_errors = await check_password_strength(
                    request.password
                )
                if password_strength_errors:
                    await handle_generic_error_response(
                        self,
                        password_strength_errors["message"],
                        password_strength_errors["errors"],
                        403,
                        "weak_password",
                        log_data,
                    )
                    return
            self.ddb.update_user(
                request.username,
                request.password,
                request.groups,
            )
            res = WebResponse(
                status="success",
                status_code=200,
                message=f"Successfully updated user {request.username}.",
            )
            self.write(res.json(exclude_unset=True, exclude_none=True))
            return
        elif request.user_management_action.value == "delete":
            log.debug(
                {
                    **log_data,
                    "message": "Deleting user",
                    "requested_user": request.username,
                }
            )
            self.ddb.delete_user(
                request.username,
            )
            res = WebResponse(
                status="success",
                status_code=200,
                message=f"Successfully deleted user {request.username}.",
            )
            self.write(res.json(exclude_unset=True, exclude_none=True))
            return
        else:
            # Unknown action value: reject explicitly.
            errors = ["Change type is not supported by this endpoint."]
            await handle_generic_error_response(
                self, generic_error_message, errors, 403, "invalid_request", log_data
            )
            return
class UserProfileHandler(BaseAPIV1Handler):
    """Serves per-user site configuration and authorization flags."""
    async def get(self):
        """
        Provide information about site configuration for the frontend
        :return:
        """
        is_contractor = config.config_plugin().is_contractor(self.user)
        # Static, user-independent site settings pulled from configuration.
        site_config = {
            "consoleme_logo": await get_random_security_logo(),
            "google_analytics": {
                "tracking_id": config.get("google_analytics.tracking_id"),
                "options": config.get("google_analytics.options", {}),
            },
            "documentation_url": config.get(
                "documentation_page", "https://hawkins.gitbook.io/consoleme/"
            ),
            "support_contact": config.get("support_contact"),
            "support_chat_url": config.get(
                "support_chat_url", "https://discord.com/invite/nQVpNGGkYu"
            ),
            "security_logo": config.get("security_logo.image"),
            "security_url": config.get("security_logo.url"),
            # If site_config.landing_url is set, users will be redirected to the landing URL after authenticating
            # on the frontend.
            "landing_url": config.get("site_config.landing_url"),
            "temp_policy_support": config.get("policies.temp_policy_support"),
            "notifications": {
                "enabled": config.get("site_config.notifications.enabled"),
                "request_interval": config.get(
                    "site_config.notifications.request_interval", 60
                ),
            },
            "cloudtrail_denies_policy_generation": config.get(
                "celery.cache_cloudtrail_denies.enabled", False
            ),
        }
        custom_page_header: Dict[str, str] = await get_custom_page_header(
            self.user, self.groups
        )
        # Per-user profile: identity, authorization flags, and which frontend
        # pages should be enabled for this user.
        user_profile = {
            "site_config": site_config,
            "user": self.user,
            "can_logout": config.get("auth.set_auth_cookie", False),
            "is_contractor": is_contractor,
            "employee_photo_url": config.config_plugin().get_employee_photo_url(
                self.user
            ),
            "employee_info_url": config.config_plugin().get_employee_info_url(
                self.user
            ),
            "authorization": {
                "can_edit_policies": can_admin_policies(self.user, self.groups),
                "can_create_roles": can_create_roles(self.user, self.groups),
                "can_delete_iam_principals": can_delete_iam_principals(
                    self.user, self.groups
                ),
            },
            "pages": {
                "header": {
                    "custom_header_message_title": custom_page_header.get(
                        "custom_header_message_title", ""
                    ),
                    "custom_header_message_text": custom_page_header.get(
                        "custom_header_message_text", ""
                    ),
                    "custom_header_message_route": custom_page_header.get(
                        "custom_header_message_route", ""
                    ),
                },
                "groups": {
                    "enabled": config.get("headers.group_access.enabled", False)
                },
                "users": {"enabled": config.get("headers.group_access.enabled", False)},
                # Policy and self-service pages are hidden from contractors.
                "policies": {
                    "enabled": config.get("headers.policies.enabled", True)
                    and not is_contractor
                },
                "self_service": {
                    "enabled": config.get("enable_self_service", True)
                    and not is_contractor
                },
                "api_health": {
                    "enabled": is_in_group(
                        self.user,
                        self.groups,
                        config.get("groups.can_edit_health_alert", []),
                    )
                },
                "audit": {
                    "enabled": is_in_group(
                        self.user, self.groups, config.get("groups.can_audit", [])
                    )
                },
                "config": {"enabled": can_edit_dynamic_config(self.user, self.groups)},
            },
            "accounts": await get_account_id_to_name_mapping(),
        }
        self.set_header("Content-Type", "application/json")
        self.write(user_profile)
The code above provides the dependencies needed to implement the `make_app` function. The task is to write a Python function `def make_app(jwt_validator=None)` that solves the following problem:
Build and return the configured Tornado web application.
Here is the function:
def make_app(jwt_validator=None):
    """Build and return the Tornado web application.

    Combines the open-source routes defined here with any internal
    (plugin-provided) routes, initializes Sentry when a DSN is configured,
    and applies tornado settings (debug, XSRF, templates) from config.

    :param jwt_validator: optional JWT validator forwarded to the internal
        routes plugin.
    :return: a configured ``tornado.web.Application``
    """
    # Directory containing the bundled frontend/static assets.
    path = pkg_resources.resource_filename("consoleme", "templates")
    oss_routes = [
        (r"/auth", AuthHandler),
        (r"/healthcheck", HealthHandler),
        (
            r"/static/(.*)",
            tornado.web.StaticFileHandler,
            dict(path=os.path.join(path, "static")),
        ),
        (
            r"/images/(.*)",
            tornado.web.StaticFileHandler,
            dict(path=os.path.join(path, "images")),
        ),
        (
            r"/(favicon.ico)",
            tornado.web.StaticFileHandler,
            dict(path=path),
        ),
        (r"/api/v1/get_credentials", GetCredentialsHandler),
        (r"/api/v1/get_roles", GetRolesHandler),
        (r"/api/v2/get_roles", GetRolesMTLSHandler),
        (r"/api/v2/get_resource_url", GetResourceURLHandler),
        # Used to autocomplete AWS permissions
        (r"/api/v1/policyuniverse/autocomplete/?", AutocompleteHandler),
        (r"/api/v2/user_profile/?", UserProfileHandler),
        (r"/api/v2/self_service_config/?", SelfServiceConfigHandler),
        (r"/api/v2/permission_templates/?", PermissionTemplatesHandler),
        (r"/api/v1/myheaders/?", ApiHeaderHandler),
        (r"/api/v1/policies/typeahead", ApiResourceTypeAheadHandler),
        (r"/api/v2/policies/check", CheckPoliciesHandler),
        (r"/api/v2/dynamic_config", DynamicConfigApiHandler),
        (r"/api/v2/eligible_roles", EligibleRoleHandler),
        (r"/api/v2/eligible_roles_page_config", EligibleRolePageConfigHandler),
        (r"/api/v2/policies_page_config", PoliciesPageConfigHandler),
        (r"/api/v2/requests_page_config", RequestsPageConfigHandler),
        (r"/api/v2/generate_policy", GeneratePolicyHandler),
        (r"/api/v2/notifications/?", NotificationsHandler),
        (r"/api/v2/managed_policies/(\d{12})", ManagedPoliciesForAccountHandler),
        (r"/api/v2/managed_policies/(.*)", ManagedPoliciesHandler),
        (
            r"/api/v2/templated_resource/([a-zA-Z0-9_-]+)/(.*)",
            TemplatedResourceDetailHandler,
        ),
        (
            r"/api/v2/managed_policies_on_principal/(.*)",
            ManagedPoliciesOnPrincipalHandler,
        ),
        (r"/api/v2/login", LoginHandler),
        (r"/api/v2/login_configuration", LoginConfigurationHandler),
        (r"/api/v2/logout", LogOutHandler),
        (
            r"/api/v2/typeahead/self_service_resources",
            SelfServiceStep1ResourceTypeahead,
        ),
        (r"/api/v2/user", UserManagementHandler),
        (r"/api/v2/user_registration", UserRegistrationHandler),
        (r"/api/v2/policies", PoliciesHandler),
        (r"/api/v2/request", RequestHandler),
        (r"/api/v2/requests", RequestsHandler),
        (r"/api/v2/requests/([a-zA-Z0-9_-]+)", RequestDetailHandler),
        (r"/api/v2/roles/?", RolesHandler),
        (r"/api/v2/roles/(\d{12})", AccountRolesHandler),
        (r"/api/v2/roles/(\d{12})/(.*)", RoleDetailHandler),
        (r"/api/v2/users/(\d{12})/(.*)", UserDetailHandler),
        (
            r"/api/v2/resources/(\d{12})/(s3|sqs|sns|managed_policy)(?:/([a-z\-1-9]+))?/(.*)",
            ResourceDetailHandler,
        ),
        (r"/api/v2/service_control_policies/(.*)", ServiceControlPolicyHandler),
        (r"/api/v2/mtls/roles/(\d{12})/(.*)", RoleDetailAppHandler),
        (r"/api/v2/clone/role", RoleCloneHandler),
        (r"/api/v2/generate_changes/?", GenerateChangesHandler),
        (r"/api/v2/typeahead/resources", ResourceTypeAheadHandlerV2),
        (r"/api/v2/role_login/(.*)", RoleConsoleLoginHandler),
        (r"/myheaders/?", HeaderHandler),
        (r"/policies/typeahead/?", ResourceTypeAheadHandler),
        (r"/saml/(.*)", SamlHandler),
        (
            r"/api/v2/challenge_validator/([a-zA-Z0-9_-]+)",
            ChallengeValidatorHandler,
        ),
        (r"/noauth/v1/challenge_generator/(.*)", ChallengeGeneratorHandler),
        (r"/noauth/v1/challenge_poller/([a-zA-Z0-9_-]+)", ChallengePollerHandler),
        (r"/api/v2/audit/roles", AuditRolesHandler),
        (r"/api/v2/audit/roles/(\d{12})/(.*)/access", AuditRolesAccessHandler),
        # Catch-alls: unknown /api/v2 paths get an API 404; everything else
        # falls through to the single-page frontend.
        (r"/api/v2/.*", V2NotFoundHandler),
        (
            r"/(.*)",
            FrontendHandler,
            dict(path=path, default_filename="index.html"),
        ),
    ]
    # Prioritize internal routes before OSS routes so that OSS routes can be overridden if desired.
    internal_route_list = internal_routes.get_internal_routes(
        make_jwt_validator, jwt_validator
    )
    routes = internal_route_list + oss_routes
    app = tornado.web.Application(
        routes,
        debug=config.get("tornado.debug", False),
        xsrf_cookies=config.get("tornado.xsrf", True),
        xsrf_cookie_kwargs=config.get("tornado.xsrf_cookie_kwargs", {}),
        template_path=config.get(
            "tornado.template_path", f"{os.path.dirname(consoleme.__file__)}/templates"
        ),
        ui_modules=internal_routes.ui_modules,
    )
    # Enable Sentry error reporting only when a DSN is configured.
    sentry_dsn = config.get("sentry.dsn")
    if sentry_dsn:
        sentry_sdk.init(
            dsn=sentry_dsn,
            integrations=[
                TornadoIntegration(),
                AioHttpIntegration(),
                RedisIntegration(),
            ],
        )
    return app
162,202 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
# Structured logger for this module.
log = config.get_logger()
# Synchronous Redis client shared by the tasks in this module.
red = RedisHandler().redis_sync()
# Metrics plugin instance; falls back to the `default_metrics` plugin.
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
# Celery beat schedule: maps an entry name to its fully qualified task, a
# beat cadence, and an `expires` option so stale queued runs are discarded
# if a worker can't pick them up in time. The `schedule_*` cadence objects
# are defined elsewhere in this module.
schedule = {
    "cache_iam_resources_across_accounts": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_iam_resources_across_accounts",
        "options": {"expires": 1000},
        "schedule": schedule_45_minute,
    },
    "clear_old_redis_iam_cache": {
        "task": "consoleme.celery_tasks.celery_tasks.clear_old_redis_iam_cache",
        "options": {"expires": 180},
        "schedule": schedule_6_hours,
    },
    "cache_policies_table_details": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_policies_table_details",
        "options": {"expires": 1000},
        "schedule": schedule_30_minute,
    },
    # Emits `time_since_last_success` gauges every minute; see
    # report_celery_last_success_metrics.
    "report_celery_last_success_metrics": {
        "task": "consoleme.celery_tasks.celery_tasks.report_celery_last_success_metrics",
        "options": {"expires": 60},
        "schedule": schedule_minute,
    },
    "cache_managed_policies_across_accounts": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_managed_policies_across_accounts",
        "options": {"expires": 1000},
        "schedule": schedule_45_minute,
    },
    "cache_s3_buckets_across_accounts": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_s3_buckets_across_accounts",
        "options": {"expires": 300},
        "schedule": schedule_45_minute,
    },
    "cache_sqs_queues_across_accounts": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_sqs_queues_across_accounts",
        "options": {"expires": 300},
        "schedule": schedule_45_minute,
    },
    "cache_sns_topics_across_accounts": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_sns_topics_across_accounts",
        "options": {"expires": 300},
        "schedule": schedule_45_minute,
    },
    "get_iam_role_limit": {
        "task": "consoleme.celery_tasks.celery_tasks.get_iam_role_limit",
        "options": {"expires": 300},
        "schedule": schedule_24_hours,
    },
    "cache_resources_from_aws_config_across_accounts": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_resources_from_aws_config_across_accounts",
        "options": {"expires": 300},
        "schedule": schedule_1_hour,
    },
    "cache_policy_requests": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_policy_requests",
        "options": {"expires": 1000},
        "schedule": schedule_5_minutes,
    },
    "cache_cloud_account_mapping": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloud_account_mapping",
        "options": {"expires": 1000},
        "schedule": schedule_1_hour,
    },
    "cache_credential_authorization_mapping": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_credential_authorization_mapping",
        "options": {"expires": 1000},
        "schedule": schedule_5_minutes,
    },
    "cache_scps_across_organizations": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_scps_across_organizations",
        "options": {"expires": 1000},
        "schedule": schedule_1_hour,
    },
    "cache_organization_structure": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_organization_structure",
        "options": {"expires": 1000},
        "schedule": schedule_1_hour,
    },
    "cache_resource_templates_task": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_resource_templates_task",
        "options": {"expires": 1000},
        "schedule": schedule_30_minute,
    },
    "cache_self_service_typeahead_task": {
        "task": "consoleme.celery_tasks.celery_tasks.cache_self_service_typeahead_task",
        "options": {"expires": 1000},
        "schedule": schedule_30_minute,
    },
}
The code above provides the dependencies needed to implement the `report_celery_last_success_metrics` function. The task is to write a Python function `def report_celery_last_success_metrics() -> bool` that solves the following problem:
For each celery task, this will determine the number of seconds since it has last been successful. Celery tasks should be emitting redis stats with a deterministic key (in our case, `f"{task}.last_success"`). report_celery_last_success_metrics should be run periodically to emit metrics on when a task was last successful. We can then alert when tasks are not run when intended. We should also alert when no metrics are emitted from this function.
Here is the function:
def report_celery_last_success_metrics() -> bool:
    """Emit, for every scheduled task, the seconds since its last success.

    Each celery task records a Redis timestamp under the deterministic key
    ``f"{task}.last_success"`` when it completes. This periodic task turns
    those timestamps into ``time_since_last_success`` gauges so alerting can
    fire when a task stops succeeding — and records its own last-success
    timestamp so the absence of this function's metrics can also be alerted
    on.
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    log_data = {"function": function}
    now = int(time.time())
    global schedule
    for entry in schedule.values():
        task = entry.get("task")
        last_success_key = f"{task}.last_success"
        last_success = int(red.get(last_success_key) or 0)
        if not last_success:
            # A zero timestamp means the task has never reported success.
            log_data["message"] = "Last Success Value is 0"
            log_data["task_last_success_key"] = last_success_key
            log.warning(log_data)
        elapsed = now - last_success
        stats.gauge(f"{task}.time_since_last_success", elapsed)
        red.set(f"{task}.time_since_last_success", elapsed)
    red.set(
        f"{function}.last_success", int(time.time())
    )  # Alert if this metric is not seen
    stats.count(f"{function}.success")
    stats.timer("worker.healthy")
    return True
162,203 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    # NOTE(review): `app` is the celery application defined earlier in this
    # module (outside this excerpt); the 5s timeout guards against Redis
    # being unreachable.
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
# Synchronous Redis client used by the tasks and signal handlers below.
red = RedisHandler().redis_sync()
def get_celery_request_tags(**kwargs):
    """Build a tag dict (for metrics/logging) describing a celery task event.

    The various celery signals pass different subsets of keyword arguments
    (``request``, ``sender``, ``exception``/``exc``, ``expired``, ...), so
    every lookup here is defensive.
    """
    sender = kwargs.get("sender")
    sender_hostname = "unknown"
    if sender:
        try:
            sender_hostname = sender.hostname
        except AttributeError:
            sender_hostname = vars(sender.request).get("origin", "unknown")

    request = kwargs.get("request")
    # task_revoked (unlike the other signals) passes a Context as `request`;
    # treat that the same as having no request object at all.
    if request and not isinstance(request, Context):
        task_name = request.name
        task_id = request.id
        receiver_hostname = request.hostname
    else:
        try:
            task_name = sender.name
        except AttributeError:
            task_name = kwargs.pop("name", "")
        try:
            task_id = sender.request.id
        except AttributeError:
            task_id = kwargs.pop("id", "")
        try:
            receiver_hostname = sender.request.hostname
        except AttributeError:
            receiver_hostname = ""

    tags = {
        "task_name": task_name,
        "task_id": task_id,
        "sender_hostname": sender_hostname,
        "receiver_hostname": receiver_hostname,
        "expired": kwargs.get("expired", False),
    }
    exception = kwargs.get("exception") or kwargs.get("exc")
    if exception:
        tags["error"] = repr(exception)
        if isinstance(exception, SoftTimeLimitExceeded):
            tags["timed_out"] = True
    return tags
if config.get("development", False):
    # If debug mode, we will set up the schedule to run the next minute after the job starts
    time_to_start = datetime.utcnow() + timedelta(minutes=1)
    dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
    # NOTE(review): schedule_minute and schedule_24_hours are not overridden
    # here and keep their normal cadence — confirm this is intentional.
    schedule_30_minute = dev_schedule
    schedule_45_minute = dev_schedule
    schedule_1_hour = dev_schedule
    schedule_6_hours = dev_schedule
    schedule_5_minutes = dev_schedule
# Optionally refresh the credential authorization mapping every minute when
# role-change detection is enabled.
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
    schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
        "task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
# Optionally cache CloudTrail denies every minute, plus an hourly rollup of
# CloudTrail errors by ARN.
if config.get("celery.cache_cloudtrail_denies.enabled"):
    schedule["cache_cloudtrail_denies"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
    schedule["cache_cloudtrail_errors_by_arn"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
        "options": {"expires": 300},
        "schedule": schedule_1_hour,
    }
# Development escape hatch: wipe the schedule so no periodic tasks run.
if config.get("celery.clear_tasks_for_development", False):
    schedule = {}
def refresh_dynamic_config_in_worker(**kwargs):
    """Reload dynamic configuration from Redis inside a celery worker.

    Accepts the keyword arguments supplied by celery signals; they are
    converted to tags and included in the log context for traceability.
    """
    request_tags = get_celery_request_tags(**kwargs)
    log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
    log_data.update(request_tags)
    config.CONFIG.load_dynamic_config_from_redis(log_data, red)
162,204 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def get_celery_request_tags(**kwargs):
    """
    Derive a dict of metrics/log tags (task name and id, sender and receiver
    hostnames, expiry flag, and error details) from the keyword arguments
    that Celery signal handlers receive.
    """
    sender = kwargs.get("sender")
    sender_hostname = "unknown"
    if sender:
        try:
            sender_hostname = sender.hostname
        except AttributeError:
            # Some signals pass a task instance; fall back to its request origin.
            sender_hostname = vars(sender.request).get("origin", "unknown")

    request = kwargs.get("request")
    # task_revoked (unlike the other signals) passes a Context as `request`,
    # which does not carry these fields directly.
    if request and not isinstance(request, Context):
        task_name = request.name
        task_id = request.id
        receiver_hostname = request.hostname
    else:
        def _from_sender(fetch, fallback_key):
            # Prefer the sender's attributes; fall back to the raw kwargs.
            try:
                return fetch()
            except AttributeError:
                return kwargs.pop(fallback_key, "")

        task_name = _from_sender(lambda: sender.name, "name")
        task_id = _from_sender(lambda: sender.request.id, "id")
        try:
            receiver_hostname = sender.request.hostname
        except AttributeError:
            receiver_hostname = ""

    tags = {
        "task_name": task_name,
        "task_id": task_id,
        "sender_hostname": sender_hostname,
        "receiver_hostname": receiver_hostname,
        "expired": kwargs.get("expired", False),
    }
    exception = kwargs.get("exception") or kwargs.get("exc")
    if exception:
        tags["error"] = repr(exception)
        if isinstance(exception, SoftTimeLimitExceeded):
            # Soft time-limit breaches surface as failures; tag them distinctly.
            tags["timed_out"] = True
    return tags
The provided code snippet includes necessary dependencies for implementing the `report_number_pending_tasks` function. Write a Python function `def report_number_pending_tasks(**kwargs)` to solve the following problem:
Report the number of pending tasks to our metrics broker every time a task is published. This metric can be used for autoscaling workers. https://docs.celeryproject.org/en/latest/userguide/signals.html#task-received :param sender: :param headers: :param body: :param kwargs: :return:
Here is the function:
def report_number_pending_tasks(**kwargs):
    """
    Emit a metric each time a task is published, so the pending-task rate can
    drive worker autoscaling.

    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-received

    :param sender:
    :param headers:
    :param body:
    :param kwargs:
    :return:
    """
    # task_id would explode metric cardinality; drop it before emitting.
    metric_tags = {
        key: value
        for key, value in get_celery_request_tags(**kwargs).items()
        if key != "task_id"
    }
    stats.timer("celery.new_pending_task", tags=metric_tags)
162,205 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
red = RedisHandler().redis_sync()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def get_celery_request_tags(**kwargs):
    """
    Derive a dict of metrics/log tags (task name and id, sender and receiver
    hostnames, expiry flag, and error details) from the keyword arguments
    that Celery signal handlers receive.
    """
    sender = kwargs.get("sender")
    sender_hostname = "unknown"
    if sender:
        try:
            sender_hostname = sender.hostname
        except AttributeError:
            # Some signals pass a task instance; fall back to its request origin.
            sender_hostname = vars(sender.request).get("origin", "unknown")

    request = kwargs.get("request")
    # task_revoked (unlike the other signals) passes a Context as `request`,
    # which does not carry these fields directly.
    if request and not isinstance(request, Context):
        task_name = request.name
        task_id = request.id
        receiver_hostname = request.hostname
    else:
        def _from_sender(fetch, fallback_key):
            # Prefer the sender's attributes; fall back to the raw kwargs.
            try:
                return fetch()
            except AttributeError:
                return kwargs.pop(fallback_key, "")

        task_name = _from_sender(lambda: sender.name, "name")
        task_id = _from_sender(lambda: sender.request.id, "id")
        try:
            receiver_hostname = sender.request.hostname
        except AttributeError:
            receiver_hostname = ""

    tags = {
        "task_name": task_name,
        "task_id": task_id,
        "sender_hostname": sender_hostname,
        "receiver_hostname": receiver_hostname,
        "expired": kwargs.get("expired", False),
    }
    exception = kwargs.get("exception") or kwargs.get("exc")
    if exception:
        tags["error"] = repr(exception)
        if isinstance(exception, SoftTimeLimitExceeded):
            # Soft time-limit breaches surface as failures; tag them distinctly.
            tags["timed_out"] = True
    return tags
The provided code snippet includes necessary dependencies for implementing the `report_successful_task` function. Write a Python function `def report_successful_task(**kwargs)` to solve the following problem:
Report a generic success metric as tasks to our metrics broker every time a task finishes correctly. This metric can be used for autoscaling workers. https://docs.celeryproject.org/en/latest/userguide/signals.html#task-success :param sender: :param headers: :param body: :param kwargs: :return:
Here is the function:
def report_successful_task(**kwargs):
    """
    Emit a success metric whenever a task finishes correctly; this metric can
    drive worker autoscaling. Also records a per-task "last success" timestamp
    in Redis.

    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-success

    :param sender:
    :param headers:
    :param body:
    :param kwargs:
    :return:
    """
    task_tags = get_celery_request_tags(**kwargs)
    # Stamp the last successful run so external monitors can spot stalled tasks.
    red.set(f"{task_tags['task_name']}.last_success", int(time.time()))
    # error is irrelevant on success; task_id would explode metric cardinality.
    for noisy_key in ("error", "task_id"):
        task_tags.pop(noisy_key, None)
    stats.timer("celery.successful_task", tags=task_tags)
162,206 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def get_celery_request_tags(**kwargs):
    """
    Derive a dict of metrics/log tags (task name and id, sender and receiver
    hostnames, expiry flag, and error details) from the keyword arguments
    that Celery signal handlers receive.
    """
    sender = kwargs.get("sender")
    sender_hostname = "unknown"
    if sender:
        try:
            sender_hostname = sender.hostname
        except AttributeError:
            # Some signals pass a task instance; fall back to its request origin.
            sender_hostname = vars(sender.request).get("origin", "unknown")

    request = kwargs.get("request")
    # task_revoked (unlike the other signals) passes a Context as `request`,
    # which does not carry these fields directly.
    if request and not isinstance(request, Context):
        task_name = request.name
        task_id = request.id
        receiver_hostname = request.hostname
    else:
        def _from_sender(fetch, fallback_key):
            # Prefer the sender's attributes; fall back to the raw kwargs.
            try:
                return fetch()
            except AttributeError:
                return kwargs.pop(fallback_key, "")

        task_name = _from_sender(lambda: sender.name, "name")
        task_id = _from_sender(lambda: sender.request.id, "id")
        try:
            receiver_hostname = sender.request.hostname
        except AttributeError:
            receiver_hostname = ""

    tags = {
        "task_name": task_name,
        "task_id": task_id,
        "sender_hostname": sender_hostname,
        "receiver_hostname": receiver_hostname,
        "expired": kwargs.get("expired", False),
    }
    exception = kwargs.get("exception") or kwargs.get("exc")
    if exception:
        tags["error"] = repr(exception)
        if isinstance(exception, SoftTimeLimitExceeded):
            # Soft time-limit breaches surface as failures; tag them distinctly.
            tags["timed_out"] = True
    return tags
The provided code snippet includes necessary dependencies for implementing the `report_task_retry` function. Write a Python function `def report_task_retry(**kwargs)` to solve the following problem:
Report a generic retry metric as tasks to our metrics broker every time a task is retried. This metric can be used for alerting. https://docs.celeryproject.org/en/latest/userguide/signals.html#task-retry :param sender: :param headers: :param body: :param kwargs: :return:
Here is the function:
def report_task_retry(**kwargs):
    """
    Log and emit a metric each time a task is retried; the metric can back
    alerting.

    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-retry

    :param sender:
    :param headers:
    :param body:
    :param kwargs:
    :return:
    """
    retry_tags = get_celery_request_tags(**kwargs)
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Celery Task Retry",
    }
    # Include the traceback when exception info accompanies the signal.
    einfo = kwargs.get("einfo")
    if einfo:
        log_data["traceback"] = einfo.traceback
    log_data.update(retry_tags)
    log.error(log_data)
    # error stays in the log only; task_id would explode metric cardinality.
    for key in ("error", "task_id"):
        retry_tags.pop(key, None)
    stats.timer("celery.retried_task", tags=retry_tags)
162,207 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def get_celery_request_tags(**kwargs):
    """
    Derive a dict of metrics/log tags (task name and id, sender and receiver
    hostnames, expiry flag, and error details) from the keyword arguments
    that Celery signal handlers receive.
    """
    sender = kwargs.get("sender")
    sender_hostname = "unknown"
    if sender:
        try:
            sender_hostname = sender.hostname
        except AttributeError:
            # Some signals pass a task instance; fall back to its request origin.
            sender_hostname = vars(sender.request).get("origin", "unknown")

    request = kwargs.get("request")
    # task_revoked (unlike the other signals) passes a Context as `request`,
    # which does not carry these fields directly.
    if request and not isinstance(request, Context):
        task_name = request.name
        task_id = request.id
        receiver_hostname = request.hostname
    else:
        def _from_sender(fetch, fallback_key):
            # Prefer the sender's attributes; fall back to the raw kwargs.
            try:
                return fetch()
            except AttributeError:
                return kwargs.pop(fallback_key, "")

        task_name = _from_sender(lambda: sender.name, "name")
        task_id = _from_sender(lambda: sender.request.id, "id")
        try:
            receiver_hostname = sender.request.hostname
        except AttributeError:
            receiver_hostname = ""

    tags = {
        "task_name": task_name,
        "task_id": task_id,
        "sender_hostname": sender_hostname,
        "receiver_hostname": receiver_hostname,
        "expired": kwargs.get("expired", False),
    }
    exception = kwargs.get("exception") or kwargs.get("exc")
    if exception:
        tags["error"] = repr(exception)
        if isinstance(exception, SoftTimeLimitExceeded):
            # Soft time-limit breaches surface as failures; tag them distinctly.
            tags["timed_out"] = True
    return tags
The provided code snippet includes necessary dependencies for implementing the `report_failed_task` function. Write a Python function `def report_failed_task(**kwargs)` to solve the following problem:
Report a generic failure metric as tasks to our metrics broker every time a task fails. This is also called when a task has hit a SoftTimeLimit. The metric emitted by this function can be used for alerting. https://docs.celeryproject.org/en/latest/userguide/signals.html#task-failure :param sender: :param headers: :param body: :param kwargs: :return:
Here is the function:
def report_failed_task(**kwargs):
    """
    Log and emit a metric each time a task fails. This is also called when a
    task hits a SoftTimeLimit. The emitted metric can back alerting.

    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-failure

    :param sender:
    :param headers:
    :param body:
    :param kwargs:
    :return:
    """
    failure_tags = get_celery_request_tags(**kwargs)
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Celery Task Failure",
    }
    # Include the traceback when exception info accompanies the signal.
    einfo = kwargs.get("einfo")
    if einfo:
        log_data["traceback"] = einfo.traceback
    log_data.update(failure_tags)
    log.error(log_data)
    # error stays in the log only; task_id would explode metric cardinality.
    for key in ("error", "task_id"):
        failure_tags.pop(key, None)
    stats.timer("celery.failed_task", tags=failure_tags)
162,208 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def get_celery_request_tags(**kwargs):
    """
    Derive a dict of metrics/log tags (task name and id, sender and receiver
    hostnames, expiry flag, and error details) from the keyword arguments
    that Celery signal handlers receive.
    """
    sender = kwargs.get("sender")
    sender_hostname = "unknown"
    if sender:
        try:
            sender_hostname = sender.hostname
        except AttributeError:
            # Some signals pass a task instance; fall back to its request origin.
            sender_hostname = vars(sender.request).get("origin", "unknown")

    request = kwargs.get("request")
    # task_revoked (unlike the other signals) passes a Context as `request`,
    # which does not carry these fields directly.
    if request and not isinstance(request, Context):
        task_name = request.name
        task_id = request.id
        receiver_hostname = request.hostname
    else:
        def _from_sender(fetch, fallback_key):
            # Prefer the sender's attributes; fall back to the raw kwargs.
            try:
                return fetch()
            except AttributeError:
                return kwargs.pop(fallback_key, "")

        task_name = _from_sender(lambda: sender.name, "name")
        task_id = _from_sender(lambda: sender.request.id, "id")
        try:
            receiver_hostname = sender.request.hostname
        except AttributeError:
            receiver_hostname = ""

    tags = {
        "task_name": task_name,
        "task_id": task_id,
        "sender_hostname": sender_hostname,
        "receiver_hostname": receiver_hostname,
        "expired": kwargs.get("expired", False),
    }
    exception = kwargs.get("exception") or kwargs.get("exc")
    if exception:
        tags["error"] = repr(exception)
        if isinstance(exception, SoftTimeLimitExceeded):
            # Soft time-limit breaches surface as failures; tag them distinctly.
            tags["timed_out"] = True
    return tags
The provided code snippet includes necessary dependencies for implementing the `report_unknown_task` function. Write a Python function `def report_unknown_task(**kwargs)` to solve the following problem:
Report a generic failure metric as tasks to our metrics broker every time a worker receives an unknown task. The metric emitted by this function can be used for alerting. https://docs.celeryproject.org/en/latest/userguide/signals.html#task-unknown :param sender: :param headers: :param body: :param kwargs: :return:
Here is the function:
def report_unknown_task(**kwargs):
    """
    Log and emit a metric every time a worker receives a task it does not
    recognize. The emitted metric can back alerting.

    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-unknown

    :param sender:
    :param headers:
    :param body:
    :param kwargs:
    :return:
    """
    unknown_tags = get_celery_request_tags(**kwargs)
    log.error(
        {
            "function": f"{__name__}.{sys._getframe().f_code.co_name}",
            "message": "Celery Task Unknown",
            **unknown_tags,
        }
    )
    # error stays in the log only; task_id would explode metric cardinality.
    stats.timer(
        "celery.unknown_task",
        tags={k: v for k, v in unknown_tags.items() if k not in ("error", "task_id")},
    )
162,209 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def get_celery_request_tags(**kwargs):
    """
    Derive a dict of metrics/log tags (task name and id, sender and receiver
    hostnames, expiry flag, and error details) from the keyword arguments
    that Celery signal handlers receive.
    """
    sender = kwargs.get("sender")
    sender_hostname = "unknown"
    if sender:
        try:
            sender_hostname = sender.hostname
        except AttributeError:
            # Some signals pass a task instance; fall back to its request origin.
            sender_hostname = vars(sender.request).get("origin", "unknown")

    request = kwargs.get("request")
    # task_revoked (unlike the other signals) passes a Context as `request`,
    # which does not carry these fields directly.
    if request and not isinstance(request, Context):
        task_name = request.name
        task_id = request.id
        receiver_hostname = request.hostname
    else:
        def _from_sender(fetch, fallback_key):
            # Prefer the sender's attributes; fall back to the raw kwargs.
            try:
                return fetch()
            except AttributeError:
                return kwargs.pop(fallback_key, "")

        task_name = _from_sender(lambda: sender.name, "name")
        task_id = _from_sender(lambda: sender.request.id, "id")
        try:
            receiver_hostname = sender.request.hostname
        except AttributeError:
            receiver_hostname = ""

    tags = {
        "task_name": task_name,
        "task_id": task_id,
        "sender_hostname": sender_hostname,
        "receiver_hostname": receiver_hostname,
        "expired": kwargs.get("expired", False),
    }
    exception = kwargs.get("exception") or kwargs.get("exc")
    if exception:
        tags["error"] = repr(exception)
        if isinstance(exception, SoftTimeLimitExceeded):
            # Soft time-limit breaches surface as failures; tag them distinctly.
            tags["timed_out"] = True
    return tags
The provided code snippet includes necessary dependencies for implementing the `report_rejected_task` function. Write a Python function `def report_rejected_task(**kwargs)` to solve the following problem:
Report a generic failure metric as tasks to our metrics broker every time a task is rejected. The metric emitted by this function can be used for alerting. https://docs.celeryproject.org/en/latest/userguide/signals.html#task-rejected :param sender: :param headers: :param body: :param kwargs: :return:
Here is the function:
def report_rejected_task(**kwargs):
    """
    Log and emit a metric every time a task is rejected. The emitted metric
    can back alerting.

    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-rejected

    :param sender:
    :param headers:
    :param body:
    :param kwargs:
    :return:
    """
    rejected_tags = get_celery_request_tags(**kwargs)
    log.error(
        {
            "function": f"{__name__}.{sys._getframe().f_code.co_name}",
            "message": "Celery Task Rejected",
            **rejected_tags,
        }
    )
    # error stays in the log only; task_id would explode metric cardinality.
    stats.timer(
        "celery.rejected_task",
        tags={k: v for k, v in rejected_tags.items() if k not in ("error", "task_id")},
    )
162,210 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
# Module-level logger and metrics emitter, resolved from ConsoleMe configuration.
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def get_celery_request_tags(**kwargs):
    """
    Build a dict of metric/log tags describing a Celery signal invocation.

    Extracts the task name, task id, and the sending/receiving hostnames from
    the signal kwargs, then adds an "expired" marker and, when an exception is
    present, "error" (and "timed_out" for soft time limit breaches).

    :param kwargs: signal keyword arguments (sender, request, exception, ...)
    :return: dict of tags
    """
    sender = kwargs.get("sender")
    sender_hostname = "unknown"
    if sender:
        try:
            sender_hostname = sender.hostname
        except AttributeError:
            sender_hostname = vars(sender.request).get("origin", "unknown")
    request = kwargs.get("request")
    # task_revoked (unlike other signals) passes a Context object as `request`,
    # so only trust `request` when it is not a Context.
    if request and not isinstance(request, Context):
        task_name = request.name
        task_id = request.id
        receiver_hostname = request.hostname
    else:
        try:
            task_name = sender.name
        except AttributeError:
            task_name = kwargs.pop("name", "")
        try:
            task_id = sender.request.id
        except AttributeError:
            task_id = kwargs.pop("id", "")
        try:
            receiver_hostname = sender.request.hostname
        except AttributeError:
            receiver_hostname = ""
    tags = {
        "task_name": task_name,
        "task_id": task_id,
        "sender_hostname": sender_hostname,
        "receiver_hostname": receiver_hostname,
        "expired": kwargs.get("expired", False),
    }
    exception = kwargs.get("exception") or kwargs.get("exc")
    if exception:
        tags["error"] = repr(exception)
        if isinstance(exception, SoftTimeLimitExceeded):
            tags["timed_out"] = True
    return tags
The provided code snippet includes necessary dependencies for implementing the `report_revoked_task` function. Write a Python function `def report_revoked_task(**kwargs)` to solve the following problem:
Report a generic failure metric to our metrics broker every time a task is revoked. This metric can be used for alerting. https://docs.celeryproject.org/en/latest/userguide/signals.html#task-revoked :param sender: :param headers: :param body: :param kwargs: :return:
Here is the function:
def report_revoked_task(**kwargs):
    """
    Emit a failure metric to our metrics broker whenever a Celery task is revoked.

    This metric can be used for alerting.
    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-revoked

    :param kwargs: signal keyword arguments (sender, headers, body, ...)
    :return: None
    """
    tags = get_celery_request_tags(**kwargs)
    # Log the revocation with full context before trimming the tags.
    log.error(
        {
            "function": f"{__name__}.{sys._getframe().f_code.co_name}",
            "message": "Celery Task Revoked",
            **tags,
        }
    )
    # error / task_id are too high-cardinality to attach to a metric.
    tags.pop("error", None)
    tags.pop("task_id", None)
    stats.timer("celery.revoked_task", tags=tags)
162,211 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
# NOTE(review): this top-level block references `app`, `schedule`,
# `schedule_minute` and `schedule_1_hour`, which are presumably defined
# earlier in the full module — confirm against the complete file.
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
red = RedisHandler().redis_sync()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
if config.get("development", False):
    # If debug mode, we will set up the schedule to run the next minute after the job starts
    time_to_start = datetime.utcnow() + timedelta(minutes=1)
    dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
    schedule_30_minute = dev_schedule
    schedule_45_minute = dev_schedule
    schedule_1_hour = dev_schedule
    schedule_6_hours = dev_schedule
    schedule_5_minutes = dev_schedule
# Optional per-feature schedule entries, gated on configuration flags.
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
    schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
        "task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
if config.get("celery.cache_cloudtrail_denies.enabled"):
    schedule["cache_cloudtrail_denies"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
schedule["cache_cloudtrail_errors_by_arn"] = {
    "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
    "options": {"expires": 300},
    "schedule": schedule_1_hour,
}
# Development escape hatch: wipe the beat schedule entirely.
if config.get("celery.clear_tasks_for_development", False):
    schedule = {}
async def get_account_id_to_name_mapping(
    status="active", environment=None, force_sync=False
):
    """
    Return a mapping of AWS account ID -> account name from the account cache.

    Reads the cached cloud-account list from Redis (falling back to S3). When
    the cache is empty or ``force_sync`` is set, the cache is refreshed first.

    :param status: only include accounts with this status (falsy = no filter)
    :param environment: only include accounts in this environment (falsy = no filter)
    :param force_sync: force a cache refresh before reading
    :return: dict mapping account id to account name
    """
    redis_key = config.get(
        "cache_cloud_accounts.redis.key.all_accounts_key", "ALL_AWS_ACCOUNTS"
    )
    accounts = await retrieve_json_data_from_redis_or_s3(redis_key, default={})
    if force_sync or not accounts or not accounts.get("accounts"):
        # Cache is missing/empty (or caller demanded a refresh): re-sync, retry.
        await cache_cloud_accounts()
        accounts = await retrieve_json_data_from_redis_or_s3(
            redis_key,
            s3_bucket=config.get("cache_cloud_accounts.s3.bucket"),
            s3_key=config.get(
                "cache_cloud_accounts.s3.file",
                "cache_cloud_accounts/accounts_v1.json.gz",
            ),
            default={},
        )
    return {
        account["id"]: account["name"]
        for account in accounts.get("accounts", [])
        if (not status or account.get("status") == status)
        and (not environment or account.get("environment") == environment)
    }
def allowed_to_sync_role(
    role_arn: str, role_tags: List[Optional[Dict[str, str]]]
) -> bool:
    """
    Decide whether ConsoleMe may sync / otherwise manipulate the given IAM role.

    By default every role is synced. Administrators can restrict syncing via
    configuration so that a role is allowed when it matches any of:

    1. ``roles.allowed_arns`` - an explicit list of role ARNs, e.g.::

        roles:
          allowed_arns:
            - arn:aws:iam::111111111111:role/role-name-here-1

    2. ``roles.allowed_tag_keys`` - the role carries at least one of the
       listed tag keys, e.g.::

        roles:
          allowed_tag_keys:
            - cosoleme-authorized
            - consoleme-authorized-cli-only

    3. ``roles.allowed_tags`` - the role carries *all* of the configured tag
       key/value pairs, e.g.::

        roles:
          allowed_tags:
            tag1: value1
            tag2: value2

    :param role_arn: The AWS role arn
    :param role_tags: The role's tags, a list of {"Key": ..., "Value": ...} dicts
    :return: True if ConsoleMe is allowed to sync / access the role
    """
    allowed_tags = config.get("roles.allowed_tags", {})
    allowed_arns = config.get("roles.allowed_arns", [])
    allowed_tag_keys = config.get("roles.allowed_tag_keys", [])
    # No restrictions configured: everything may be synced.
    if not (allowed_tags or allowed_arns or allowed_tag_keys):
        return True
    if role_arn in allowed_arns:
        return True
    # Collapse [{'Key': k, 'Value': v}, ...] into the set of tag keys; the
    # role passes if any allowed tag key is present.
    actual_tag_keys = {d["Key"] for d in role_tags}
    if allowed_tag_keys and not actual_tag_keys.isdisjoint(allowed_tag_keys):
        return True
    # Collapse the same list into a single {key: value} dict; *all* configured
    # allowed_tags must be present (subset check) for the role to pass.
    actual_tags = {d["Key"]: d["Value"] for d in role_tags}
    if allowed_tags and allowed_tags.items() <= actual_tags.items():
        return True
    return False
async def store_json_results_in_redis_and_s3(
    data: Union[
        Dict[str, set],
        Dict[str, str],
        List[
            Union[
                Dict[str, Union[Union[str, int], Any]],
                Dict[str, Union[Union[str, None, int], Any]],
            ]
        ],
        str,
        Dict[str, list],
    ],
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    json_encoder=None,
    s3_expires: int = None,
):
    """
    Stores data in Redis and S3, depending on configuration

    :param data: Python dictionary or list that will be encoded in JSON for storage
    :param redis_key: Redis Key to store data to
    :param redis_data_type: "str" or "hash", depending on how we're storing data in Redis
    :param s3_bucket: S3 bucket to store data
    :param s3_key: S3 key to store data
    :param json_encoder: optional ``default`` callable forwarded to json.dumps
    :param s3_expires: Epoch time integer for when the written S3 object should expire
    :raises UnsupportedRedisDataType: if redis_data_type is not "str" or "hash"
    :return: None
    """
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated = int(time.time())
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    if redis_key:
        if redis_data_type == "str":
            if isinstance(data, str):
                # Already serialized by the caller; store verbatim.
                red.set(redis_key, data)
            else:
                red.set(
                    redis_key, json.dumps(data, cls=SetEncoder, default=json_encoder)
                )
        elif redis_data_type == "hash":
            if data:
                red.hmset(redis_key, data)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        # Record when this key was written so readers can enforce max_age.
        red.hset(last_updated_redis_key, redis_key, last_updated)
    if s3_bucket and s3_key:
        s3_extra_kwargs = {}
        if isinstance(s3_expires, int):
            s3_extra_kwargs["Expires"] = datetime.utcfromtimestamp(s3_expires)
        # Wrap the payload with its write timestamp so readers can enforce max_age.
        data_for_s3 = json.dumps(
            {"last_updated": last_updated, "data": data},
            cls=SetEncoder,
            default=json_encoder,
            indent=2,
        ).encode()
        if s3_key.endswith(".gz"):
            data_for_s3 = gzip.compress(data_for_s3)
        put_object(Bucket=s3_bucket, Key=s3_key, Body=data_for_s3, **s3_extra_kwargs)
async def retrieve_json_data_from_redis_or_s3(
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    cache_to_redis_if_data_in_s3: bool = True,
    max_age: Optional[int] = None,
    default: Optional = None,
    json_object_hook: Optional = None,
    json_encoder: Optional = None,
):
    """
    Retrieve data from Redis as a priority. If data is unavailable in Redis, fall back to S3 and attempt to store
    data in Redis for quicker retrieval later.

    :param redis_key: Redis Key to retrieve data from
    :param redis_data_type: "str" or "hash", depending on how the data is stored in Redis
    :param s3_bucket: S3 bucket to retrieve data from
    :param s3_key: S3 key to retrieve data from
    :param cache_to_redis_if_data_in_s3: Cache the data in Redis if the data is in S3 but not Redis
    :param max_age: maximum allowed age of the cached data, in seconds
    :param default: value to return when no data could be retrieved
    :param json_object_hook: optional object_hook forwarded to json.loads
    :param json_encoder: optional encoder used when re-caching S3 data to Redis
    :return: the cached data (or ``default``)
    :raises ExpiredData: data exists but is older than ``max_age`` and no fallback exists
    :raises UnsupportedRedisDataType: ``redis_data_type`` is not "str" or "hash"
    :raises DataNotRetrievable: no data found and no ``default`` was provided
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    data = None
    if redis_key:
        if redis_data_type == "str":
            data_s = red.get(redis_key)
            if data_s:
                data = json.loads(data_s, object_hook=json_object_hook)
        elif redis_data_type == "hash":
            data = red.hgetall(redis_key)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        if data and max_age:
            current_time = int(time.time())
            last_updated_raw = red.hget(last_updated_redis_key, redis_key)
            # Bug fix: the last-updated hash may have no entry for this key
            # (e.g. data written by an older version); int(None) would raise
            # TypeError. Treat a missing timestamp as stale instead.
            last_updated = int(last_updated_raw) if last_updated_raw is not None else 0
            if current_time - last_updated > max_age:
                data = None
                # Fall back to S3 if expired.
                if not s3_bucket or not s3_key:
                    raise ExpiredData(f"Data in Redis is older than {max_age} seconds.")
    # Fall back to S3 if there's no data
    if not data and s3_bucket and s3_key:
        try:
            s3_object = get_object(Bucket=s3_bucket, Key=s3_key)
        except ClientError as e:
            # Robustness fix: match the structured error code instead of the
            # exact exception string, which varies across botocore versions.
            if e.response.get("Error", {}).get("Code") == "NoSuchKey":
                if default is not None:
                    return default
            raise
        s3_object_content = await sync_to_async(s3_object["Body"].read)()
        if s3_key.endswith(".gz"):
            s3_object_content = gzip.decompress(s3_object_content)
        data_object = json.loads(s3_object_content, object_hook=json_object_hook)
        data = data_object["data"]
        if data and max_age:
            current_time = int(time.time())
            last_updated = data_object["last_updated"]
            if current_time - last_updated > max_age:
                raise ExpiredData(f"Data in S3 is older than {max_age} seconds.")
        if redis_key and cache_to_redis_if_data_in_s3:
            await store_json_results_in_redis_and_s3(
                data,
                redis_key=redis_key,
                redis_data_type=redis_data_type,
                json_encoder=json_encoder,
            )
    if data is not None:
        return data
    if default is not None:
        return default
    raise DataNotRetrievable("Unable to retrieve expected data.")
async def get_aws_config_history_url_for_resource(
    account_id,
    resource_id,
    resource_name,
    technology,
    region=config.get("aws.region", "us-east-1"),
):
    """
    Build a ConsoleMe redirect URL pointing at the AWS Config timeline for a resource.

    When ``get_aws_config_history_url_for_resource.generate_conglomo_url`` is
    configured, delegate to the Conglomo URL generator instead.

    NOTE(review): the ``region`` default is evaluated once at import time — confirm
    that is intended.
    """
    if config.get("get_aws_config_history_url_for_resource.generate_conglomo_url"):
        return await get_conglomo_url_for_resource(
            account_id, resource_id, technology, region
        )
    console_path = (
        f"https://{region}.console.aws.amazon.com/config/home?#/resources/timeline?"
        f"resourceId={resource_id}&resourceName={resource_name}&resourceType={technology}"
    )
    # Route through ConsoleMe's /role endpoint so the user lands in the right account.
    return f"/role/{account_id}?redirect={urllib.parse.quote_plus(console_path)}"
def cache_policies_table_details() -> bool:
    """
    Build and cache the data backing ConsoleMe's policies table.

    Aggregates IAM roles/users, S3 buckets, SNS topics, SQS queues, managed
    policies and AWS Config resources (each section individually skippable via
    ``cache_policies_table_details.skip_*`` configuration) into one list of
    resource entries annotated with CloudTrail/S3 error counts, then stores the
    result in Redis (and in S3 when running in the active region or a dev/test
    environment).

    :return: True on success
    """
    items = []
    accounts_d = async_to_sync(get_account_id_to_name_mapping)()
    cloudtrail_errors = {}
    cloudtrail_errors_j = red.get(
        config.get(
            "celery.cache_cloudtrail_errors_by_arn.redis_key",
            "CLOUDTRAIL_ERRORS_BY_ARN",
        )
    )
    if cloudtrail_errors_j:
        cloudtrail_errors = json.loads(cloudtrail_errors_j)
    s3_error_topic = config.get("redis.s3_errors", "S3_ERRORS")
    all_s3_errors = red.get(s3_error_topic)
    s3_errors = {}
    if all_s3_errors:
        s3_errors = json.loads(all_s3_errors)
    # Bug fix: initialize so the success metric at the end of this function
    # cannot raise a NameError when IAM role caching is skipped.
    all_iam_roles = {}
    # IAM Roles
    skip_iam_roles = config.get("cache_policies_table_details.skip_iam_roles", False)
    if not skip_iam_roles:
        all_iam_roles = async_to_sync(retrieve_json_data_from_redis_or_s3)(
            redis_key=config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE"),
            redis_data_type="hash",
            s3_bucket=config.get(
                "cache_iam_resources_across_accounts.all_roles_combined.s3.bucket"
            ),
            s3_key=config.get(
                "cache_iam_resources_across_accounts.all_roles_combined.s3.file",
                "account_resource_cache/cache_all_roles_v1.json.gz",
            ),
            default={},
        )
        for arn, role_details_j in all_iam_roles.items():
            role_details = ujson.loads(role_details_j)
            role_details_policy = ujson.loads(role_details.get("policy", {}))
            role_tags = role_details_policy.get("Tags", {})
            if not allowed_to_sync_role(arn, role_tags):
                continue
            # Sum CloudTrail errors and per-ARN S3 errors into one count.
            error_count = cloudtrail_errors.get(arn, 0)
            s3_errors_for_arn = s3_errors.get(arn, [])
            for error in s3_errors_for_arn:
                error_count += int(error.get("count"))
            account_id = arn.split(":")[4]
            account_name = accounts_d.get(str(account_id), "Unknown")
            resource_id = role_details.get("resourceId")
            items.append(
                {
                    "account_id": account_id,
                    "account_name": account_name,
                    "arn": arn,
                    "technology": "AWS::IAM::Role",
                    "templated": red.hget(
                        config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
                        arn.lower(),
                    ),
                    "errors": error_count,
                    "config_history_url": async_to_sync(
                        get_aws_config_history_url_for_resource
                    )(account_id, resource_id, arn, "AWS::IAM::Role"),
                }
            )
    # IAM Users
    skip_iam_users = config.get("cache_policies_table_details.skip_iam_users", False)
    if not skip_iam_users:
        all_iam_users = async_to_sync(retrieve_json_data_from_redis_or_s3)(
            redis_key=config.get("aws.iamusers_redis_key", "IAM_USER_CACHE"),
            redis_data_type="hash",
            s3_bucket=config.get(
                "cache_iam_resources_across_accounts.all_users_combined.s3.bucket"
            ),
            s3_key=config.get(
                "cache_iam_resources_across_accounts.all_users_combined.s3.file",
                "account_resource_cache/cache_all_users_v1.json.gz",
            ),
            default={},
        )
        for arn, details_j in all_iam_users.items():
            details = ujson.loads(details_j)
            error_count = cloudtrail_errors.get(arn, 0)
            s3_errors_for_arn = s3_errors.get(arn, [])
            for error in s3_errors_for_arn:
                error_count += int(error.get("count"))
            account_id = arn.split(":")[4]
            account_name = accounts_d.get(str(account_id), "Unknown")
            resource_id = details.get("resourceId")
            items.append(
                {
                    "account_id": account_id,
                    "account_name": account_name,
                    "arn": arn,
                    "technology": "AWS::IAM::User",
                    "templated": red.hget(
                        config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
                        arn.lower(),
                    ),
                    "errors": error_count,
                    "config_history_url": async_to_sync(
                        get_aws_config_history_url_for_resource
                    )(account_id, resource_id, arn, "AWS::IAM::User"),
                }
            )
    # S3 Buckets
    skip_s3_buckets = config.get("cache_policies_table_details.skip_s3_buckets", False)
    if not skip_s3_buckets:
        s3_bucket_key: str = config.get("redis.s3_bucket_key", "S3_BUCKETS")
        s3_accounts = red.hkeys(s3_bucket_key)
        if s3_accounts:
            for account in s3_accounts:
                account_name = accounts_d.get(str(account), "Unknown")
                buckets = json.loads(red.hget(s3_bucket_key, account))
                for bucket in buckets:
                    bucket_arn = f"arn:aws:s3:::{bucket}"
                    s3_errors_for_arn = s3_errors.get(bucket_arn, [])
                    error_count = 0
                    for error in s3_errors_for_arn:
                        error_count += int(error.get("count"))
                    items.append(
                        {
                            "account_id": account,
                            "account_name": account_name,
                            # Reuse the ARN computed above instead of rebuilding it.
                            "arn": bucket_arn,
                            "technology": "AWS::S3::Bucket",
                            "templated": None,
                            "errors": error_count,
                        }
                    )
    # SNS Topics
    skip_sns_topics = config.get("cache_policies_table_details.skip_sns_topics", False)
    if not skip_sns_topics:
        sns_topic_key: str = config.get("redis.sns_topics_key", "SNS_TOPICS")
        sns_accounts = red.hkeys(sns_topic_key)
        if sns_accounts:
            for account in sns_accounts:
                account_name = accounts_d.get(str(account), "Unknown")
                topics = json.loads(red.hget(sns_topic_key, account))
                for topic in topics:
                    error_count = 0
                    items.append(
                        {
                            "account_id": account,
                            "account_name": account_name,
                            "arn": topic,
                            "technology": "AWS::SNS::Topic",
                            "templated": None,
                            "errors": error_count,
                        }
                    )
    # SQS Queues
    skip_sqs_queues = config.get("cache_policies_table_details.skip_sqs_queues", False)
    if not skip_sqs_queues:
        sqs_queue_key: str = config.get("redis.sqs_queues_key", "SQS_QUEUES")
        sqs_accounts = red.hkeys(sqs_queue_key)
        if sqs_accounts:
            for account in sqs_accounts:
                account_name = accounts_d.get(str(account), "Unknown")
                queues = json.loads(red.hget(sqs_queue_key, account))
                for queue in queues:
                    error_count = 0
                    items.append(
                        {
                            "account_id": account,
                            "account_name": account_name,
                            "arn": queue,
                            "technology": "AWS::SQS::Queue",
                            "templated": None,
                            "errors": error_count,
                        }
                    )
    # Managed Policies
    skip_managed_policies = config.get(
        "cache_policies_table_details.skip_managed_policies", False
    )
    if not skip_managed_policies:
        managed_policies_key: str = config.get(
            "redis.iam_managed_policies_key", "IAM_MANAGED_POLICIES"
        )
        managed_policies_accounts = red.hkeys(managed_policies_key)
        if managed_policies_accounts:
            for managed_policies_account in managed_policies_accounts:
                account_name = accounts_d.get(str(managed_policies_account), "Unknown")
                managed_policies_in_account = json.loads(
                    red.hget(managed_policies_key, managed_policies_account)
                )
                for policy_arn in managed_policies_in_account:
                    # managed policies that are managed by AWS shouldn't be added to the policies table for 2 reasons:
                    # 1. We don't manage them, can't edit them
                    # 2. There are a LOT of them and we would just end up spamming the policy table...
                    # TODO: discuss if this is okay
                    if str(managed_policies_account) not in policy_arn:
                        continue
                    error_count = 0
                    items.append(
                        {
                            "account_id": managed_policies_account,
                            "account_name": account_name,
                            "arn": policy_arn,
                            "technology": "managed_policy",
                            "templated": None,
                            "errors": error_count,
                        }
                    )
    # AWS Config Resources
    skip_aws_config_resources = config.get(
        "cache_policies_table_details.skip_aws_config_resources", False
    )
    if not skip_aws_config_resources:
        resources_from_aws_config_redis_key: str = config.get(
            "aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
        )
        resources_from_aws_config = red.hgetall(resources_from_aws_config_redis_key)
        if resources_from_aws_config:
            for arn, value in resources_from_aws_config.items():
                resource = json.loads(value)
                technology = resource["resourceType"]
                # Skip technologies that we retrieve directly
                if technology in [
                    "AWS::IAM::Role",
                    "AWS::SQS::Queue",
                    "AWS::SNS::Topic",
                    "AWS::S3::Bucket",
                    "AWS::IAM::ManagedPolicy",
                ]:
                    continue
                account_id = arn.split(":")[4]
                account_name = accounts_d.get(account_id, "Unknown")
                items.append(
                    {
                        "account_id": account_id,
                        "account_name": account_name,
                        "arn": arn,
                        "technology": technology,
                        "templated": None,
                        "errors": 0,
                    }
                )
    # Only persist to S3 from the active region (or dev/test environments).
    s3_bucket = None
    s3_key = None
    if config.region == config.get("celery.active_region", config.region) or config.get(
        "environment"
    ) in ["dev", "test"]:
        s3_bucket = config.get("cache_policies_table_details.s3.bucket")
        s3_key = config.get(
            "cache_policies_table_details.s3.file",
            "policies_table/cache_policies_table_details_v1.json.gz",
        )
    async_to_sync(store_json_results_in_redis_and_s3)(
        items,
        redis_key=config.get("policies.redis_policies_key", "ALL_POLICIES"),
        s3_bucket=s3_bucket,
        s3_key=s3_key,
    )
    stats.count(
        "cache_policies_table_details.success",
        # Bug fix: previously raised NameError when skip_iam_roles was True;
        # len() on the dict also avoids the redundant .keys() call.
        tags={"num_roles": len(all_iam_roles)},
    )
    return True
162,212 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
# NOTE(review): `app` is presumably the Celery application defined earlier in
# the full module — confirm against the complete file.
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
# Module-level logger, synchronous Redis client, and metrics emitter.
log = config.get_logger()
red = RedisHandler().redis_sync()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def is_task_already_running(fun, args):
    """
    Returns True if an identical task for a given function (and arguments) is
    already being ran by Celery — i.e. some *other* active task instance has
    the same name and arguments as the one currently executing.

    :param fun: task name to look for
    :param args: task arguments to match
    :return: bool
    """
    current = celery.current_task
    task_id = current.request.id if current else None
    if not task_id:
        return False
    log.debug(task_id)
    active_tasks = app.control.inspect()._request("active")
    if not active_tasks:
        return False
    for tasks in active_tasks.values():
        for task in tasks:
            # Skip our own task instance.
            if task.get("id") == task_id:
                continue
            if task.get("name") == fun and task.get("args") == args:
                return True
    return False
# Bug fix: lines above were orphaned keyword arguments — the surrounding
# `@retry(...)` decorator call was lost, leaving a syntax error. Restored here
# (`retry` is imported from the `retrying` package at the top of this module).
@retry(
    stop_max_attempt_number=4,
    wait_exponential_multiplier=1000,
    wait_exponential_max=1000,
)
def _add_role_to_redis(redis_key: str, role_entry: Dict) -> None:
    """
    This function will add IAM role data to redis so that policy details can be quickly retrieved by the policies
    endpoint.

    IAM role data is stored in the `redis_key` redis key by the role's ARN.

    Parameters
    ----------
    redis_key : str
        The redis key (hash)
    role_entry : Dict
        The role entry
        Example: {'name': 'nameOfRole', 'accountId': '123456789012', 'arn': 'arn:aws:iam::123456789012:role/nameOfRole',
        'templated': None, 'ttl': 1562510908, 'policy': '<json_formatted_policy>'}
    """
    try:
        red.hset(redis_key, str(role_entry["arn"]), str(json.dumps(role_entry)))
    except Exception as e:  # noqa
        # Emit a metric and structured log, then re-raise so @retry can re-attempt.
        stats.count(
            "_add_role_to_redis.error",
            tags={"redis_key": redis_key, "error": str(e), "role_entry": role_entry},
        )
        log_data = {
            "message": "Error syncing Account's IAM roles to Redis",
            "account_id": role_entry["account_id"],
            "arn": role_entry["arn"],
            "role_entry": role_entry,
        }
        log.error(log_data, exc_info=True)
        raise
def cache_iam_resources_for_account(account_id: str) -> Dict[str, Any]:
    """
    Cache all IAM roles, users, groups, and managed policies for one AWS account.

    In the active Celery region (or dev/test environments) this queries IAM via
    the ``get_account_authorization_details`` paginator, stores the merged raw
    response and per-resource-type slices in S3, writes every allowed role to
    DynamoDB and the shared Redis role hash, and writes users/groups/policies
    to temporary Redis hashes that ``cache_iam_resources_across_accounts``
    later consolidates.

    :param account_id: AWS account ID to cache IAM resources for
    :return: log_data dict (function name, account id, per-type counts)
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    log_data = {"function": function, "account_id": account_id}
    # Temporary Redis hash keys, consumed/cleared by cache_iam_resources_across_accounts.
    cache_keys = {
        "iam_roles": {
            "temp_cache_key": config.get(
                "aws.iamroles_redis_key_temp", "IAM_ROLE_CACHE_TEMP"
            )
        },
        "iam_users": {
            "temp_cache_key": config.get(
                "aws.iamusers_redis_key_temp", "IAM_USER_CACHE_TEMP"
            )
        },
        "iam_groups": {
            "temp_cache_key": config.get(
                "aws.iamgroups_redis_key_temp", "IAM_GROUP_CACHE_TEMP"
            )
        },
        "iam_policies": {
            "temp_cache_key": config.get(
                "aws.iampolicies_redis_key_temp", "IAM_POLICIES_CACHE_TEMP"
            )
        },
    }
    # Get the DynamoDB handler:
    dynamo = IAMRoleDynamoHandler()
    cache_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
    # Only query IAM and put data in Dynamo if we're in the active region
    if config.region == config.get("celery.active_region", config.region) or config.get(
        "environment"
    ) in ["dev", "test"]:
        # Connection kwargs for an assumed role in the target account.
        conn = dict(
            account_number=account_id,
            assume_role=config.get("policies.role_name"),
            region=config.region,
            client_kwargs=config.get("boto3.client_kwargs", {}),
            sts_client_kwargs=dict(
                region_name=config.region,
                endpoint_url=config.get(
                    "aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
                ).format(region=config.region),
            ),
        )
        client = boto3_cached_conn("iam", **conn)
        paginator = client.get_paginator("get_account_authorization_details")
        response_iterator = paginator.paginate()
        # Merge all pages into one response-shaped dict: the first page is kept
        # whole, subsequent pages extend its four detail lists.
        all_iam_resources = {}
        for response in response_iterator:
            if not all_iam_resources:
                all_iam_resources = response
            else:
                all_iam_resources["UserDetailList"].extend(response["UserDetailList"])
                all_iam_resources["GroupDetailList"].extend(response["GroupDetailList"])
                all_iam_resources["RoleDetailList"].extend(response["RoleDetailList"])
                all_iam_resources["Policies"].extend(response["Policies"])
            for k in response.keys():
                if k not in [
                    "UserDetailList",
                    "GroupDetailList",
                    "RoleDetailList",
                    "Policies",
                    "ResponseMetadata",
                    "Marker",
                    "IsTruncated",
                ]:
                    # Fail hard if we find something unexpected
                    raise RuntimeError("Unexpected key {0} in response".format(k))
        # Store entire response in S3
        async_to_sync(store_json_results_in_redis_and_s3)(
            all_iam_resources,
            s3_bucket=config.get("cache_iam_resources_for_account.s3.bucket"),
            s3_key=config.get(
                "cache_iam_resources_for_account.s3.file",
                "get_account_authorization_details/get_account_authorization_details_{account_id}_v1.json.gz",
            ).format(account_id=account_id),
        )
        iam_roles = all_iam_resources["RoleDetailList"]
        iam_users = all_iam_resources["UserDetailList"]
        iam_groups = all_iam_resources["GroupDetailList"]
        iam_policies = all_iam_resources["Policies"]
        # Make sure these roles satisfy config -> roles.allowed_*
        filtered_iam_roles = []
        for role in iam_roles:
            arn = role.get("Arn", "")
            tags = role.get("Tags", [])
            if allowed_to_sync_role(arn, tags):
                filtered_iam_roles.append(role)
        iam_roles = filtered_iam_roles
        # Persist each (non-empty) per-type slice to S3 and record its count.
        if iam_roles:
            async_to_sync(store_json_results_in_redis_and_s3)(
                iam_roles,
                s3_bucket=config.get(
                    "cache_iam_resources_for_account.iam_roles.s3.bucket"
                ),
                s3_key=config.get(
                    "cache_iam_resources_for_account.iam_roles.s3.file",
                    "account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
                ).format(resource_type="iam_roles", account_id=account_id),
            )
            log_data["num_iam_roles"] = len(iam_roles)
        if iam_users:
            async_to_sync(store_json_results_in_redis_and_s3)(
                iam_users,
                s3_bucket=config.get(
                    "cache_iam_resources_for_account.iam_users.s3.bucket"
                ),
                s3_key=config.get(
                    "cache_iam_resources_for_account.iam_users.s3.file",
                    "account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
                ).format(resource_type="iam_users", account_id=account_id),
            )
            log_data["num_iam_users"] = len(iam_users)
        if iam_groups:
            async_to_sync(store_json_results_in_redis_and_s3)(
                iam_groups,
                s3_bucket=config.get(
                    "cache_iam_resources_for_account.iam_groups.s3.bucket"
                ),
                s3_key=config.get(
                    "cache_iam_resources_for_account.iam_groups.s3.file",
                    "account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
                ).format(resource_type="iam_groups", account_id=account_id),
            )
            log_data["num_iam_groups"] = len(iam_groups)
        if iam_policies:
            async_to_sync(store_json_results_in_redis_and_s3)(
                iam_policies,
                s3_bucket=config.get(
                    "cache_iam_resources_for_account.iam_policies.s3.bucket"
                ),
                s3_key=config.get(
                    "cache_iam_resources_for_account.iam_policies.s3.file",
                    "account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
                ).format(resource_type="iam_policies", account_id=account_id),
            )
            log_data["num_iam_policies"] = len(iam_policies)
        # Cached roles expire 36 hours after their last successful sync.
        ttl: int = int((datetime.utcnow() + timedelta(hours=36)).timestamp())
        # Save them:
        for role in iam_roles:
            # If expired temporary policies were removed, re-fetch the role so
            # the cleaned-up state is what gets cached.
            if remove_temp_policies(role, client):
                # NOTE(review): passing `conn` (a dict) as the .get() default
                # looks like a bug — a role missing "RoleName" would be looked
                # up by the connection dict. Presumably no default was
                # intended; confirm before changing.
                role = aws.get_iam_role_sync(account_id, role.get("RoleName", conn))
                async_to_sync(aws.cloudaux_to_aws)(role)
            role_entry = {
                "arn": role.get("Arn"),
                "name": role.get("RoleName"),
                "resourceId": role.get("RoleId"),
                "accountId": account_id,
                "ttl": ttl,
                "owner": get_aws_principal_owner(role),
                "policy": dynamo.convert_iam_resource_to_json(role),
                # Templated-role lookup is keyed by lowercased ARN; None if not templated.
                "templated": red.hget(
                    config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
                    role.get("Arn").lower(),
                ),
            }
            # DynamoDB:
            dynamo.sync_iam_role_for_account(role_entry)
            # Redis:
            _add_role_to_redis(cache_key, role_entry)
            # Run internal function on role. This can be used to inspect roles, add managed policies, or other actions
            aws().handle_detected_role(role)
        for user in iam_users:
            user_entry = {
                "arn": user.get("Arn"),
                "name": user.get("UserName"),
                "resourceId": user.get("UserId"),
                "accountId": account_id,
                "ttl": ttl,
                "owner": get_aws_principal_owner(user),
                "policy": dynamo.convert_iam_resource_to_json(user),
                "templated": False,  # Templates not supported for IAM users at this time
            }
            red.hset(
                cache_keys["iam_users"]["temp_cache_key"],
                str(user_entry["arn"]),
                str(json.dumps(user_entry)),
            )
        for g in iam_groups:
            group_entry = {
                "arn": g.get("Arn"),
                "name": g.get("GroupName"),
                "resourceId": g.get("GroupId"),
                "accountId": account_id,
                "ttl": ttl,
                "policy": dynamo.convert_iam_resource_to_json(g),
                "templated": False,  # Templates not supported for IAM groups at this time
            }
            red.hset(
                cache_keys["iam_groups"]["temp_cache_key"],
                str(group_entry["arn"]),
                str(json.dumps(group_entry)),
            )
        for policy in iam_policies:
            # NOTE(review): this loop reuses the name `group_entry` for policy
            # entries — harmless (the group loop above has finished) but
            # misleading; consider renaming to `policy_entry`.
            group_entry = {
                "arn": policy.get("Arn"),
                "name": policy.get("PolicyName"),
                "resourceId": policy.get("PolicyId"),
                "accountId": account_id,
                "ttl": ttl,
                "policy": dynamo.convert_iam_resource_to_json(policy),
                "templated": False,  # Templates not supported for IAM policies at this time
            }
            red.hset(
                cache_keys["iam_policies"]["temp_cache_key"],
                str(group_entry["arn"]),
                str(json.dumps(group_entry)),
            )
        # Maybe store all resources in git
        if config.get("cache_iam_resources_for_account.store_in_git.enabled"):
            store_iam_resources_in_git(all_iam_resources, account_id)
    stats.count(
        "cache_iam_resources_for_account.success", tags={"account_id": account_id}
    )
    log.debug({**log_data, "message": "Finished caching IAM resources for account"})
    return log_data
# Celery beat schedule configuration. NOTE(review): `schedule`,
# `schedule_minute`, and the other `schedule_*` defaults are presumably defined
# elsewhere in this module; only the development overrides and a few
# conditional entries are visible here.
if config.get("development", False):
    # If debug mode, we will set up the schedule to run the next minute after the job starts
    time_to_start = datetime.utcnow() + timedelta(minutes=1)
    dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
    # Collapse every periodic cadence onto the single dev schedule so all
    # tasks fire shortly after startup during development.
    schedule_30_minute = dev_schedule
    schedule_45_minute = dev_schedule
    schedule_1_hour = dev_schedule
    schedule_6_hours = dev_schedule
    schedule_5_minutes = dev_schedule
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
    schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
        "task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
if config.get("celery.cache_cloudtrail_denies.enabled"):
    schedule["cache_cloudtrail_denies"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
# Always scheduled (not behind a feature flag), hourly.
schedule["cache_cloudtrail_errors_by_arn"] = {
    "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
    "options": {"expires": 300},
    "schedule": schedule_1_hour,
}
if config.get("celery.clear_tasks_for_development", False):
    # Wipe the beat schedule entirely (useful when iterating locally).
    schedule = {}
async def get_account_id_to_name_mapping(
    status="active", environment=None, force_sync=False
):
    """
    Return a mapping of AWS account ID -> account name from the cloud account
    cache, optionally filtered by account status and environment.

    :param status: If truthy, only accounts whose ``status`` matches are included
    :param environment: If truthy, only accounts whose ``environment`` matches are included
    :param force_sync: Re-populate the account cache before reading it
    :return: Dict of account ID to account name
    """
    all_accounts_key = config.get(
        "cache_cloud_accounts.redis.key.all_accounts_key", "ALL_AWS_ACCOUNTS"
    )
    cached = await retrieve_json_data_from_redis_or_s3(all_accounts_key, default={})
    if force_sync or not cached or not cached.get("accounts"):
        # Cache is missing or a refresh was requested: re-sync, then read
        # again (this time allowing an S3 fallback).
        await cache_cloud_accounts()
        cached = await retrieve_json_data_from_redis_or_s3(
            all_accounts_key,
            s3_bucket=config.get("cache_cloud_accounts.s3.bucket"),
            s3_key=config.get(
                "cache_cloud_accounts.s3.file",
                "cache_cloud_accounts/accounts_v1.json.gz",
            ),
            default={},
        )

    def _matches_filters(acct):
        # A falsy filter means "don't filter on this field".
        if status and acct.get("status") != status:
            return False
        if environment and acct.get("environment") != environment:
            return False
        return True

    return {
        acct["id"]: acct["name"]
        for acct in cached.get("accounts", [])
        if _matches_filters(acct)
    }
async def store_json_results_in_redis_and_s3(
    data: Union[
        Dict[str, set],
        Dict[str, str],
        List[
            Union[
                Dict[str, Union[Union[str, int], Any]],
                Dict[str, Union[Union[str, None, int], Any]],
            ]
        ],
        str,
        Dict[str, list],
    ],
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    json_encoder=None,
    s3_expires: int = None,
):
    """
    Stores data in Redis and S3, depending on configuration

    :param s3_expires: Epoch time integer for when the written S3 object should expire
    :param redis_data_type: "str" or "hash", depending on how we're storing data in Redis
    :param data: Python dictionary or list that will be encoded in JSON for storage
    :param redis_key: Redis Key to store data to
    :param s3_bucket: S3 bucket to store data
    :param s3_key: S3 key to store data
    :param json_encoder: Optional ``default=`` callable for ``json.dumps`` to
        serialize values the encoder can't handle natively
    :return: None
    """
    # Each write also records its timestamp (per redis_key) in this hash so
    # readers can enforce a max_age.
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated = int(time.time())
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    if redis_key:
        if redis_data_type == "str":
            # Strings are stored verbatim; everything else is JSON-encoded first.
            if isinstance(data, str):
                red.set(redis_key, data)
            else:
                red.set(
                    redis_key, json.dumps(data, cls=SetEncoder, default=json_encoder)
                )
        elif redis_data_type == "hash":
            # NOTE(review): hmset is deprecated in newer redis-py releases in
            # favor of hset(name, mapping=...); left unchanged to avoid a
            # client-version dependency.
            if data:
                red.hmset(redis_key, data)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        # Record when this key was last written.
        red.hset(last_updated_redis_key, redis_key, last_updated)
    if s3_bucket and s3_key:
        s3_extra_kwargs = {}
        if isinstance(s3_expires, int):
            s3_extra_kwargs["Expires"] = datetime.utcfromtimestamp(s3_expires)
        # The S3 payload wraps the data with its write timestamp; keys ending
        # in ".gz" are gzip-compressed before upload.
        data_for_s3 = json.dumps(
            {"last_updated": last_updated, "data": data},
            cls=SetEncoder,
            default=json_encoder,
            indent=2,
        ).encode()
        if s3_key.endswith(".gz"):
            data_for_s3 = gzip.compress(data_for_s3)
        put_object(Bucket=s3_bucket, Key=s3_key, Body=data_for_s3, **s3_extra_kwargs)
class IAMRoleDynamoHandler(BaseDynamoHandler):
    """
    DynamoDB accessor for the global IAM role cache table.

    Fix: the ``@retry(`` openers for ``_update_role_table_value`` and
    ``fetch_iam_role`` had been lost, leaving their keyword arguments and a
    bare ``)`` as invalid statements in the class body; the decorators are
    restored here (``retry`` comes from the ``retrying`` package imported by
    this module).
    """

    def __init__(self) -> None:
        """Resolve the configured IAM role table; log and re-raise on failure."""
        try:
            self.role_table = self._get_dynamo_table(
                config.get("aws.iamroles_dynamo_table", "consoleme_iamroles_global")
            )
        except Exception:
            log.error("Unable to get the IAM Role DynamoDB tables.", exc_info=True)
            raise

    @retry(
        stop_max_attempt_number=4,
        wait_exponential_multiplier=1000,
        wait_exponential_max=1000,
    )
    def _update_role_table_value(self, role_ddb: dict) -> None:
        """Run the specific DynamoDB update with retryability."""
        self.role_table.put_item(Item=role_ddb)

    @retry(
        stop_max_attempt_number=4,
        wait_exponential_multiplier=1000,
        wait_exponential_max=1000,
    )
    def fetch_iam_role(self, role_arn: str, account_id: str):
        """Fetch one cached role item by (arn, accountId), with retries."""
        return self.role_table.get_item(Key={"arn": role_arn, "accountId": account_id})

    def convert_iam_resource_to_json(self, role: dict) -> str:
        """Serialize an IAM resource dict to JSON, stringifying datetime fields."""
        return json.dumps(role, default=self._json_encode_timestamps)

    def _json_encode_timestamps(self, field: datetime) -> str:
        """Solve those pesky timestamps and JSON annoyances."""
        # Non-datetime values fall through and return None, which json.dumps
        # then rejects with its usual TypeError.
        if isinstance(field, datetime):
            return get_iso_string(field)

    def sync_iam_role_for_account(self, role_ddb: dict) -> None:
        """Sync the IAM roles received to DynamoDB.

        :param role_ddb: role entry dict (must contain "accountId")
        :return: None; logs and re-raises on failure
        """
        try:
            # Unfortunately, DDB does not support batch updates :(... So, we need to update each item individually :/
            self._update_role_table_value(role_ddb)
        except Exception as e:
            log_data = {
                "message": "Error syncing Account's IAM roles to DynamoDB",
                "account_id": role_ddb["accountId"],
                "role_ddb": role_ddb,
                "error": str(e),
            }
            log.error(log_data, exc_info=True)
            raise

    def fetch_all_roles(self):
        """Scan the whole role table, following pagination, and return all items."""
        response = self.role_table.scan()
        items = []
        if response and "Items" in response:
            items = self._data_from_dynamo_replace(response["Items"])
        while "LastEvaluatedKey" in response:
            response = self.role_table.scan(
                ExclusiveStartKey=response["LastEvaluatedKey"]
            )
            items.extend(self._data_from_dynamo_replace(response["Items"]))
        return items
def cache_iam_resources_across_accounts(
    run_subtasks: bool = True, wait_for_subtask_completion: bool = True
) -> Dict:
    """
    Consolidate per-account IAM caches into account-wide Redis/S3 caches.

    In the active region (or dev) this fans out one
    ``cache_iam_resources_for_account`` subtask per known account; in other
    regions it syncs role data from DynamoDB into Redis instead. Afterwards it
    evicts roles whose TTL has expired and persists the combined
    role/user/group/policy hashes to S3, then deletes the temporary hashes.

    Bug fix: ``num_iam_policies`` previously logged ``len(all_iam_groups)``
    (copy/paste error); it now counts the policies hash.

    :param run_subtasks: Whether to launch the per-account subtasks
    :param wait_for_subtask_completion: Whether to block until subtasks finish
    :return: log_data dict with per-resource-type counts
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    # Final cache keys plus the temporary hashes the per-account tasks fill.
    cache_keys = {
        "iam_roles": {
            "cache_key": config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE"),
            "temp_cache_key": config.get(
                "aws.iamroles_redis_key_temp", "IAM_ROLE_CACHE_TEMP"
            ),
        },
        "iam_users": {
            "cache_key": config.get("aws.iamusers_redis_key", "IAM_USER_CACHE"),
            "temp_cache_key": config.get(
                "aws.iamusers_redis_key_temp", "IAM_USER_CACHE_TEMP"
            ),
        },
        "iam_groups": {
            "cache_key": config.get("aws.iamgroups_redis_key", "IAM_GROUP_CACHE"),
            "temp_cache_key": config.get(
                "aws.iamgroups_redis_key_temp", "IAM_GROUP_CACHE_TEMP"
            ),
        },
        "iam_policies": {
            "cache_key": config.get("aws.iampolicies_redis_key", "IAM_POLICY_CACHE"),
            "temp_cache_key": config.get(
                "aws.iampolicies_redis_key_temp", "IAM_POLICIES_CACHE_TEMP"
            ),
        },
    }
    log_data = {"function": function, "cache_keys": cache_keys}
    if is_task_already_running(function, []):
        log_data["message"] = "Skipping task: An identical task is currently running"
        log.debug(log_data)
        return log_data
    # Remove stale temporary cache keys to ensure we receive fresh results. Don't remove stale cache keys if we're
    # running this as a part of `make redis` (`scripts/initialize_redis_oss.py`) because these cache keys are already
    # populated appropriately
    if run_subtasks and wait_for_subtask_completion:
        for k, v in cache_keys.items():
            temp_cache_key = v["temp_cache_key"]
            red.delete(temp_cache_key)
    accounts_d: Dict[str, str] = async_to_sync(get_account_id_to_name_mapping)()
    tasks = []
    if config.region == config.get("celery.active_region", config.region) or config.get(
        "environment"
    ) in ["dev"]:
        # First, get list of accounts
        # Second, call tasks to enumerate all the roles across all accounts
        for account_id in accounts_d.keys():
            if config.get("environment") in ["prod", "dev"]:
                tasks.append(cache_iam_resources_for_account.s(account_id))
            else:
                log.debug(
                    {
                        **log_data,
                        "message": (
                            "`environment` configuration is not set. Only running tasks for accounts in configuration "
                            "key `celery.test_account_ids`"
                        ),
                    }
                )
                if account_id in config.get("celery.test_account_ids", []):
                    tasks.append(cache_iam_resources_for_account.s(account_id))
        if run_subtasks:
            results = group(*tasks).apply_async()
            if wait_for_subtask_completion:
                # results.join() forces function to wait until all tasks are complete
                results.join(disable_sync_subtasks=False)
    else:
        log.debug(
            {
                **log_data,
                "message": (
                    "Running in non-active region. Caching roles from DynamoDB and not directly from AWS"
                ),
            }
        )
        dynamo = IAMRoleDynamoHandler()
        # In non-active regions, we just want to sync DDB data to Redis
        roles = dynamo.fetch_all_roles()
        for role_entry in roles:
            _add_role_to_redis(cache_keys["iam_roles"]["cache_key"], role_entry)
    # Delete roles in Redis cache with expired TTL
    all_roles = red.hgetall(cache_keys["iam_roles"]["cache_key"])
    roles_to_delete_from_cache = []
    for arn, role_entry_j in all_roles.items():
        role_entry = json.loads(role_entry_j)
        if datetime.fromtimestamp(role_entry["ttl"]) < datetime.utcnow():
            roles_to_delete_from_cache.append(arn)
    if roles_to_delete_from_cache:
        red.hdel(cache_keys["iam_roles"]["cache_key"], *roles_to_delete_from_cache)
        for arn in roles_to_delete_from_cache:
            all_roles.pop(arn, None)
    log_data["num_iam_roles"] = len(all_roles)
    # Store full list of roles in a single place. This list will be ~30 minutes out of date.
    if all_roles:
        async_to_sync(store_json_results_in_redis_and_s3)(
            all_roles,
            redis_key=cache_keys["iam_roles"]["cache_key"],
            redis_data_type="hash",
            s3_bucket=config.get(
                "cache_iam_resources_across_accounts.all_roles_combined.s3.bucket"
            ),
            s3_key=config.get(
                "cache_iam_resources_across_accounts.all_roles_combined.s3.file",
                "account_resource_cache/cache_all_roles_v1.json.gz",
            ),
        )
    # IAM Users
    all_iam_users = red.hgetall(cache_keys["iam_users"]["temp_cache_key"])
    log_data["num_iam_users"] = len(all_iam_users)
    if all_iam_users:
        async_to_sync(store_json_results_in_redis_and_s3)(
            all_iam_users,
            redis_key=cache_keys["iam_users"]["cache_key"],
            redis_data_type="hash",
            s3_bucket=config.get(
                "cache_iam_resources_across_accounts.all_users_combined.s3.bucket"
            ),
            s3_key=config.get(
                "cache_iam_resources_across_accounts.all_users_combined.s3.file",
                "account_resource_cache/cache_all_users_v1.json.gz",
            ),
        )
    # IAM Groups
    all_iam_groups = red.hgetall(cache_keys["iam_groups"]["temp_cache_key"])
    log_data["num_iam_groups"] = len(all_iam_groups)
    if all_iam_groups:
        async_to_sync(store_json_results_in_redis_and_s3)(
            all_iam_groups,
            redis_key=cache_keys["iam_groups"]["cache_key"],
            redis_data_type="hash",
            s3_bucket=config.get(
                "cache_iam_resources_across_accounts.all_groups_combined.s3.bucket"
            ),
            s3_key=config.get(
                "cache_iam_resources_across_accounts.all_groups_combined.s3.file",
                "account_resource_cache/cache_all_groups_v1.json.gz",
            ),
        )
    # IAM Policies
    all_iam_policies = red.hgetall(cache_keys["iam_policies"]["temp_cache_key"])
    # Fixed: previously counted all_iam_groups here.
    log_data["num_iam_policies"] = len(all_iam_policies)
    if all_iam_policies:
        async_to_sync(store_json_results_in_redis_and_s3)(
            all_iam_policies,
            redis_key=cache_keys["iam_policies"]["cache_key"],
            redis_data_type="hash",
            s3_bucket=config.get(
                "cache_iam_resources_across_accounts.all_policies_combined.s3.bucket"
            ),
            s3_key=config.get(
                "cache_iam_resources_across_accounts.all_policies_combined.s3.file",
                "account_resource_cache/cache_all_policies_v1.json.gz",
            ),
        )
    # Remove temporary cache keys that were populated by the `cache_iam_resources_for_account(account_id)` tasks
    for k, v in cache_keys.items():
        temp_cache_key = v["temp_cache_key"]
        red.delete(temp_cache_key)
    stats.count(f"{function}.success")
    log_data["num_accounts"] = len(accounts_d)
    log.debug(log_data)
    return log_data
162,213 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    # NOTE(review): `app` (the Celery application) is defined elsewhere in
    # this module; the purge is time-boxed so a missing Redis fails fast.
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
# Metrics plugin instance shared by the tasks below.
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def cache_managed_policies_for_account(account_id: str) -> Dict[str, Union[str, int]]:
# Celery beat schedule configuration. NOTE(review): `schedule`,
# `schedule_minute`, and the other `schedule_*` defaults are presumably defined
# elsewhere in this module; only the development overrides and a few
# conditional entries are visible here.
if config.get("development", False):
    # If debug mode, we will set up the schedule to run the next minute after the job starts
    time_to_start = datetime.utcnow() + timedelta(minutes=1)
    dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
    # Collapse every periodic cadence onto the single dev schedule so all
    # tasks fire shortly after startup during development.
    schedule_30_minute = dev_schedule
    schedule_45_minute = dev_schedule
    schedule_1_hour = dev_schedule
    schedule_6_hours = dev_schedule
    schedule_5_minutes = dev_schedule
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
    schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
        "task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
if config.get("celery.cache_cloudtrail_denies.enabled"):
    schedule["cache_cloudtrail_denies"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
# Always scheduled (not behind a feature flag), hourly.
schedule["cache_cloudtrail_errors_by_arn"] = {
    "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
    "options": {"expires": 300},
    "schedule": schedule_1_hour,
}
if config.get("celery.clear_tasks_for_development", False):
    # Wipe the beat schedule entirely (useful when iterating locally).
    schedule = {}
async def get_account_id_to_name_mapping(
status="active", environment=None, force_sync=False
):
def cache_managed_policies_across_accounts() -> bool:
    """
    Fan out one ``cache_managed_policies_for_account`` Celery task per known
    AWS account.

    In the "prod" environment every account is synced; otherwise only accounts
    listed in the ``celery.test_account_ids`` configuration key are.

    :return: True (after emitting a success metric)
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    # Enumerate accounts, then dispatch an async subtask per eligible account.
    account_ids = async_to_sync(get_account_id_to_name_mapping)().keys()
    for account_id in account_ids:
        if config.get("environment") != "prod" and account_id not in config.get(
            "celery.test_account_ids", []
        ):
            continue
        cache_managed_policies_for_account.delay(account_id)
    stats.count(f"{function}.success")
    return True
162,214 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    # NOTE(review): `app` (the Celery application) is defined elsewhere in
    # this module; the purge is time-boxed so a missing Redis fails fast.
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
# Module-level singletons shared by the tasks below.
log = config.get_logger()
red = RedisHandler().redis_sync()  # synchronous Redis client
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()  # metrics plugin instance
def cache_s3_buckets_for_account(account_id: str) -> Dict[str, Union[str, int]]:
    """
    Cache the names of all S3 buckets in an account to Redis and, in the
    active region (or dev/test), to S3.

    :param account_id: AWS account ID to enumerate buckets for
    :return: log_data dict describing the sync (account id, bucket count)
    """
    # list_buckets returns the raw ListBuckets response (a dict with a
    # "Buckets" list), not a list — annotation corrected accordingly.
    s3_buckets: Dict = list_buckets(
        account_number=account_id,
        assume_role=config.get("policies.role_name"),
        region=config.region,
        read_only=True,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    buckets: List = []
    for bucket in s3_buckets["Buckets"]:
        buckets.append(bucket["Name"])
    s3_bucket_key: str = config.get("redis.s3_buckets_key", "S3_BUCKETS")
    # One hash field per account; value is the JSON-encoded bucket name list.
    red.hset(s3_bucket_key, account_id, json.dumps(buckets))
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "account_id": account_id,
        "message": "Successfully cached S3 buckets for account",
        "number_s3_buckets": len(buckets),
    }
    log.debug(log_data)
    stats.count(
        "cache_s3_buckets_for_account",
        tags={"account_id": account_id, "number_s3_buckets": len(buckets)},
    )
    # Persist to S3 only from the active region (or dev/test environments).
    if config.region == config.get("celery.active_region", config.region) or config.get(
        "environment"
    ) in ["dev", "test"]:
        s3_bucket = config.get("account_resource_cache.s3.bucket")
        s3_key = config.get(
            "account_resource_cache.s3.file",
            "account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
        ).format(resource_type="s3_buckets", account_id=account_id)
        async_to_sync(store_json_results_in_redis_and_s3)(
            buckets, s3_bucket=s3_bucket, s3_key=s3_key
        )
    return log_data
stop_max_attempt_number=4,
wait_exponential_multiplier=1000,
wait_exponential_max=1000,
# Celery beat schedule configuration. NOTE(review): `schedule`,
# `schedule_minute`, and the other `schedule_*` defaults are presumably defined
# elsewhere in this module; only the development overrides and a few
# conditional entries are visible here.
if config.get("development", False):
    # If debug mode, we will set up the schedule to run the next minute after the job starts
    time_to_start = datetime.utcnow() + timedelta(minutes=1)
    dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
    # Collapse every periodic cadence onto the single dev schedule so all
    # tasks fire shortly after startup during development.
    schedule_30_minute = dev_schedule
    schedule_45_minute = dev_schedule
    schedule_1_hour = dev_schedule
    schedule_6_hours = dev_schedule
    schedule_5_minutes = dev_schedule
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
    schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
        "task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
if config.get("celery.cache_cloudtrail_denies.enabled"):
    schedule["cache_cloudtrail_denies"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
# Always scheduled (not behind a feature flag), hourly.
schedule["cache_cloudtrail_errors_by_arn"] = {
    "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
    "options": {"expires": 300},
    "schedule": schedule_1_hour,
}
if config.get("celery.clear_tasks_for_development", False):
    # Wipe the beat schedule entirely (useful when iterating locally).
    schedule = {}
async def get_account_id_to_name_mapping(
    status="active", environment=None, force_sync=False
):
    """
    Return a mapping of AWS account ID -> account name from the cloud account
    cache, optionally filtered by account status and environment.

    :param status: If truthy, only include accounts whose ``status`` matches
    :param environment: If truthy, only include accounts whose ``environment`` matches
    :param force_sync: Re-populate the account cache before reading it
    :return: Dict of account ID to account name
    """
    redis_key = config.get(
        "cache_cloud_accounts.redis.key.all_accounts_key", "ALL_AWS_ACCOUNTS"
    )
    accounts = await retrieve_json_data_from_redis_or_s3(redis_key, default={})
    if force_sync or not accounts or not accounts.get("accounts"):
        # Force a re-sync and then retry
        # (second read allows an S3 fallback via the bucket/key arguments)
        await cache_cloud_accounts()
        accounts = await retrieve_json_data_from_redis_or_s3(
            redis_key,
            s3_bucket=config.get("cache_cloud_accounts.s3.bucket"),
            s3_key=config.get(
                "cache_cloud_accounts.s3.file",
                "cache_cloud_accounts/accounts_v1.json.gz",
            ),
            default={},
        )
    account_id_to_name = {}
    for account in accounts.get("accounts", []):
        # A falsy filter argument means "don't filter on this field".
        if status and account.get("status") != status:
            continue
        if environment and account.get("environment") != environment:
            continue
        account_id_to_name[account["id"]] = account["name"]
    return account_id_to_name
async def store_json_results_in_redis_and_s3(
    data: Union[
        Dict[str, set],
        Dict[str, str],
        List[
            Union[
                Dict[str, Union[Union[str, int], Any]],
                Dict[str, Union[Union[str, None, int], Any]],
            ]
        ],
        str,
        Dict[str, list],
    ],
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    json_encoder=None,
    s3_expires: int = None,
):
    """
    Stores data in Redis and S3, depending on configuration

    :param s3_expires: Epoch time integer for when the written S3 object should expire
    :param redis_data_type: "str" or "hash", depending on how we're storing data in Redis
    :param data: Python dictionary or list that will be encoded in JSON for storage
    :param redis_key: Redis Key to store data to
    :param s3_bucket: S3 bucket to store data
    :param s3_key: S3 key to store data
    :param json_encoder: Optional ``default=`` callable for ``json.dumps`` to
        serialize values the encoder can't handle natively
    :return: None
    """
    # Each write also records its timestamp (per redis_key) in this hash so
    # readers can enforce a max_age.
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated = int(time.time())
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    if redis_key:
        if redis_data_type == "str":
            # Strings are stored verbatim; everything else is JSON-encoded first.
            if isinstance(data, str):
                red.set(redis_key, data)
            else:
                red.set(
                    redis_key, json.dumps(data, cls=SetEncoder, default=json_encoder)
                )
        elif redis_data_type == "hash":
            # NOTE(review): hmset is deprecated in newer redis-py releases in
            # favor of hset(name, mapping=...); left unchanged to avoid a
            # client-version dependency.
            if data:
                red.hmset(redis_key, data)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        # Record when this key was last written.
        red.hset(last_updated_redis_key, redis_key, last_updated)
    if s3_bucket and s3_key:
        s3_extra_kwargs = {}
        if isinstance(s3_expires, int):
            s3_extra_kwargs["Expires"] = datetime.utcfromtimestamp(s3_expires)
        # The S3 payload wraps the data with its write timestamp; keys ending
        # in ".gz" are gzip-compressed before upload.
        data_for_s3 = json.dumps(
            {"last_updated": last_updated, "data": data},
            cls=SetEncoder,
            default=json_encoder,
            indent=2,
        ).encode()
        if s3_key.endswith(".gz"):
            data_for_s3 = gzip.compress(data_for_s3)
        put_object(Bucket=s3_bucket, Key=s3_key, Body=data_for_s3, **s3_extra_kwargs)
async def retrieve_json_data_from_redis_or_s3(
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    cache_to_redis_if_data_in_s3: bool = True,
    max_age: Optional[int] = None,
    default: Optional[Any] = None,
    json_object_hook: Optional[Any] = None,
    json_encoder: Optional[Any] = None,
):
    """
    Retrieve data from Redis as a priority. If data is unavailable in Redis, fall back to S3 and attempt to store
    data in Redis for quicker retrieval later.

    :param redis_key: Redis Key to retrieve data from
    :param redis_data_type: "str" or "hash", depending on how the data is stored in Redis
    :param s3_bucket: S3 bucket to retrieve data from
    :param s3_key: S3 key to retrieve data from
    :param cache_to_redis_if_data_in_s3: Cache the data in Redis if the data is in S3 but not Redis
    :param max_age: Maximum acceptable age of the cached data, in seconds
    :param default: Value to return when no data could be retrieved
    :param json_object_hook: Passed to json.loads for custom deserialization
    :param json_encoder: Passed through when re-caching S3 data into Redis
    :return: The cached data, or ``default`` if provided and no data was found
    :raises UnsupportedRedisDataType: if ``redis_data_type`` is not "str" or "hash"
    :raises ExpiredData: if the data is older than ``max_age`` and no fallback exists
    :raises DataNotRetrievable: if no data was found and no ``default`` was given
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    data = None
    if redis_key:
        if redis_data_type == "str":
            data_s = red.get(redis_key)
            if data_s:
                data = json.loads(data_s, object_hook=json_object_hook)
        elif redis_data_type == "hash":
            data = red.hgetall(redis_key)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        if data and max_age:
            current_time = int(time.time())
            # A missing "last updated" entry means freshness cannot be proven;
            # treat the data as stale instead of raising TypeError on int(None).
            last_updated_raw = red.hget(last_updated_redis_key, redis_key)
            last_updated = int(last_updated_raw) if last_updated_raw else 0
            if current_time - last_updated > max_age:
                data = None
                # Fall back to S3 if expired.
                if not s3_bucket or not s3_key:
                    raise ExpiredData(f"Data in Redis is older than {max_age} seconds.")
    # Fall back to S3 if there's no data
    if not data and s3_bucket and s3_key:
        try:
            s3_object = get_object(Bucket=s3_bucket, Key=s3_key)
        except ClientError as e:
            # Match on the structured error code rather than the full exception
            # message, which is brittle across botocore versions.
            if e.response.get("Error", {}).get("Code") == "NoSuchKey":
                if default is not None:
                    return default
            raise
        s3_object_content = await sync_to_async(s3_object["Body"].read)()
        if s3_key.endswith(".gz"):
            s3_object_content = gzip.decompress(s3_object_content)
        data_object = json.loads(s3_object_content, object_hook=json_object_hook)
        data = data_object["data"]
        if data and max_age:
            current_time = int(time.time())
            last_updated = data_object["last_updated"]
            if current_time - last_updated > max_age:
                raise ExpiredData(f"Data in S3 is older than {max_age} seconds.")
        if redis_key and cache_to_redis_if_data_in_s3:
            await store_json_results_in_redis_and_s3(
                data,
                redis_key=redis_key,
                redis_data_type=redis_data_type,
                json_encoder=json_encoder,
            )
    if data is not None:
        return data
    if default is not None:
        return default
    raise DataNotRetrievable("Unable to retrieve expected data.")
def cache_s3_buckets_across_accounts(
    run_subtasks: bool = True, wait_for_subtask_completion: bool = True
) -> Dict[str, Any]:
    """
    Fan out per-account S3 bucket caching subtasks, then combine the results.

    The active celery region (or a dev environment) writes the combined
    per-account Redis hash to S3; other regions instead hydrate their Redis
    cache from the combined S3 object.

    :param run_subtasks: Dispatch the per-account celery subtasks
    :param wait_for_subtask_completion: Block until all subtasks finish
    :return: Log data describing the run
    """
    function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
    s3_bucket_redis_key: str = config.get("redis.s3_buckets_key", "S3_BUCKETS")
    s3_bucket = config.get("account_resource_cache.s3_combined.bucket")
    s3_key = config.get(
        "account_resource_cache.s3_combined.file",
        "account_resource_cache/cache_s3_combined_v1.json.gz",
    )
    accounts_d: Dict[str, str] = async_to_sync(get_account_id_to_name_mapping)()
    log_data = {
        "function": function,
        "num_accounts": len(accounts_d.keys()),
        "run_subtasks": run_subtasks,
        "wait_for_subtask_completion": wait_for_subtask_completion,
    }
    in_active_region = config.region == config.get(
        "celery.active_region", config.region
    )
    tasks = []
    if in_active_region or config.get("environment") in ["dev"]:
        # Enumerate S3 buckets on every account in prod/dev; otherwise only
        # on the configured test accounts.
        test_account_ids = config.get("celery.test_account_ids", [])
        for account_id in accounts_d.keys():
            if (
                config.get("environment") in ["prod", "dev"]
                or account_id in test_account_ids
            ):
                tasks.append(cache_s3_buckets_for_account.s(account_id))
    log_data["num_tasks"] = len(tasks)
    if tasks and run_subtasks:
        results = group(*tasks).apply_async()
        if wait_for_subtask_completion:
            # Block until every subtask has completed.
            results.join(disable_sync_subtasks=False)
    if in_active_region or config.get("environment") in ["dev", "test"]:
        # Persist the combined per-account hash to S3 for other regions.
        all_buckets = red.hgetall(s3_bucket_redis_key)
        async_to_sync(store_json_results_in_redis_and_s3)(
            all_buckets, s3_bucket=s3_bucket, s3_key=s3_key
        )
    else:
        # Non-active regions pull the combined object and load it into Redis.
        redis_result_set = async_to_sync(retrieve_json_data_from_redis_or_s3)(
            s3_bucket=s3_bucket, s3_key=s3_key
        )
        async_to_sync(store_json_results_in_redis_and_s3)(
            redis_result_set,
            redis_key=s3_bucket_redis_key,
            redis_data_type="hash",
        )
    log.debug(
        {**log_data, "message": "Successfully cached s3 buckets across known accounts"}
    )
    stats.count(f"{function}.success")
    return log_data
from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    # NOTE(review): `app` (the Celery application) is defined earlier in this
    # module, outside this chunk.
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
# Module-level singletons shared by every task in this module.
log = config.get_logger()
red = RedisHandler().redis_sync()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def cache_sqs_queues_for_account(account_id: str) -> Dict[str, Union[str, int]]:
    """
    Enumerate all SQS queues in every enabled region of an account, cache
    their ARNs as a JSON list in a Redis hash keyed by account id, and (in
    the active celery region or dev/test) also write the list to S3.

    :param account_id: AWS account id to enumerate queues for
    :return: Log data describing the run, including the queue count
    """
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "account_id": account_id,
    }
    all_queues: set = set()
    enabled_regions = async_to_sync(get_enabled_regions_for_account)(account_id)
    for region in enabled_regions:
        try:
            client = boto3_cached_conn(
                "sqs",
                account_number=account_id,
                assume_role=config.get("policies.role_name"),
                region=region,
                read_only=True,
                sts_client_kwargs=dict(
                    region_name=config.region,
                    endpoint_url=config.get(
                        "aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
                    ).format(region=config.region),
                ),
                client_kwargs=config.get("boto3.client_kwargs", {}),
            )
            paginator = client.get_paginator("list_queues")
            response_iterator = paginator.paginate(PaginationConfig={"PageSize": 1000})
            for res in response_iterator:
                for queue in res.get("QueueUrls", []):
                    # Build the queue ARN from the queue URL; assumes the
                    # standard URL shape where the queue name is the final
                    # path segment (index 4 after splitting on "/").
                    arn = f"arn:aws:sqs:{region}:{account_id}:{queue.split('/')[4]}"
                    all_queues.add(arn)
        except Exception as e:
            # A failure in one region is logged and reported but does not
            # abort the remaining regions.
            log.error(
                {
                    **log_data,
                    "region": region,
                    "message": "Unable to sync SQS queues from region",
                    "error": str(e),
                }
            )
            sentry_sdk.capture_exception()
    sqs_queue_key: str = config.get("redis.sqs_queues_key", "SQS_QUEUES")
    red.hset(sqs_queue_key, account_id, json.dumps(list(all_queues)))
    log_data["message"] = "Successfully cached SQS queues for account"
    log_data["number_sqs_queues"] = len(all_queues)
    log.debug(log_data)
    stats.count(
        "cache_sqs_queues_for_account",
        tags={"account_id": account_id, "number_sqs_queues": len(all_queues)},
    )
    # Only the active region (or dev/test) also writes the per-account list
    # to S3 for consumption by other regions.
    if config.region == config.get("celery.active_region", config.region) or config.get(
        "environment"
    ) in ["dev", "test"]:
        s3_bucket = config.get("account_resource_cache.sqs.bucket")
        s3_key = config.get(
            "account_resource_cache.{resource_type}.file",
            "account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
        ).format(resource_type="sqs_queues", account_id=account_id)
        async_to_sync(store_json_results_in_redis_and_s3)(
            all_queues, s3_bucket=s3_bucket, s3_key=s3_key
        )
    return log_data
if config.get("development", False):
    # If debug mode, we will set up the schedule to run the next minute after the job starts
    time_to_start = datetime.utcnow() + timedelta(minutes=1)
    dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
    schedule_30_minute = dev_schedule
    schedule_45_minute = dev_schedule
    schedule_1_hour = dev_schedule
    schedule_6_hours = dev_schedule
    schedule_5_minutes = dev_schedule
# Conditionally-registered beat entries. NOTE(review): `schedule` and
# `schedule_minute` are defined earlier in this module, outside this chunk.
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
    schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
        "task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
if config.get("celery.cache_cloudtrail_denies.enabled"):
    schedule["cache_cloudtrail_denies"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
schedule["cache_cloudtrail_errors_by_arn"] = {
    "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
    "options": {"expires": 300},
    "schedule": schedule_1_hour,
}
# Allows developers to disable all scheduled tasks entirely.
if config.get("celery.clear_tasks_for_development", False):
    schedule = {}
async def get_account_id_to_name_mapping(
    status="active", environment=None, force_sync=False
):
    """
    Return a mapping of AWS account id -> account name from the cached
    cloud-account data, optionally filtered by status and environment.

    When the cache is empty (or a refresh is forced), re-syncs the account
    cache and retries with an S3 fallback.
    """
    redis_key = config.get(
        "cache_cloud_accounts.redis.key.all_accounts_key", "ALL_AWS_ACCOUNTS"
    )
    accounts = await retrieve_json_data_from_redis_or_s3(redis_key, default={})
    if force_sync or not accounts or not accounts.get("accounts"):
        # Cache miss (or forced refresh): re-sync, then retry with S3 fallback.
        await cache_cloud_accounts()
        accounts = await retrieve_json_data_from_redis_or_s3(
            redis_key,
            s3_bucket=config.get("cache_cloud_accounts.s3.bucket"),
            s3_key=config.get(
                "cache_cloud_accounts.s3.file",
                "cache_cloud_accounts/accounts_v1.json.gz",
            ),
            default={},
        )
    return {
        account["id"]: account["name"]
        for account in accounts.get("accounts", [])
        if (not status or account.get("status") == status)
        and (not environment or account.get("environment") == environment)
    }
async def store_json_results_in_redis_and_s3(
    data: Union[
        Dict[str, set],
        Dict[str, str],
        List[
            Union[
                Dict[str, Union[Union[str, int], Any]],
                Dict[str, Union[Union[str, None, int], Any]],
            ]
        ],
        str,
        Dict[str, list],
    ],
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    json_encoder=None,
    s3_expires: int = None,
):
    """
    Persist ``data`` to Redis and/or S3.

    When a Redis key is given, data is written either as a JSON string or as
    a hash, and the write time is recorded in a shared "last updated" hash so
    readers can enforce a maximum age. When an S3 location is given, data is
    wrapped with its timestamp, JSON-encoded, gzipped when the key ends in
    ".gz", and uploaded.

    :param data: JSON-serializable payload (or a pre-serialized string)
    :param redis_key: Redis key to write to
    :param redis_data_type: "str" or "hash", depending on how we're storing data in Redis
    :param s3_bucket: S3 bucket to store data
    :param s3_key: S3 key to store data
    :param json_encoder: Passed to json.dumps as ``default``
    :param s3_expires: Epoch time integer for when the written S3 object should expire
    :raises UnsupportedRedisDataType: for an unknown ``redis_data_type``
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    last_updated = int(time.time())
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # Default the bucket from configuration when only a key was supplied.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    if redis_key:
        if redis_data_type == "str":
            serialized = (
                data
                if isinstance(data, str)
                else json.dumps(data, cls=SetEncoder, default=json_encoder)
            )
            red.set(redis_key, serialized)
        elif redis_data_type == "hash":
            if data:
                red.hmset(redis_key, data)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        # Record the write time so readers can enforce max_age.
        red.hset(last_updated_redis_key, redis_key, last_updated)
    if s3_bucket and s3_key:
        extra_put_kwargs = {}
        if isinstance(s3_expires, int):
            extra_put_kwargs["Expires"] = datetime.utcfromtimestamp(s3_expires)
        payload = json.dumps(
            {"last_updated": last_updated, "data": data},
            cls=SetEncoder,
            default=json_encoder,
            indent=2,
        ).encode()
        if s3_key.endswith(".gz"):
            payload = gzip.compress(payload)
        put_object(Bucket=s3_bucket, Key=s3_key, Body=payload, **extra_put_kwargs)
async def retrieve_json_data_from_redis_or_s3(
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    cache_to_redis_if_data_in_s3: bool = True,
    max_age: Optional[int] = None,
    default: Optional[Any] = None,
    json_object_hook: Optional[Any] = None,
    json_encoder: Optional[Any] = None,
):
    """
    Retrieve data from Redis as a priority. If data is unavailable in Redis, fall back to S3 and attempt to store
    data in Redis for quicker retrieval later.

    :param redis_key: Redis Key to retrieve data from
    :param redis_data_type: "str" or "hash", depending on how the data is stored in Redis
    :param s3_bucket: S3 bucket to retrieve data from
    :param s3_key: S3 key to retrieve data from
    :param cache_to_redis_if_data_in_s3: Cache the data in Redis if the data is in S3 but not Redis
    :param max_age: Maximum acceptable age of the cached data, in seconds
    :param default: Value to return when no data could be retrieved
    :param json_object_hook: Passed to json.loads for custom deserialization
    :param json_encoder: Passed through when re-caching S3 data into Redis
    :return: The cached data, or ``default`` if provided and no data was found
    :raises UnsupportedRedisDataType: if ``redis_data_type`` is not "str" or "hash"
    :raises ExpiredData: if the data is older than ``max_age`` and no fallback exists
    :raises DataNotRetrievable: if no data was found and no ``default`` was given
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    data = None
    if redis_key:
        if redis_data_type == "str":
            data_s = red.get(redis_key)
            if data_s:
                data = json.loads(data_s, object_hook=json_object_hook)
        elif redis_data_type == "hash":
            data = red.hgetall(redis_key)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        if data and max_age:
            current_time = int(time.time())
            # A missing "last updated" entry means freshness cannot be proven;
            # treat the data as stale instead of raising TypeError on int(None).
            last_updated_raw = red.hget(last_updated_redis_key, redis_key)
            last_updated = int(last_updated_raw) if last_updated_raw else 0
            if current_time - last_updated > max_age:
                data = None
                # Fall back to S3 if expired.
                if not s3_bucket or not s3_key:
                    raise ExpiredData(f"Data in Redis is older than {max_age} seconds.")
    # Fall back to S3 if there's no data
    if not data and s3_bucket and s3_key:
        try:
            s3_object = get_object(Bucket=s3_bucket, Key=s3_key)
        except ClientError as e:
            # Match on the structured error code rather than the full exception
            # message, which is brittle across botocore versions.
            if e.response.get("Error", {}).get("Code") == "NoSuchKey":
                if default is not None:
                    return default
            raise
        s3_object_content = await sync_to_async(s3_object["Body"].read)()
        if s3_key.endswith(".gz"):
            s3_object_content = gzip.decompress(s3_object_content)
        data_object = json.loads(s3_object_content, object_hook=json_object_hook)
        data = data_object["data"]
        if data and max_age:
            current_time = int(time.time())
            last_updated = data_object["last_updated"]
            if current_time - last_updated > max_age:
                raise ExpiredData(f"Data in S3 is older than {max_age} seconds.")
        if redis_key and cache_to_redis_if_data_in_s3:
            await store_json_results_in_redis_and_s3(
                data,
                redis_key=redis_key,
                redis_data_type=redis_data_type,
                json_encoder=json_encoder,
            )
    if data is not None:
        return data
    if default is not None:
        return default
    raise DataNotRetrievable("Unable to retrieve expected data.")
def cache_sqs_queues_across_accounts(
    run_subtasks: bool = True, wait_for_subtask_completion: bool = True
) -> Dict[str, Any]:
    """
    Fan out per-account SQS queue caching subtasks, then combine the results.

    The active celery region (or a dev environment) writes the combined
    per-account Redis hash to S3; other regions instead hydrate their Redis
    cache from the combined S3 object.

    :param run_subtasks: Dispatch the per-account celery subtasks
    :param wait_for_subtask_completion: Block until all subtasks finish
    :return: Log data describing the run
    """
    function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
    sqs_queue_redis_key: str = config.get("redis.sqs_queues_key", "SQS_QUEUES")
    s3_bucket = config.get("account_resource_cache.sqs_combined.bucket")
    s3_key = config.get(
        "account_resource_cache.sqs_combined.file",
        "account_resource_cache/cache_sqs_queues_combined_v1.json.gz",
    )
    accounts_d: Dict[str, str] = async_to_sync(get_account_id_to_name_mapping)()
    log_data = {
        "function": function,
        "num_accounts": len(accounts_d.keys()),
        "run_subtasks": run_subtasks,
        "wait_for_subtask_completion": wait_for_subtask_completion,
    }
    in_active_region = config.region == config.get(
        "celery.active_region", config.region
    )
    tasks = []
    if in_active_region or config.get("environment") in ["dev"]:
        # Enumerate SQS queues on every account in prod/dev; otherwise only
        # on the configured test accounts.
        test_account_ids = config.get("celery.test_account_ids", [])
        for account_id in accounts_d.keys():
            if (
                config.get("environment") in ["prod", "dev"]
                or account_id in test_account_ids
            ):
                tasks.append(cache_sqs_queues_for_account.s(account_id))
    log_data["num_tasks"] = len(tasks)
    if tasks and run_subtasks:
        results = group(*tasks).apply_async()
        if wait_for_subtask_completion:
            # Block until every subtask has completed.
            results.join(disable_sync_subtasks=False)
    if in_active_region or config.get("environment") in ["dev", "test"]:
        # Persist the combined per-account hash to S3 for other regions.
        all_queues = red.hgetall(sqs_queue_redis_key)
        async_to_sync(store_json_results_in_redis_and_s3)(
            all_queues, s3_bucket=s3_bucket, s3_key=s3_key
        )
    else:
        # Non-active regions pull the combined object and load it into Redis.
        redis_result_set = async_to_sync(retrieve_json_data_from_redis_or_s3)(
            s3_bucket=s3_bucket, s3_key=s3_key
        )
        async_to_sync(store_json_results_in_redis_and_s3)(
            redis_result_set,
            redis_key=sqs_queue_redis_key,
            redis_data_type="hash",
        )
    log.debug(
        {**log_data, "message": "Successfully cached SQS queues across known accounts"}
    )
    stats.count(f"{function}.success")
    return log_data
from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    # NOTE(review): `app` (the Celery application) is defined earlier in this
    # module, outside this chunk.
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
# Module-level singletons shared by every task in this module.
log = config.get_logger()
red = RedisHandler().redis_sync()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def cache_sns_topics_for_account(account_id: str) -> Dict[str, Union[str, int]]:
    """
    Enumerate SNS topics in every enabled region of an account and cache the
    topic ARNs as a JSON list in a Redis hash keyed by account id. In the
    active celery region (or dev/test), the list is also written to S3.

    :param account_id: AWS account id to enumerate topics for
    :return: Log data describing the run, including the topic count
    """
    # Make sure it is regional
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "account_id": account_id,
    }
    all_topics: set = set()
    enabled_regions = async_to_sync(get_enabled_regions_for_account)(account_id)
    for region in enabled_regions:
        try:
            topics = list_topics(
                account_number=account_id,
                assume_role=config.get("policies.role_name"),
                region=region,
                read_only=True,
                sts_client_kwargs=dict(
                    region_name=config.region,
                    endpoint_url=config.get(
                        "aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
                    ).format(region=config.region),
                ),
                client_kwargs=config.get("boto3.client_kwargs", {}),
            )
            for topic in topics:
                all_topics.add(topic["TopicArn"])
        except Exception as e:
            # A failure in one region is logged and reported but does not
            # abort the remaining regions.
            log.error(
                {
                    **log_data,
                    "region": region,
                    "message": "Unable to sync SNS topics from region",
                    "error": str(e),
                }
            )
            sentry_sdk.capture_exception()
    sns_topic_key: str = config.get("redis.sns_topics_key", "SNS_TOPICS")
    red.hset(sns_topic_key, account_id, json.dumps(list(all_topics)))
    log_data["message"] = "Successfully cached SNS topics for account"
    log_data["number_sns_topics"] = len(all_topics)
    log.debug(log_data)
    stats.count(
        "cache_sns_topics_for_account",
        tags={"account_id": account_id, "number_sns_topics": len(all_topics)},
    )
    # Only the active region (or dev/test) also writes the per-account list
    # to S3 for consumption by other regions.
    if config.region == config.get("celery.active_region", config.region) or config.get(
        "environment"
    ) in ["dev", "test"]:
        s3_bucket = config.get("account_resource_cache.s3.bucket")
        s3_key = config.get(
            "account_resource_cache.s3.file",
            "account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
        ).format(resource_type="sns_topics", account_id=account_id)
        async_to_sync(store_json_results_in_redis_and_s3)(
            all_topics, s3_bucket=s3_bucket, s3_key=s3_key
        )
    return log_data
if config.get("development", False):
    # If debug mode, we will set up the schedule to run the next minute after the job starts
    time_to_start = datetime.utcnow() + timedelta(minutes=1)
    dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
    schedule_30_minute = dev_schedule
    schedule_45_minute = dev_schedule
    schedule_1_hour = dev_schedule
    schedule_6_hours = dev_schedule
    schedule_5_minutes = dev_schedule
# Conditionally-registered beat entries. NOTE(review): `schedule` and
# `schedule_minute` are defined earlier in this module, outside this chunk.
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
    schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
        "task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
if config.get("celery.cache_cloudtrail_denies.enabled"):
    schedule["cache_cloudtrail_denies"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
schedule["cache_cloudtrail_errors_by_arn"] = {
    "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
    "options": {"expires": 300},
    "schedule": schedule_1_hour,
}
# Allows developers to disable all scheduled tasks entirely.
if config.get("celery.clear_tasks_for_development", False):
    schedule = {}
async def get_account_id_to_name_mapping(
    status="active", environment=None, force_sync=False
):
    """
    Return a mapping of AWS account id -> account name from the cached
    cloud-account data, optionally filtered by status and environment.

    When the cache is empty (or a refresh is forced), re-syncs the account
    cache and retries with an S3 fallback.
    """
    redis_key = config.get(
        "cache_cloud_accounts.redis.key.all_accounts_key", "ALL_AWS_ACCOUNTS"
    )
    accounts = await retrieve_json_data_from_redis_or_s3(redis_key, default={})
    if force_sync or not accounts or not accounts.get("accounts"):
        # Cache miss (or forced refresh): re-sync, then retry with S3 fallback.
        await cache_cloud_accounts()
        accounts = await retrieve_json_data_from_redis_or_s3(
            redis_key,
            s3_bucket=config.get("cache_cloud_accounts.s3.bucket"),
            s3_key=config.get(
                "cache_cloud_accounts.s3.file",
                "cache_cloud_accounts/accounts_v1.json.gz",
            ),
            default={},
        )
    return {
        account["id"]: account["name"]
        for account in accounts.get("accounts", [])
        if (not status or account.get("status") == status)
        and (not environment or account.get("environment") == environment)
    }
async def store_json_results_in_redis_and_s3(
    data: Union[
        Dict[str, set],
        Dict[str, str],
        List[
            Union[
                Dict[str, Union[Union[str, int], Any]],
                Dict[str, Union[Union[str, None, int], Any]],
            ]
        ],
        str,
        Dict[str, list],
    ],
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    json_encoder=None,
    s3_expires: int = None,
):
    """
    Persist ``data`` to Redis and/or S3.

    When a Redis key is given, data is written either as a JSON string or as
    a hash, and the write time is recorded in a shared "last updated" hash so
    readers can enforce a maximum age. When an S3 location is given, data is
    wrapped with its timestamp, JSON-encoded, gzipped when the key ends in
    ".gz", and uploaded.

    :param data: JSON-serializable payload (or a pre-serialized string)
    :param redis_key: Redis key to write to
    :param redis_data_type: "str" or "hash", depending on how we're storing data in Redis
    :param s3_bucket: S3 bucket to store data
    :param s3_key: S3 key to store data
    :param json_encoder: Passed to json.dumps as ``default``
    :param s3_expires: Epoch time integer for when the written S3 object should expire
    :raises UnsupportedRedisDataType: for an unknown ``redis_data_type``
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    last_updated = int(time.time())
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # Default the bucket from configuration when only a key was supplied.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    if redis_key:
        if redis_data_type == "str":
            serialized = (
                data
                if isinstance(data, str)
                else json.dumps(data, cls=SetEncoder, default=json_encoder)
            )
            red.set(redis_key, serialized)
        elif redis_data_type == "hash":
            if data:
                red.hmset(redis_key, data)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        # Record the write time so readers can enforce max_age.
        red.hset(last_updated_redis_key, redis_key, last_updated)
    if s3_bucket and s3_key:
        extra_put_kwargs = {}
        if isinstance(s3_expires, int):
            extra_put_kwargs["Expires"] = datetime.utcfromtimestamp(s3_expires)
        payload = json.dumps(
            {"last_updated": last_updated, "data": data},
            cls=SetEncoder,
            default=json_encoder,
            indent=2,
        ).encode()
        if s3_key.endswith(".gz"):
            payload = gzip.compress(payload)
        put_object(Bucket=s3_bucket, Key=s3_key, Body=payload, **extra_put_kwargs)
async def retrieve_json_data_from_redis_or_s3(
    redis_key: str = None,
    redis_data_type: str = "str",
    s3_bucket: str = None,
    s3_key: str = None,
    cache_to_redis_if_data_in_s3: bool = True,
    max_age: Optional[int] = None,
    default: Optional[Any] = None,
    json_object_hook: Optional[Any] = None,
    json_encoder: Optional[Any] = None,
):
    """
    Retrieve data from Redis as a priority. If data is unavailable in Redis, fall back to S3 and attempt to store
    data in Redis for quicker retrieval later.

    :param redis_key: Redis Key to retrieve data from
    :param redis_data_type: "str" or "hash", depending on how the data is stored in Redis
    :param s3_bucket: S3 bucket to retrieve data from
    :param s3_key: S3 key to retrieve data from
    :param cache_to_redis_if_data_in_s3: Cache the data in Redis if the data is in S3 but not Redis
    :param max_age: Maximum acceptable age of the cached data, in seconds
    :param default: Value to return when no data could be retrieved
    :param json_object_hook: Passed to json.loads for custom deserialization
    :param json_encoder: Passed through when re-caching S3 data into Redis
    :return: The cached data, or ``default`` if provided and no data was found
    :raises UnsupportedRedisDataType: if ``redis_data_type`` is not "str" or "hash"
    :raises ExpiredData: if the data is older than ``max_age`` and no fallback exists
    :raises DataNotRetrievable: if no data was found and no ``default`` was given
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    last_updated_redis_key = config.get(
        "store_json_results_in_redis_and_s3.last_updated_redis_key",
        "STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
    )
    stats.count(
        f"{function}.called",
        tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
    )
    # If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
    if s3_key and not s3_bucket:
        s3_bucket = config.get("consoleme_s3_bucket")
    data = None
    if redis_key:
        if redis_data_type == "str":
            data_s = red.get(redis_key)
            if data_s:
                data = json.loads(data_s, object_hook=json_object_hook)
        elif redis_data_type == "hash":
            data = red.hgetall(redis_key)
        else:
            raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
        if data and max_age:
            current_time = int(time.time())
            # A missing "last updated" entry means freshness cannot be proven;
            # treat the data as stale instead of raising TypeError on int(None).
            last_updated_raw = red.hget(last_updated_redis_key, redis_key)
            last_updated = int(last_updated_raw) if last_updated_raw else 0
            if current_time - last_updated > max_age:
                data = None
                # Fall back to S3 if expired.
                if not s3_bucket or not s3_key:
                    raise ExpiredData(f"Data in Redis is older than {max_age} seconds.")
    # Fall back to S3 if there's no data
    if not data and s3_bucket and s3_key:
        try:
            s3_object = get_object(Bucket=s3_bucket, Key=s3_key)
        except ClientError as e:
            # Match on the structured error code rather than the full exception
            # message, which is brittle across botocore versions.
            if e.response.get("Error", {}).get("Code") == "NoSuchKey":
                if default is not None:
                    return default
            raise
        s3_object_content = await sync_to_async(s3_object["Body"].read)()
        if s3_key.endswith(".gz"):
            s3_object_content = gzip.decompress(s3_object_content)
        data_object = json.loads(s3_object_content, object_hook=json_object_hook)
        data = data_object["data"]
        if data and max_age:
            current_time = int(time.time())
            last_updated = data_object["last_updated"]
            if current_time - last_updated > max_age:
                raise ExpiredData(f"Data in S3 is older than {max_age} seconds.")
        if redis_key and cache_to_redis_if_data_in_s3:
            await store_json_results_in_redis_and_s3(
                data,
                redis_key=redis_key,
                redis_data_type=redis_data_type,
                json_encoder=json_encoder,
            )
    if data is not None:
        return data
    if default is not None:
        return default
    raise DataNotRetrievable("Unable to retrieve expected data.")
def cache_sns_topics_across_accounts(
run_subtasks: bool = True, wait_for_subtask_completion: bool = True
) -> Dict[str, Any]:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
sns_topic_redis_key: str = config.get("redis.sns_topics_key", "SNS_TOPICS")
s3_bucket = config.get("account_resource_cache.sns_topics_combined.bucket")
s3_key = config.get(
"account_resource_cache.{resource_type}_topics_combined.file",
"account_resource_cache/cache_{resource_type}_combined_v1.json.gz",
).format(resource_type="sns_topics")
# First, get list of accounts
accounts_d: Dict[str, str] = async_to_sync(get_account_id_to_name_mapping)()
log_data = {
"function": function,
"num_accounts": len(accounts_d.keys()),
"run_subtasks": run_subtasks,
"wait_for_subtask_completion": wait_for_subtask_completion,
}
tasks = []
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev"]:
for account_id in accounts_d.keys():
if config.get("environment") in ["prod", "dev"]:
tasks.append(cache_sns_topics_for_account.s(account_id))
else:
if account_id in config.get("celery.test_account_ids", []):
tasks.append(cache_sns_topics_for_account.s(account_id))
log_data["num_tasks"] = len(tasks)
if tasks and run_subtasks:
results = group(*tasks).apply_async()
if wait_for_subtask_completion:
# results.join() forces function to wait until all tasks are complete
results.join(disable_sync_subtasks=False)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
all_topics = red.hgetall(sns_topic_redis_key)
async_to_sync(store_json_results_in_redis_and_s3)(
all_topics, s3_bucket=s3_bucket, s3_key=s3_key
)
else:
redis_result_set = async_to_sync(retrieve_json_data_from_redis_or_s3)(
s3_bucket=s3_bucket, s3_key=s3_key
)
async_to_sync(store_json_results_in_redis_and_s3)(
redis_result_set,
redis_key=sns_topic_redis_key,
redis_data_type="hash",
)
log.debug(
{**log_data, "message": "Successfully cached SNS topics across known accounts"}
)
stats.count(f"{function}.success")
return log_data | null |
162,217 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
if config.get("celery.purge"):
# Useful to clear celery queue in development
with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
app.control.purge()
log = config.get_logger()
red = RedisHandler().redis_sync()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
REDIS_IAM_COUNT = 1000
def _scan_redis_iam_cache(
cache_key: str, index: int, count: int
) -> Tuple[int, Dict[str, str]]:
return red.hscan(cache_key, index, count=count)
if config.get("development", False):
# If debug mode, we will set up the schedule to run the next minute after the job starts
time_to_start = datetime.utcnow() + timedelta(minutes=1)
dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
schedule_30_minute = dev_schedule
schedule_45_minute = dev_schedule
schedule_1_hour = dev_schedule
schedule_6_hours = dev_schedule
schedule_5_minutes = dev_schedule
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
"task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
"options": {"expires": 300},
"schedule": schedule_minute,
}
if config.get("celery.cache_cloudtrail_denies.enabled"):
schedule["cache_cloudtrail_denies"] = {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
"options": {"expires": 300},
"schedule": schedule_minute,
}
schedule["cache_cloudtrail_errors_by_arn"] = {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
"options": {"expires": 300},
"schedule": schedule_1_hour,
}
if config.get("celery.clear_tasks_for_development", False):
schedule = {}
def clear_old_redis_iam_cache() -> bool:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
# Do not run if this is not in the active region:
if config.region != config.get("celery.active_region", config.region):
return False
# Need to loop over all items in the set:
cache_key: str = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
index: int = 0
expire_ttl: int = int((datetime.utcnow() - timedelta(hours=6)).timestamp())
roles_to_expire = []
# We will loop over REDIS_IAM_COUNT items at a time:
try:
while True:
results = _scan_redis_iam_cache(cache_key, index, REDIS_IAM_COUNT)
index = results[0]
# Verify if the role is too old:
for arn, role in results[1].items():
role = json.loads(role)
if role["ttl"] <= expire_ttl:
roles_to_expire.append(arn)
# We will be complete if the next index is 0:
if not index:
break
except: # noqa
log_data = {
"function": function,
"message": "Error retrieving roles from Redis for cache cleanup.",
}
log.error(log_data, exc_info=True)
raise
# Delete all the roles that we need to delete:
try:
if roles_to_expire:
red.hdel(cache_key, *roles_to_expire)
except: # noqa
log_data = {
"function": function,
"message": "Error deleting roles from Redis for cache cleanup.",
}
log.error(log_data, exc_info=True)
raise
stats.count(f"{function}.success", tags={"expired_roles": len(roles_to_expire)})
return True | null |
162,218 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
if config.get("celery.purge"):
# Useful to clear celery queue in development
with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
app.control.purge()
log = config.get_logger()
red = RedisHandler().redis_sync()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
def cache_resources_from_aws_config_for_account(account_id) -> dict:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {
"function": function,
"account_id": account_id,
}
if not config.get(
"celery.cache_resources_from_aws_config_across_accounts.enabled",
config.get(
f"celery.cache_resources_from_aws_config_for_account.{account_id}.enabled",
True,
),
):
log_data[
"message"
] = "Skipping task: Caching resources from AWS Config is disabled."
log.debug(log_data)
return log_data
s3_bucket = config.get("aws_config_cache.s3.bucket")
s3_key = config.get(
"aws_config_cache.s3.file", "aws_config_cache/cache_{account_id}_v1.json.gz"
).format(account_id=account_id)
# Only query in active region, otherwise get data from DDB
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
results = aws_config.query(
config.get(
"cache_all_resources_from_aws_config.aws_config.all_resources_query",
"select * where accountId = '{account_id}'",
).format(account_id=account_id),
use_aggregator=False,
account_id=account_id,
)
ttl: int = int((datetime.utcnow() + timedelta(hours=36)).timestamp())
redis_result_set = {}
for result in results:
result["ttl"] = ttl
if result.get("arn"):
if redis_result_set.get(result["arn"]):
continue
redis_result_set[result["arn"]] = json.dumps(result)
if redis_result_set:
async_to_sync(store_json_results_in_redis_and_s3)(
un_wrap_json_and_dump_values(redis_result_set),
redis_key=config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
),
redis_data_type="hash",
s3_bucket=s3_bucket,
s3_key=s3_key,
)
if config.get(
"celery.cache_resources_from_aws_config_across_accounts.dynamo_enabled",
True,
):
dynamo = UserDynamoHandler()
dynamo.write_resource_cache_data(results)
else:
redis_result_set = async_to_sync(retrieve_json_data_from_redis_or_s3)(
s3_bucket=s3_bucket, s3_key=s3_key
)
async_to_sync(store_json_results_in_redis_and_s3)(
redis_result_set,
redis_key=config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
),
redis_data_type="hash",
)
log_data["message"] = "Successfully cached resources from AWS Config for account"
log_data["number_resources_synced"] = len(redis_result_set)
log.debug(log_data)
return log_data
if config.get("development", False):
# If debug mode, we will set up the schedule to run the next minute after the job starts
time_to_start = datetime.utcnow() + timedelta(minutes=1)
dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
schedule_30_minute = dev_schedule
schedule_45_minute = dev_schedule
schedule_1_hour = dev_schedule
schedule_6_hours = dev_schedule
schedule_5_minutes = dev_schedule
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
"task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
"options": {"expires": 300},
"schedule": schedule_minute,
}
if config.get("celery.cache_cloudtrail_denies.enabled"):
schedule["cache_cloudtrail_denies"] = {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
"options": {"expires": 300},
"schedule": schedule_minute,
}
schedule["cache_cloudtrail_errors_by_arn"] = {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
"options": {"expires": 300},
"schedule": schedule_1_hour,
}
if config.get("celery.clear_tasks_for_development", False):
schedule = {}
async def get_account_id_to_name_mapping(
status="active", environment=None, force_sync=False
):
redis_key = config.get(
"cache_cloud_accounts.redis.key.all_accounts_key", "ALL_AWS_ACCOUNTS"
)
accounts = await retrieve_json_data_from_redis_or_s3(redis_key, default={})
if force_sync or not accounts or not accounts.get("accounts"):
# Force a re-sync and then retry
await cache_cloud_accounts()
accounts = await retrieve_json_data_from_redis_or_s3(
redis_key,
s3_bucket=config.get("cache_cloud_accounts.s3.bucket"),
s3_key=config.get(
"cache_cloud_accounts.s3.file",
"cache_cloud_accounts/accounts_v1.json.gz",
),
default={},
)
account_id_to_name = {}
for account in accounts.get("accounts", []):
if status and account.get("status") != status:
continue
if environment and account.get("environment") != environment:
continue
account_id_to_name[account["id"]] = account["name"]
return account_id_to_name
async def store_json_results_in_redis_and_s3(
data: Union[
Dict[str, set],
Dict[str, str],
List[
Union[
Dict[str, Union[Union[str, int], Any]],
Dict[str, Union[Union[str, None, int], Any]],
]
],
str,
Dict[str, list],
],
redis_key: str = None,
redis_data_type: str = "str",
s3_bucket: str = None,
s3_key: str = None,
json_encoder=None,
s3_expires: int = None,
):
"""
Stores data in Redis and S3, depending on configuration
:param s3_expires: Epoch time integer for when the written S3 object should expire
:param redis_data_type: "str" or "hash", depending on how we're storing data in Redis
:param data: Python dictionary or list that will be encoded in JSON for storage
:param redis_key: Redis Key to store data to
:param s3_bucket: S3 bucket to store data
:param s3_key: S3 key to store data
:return:
"""
last_updated_redis_key = config.get(
"store_json_results_in_redis_and_s3.last_updated_redis_key",
"STORE_JSON_RESULTS_IN_REDIS_AND_S3_LAST_UPDATED",
)
function = f"{__name__}.{sys._getframe().f_code.co_name}"
last_updated = int(time.time())
stats.count(
f"{function}.called",
tags={"redis_key": redis_key, "s3_bucket": s3_bucket, "s3_key": s3_key},
)
# If we've defined an S3 key, but not a bucket, let's use the default bucket if it's defined in configuration.
if s3_key and not s3_bucket:
s3_bucket = config.get("consoleme_s3_bucket")
if redis_key:
if redis_data_type == "str":
if isinstance(data, str):
red.set(redis_key, data)
else:
red.set(
redis_key, json.dumps(data, cls=SetEncoder, default=json_encoder)
)
elif redis_data_type == "hash":
if data:
red.hmset(redis_key, data)
else:
raise UnsupportedRedisDataType("Unsupported redis_data_type passed")
red.hset(last_updated_redis_key, redis_key, last_updated)
if s3_bucket and s3_key:
s3_extra_kwargs = {}
if isinstance(s3_expires, int):
s3_extra_kwargs["Expires"] = datetime.utcfromtimestamp(s3_expires)
data_for_s3 = json.dumps(
{"last_updated": last_updated, "data": data},
cls=SetEncoder,
default=json_encoder,
indent=2,
).encode()
if s3_key.endswith(".gz"):
data_for_s3 = gzip.compress(data_for_s3)
put_object(Bucket=s3_bucket, Key=s3_key, Body=data_for_s3, **s3_extra_kwargs)
def cache_resources_from_aws_config_across_accounts(
run_subtasks: bool = True,
wait_for_subtask_completion: bool = True,
) -> Dict[str, Union[Union[str, int], Any]]:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
resource_redis_cache_key = config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
)
log_data = {
"function": function,
"resource_redis_cache_key": resource_redis_cache_key,
}
if not config.get(
"celery.cache_resources_from_aws_config_across_accounts.enabled", True
):
log_data[
"message"
] = "Skipping task: Caching resources from AWS Config is disabled."
log.debug(log_data)
return log_data
tasks = []
# First, get list of accounts
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") in ["prod", "dev"]:
tasks.append(cache_resources_from_aws_config_for_account.s(account_id))
else:
if account_id in config.get("celery.test_account_ids", []):
tasks.append(cache_resources_from_aws_config_for_account.s(account_id))
if tasks:
if run_subtasks:
results = group(*tasks).apply_async()
if wait_for_subtask_completion:
# results.join() forces function to wait until all tasks are complete
results.join(disable_sync_subtasks=False)
# Delete roles in Redis cache with expired TTL
all_resources = red.hgetall(resource_redis_cache_key)
if all_resources:
expired_arns = []
for arn, resource_entry_j in all_resources.items():
resource_entry = ujson.loads(resource_entry_j)
if datetime.fromtimestamp(resource_entry["ttl"]) < datetime.utcnow():
expired_arns.append(arn)
if expired_arns:
for expired_arn in expired_arns:
all_resources.pop(expired_arn, None)
red.hdel(resource_redis_cache_key, *expired_arns)
log_data["number_of_resources"] = len(all_resources)
# Cache all resource ARNs into a single file. Note: This runs synchronously with this task. This task triggers
# resource collection on all accounts to happen asynchronously. That means when we store or delete data within
# this task, we're always going to be caching the results from the previous task.
if config.region == config.get(
"celery.active_region", config.region
) or config.get("environment") in ["dev"]:
# Refresh all resources after deletion of expired entries
all_resources = red.hgetall(resource_redis_cache_key)
s3_bucket = config.get("aws_config_cache_combined.s3.bucket")
s3_key = config.get(
"aws_config_cache_combined.s3.file",
"aws_config_cache_combined/aws_config_resource_cache_combined_v1.json.gz",
)
async_to_sync(store_json_results_in_redis_and_s3)(
all_resources, s3_bucket=s3_bucket, s3_key=s3_key
)
stats.count(f"{function}.success")
return log_data | null |
162,219 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
if config.get("celery.purge"):
# Useful to clear celery queue in development
with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
app.control.purge()
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
if config.get("development", False):
# If debug mode, we will set up the schedule to run the next minute after the job starts
time_to_start = datetime.utcnow() + timedelta(minutes=1)
dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
schedule_30_minute = dev_schedule
schedule_45_minute = dev_schedule
schedule_1_hour = dev_schedule
schedule_6_hours = dev_schedule
schedule_5_minutes = dev_schedule
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
"task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
"options": {"expires": 300},
"schedule": schedule_minute,
}
if config.get("celery.cache_cloudtrail_denies.enabled"):
schedule["cache_cloudtrail_denies"] = {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
"options": {"expires": 300},
"schedule": schedule_minute,
}
schedule["cache_cloudtrail_errors_by_arn"] = {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
"options": {"expires": 300},
"schedule": schedule_1_hour,
}
if config.get("celery.clear_tasks_for_development", False):
schedule = {}
async def get_account_id_to_name_mapping(
status="active", environment=None, force_sync=False
):
redis_key = config.get(
"cache_cloud_accounts.redis.key.all_accounts_key", "ALL_AWS_ACCOUNTS"
)
accounts = await retrieve_json_data_from_redis_or_s3(redis_key, default={})
if force_sync or not accounts or not accounts.get("accounts"):
# Force a re-sync and then retry
await cache_cloud_accounts()
accounts = await retrieve_json_data_from_redis_or_s3(
redis_key,
s3_bucket=config.get("cache_cloud_accounts.s3.bucket"),
s3_key=config.get(
"cache_cloud_accounts.s3.file",
"cache_cloud_accounts/accounts_v1.json.gz",
),
default={},
)
account_id_to_name = {}
for account in accounts.get("accounts", []):
if status and account.get("status") != status:
continue
if environment and account.get("environment") != environment:
continue
account_id_to_name[account["id"]] = account["name"]
return account_id_to_name
The provided code snippet includes necessary dependencies for implementing the `get_iam_role_limit` function. Write a Python function `def get_iam_role_limit() -> dict` to solve the following problem:
This function will gather the number of existing IAM Roles and IAM Role quota in all owned AWS accounts.
Here is the function:
def get_iam_role_limit() -> dict:
"""
This function will gather the number of existing IAM Roles and IAM Role quota in all owned AWS accounts.
"""
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
num_accounts = 0
num_roles = 0
if not config.get("celery.get_iam_role_limit.enabled"):
return {}
success_message = "Not running - Inactive region"
if config.region == config.get(
"celery.active_region", config.region
) and config.get("environment") in ["prod", "dev"]:
@sts_conn("iam", client_kwargs=config.get("boto3.client_kwargs", {}))
def _get_delivery_channels(**kwargs) -> list:
"""Gets the delivery channels in the account/region -- calls are wrapped with CloudAux"""
return kwargs.pop("client").get_account_summary(**kwargs)
success_message = "Task successfully completed"
# First, get list of accounts
accounts_d: Dict = async_to_sync(get_account_id_to_name_mapping)()
num_accounts = len(accounts_d.keys())
for account_id, account_name in accounts_d.items():
try:
iam_summary = _get_delivery_channels(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
)
num_iam_roles = iam_summary["SummaryMap"]["Roles"]
iam_role_quota = iam_summary["SummaryMap"]["RolesQuota"]
iam_role_quota_ratio = num_iam_roles / iam_role_quota
num_roles += num_iam_roles
log_data = {
"function": function,
"message": "IAM role quota for account",
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"iam_role_quota_ratio": iam_role_quota_ratio,
"account_id": account_id,
"account_name": account_name,
}
stats.gauge(
f"{function}.quota_ratio_gauge",
iam_role_quota_ratio,
tags={
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"account_id": account_id,
"account_name": account_name,
},
)
log.debug(log_data)
except ClientError as e:
log_data = {
"function": function,
"message": "Error retrieving IAM quota",
"account_id": account_id,
"account_name": account_name,
"error": e,
}
stats.count(f"{function}.error", tags={"account_id": account_id})
log.error(log_data, exc_info=True)
sentry_sdk.capture_exception()
raise
log_data = {
"function": function,
"num_accounts": num_accounts,
"num_roles": num_roles,
"message": success_message,
}
log.debug(log_data)
return log_data | This function will gather the number of existing IAM Roles and IAM Role quota in all owned AWS accounts. |
162,220 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
async def cache_all_policy_requests(
user="consoleme", redis_key=None, s3_bucket=None, s3_key=None
):
def cache_policy_requests() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
requests = async_to_sync(cache_all_policy_requests)()
log_data = {
"function": function,
"num_requests": len(requests),
"message": "Successfully cached requests",
}
return log_data | null |
162,221 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
log = config.get_logger()
async def cache_cloud_accounts() -> CloudAccountModelArray:
def cache_cloud_account_mapping() -> Dict:
    """Refresh the cloud-account mapping cache and report the outcome.

    Runs the async ``cache_cloud_accounts`` helper synchronously (Celery
    workers are sync) and returns a log summary of what was cached.
    """
    func_name = f"{__name__}.{sys._getframe().f_code.co_name}"
    mapping = async_to_sync(cache_cloud_accounts)()
    summary = {
        "function": func_name,
        "num_accounts": len(mapping.accounts),
        "message": "Successfully cached cloud account mapping",
    }
    log.debug(summary)
    return summary
162,222 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
log = config.get_logger()
async def cache_all_scps() -> Dict[str, Any]:
def cache_scps_across_organizations() -> Dict:
    """Refresh the cached service control policies for every organization.

    Synchronously invokes the async ``cache_all_scps`` helper and returns
    a log summary with the number of organizations processed.
    """
    func_name = f"{__name__}.{sys._getframe().f_code.co_name}"
    all_scps = async_to_sync(cache_all_scps)()
    summary = {
        "function": func_name,
        "message": "Successfully cached service control policies",
        "num_organizations": len(all_scps),
    }
    log.debug(summary)
    return summary
162,223 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
log = config.get_logger()
async def cache_org_structure() -> Dict[str, Any]:
    """Build and cache the full AWS Organizations structure.

    Iterates every organization listed under the
    ``cache_accounts_from_aws_organizations`` configuration key, retrieves
    each organization's structure, merges everything into one mapping, and
    persists that mapping to Redis (and to S3 when running in the active
    Celery region or in a dev/test environment).

    Returns:
        Dict[str, Any]: the merged organization structure that was cached.

    Raises:
        MissingConfigurationValue: if an organization entry lacks a master
            account id, or if no role to assume can be determined.
    """
    merged_structure: Dict[str, Any] = {}
    for org_config in config.get("cache_accounts_from_aws_organizations", []):
        master_account_id = org_config.get("organizations_master_account_id")
        assume_role = org_config.get(
            "organizations_master_role_to_assume", config.get("policies.role_name")
        )
        if not master_account_id:
            raise MissingConfigurationValue(
                "Your AWS Organizations Master Account ID is not specified in configuration. "
                "Unable to sync accounts from "
                "AWS Organizations"
            )
        if not assume_role:
            raise MissingConfigurationValue(
                "ConsoleMe doesn't know what role to assume to retrieve account information "
                "from AWS Organizations. please set the appropriate configuration value."
            )
        merged_structure.update(
            await retrieve_org_structure(master_account_id, region=config.region)
        )
    redis_key = config.get(
        "cache_organization_structure.redis.key.org_structure_key", "AWS_ORG_STRUCTURE"
    )
    s3_bucket = None
    s3_key = None
    # Only write to S3 from the active Celery region, or from dev/test
    # environments where every region is allowed to persist.
    in_active_region = config.region == config.get(
        "celery.active_region", config.region
    )
    if in_active_region or config.get("environment") in ["dev", "test"]:
        s3_bucket = config.get("cache_organization_structure.s3.bucket")
        s3_key = config.get(
            "cache_organization_structure.s3.file",
            "scps/cache_org_structure_v1.json.gz",
        )
    await store_json_results_in_redis_and_s3(
        merged_structure,
        redis_key=redis_key,
        s3_bucket=s3_bucket,
        s3_key=s3_key,
    )
    return merged_structure
def cache_organization_structure() -> Dict:
    """Refresh the cached AWS organization structure and report the outcome.

    Synchronously invokes the async ``cache_org_structure`` helper and
    returns a log summary with the number of organizations cached.
    """
    func_name = f"{__name__}.{sys._getframe().f_code.co_name}"
    structure = async_to_sync(cache_org_structure)()
    summary = {
        "function": func_name,
        "message": "Successfully cached organization structure",
        "num_organizations": len(structure),
    }
    log.debug(summary)
    return summary
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.