id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
156,159 | import logging
import os
from typing import Optional
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from opal_common.authentication.types import EncryptionKeyFormat, PrivateKey, PublicKey
from opal_common.logging.decorators import log_exception
def to_bytes(key: str, encoding: str = "utf-8"):
    """Convert a textual key into raw bytes.

    The underlying crypto library expects ``bytes`` keys, so ``str`` keys
    must be encoded (utf-8 by default) before use.
    """
    return bytes(key, encoding)
def maybe_decode_multiline_key(key: str) -> bytes:
    """if key contents are passed via env var, we allow to encode multiline
    keys with a simple replace of each newline (\n) char with underscore (_).
    this method detects if the provided key contains such encoding, and
    if so reverses it.
    """
    # A key that already contains real newlines is assumed to be unencoded
    # and is passed through untouched.
    if "\n" not in key:
        # Reverse the env-var-friendly encoding: "_" stood in for newlines.
        key = key.replace("_", "\n")
        # PEM-style material must end with a trailing newline.
        # NOTE(review): the trailing-newline fixup is applied only on the
        # decode path here — confirm an unencoded key is always expected to
        # already carry its trailing newline.
        if not key.endswith("\n"):
            key = key + "\n"
    return to_bytes(key)
# Alias for the union of public-key classes accepted by the crypto backend
# (PUBLIC_KEY_TYPES is presumably imported from `cryptography` — TODO confirm;
# the import is not visible in this snippet).
PublicKey = PUBLIC_KEY_TYPES
class EncryptionKeyFormat(str, Enum):
    """represent the supported formats for storing encryption keys.

    - PEM (https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail)
    - SSH (RFC4716) or short format (RFC4253, section-6.6, explained here: https://coolaj86.com/articles/the-ssh-public-key-format/)
    - DER (https://en.wikipedia.org/wiki/X.690#DER_encoding)
    """

    # Subclassing str makes members compare equal to their lowercase values,
    # so config strings like "pem" match directly.
    pem = "pem"
    ssh = "ssh"
    der = "der"
The provided code snippet includes necessary dependencies for implementing the `cast_public_key` function. Write a Python function `def cast_public_key(value: str, key_format: EncryptionKeyFormat) -> Optional[PublicKey]` to solve the following problem:
Parse a string into a valid cryptographic public key. The string can represent a file path in which the key exists, or the actual key contents.
Here is the function:
def cast_public_key(value: str, key_format: EncryptionKeyFormat) -> Optional[PublicKey]:
    """Parse a string into a valid cryptographic public key.

    the string can represent a file path in which the key exists, or the
    actual key contents.

    Args:
        value: a path to the key file, the raw key contents (possibly with
            newlines encoded as underscores), an already-parsed key object
            (returned as-is), or None.
        key_format: the serialization format of the stored key.

    Returns:
        The parsed public key, or None when value is None or key_format is
        not a recognized EncryptionKeyFormat member.
    """
    if value is None:
        return None
    # Already a parsed key object -> nothing to do.
    if isinstance(value, PublicKey.__args__):
        return value
    key_path = os.path.expanduser(value)
    if os.path.isfile(key_path):
        # fix: close the file handle deterministically (was a leaked open())
        with open(key_path, "rb") as key_file:
            raw_key = key_file.read()
    elif key_format == EncryptionKeyFormat.ssh:  # ssh key format is one line
        raw_key = to_bytes(value)
    else:
        raw_key = maybe_decode_multiline_key(value)
    if key_format == EncryptionKeyFormat.pem:
        return serialization.load_pem_public_key(raw_key, backend=default_backend())
    if key_format == EncryptionKeyFormat.ssh:
        return serialization.load_ssh_public_key(raw_key, backend=default_backend())
    if key_format == EncryptionKeyFormat.der:
        return serialization.load_der_public_key(raw_key, backend=default_backend())
    # Unknown format: keep the original implicit-None behavior, but explicit.
    return None
156,160 | from fastapi import FastAPI, Request, status
from fastapi.encoders import jsonable_encoder
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from opal_common.config import opal_common_config
from opal_common.logger import logger
from pydantic import BaseModel
def register_default_server_exception_handler(app: FastAPI):
    """Registers a default exception handler for HTTP 500 exceptions.

    Since fastapi does not include CORS headers by default in 500
    exceptions, we need to include them manually. Otherwise the frontend
    cries on the wrong issue.
    """

    async def default_server_exception_handler(request: Request, exception: Exception):
        logger.exception("Uncaught server exception: {exc}", exc=exception)
        # fix: build an explicit JSON 500 response (the previous code called
        # an undefined `get_response()` helper).
        response = JSONResponse(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            content=jsonable_encoder({"detail": "Uncaught server exception"}),
        )
        # Since the CORSMiddleware is not executed when an unhandled server exception
        # occurs, we need to manually set the CORS headers ourselves if we want the FE
        # to receive a proper JSON 500, opposed to a CORS error.
        # Setting CORS headers on server errors is a bit of a philosophical topic of
        # discussion in many frameworks, and it is currently not handled in FastAPI.
        # See dotnet core for a recent discussion, where ultimately it was
        # decided to return CORS headers on server failures:
        # https://github.com/dotnet/aspnetcore/issues/2378
        origin = request.headers.get("origin")
        if origin:
            # Have the middleware do the heavy lifting for us to parse
            # all the config, then update our response headers
            cors = CORSMiddleware(
                app=app,
                allow_origins=opal_common_config.ALLOWED_ORIGINS,
                allow_credentials=True,
                allow_methods=["*"],
                allow_headers=["*"],
            )
            # Logic directly from Starlette's CORSMiddleware:
            # https://github.com/encode/starlette/blob/master/starlette/middleware/cors.py#L152
            response.headers.update(cors.simple_headers)
            has_cookie = "cookie" in request.headers
            # If request includes any cookie headers, then we must respond
            # with the specific origin instead of '*'.
            if cors.allow_all_origins and has_cookie:
                response.headers["Access-Control-Allow-Origin"] = origin
            # If we only allow specific origins, then we have to mirror back
            # the Origin header in the response.
            elif not cors.allow_all_origins and cors.is_allowed_origin(origin=origin):
                response.headers["Access-Control-Allow-Origin"] = origin
                response.headers.add_vary_header("Origin")
        return response

    # fix: actually attach the handler to the app — the function previously
    # only defined it, so uncaught exceptions never reached it.
    app.add_exception_handler(Exception, default_server_exception_handler)
def configure_cors_middleware(app: FastAPI):
    """Attach the CORS middleware, restricted to the configured origins."""
    cors_options = dict(
        allow_origins=opal_common_config.ALLOWED_ORIGINS,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    app.add_middleware(CORSMiddleware, **cors_options)
def configure_middleware(app: FastAPI):
    """Wire up the default middleware/handlers for an OPAL FastAPI app."""
    # Register the 500 handler so CORS headers survive uncaught server errors,
    # then attach the regular CORS middleware for all other responses.
    register_default_server_exception_handler(app)
    configure_cors_middleware(app)
156,161 | import asyncio
import redis.asyncio as redis
from opal_common.logger import logger
The provided code snippet includes necessary dependencies for implementing the `run_locked` function. Write a Python function `async def run_locked( _redis: redis.Redis, lock_name: str, coro: asyncio.coroutine, timeout: int = 10 )` to solve the following problem:
This function runs a coroutine wrapped in a redis lock, in a way that prevents hanging locks. Hanging locks can happen when a process crashes while holding a lock. This function sets a redis enforced timeout, and reacquires the lock every timeout * 0.8 (as long as it runs)
Here is the function:
async def run_locked(
    _redis: redis.Redis, lock_name: str, coro, timeout: int = 10
):
    """This function runs a coroutine wrapped in a redis lock, in a way that
    prevents hanging locks. Hanging locks can happen when a process crashes
    while holding a lock.

    This function sets a redis enforced timeout, and reacquires the lock
    every timeout * 0.8 (as long as it runs).

    Args:
        _redis: the redis client to take the distributed lock on.
        lock_name: redis key used for the lock.
        coro: the coroutine to execute while holding the lock.
            (fix: the ``asyncio.coroutine`` annotation was removed — that
            attribute no longer exists on Python 3.11+.)
        timeout: redis-enforced lock expiry in seconds.

    Returns:
        Whatever the wrapped coroutine returns; re-raises its exception.
    """
    lock = _redis.lock(lock_name, timeout=timeout)
    logger.debug(f"Trying to acquire redis lock: {lock_name}")
    # fix: acquire OUTSIDE the try block — otherwise a failed acquire still
    # triggered release() in `finally`, raising LockError and masking the
    # original failure.
    await lock.acquire()
    logger.debug(f"Acquired lock: {lock_name}")
    try:
        locked_task = asyncio.create_task(coro)
        while True:
            done, _ = await asyncio.wait(
                (locked_task,),
                timeout=timeout * 0.8,
                return_when=asyncio.FIRST_COMPLETED,
            )
            if locked_task in done:
                break
            else:
                # Extend lock timeout as long as the coroutine is still running
                await lock.reacquire()
                logger.debug(f"Reacquired lock: {lock_name}")
        # fix: propagate the coroutine's outcome — previously the task's
        # result (and any exception) was silently discarded.
        return locked_task.result()
    finally:
        # fix: only release if we still own the lock; it may have expired
        # server-side, in which case release() raises LockError.
        if await lock.owned():
            await lock.release()
            logger.debug(f"Released lock: {lock_name}")
156,162 | import logging
from typing import Optional
from urllib.parse import urlparse
from ddtrace import Span, config, patch, tracer
from ddtrace.filters import TraceFilter
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `configure_apm` function. Write a Python function `def configure_apm(enable_apm: bool, service_name: str)` to solve the following problem:
optionally enable datadog APM / profiler.
Here is the function:
def configure_apm(enable_apm: bool, service_name: str):
    """optionally enable datadog APM / profiler."""
    # NOTE: service_name is currently unused by this function.
    if not enable_apm:
        logger.info("DataDog APM disabled")
        tracer.configure(enabled=False)
        return

    logger.info("Enabling DataDog APM")
    # logging.getLogger("ddtrace").propagate = False

    class FilterRootPathTraces(TraceFilter):
        """Drops traces whose root span hit the bare "/" path."""

        def process_trace(self, trace: list[Span]) -> Optional[list[Span]]:
            for span in trace:
                # Any non-root span means we keep the whole trace.
                if span.parent_id is not None:
                    return trace
                if url := span.get_tag("http.url"):
                    if urlparse(url).path == "/":
                        return None
            return trace

    patch(
        fastapi=True,
        redis=True,
        asyncpg=True,
        aiohttp=True,
        loguru=True,
    )
    tracer.configure(settings={"FILTERS": [FilterRootPathTraces()]})
156,163 | import os
from typing import Optional
import datadog
from loguru import logger
def configure_metrics(
    enable_metrics: bool, statsd_host: str, statsd_port: int, namespace: str = ""
):
    """Initialize the DogStatsD client; no-op when metrics are disabled."""
    if not enable_metrics:
        logger.info("DogStatsD metrics disabled")
        return

    logger.info(
        "DogStatsD metrics enabled; statsd: {host}:{port}",
        host=statsd_host,
        port=statsd_port,
    )
    # Default the namespace from the datadog service name, then normalize it
    # into statsd-friendly characters.
    effective_namespace = namespace or os.environ.get("DD_SERVICE", "")
    effective_namespace = effective_namespace.lower().replace("-", "_")
    datadog.initialize(
        statsd_host=statsd_host,
        statsd_port=statsd_port,
        statsd_namespace=f"permit.{effective_namespace}",
    )
156,164 | import os
from typing import Optional
import datadog
from loguru import logger
def _format_tags(tags: Optional[dict[str, str]]) -> Optional[list[str]]:
if not tags:
return None
return [f"{k}:{v}" for k, v in tags.items()]
def increment(metric: str, tags: Optional[dict[str, str]] = None):
    """Increment a DogStatsD counter, with optional tags."""
    formatted = _format_tags(tags)
    datadog.statsd.increment(metric, tags=formatted)
156,165 | import os
from typing import Optional
import datadog
from loguru import logger
def _format_tags(tags: Optional[dict[str, str]]) -> Optional[list[str]]:
if not tags:
return None
return [f"{k}:{v}" for k, v in tags.items()]
def decrement(metric: str, tags: Optional[dict[str, str]] = None):
    """Decrement a DogStatsD counter, with optional tags."""
    formatted = _format_tags(tags)
    datadog.statsd.decrement(metric, tags=formatted)
156,166 | import os
from typing import Optional
import datadog
from loguru import logger
def _format_tags(tags: Optional[dict[str, str]]) -> Optional[list[str]]:
if not tags:
return None
return [f"{k}:{v}" for k, v in tags.items()]
def gauge(metric: str, value: float, tags: Optional[dict[str, str]] = None):
    """Set a DogStatsD gauge to ``value``, with optional tags."""
    formatted = _format_tags(tags)
    datadog.statsd.gauge(metric, value, tags=formatted)
156,167 | import os
import ssl
from typing import Optional
from opal_common.config import opal_common_config
# Module-level singleton holding all OPAL_*-prefixed configuration values.
opal_common_config = OpalCommonConfig(prefix="OPAL_")
The provided code snippet includes necessary dependencies for implementing the `get_custom_ssl_context` function. Write a Python function `def get_custom_ssl_context() -> Optional[ssl.SSLContext]` to solve the following problem:
Potentially (if enabled), returns a custom ssl context that respect self-signed certificates. More accurately, may return an ssl context that respects a local CA as a valid issuer.
Here is the function:
def get_custom_ssl_context() -> Optional[ssl.SSLContext]:
    """Potentially (if enabled), returns a custom ssl context that respect
    self-signed certificates.

    More accurately, may return an ssl context that respects a local CA
    as a valid issuer.

    Returns:
        An ssl.SSLContext trusting the configured CA file, or None when the
        feature is disabled, unconfigured, or the CA file does not exist.
    """
    if not opal_common_config.CLIENT_SELF_SIGNED_CERTIFICATES_ALLOWED:
        return None
    ca_file: Optional[str] = opal_common_config.CLIENT_SSL_CONTEXT_TRUSTED_CA_FILE
    # fix: one falsy check covers both None and the empty string
    # (previously two redundant back-to-back checks).
    if not ca_file:
        return None
    ca_file_path = os.path.expanduser(ca_file)
    if not os.path.isfile(ca_file_path):
        return None
    return ssl.create_default_context(cafile=ca_file_path)
156,168 | import asyncio
import json
import secrets
from datetime import timedelta
from enum import Enum
from typing import List, Optional, Tuple
from uuid import uuid4
import typer
from opal_common.schemas.data import DataSourceEntry, DataUpdate
from opal_common.schemas.security import AccessTokenRequest, PeerType
class SecretFormat(str, Enum):
    """Output encodings supported by the generate-secret CLI command."""

    hex = "hex"
    bytes = "bytes"
    urlsafe = "urlsafe"
def generate_secret(
    size: int = typer.Option(32, help="size in bytes of the secret"),
    format: SecretFormat = SecretFormat.urlsafe,
):
    """Generate a cryptographically strong random secret and print it."""
    # Map each output format to its generator; urlsafe is the fallback.
    generators = {
        SecretFormat.hex: lambda: secrets.token_hex(size),
        SecretFormat.bytes: lambda: repr(secrets.token_bytes(size)),
    }
    produce = generators.get(format, lambda: secrets.token_urlsafe(size))
    typer.echo(produce())
156,169 | import asyncio
import json
import secrets
from datetime import timedelta
from enum import Enum
from typing import List, Optional, Tuple
from uuid import uuid4
import typer
from opal_common.schemas.data import DataSourceEntry, DataUpdate
from opal_common.schemas.security import AccessTokenRequest, PeerType
class PeerType(str, Enum):
    """The kind of peer an OPAL access token is issued for."""

    client = "client"
    datasource = "datasource"
    listener = "listener"
class AccessTokenRequest(BaseModel):
    """a request to generate an access token to opal server."""

    # Unique id for this token request (also embedded in the issued JWT).
    id: UUID = Field(default_factory=uuid4)
    type: PeerType = Field(PeerType.client, description=PEER_TYPE_DESCRIPTION)
    ttl: timedelta = Field(timedelta(days=365), description=TTL_DESCRIPTION)
    claims: dict = Field({}, description=CLAIMS_DESCRIPTION)

    # Coerces `type` into a PeerType member.
    # NOTE(review): presumably this is a pydantic @validator("type", pre=True)
    # — the decorator appears to be missing from this snippet; confirm
    # against the original module.
    def force_enum(cls, v):
        if isinstance(v, str):
            return PeerType(v)
        if isinstance(v, PeerType):
            return v
        raise ValueError(f"invalid value: {v}")

    class Config:
        # Serialize enum members by value; allow population by field name.
        use_enum_values = True
        allow_population_by_field_name = True
The provided code snippet includes necessary dependencies for implementing the `obtain_token` function. Write a Python function `def obtain_token( master_token: str = typer.Argument( ..., help="The master token secret the OPAL-server was initialized with", envvar="OPAL_MASTER_TOKEN", ), server_url: str = typer.Option( "http://localhost:7002", help="url of the OPAL-server to obtain the token from" ), type: PeerType = PeerType("client"), ttl: Tuple[int, str] = typer.Option( (365, "days"), help="Time-To-Live / expiration for the token in `<int> <str>` e.g. `365 days`, or `1000000 milliseconds` ", ), claims: str = typer.Option( "{}", help="claims to to include in the returned signed JWT as a JSON string", callback=lambda x: json.loads(x), ), just_the_token: bool = typer.Option( True, help="Should the command return only the cryptographic token, or the full JSON object", ), )` to solve the following problem:
Obtain a secret JWT (JSON-Web-Token) from the server, to be used by clients or data sources for authentication Using the master token (as assigned to the server as OPAL_AUTH_MASTER_TOKEN)
Here is the function:
def obtain_token(
    master_token: str = typer.Argument(
        ...,
        help="The master token secret the OPAL-server was initialized with",
        envvar="OPAL_MASTER_TOKEN",
    ),
    server_url: str = typer.Option(
        "http://localhost:7002", help="url of the OPAL-server to obtain the token from"
    ),
    type: PeerType = PeerType("client"),
    ttl: Tuple[int, str] = typer.Option(
        (365, "days"),
        help="Time-To-Live / expiration for the token in `<int> <str>` e.g. `365 days`, or `1000000 milliseconds` ",
    ),
    claims: str = typer.Option(
        "{}",
        help="claims to to include in the returned signed JWT as a JSON string",
        callback=lambda x: json.loads(x),
    ),
    just_the_token: bool = typer.Option(
        True,
        help="Should the command return only the cryptographic token, or the full JSON object",
    ),
):
    """Obtain a secret JWT (JSON-Web-Token) from the server, to be used by
    clients or data sources for authentication Using the master token (as
    assigned to the server as OPAL_AUTH_MASTER_TOKEN)"""
    from aiohttp import ClientSession

    token_endpoint = f"{server_url}/token"
    # Expand the (amount, unit) tuple into a timedelta, e.g. (365, "days").
    amount, unit = ttl
    ttl_delta = timedelta(**{unit: amount})

    async def request_token():
        request_body = AccessTokenRequest(type=type, ttl=ttl_delta, claims=claims).json()
        async with ClientSession(
            headers={"Authorization": f"bearer {master_token}"}
        ) as session:
            response = await session.post(
                token_endpoint,
                data=request_body,
                headers={"content-type": "application/json"},
            )
            payload = await response.json()
        return payload["token"] if just_the_token else payload

    typer.echo(asyncio.run(request_token()))
156,170 | import asyncio
import json
import secrets
from datetime import timedelta
from enum import Enum
from typing import List, Optional, Tuple
from uuid import uuid4
import typer
from opal_common.schemas.data import DataSourceEntry, DataUpdate
from opal_common.schemas.security import AccessTokenRequest, PeerType
class DataSourceEntry(BaseModel):
    """
    Data source configuration - where client's should retrieve data from and how they should store it
    """

    # Ensures PATCH payloads are well-formed JSON-patch action lists.
    # NOTE(review): presumably a pydantic @validator("data") — the decorator
    # appears to be missing from this snippet; confirm against the original
    # module.
    def validate_save_method(cls, value, values):
        # Only PUT (replace document) and PATCH (JSON-patch) writes are supported.
        if values["save_method"] not in ["PUT", "PATCH"]:
            raise ValueError("'save_method' must be either PUT or PATCH")
        # PATCH data must be a list of JSONPatchAction items (RFC 6902).
        if values["save_method"] == "PATCH" and (
            not isinstance(value, list)
            or not all(isinstance(elem, JSONPatchAction) for elem in value)
        ):
            raise TypeError(
                "'data' must be of type JSON patch request when save_method is PATCH"
            )
        return value

    # How to obtain the data
    url: str = Field(..., description="Url source to query for data")
    config: dict = Field(
        None,
        description="Suggested fetcher configuration (e.g. auth or method) to fetch data with",
    )
    # How to catalog data
    topics: List[str] = Field(
        [DEFAULT_DATA_TOPIC], description="topics the data applies to"
    )
    # How to save the data
    # see https://www.openpolicyagent.org/docs/latest/rest-api/#data-api path is the path nested under <OPA_SERVER>/<version>/data
    dst_path: str = Field("", description="OPA data api path to store the document at")
    save_method: str = Field(
        "PUT",
        description="Method used to write into OPA - PUT/PATCH, when using the PATCH method the data field should conform to the JSON patch schema defined in RFC 6902(https://datatracker.ietf.org/doc/html/rfc6902#section-3)",
    )
    data: Optional[JsonableValue] = Field(
        None,
        description="Data payload to embed within the data update (instead of having "
        "the client fetch it from the url).",
    )
class DataUpdate(BaseModel):
    """DataSources used as OPAL-server configuration Data update sent to
    clients."""

    # a UUID to identify this update (used as part of an updates complition callback)
    id: Optional[str] = None
    entries: List[DataSourceEntry] = Field(
        ..., description="list of related updates the OPAL client should perform"
    )
    reason: str = Field(None, description="Reason for triggering the update")
    # Configuration for how to notify other services on the status of Update
    callback: UpdateCallback = UpdateCallback(callbacks=[])
The provided code snippet includes necessary dependencies for implementing the `publish_data_update` function. Write a Python function `def publish_data_update( token: Optional[str] = typer.Argument( None, help="the JWT obtained from the server for authentication (see obtain-token command)", envvar="OPAL_CLIENT_TOKEN", ), server_url: str = typer.Option( "http://localhost:7002", help="url of the OPAL-server to send the update through", ), server_route: str = typer.Option( "/data/config", help="route in the server for update" ), reason: str = typer.Option("", help="The reason for the update"), entries: str = typer.Option( "[]", "--entries", "-e", help="Pass in the the DataUpdate entries as JSON", callback=lambda x: json.loads(x), ), src_url: str = typer.Option( None, help="[SINGLE-ENTRY-UPDATE] url of the data-source this update relates to, which the clients should approach", ), topics: List[str] = typer.Option( None, "--topic", "-t", help="[SINGLE-ENTRY-UPDATE] [List] topic (can several) for the published update (to be matched to client subscriptions)", ), data: str = typer.Option( None, help="[SINGLE-ENTRY-UPDATE] actual data to include in the update (if src_url is also supplied, it would be sent but not used)", ), src_config: str = typer.Option( "{}", help="[SINGLE-ENTRY-UPDATE] Fetching Config as JSON", callback=lambda x: json.loads(x), ), dst_path: str = typer.Option( "", help="[SINGLE-ENTRY-UPDATE] Path the client should set this value in its data-store", ), save_method: str = typer.Option( "PUT", help="[SINGLE-ENTRY-UPDATE] How the data should be saved into the give dst-path", ), )` to solve the following problem:
Publish a DataUpdate through an OPAL-server (indicated by --server_url). [SINGLE-ENTRY-UPDATE] Send a single update DataSourceEntry via the --src-url, --src-config, --topics, --dst-path, --save-method; you must include --src-url to use this flow. [Multiple entries] Set DataSourceEntries as JSON (via --entries); if you include a single entry as well, it will be merged into the given JSON
Here is the function:
def publish_data_update(
    token: Optional[str] = typer.Argument(
        None,
        help="the JWT obtained from the server for authentication (see obtain-token command)",
        envvar="OPAL_CLIENT_TOKEN",
    ),
    server_url: str = typer.Option(
        "http://localhost:7002",
        help="url of the OPAL-server to send the update through",
    ),
    server_route: str = typer.Option(
        "/data/config", help="route in the server for update"
    ),
    reason: str = typer.Option("", help="The reason for the update"),
    entries: str = typer.Option(
        "[]",
        "--entries",
        "-e",
        help="Pass in the the DataUpdate entries as JSON",
        callback=lambda x: json.loads(x),
    ),
    src_url: str = typer.Option(
        None,
        help="[SINGLE-ENTRY-UPDATE] url of the data-source this update relates to, which the clients should approach",
    ),
    topics: List[str] = typer.Option(
        None,
        "--topic",
        "-t",
        help="[SINGLE-ENTRY-UPDATE] [List] topic (can several) for the published update (to be matched to client subscriptions)",
    ),
    data: str = typer.Option(
        None,
        help="[SINGLE-ENTRY-UPDATE] actual data to include in the update (if src_url is also supplied, it would be sent but not used)",
    ),
    src_config: str = typer.Option(
        "{}",
        help="[SINGLE-ENTRY-UPDATE] Fetching Config as JSON",
        callback=lambda x: json.loads(x),
    ),
    dst_path: str = typer.Option(
        "",
        help="[SINGLE-ENTRY-UPDATE] Path the client should set this value in its data-store",
    ),
    save_method: str = typer.Option(
        "PUT",
        help="[SINGLE-ENTRY-UPDATE] How the data should be saved into the give dst-path",
    ),
):
    """Publish a DataUpdate through an OPAL-server (indicated by --server_url).

    [SINGLE-ENTRY-UPDATE] Send a single update DataSourceEntry via
    the --src-url, --src-config, --topics, --dst-path, --save-method
    must include --src-url to use this flow. [Multiple entries] Set
    DataSourceEntries as JSON (via --entries) if you include a
    single entry as well- it will be merged into the given JSON
    """
    from aiohttp import ClientResponse, ClientSession

    if not entries and not src_url:
        typer.secho(
            "You must provide either multiple entries (-e / --entries) or a single entry update (--src_url)",
            fg="red",
        )
        return
    if not isinstance(entries, list):
        # fix: typo in the user-facing message ("entires" -> "entries")
        typer.secho("Bad input for --entries was ignored", fg="red")
        entries = []
    entries: List[DataSourceEntry]
    # single entry update (if used, we ignore the value of "entries")
    if src_url is not None:
        entries = [
            DataSourceEntry(
                url=src_url,
                data=(None if data is None else json.loads(data)),
                topics=topics,
                dst_path=dst_path,
                save_method=save_method,
                config=src_config,
            )
        ]
    server_url = f"{server_url}{server_route}"
    update = DataUpdate(entries=entries, reason=reason)

    async def publish_update():
        headers = {"content-type": "application/json"}
        if token is not None:
            headers.update({"Authorization": f"bearer {token}"})
        async with ClientSession(headers=headers) as session:
            body = update.json()
            res = await session.post(server_url, data=body)
            return res

    async def get_response_text(res: ClientResponse):
        # NOTE(review): this reads the body after the ClientSession context
        # exited — presumably works because the response was fully received;
        # confirm against aiohttp's connection-release behavior.
        return await res.text()

    typer.echo("Publishing event:")
    typer.secho(f"{str(update)}", fg="cyan")
    res = asyncio.run(publish_update())
    if res.status == 200:
        typer.secho("Event Published Successfully", fg="green")
    else:
        # fix: the status code was never interpolated (missing f-string prefix)
        typer.secho(f"Event publishing failed with status-code - {res.status}", fg="red")
        text = asyncio.run(get_response_text(res))
        typer.echo(text)
156,171 | import typer
from .commands import all_commands
all_commands = [obtain_token, generate_secret, publish_data_update]
def get_typer_app():
    """Build the typer CLI app with every registered OPAL command attached."""
    cli = typer.Typer()
    for command in all_commands:
        cli.command()(command)
    return cli
156,172 | from pathlib import Path
from typing import IO, Callable, Generator, List, Optional, Set
from git import Repo
from git.objects import Blob, Commit, IndexObject, Tree
from opal_common.paths import PathUtils
class VersionedFile(VersionedNode):
    """Each instance of this class represents *one version* of a file (blob) in
    a git repo (the version of the file for a specific git commit)."""

    def __init__(self, blob: Blob, commit: Commit):
        super().__init__(blob, commit)
        self._blob: Blob = blob

    @property
    def blob(self) -> Blob:
        """the blob containing metadata for the file version."""
        # fix: must be a @property — `stream` below accesses
        # `self.blob.data_stream` as an attribute, not a call.
        return self._blob

    @property
    def stream(self) -> IO:
        """an io stream to the version of the file represented by that
        instance.

        reading that stream will return the contents of the file for
        that specific version (commit).
        """
        # fix: must be a @property — read_bytes() accesses `self.stream.read()`
        # as an attribute, not a call.
        return self.blob.data_stream

    def read_bytes(self) -> bytes:
        """returns the contents of the file as a byte array (without
        encoding)."""
        return self.stream.read()

    def read(self, encoding="utf-8") -> str:
        """returns the contents of the file as a string, decoded according to
        the input `encoding`.

        (by default, git usually encodes source files as utf-8).
        """
        return self.read_bytes().decode(encoding=encoding)
class PathUtils:
    """Static helpers for manipulating collections of filesystem paths."""

    @staticmethod
    def intermediate_directories(paths: List[Path]) -> List[Path]:
        """returns the set of all parent directories for a list of paths.

        i.e: calculate all partial paths that are directories.
        """
        directories = set()
        for path in paths:
            directories.update(path.parents)
        # fix: `sorted_list_from_set` was undefined in this module; return a
        # sorted list directly.
        return sorted(directories)

    @staticmethod
    def is_child_of_directories(path: Path, directories: Set[Path]) -> bool:
        """whether the input path is a child of one of the input
        directories."""
        return bool(directories & set(path.parents))

    @staticmethod
    def filter_children_paths_of_directories(
        paths: List[Path], directories: Set[Path]
    ) -> List[Path]:
        """returns only paths in :paths that are children of one of the paths
        in :directories."""
        return [
            path
            for path in paths
            if PathUtils.is_child_of_directories(path, directories)
        ]

    @staticmethod
    def non_intersecting_directories(paths: List[Path]) -> Set[Path]:
        """gets a list of paths (directories), and returns a set of directories
        that are non-intersecting, meaning no directory in the set is a parent
        of another directory in the set (i.e: parent directories "swallow"
        their subdirectories)."""
        output_paths = set()
        for candidate in paths:
            if set(candidate.parents) & output_paths:
                # the next candidate is covered by a parent which is already in output -> SKIP
                # or the next candidate is already in the list
                continue
            for out_path in list(output_paths):
                # the next candidate can displace a child from the output
                if candidate in list(out_path.parents):
                    output_paths.remove(out_path)
            output_paths.add(candidate)
        return output_paths

    @staticmethod
    def sort_paths_according_to_explicit_sorting(
        unsorted_paths: List[Path], explicit_sorting: List[Path]
    ) -> List[Path]:
        """the way this sorting works, is assuming that explicit_sorting does
        NOT necessarily contains all the paths found in the original list.

        We must ensure that all items in unsorted_paths must also exist
        in the output list.
        """
        unsorted = unsorted_paths.copy()
        sorted_paths: List[Path] = []
        for path in explicit_sorting:
            try:
                # we look for Path objects and not str for normalization of the path
                found_path: Path = unsorted.pop(unsorted.index(path))
                sorted_paths.append(found_path)
            except ValueError:
                continue  # skip, not found in the original list
        # add the remainder to the end of the sorted list
        sorted_paths.extend(unsorted)
        return sorted_paths

    @staticmethod
    def glob_style_match_path_to_list(path: str, match_paths: List[str]):
        """Check if given path matches any of the match_paths either via glob
        style matching or by being nested under - when the match path ends
        with "/**".

        return the match path if there's a match, and None otherwise
        """
        # check if any of our ignore paths match the given path
        for match_path in match_paths:
            # if the path is indicated as a parent via "/**" at the end
            if match_path.endswith("/**"):
                # check if the path is under the parent
                if path.startswith(match_path[:-3]):
                    return match_path
            # otherwise check for simple (non-recursive glob matching)
            else:
                path_object = Path(path)
                if path_object.match(match_path):
                    return match_path
        # if no match - this path shouldn't be ignored
        return None
The provided code snippet includes necessary dependencies for implementing the `is_under_directories` function. Write a Python function `def is_under_directories(f: VersionedFile, directories: Set[Path]) -> bool` to solve the following problem:
a filter on versioned files, filters only files under certain directories in the repo.
Here is the function:
def is_under_directories(f: VersionedFile, directories: Set[Path]) -> bool:
    """a filter on versioned files, filters only files under certain
    directories in the repo."""
    # A file is "under" a directory iff that directory is one of its parents.
    return bool(set(f.path.parents) & directories)
156,173 | import os
from pathlib import Path
from opal_common.config import opal_common_config
def save_ssh_key_to_pem_file(key: str) -> Path:
    """Persist an SSH private key (possibly underscore-encoded) to a pem file.

    Returns:
        The path the key was written to (taken from GIT_SSH_KEY_FILE config).
    """
    # Reverse the env-var-friendly encoding of newlines as underscores.
    key = key.replace("_", "\n")
    if not key.endswith("\n"):
        key = key + "\n"  # pem file must end with newline
    key_path = os.path.expanduser(opal_common_config.GIT_SSH_KEY_FILE)
    # makedirs(exist_ok=True) already tolerates an existing directory;
    # the explicit exists() pre-check was redundant.
    os.makedirs(os.path.dirname(key_path), exist_ok=True)
    # fix: create the file with 0600 permissions up front, instead of writing
    # it with default (world-readable) permissions and chmod-ing afterwards —
    # the old order briefly exposed the private key to other local users.
    fd = os.open(key_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, "w") as f:
        f.write(key)
    # os.open's mode only applies on creation; enforce it for pre-existing files.
    os.chmod(key_path, 0o600)
    return Path(key_path)
def is_ssh_repo_url(repo_url: str):
    """return True if the repo url uses SSH authentication.

    (see: https://docs.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh)
    """
    # str.startswith accepts a tuple of candidate prefixes.
    return repo_url.startswith((SSH_PREFIX, GIT_SSH_USER_PREFIX))
The provided code snippet includes necessary dependencies for implementing the `provide_git_ssh_environment` function. Write a Python function `def provide_git_ssh_environment(url: str, ssh_key: str)` to solve the following problem:
provides git SSH configuration via GIT_SSH_COMMAND. the git ssh config will be provided only if the following conditions are met: - the repo url is a git ssh url - an ssh private key is provided in Repo Cloner __init__
Here is the function:
def provide_git_ssh_environment(url: str, ssh_key: str):
    """provides git SSH configuration via GIT_SSH_COMMAND.

    the git ssh config will be provided only if the following conditions are met:
    - the repo url is a git ssh url
    - an ssh private key is provided in Repo Cloner __init__
    """
    if ssh_key is None or not is_ssh_repo_url(url):
        return {}  # no ssh config
    identity_file = save_ssh_key_to_pem_file(ssh_key)
    ssh_command = (
        "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=yes "
        f"-i {identity_file}"
    )
    return {
        "GIT_SSH_COMMAND": ssh_command,
        # NOTE(review): verbose git tracing is always enabled here —
        # presumably for debugging; confirm it is intended in production.
        "GIT_TRACE": "1",
        "GIT_CURL_VERBOSE": "1",
    }
156,174 | from pathlib import Path
from typing import IO, Callable, Generator, List, Optional, Set
from git import Repo
from git.diff import Diff, DiffIndex
from git.objects.commit import Commit
from opal_common.git.commit_viewer import VersionedFile
from opal_common.paths import PathUtils
DiffFilter = Callable[[Diff], bool]
The provided code snippet includes necessary dependencies for implementing the `apply_filter` function. Write a Python function `def apply_filter( generator: Generator[Diff, None, None], filter: Optional[DiffFilter] = None ) -> Generator[Diff, None, None]` to solve the following problem:
applies an optional filter on top of a Diff generator. returns only the diffs yielded by the source generator that pass the filter. if no filter is provided, returns the same results as the source generator.
Here is the function:
def apply_filter(
    generator: Generator[Diff, None, None], filter: Optional[DiffFilter] = None
) -> Generator[Diff, None, None]:
    """Lazily apply an optional predicate on top of a Diff generator.

    Yields only the diffs from the source generator for which ``filter``
    returns a truthy value; when no filter is provided, the source
    generator is passed through unchanged.
    """
    if filter is None:
        yield from generator
        return
    for candidate in generator:
        if filter(candidate):
            yield candidate
156,175 | from pathlib import Path
from typing import IO, Callable, Generator, List, Optional, Set
from git import Repo
from git.diff import Diff, DiffIndex
from git.objects.commit import Commit
from opal_common.git.commit_viewer import VersionedFile
from opal_common.paths import PathUtils
The provided code snippet includes necessary dependencies for implementing the `diffed_file_has_extension` function. Write a Python function `def diffed_file_has_extension( diff: Diff, extensions: Optional[List[str]] = None ) -> bool` to solve the following problem:
filter on git diffs, filters only diffs on files that have a certain extension/type. if the file is renamed/added/removed, it's enough that only one of its versions has the required extension.
Here is the function:
def diffed_file_has_extension(
    diff: Diff, extensions: Optional[List[str]] = None
) -> bool:
    """git-diff predicate: keep diffs on files with one of the given extensions.

    For renamed/added/removed files it is enough that either side of the
    diff (old path or new path) carries a wanted extension.  A ``None``
    extensions list disables filtering and accepts everything.
    """
    if extensions is None:
        return True  # no filter configured
    candidate_paths = (diff.a_path, diff.b_path)
    return any(
        p is not None and Path(p).suffix in extensions for p in candidate_paths
    )
156,176 | from pathlib import Path
from typing import IO, Callable, Generator, List, Optional, Set
from git import Repo
from git.diff import Diff, DiffIndex
from git.objects.commit import Commit
from opal_common.git.commit_viewer import VersionedFile
from opal_common.paths import PathUtils
class PathUtils:
    """Path-set helpers used for policy-repo directory bookkeeping."""

    # NOTE(review): these methods take no `self`/`cls` parameter -- they are
    # presumably decorated with @staticmethod in the original module and the
    # decorators were lost in extraction; confirm before instantiating.
    def intermediate_directories(paths: List[Path]) -> List[Path]:
        """returns the set of all parent directories for a list of paths.
        i.e: calculate all partial paths that are directories.
        """
        directories = set()
        for path in paths:
            # Path.parents yields every ancestor up to the filesystem root
            directories.update(path.parents)
        # sorted_list_from_set is a helper defined elsewhere in the module
        return sorted_list_from_set(directories)

    def is_child_of_directories(path: Path, directories: Set[Path]) -> bool:
        """whether the input path is a child of one of the input
        directories."""
        # a non-empty intersection between the path's ancestors and the given
        # directories means the path lives under one of them
        return bool(directories & set(path.parents))

    def filter_children_paths_of_directories(
        paths: List[Path], directories: Set[Path]
    ) -> List[Path]:
        """returns only paths in :paths that are children of one of the paths
        in :directories."""
        return [
            path
            for path in paths
            if PathUtils.is_child_of_directories(path, directories)
        ]

    def non_intersecting_directories(paths: List[Path]) -> Set[Path]:
        """gets a list of paths (directories), and returns a set of directories
        that are non-intersecting, meaning no directory in the set is a parent
        of another directory in the set (i.e: parent directories "swallow"
        their subdirectories)."""
        output_paths = set()
        for candidate in paths:
            if set(candidate.parents) & output_paths:
                # the next candidate is covered by a parent which is already in output -> SKIP
                # or the next candidate is already in the list
                continue
            for out_path in list(output_paths):
                # the next candidate can displace a child from the output
                if candidate in list(out_path.parents):
                    output_paths.remove(out_path)
            output_paths.add(candidate)
        return output_paths

    def sort_paths_according_to_explicit_sorting(
        unsorted_paths: List[Path], explicit_sorting: List[Path]
    ) -> List[Path]:
        """the way this sorting works, is assuming that explicit_sorting does
        NOT necessarily contains all the paths found in the original list.
        We must ensure that all items in unsorted_paths must also exist
        in the output list.
        """
        unsorted = unsorted_paths.copy()
        sorted_paths: List[Path] = []
        for path in explicit_sorting:
            try:
                # we look for Path objects and not str for normalization of the path
                found_path: Path = unsorted.pop(unsorted.index(path))
                sorted_paths.append(found_path)
            except ValueError:
                continue  # skip, not found in the original list
        # add the remainder to the end of the sorted list
        sorted_paths.extend(unsorted)
        return sorted_paths

    def glob_style_match_path_to_list(path: str, match_paths: List[str]):
        """
        Check if given path matches any of the match_paths either via glob style matching or by being nested under - when the match path ends with "/**"
        return the match path if there's a match, and None otherwise
        """
        # check if any of our ignore paths match the given path
        for match_path in match_paths:
            # if the path is indicated as a parent via "/**" at the end
            if match_path.endswith("/**"):
                # check if the path is under the parent
                if path.startswith(match_path[:-3]):
                    return match_path
            # otherwise check for simple (non-recursive glob matching)
            else:
                path_object = Path(path)
                if path_object.match(match_path):
                    return match_path
        # if no match - this path shouldn't be ignored
        return None
The provided code snippet includes necessary dependencies for implementing the `diffed_file_is_under_directories` function. Write a Python function `def diffed_file_is_under_directories(diff: Diff, directories: Set[Path]) -> bool` to solve the following problem:
filter on git diffs, filters only diffs on files that are located in certain directories. if a file is renamed/added/removed, it's enough that only one of its versions was located in one of the required directories.
Here is the function:
def diffed_file_is_under_directories(diff: Diff, directories: Set[Path]) -> bool:
    """git-diff predicate: keep diffs on files located under given directories.

    For renamed/added/removed files it is enough that either side of the
    diff (old path or new path) resides inside one of ``directories``.
    """
    candidate_paths = (diff.a_path, diff.b_path)
    return any(
        p is not None
        and PathUtils.is_child_of_directories(Path(p), directories)
        for p in candidate_paths
    )
156,177 | import aiohttp
The provided code snippet includes necessary dependencies for implementing the `is_http_error_response` function. Write a Python function `def is_http_error_response(response: aiohttp.ClientResponse) -> bool` to solve the following problem:
HTTP 400 and above are considered error responses.
Here is the function:
def is_http_error_response(response: aiohttp.ClientResponse) -> bool:
    """Whether the response carries an error status (HTTP 400 and above)."""
    status_code = response.status
    return status_code >= 400
156,178 | from pathlib import Path
from opal_common.config import opal_common_config
The provided code snippet includes necessary dependencies for implementing the `is_data_module` function. Write a Python function `def is_data_module(path: Path) -> bool` to solve the following problem:
Only json files named `data.json` can be included in official OPA bundles as static data files. checks if a given path points to such file.
Here is the function:
def is_data_module(path: Path) -> bool:
    """Whether ``path`` points at a static-data file for an official OPA bundle.

    OPA bundles only accept static data from json files named exactly
    ``data.json``.
    """
    filename = path.name
    return filename == "data.json"
156,179 | from pathlib import Path
from opal_common.config import opal_common_config
opal_common_config = OpalCommonConfig(prefix="OPAL_")
The provided code snippet includes necessary dependencies for implementing the `is_policy_module` function. Write a Python function `def is_policy_module(path: Path) -> bool` to solve the following problem:
Checks if a given path points to a rego file (extension == .rego). Only rego files are allowed in official OPA bundles as policy files.
Here is the function:
def is_policy_module(path: Path) -> bool:
    """Whether ``path`` points at a policy file (by configured extension).

    Official OPA bundles only accept policy code in rego files; the set of
    accepted extensions is read from
    ``opal_common_config.POLICY_REPO_POLICY_EXTENSIONS``.
    """
    allowed_extensions = opal_common_config.POLICY_REPO_POLICY_EXTENSIONS
    return path.suffix in allowed_extensions
156,180 | import re
from typing import Optional
REGO_PACKAGE_DECLARATION = re.compile(r"^package\s+([a-zA-Z0-9\.\"\[\]]+)$")
The provided code snippet includes necessary dependencies for implementing the `get_rego_package` function. Write a Python function `def get_rego_package(contents: str) -> Optional[str]` to solve the following problem:
try to parse the package name from rego file contents. return None if failed to parse (probably invalid .rego file)
Here is the function:
def get_rego_package(contents: str) -> Optional[str]:
    """Extract the package name declared in rego file contents.

    Scans line by line for a ``package <name>`` declaration and returns the
    name, or None when no declaration parses (probably an invalid .rego
    file).
    """
    for line in contents.splitlines():
        declaration = REGO_PACKAGE_DECLARATION.match(line)
        if declaration:
            return declaration.group(1)
    return None
156,181 | from pathlib import Path
from typing import List
from opal_common.paths import PathUtils
POLICY_PREFIX = "policy:"
The provided code snippet includes necessary dependencies for implementing the `remove_prefix` function. Write a Python function `def remove_prefix(topic: str, prefix: str = POLICY_PREFIX)` to solve the following problem:
removes the policy topic prefix to get the path (directory) encoded in the topic.
Here is the function:
def remove_prefix(topic: str, prefix: str = POLICY_PREFIX):
    """Strip the policy-topic prefix, recovering the directory path encoded
    in the topic.

    Topics that do not carry the prefix are returned unchanged.
    """
    if not topic.startswith(prefix):
        return topic
    return topic[len(prefix):]
156,182 | from pathlib import Path
from typing import List
from opal_common.paths import PathUtils
def policy_topics(paths: List[Path]) -> List[str]:
    """Prefix each directory path with the policy topic prefix."""
    return [f"{POLICY_PREFIX}{path}" for path in paths]
class PathUtils:
    """Path-set helpers used for policy-repo directory bookkeeping."""

    # NOTE(review): these methods take no `self`/`cls` parameter -- they are
    # presumably decorated with @staticmethod in the original module and the
    # decorators were lost in extraction; confirm before instantiating.
    def intermediate_directories(paths: List[Path]) -> List[Path]:
        """returns the set of all parent directories for a list of paths.
        i.e: calculate all partial paths that are directories.
        """
        directories = set()
        for path in paths:
            # Path.parents yields every ancestor up to the filesystem root
            directories.update(path.parents)
        # sorted_list_from_set is a helper defined elsewhere in the module
        return sorted_list_from_set(directories)

    def is_child_of_directories(path: Path, directories: Set[Path]) -> bool:
        """whether the input path is a child of one of the input
        directories."""
        # a non-empty intersection between the path's ancestors and the given
        # directories means the path lives under one of them
        return bool(directories & set(path.parents))

    def filter_children_paths_of_directories(
        paths: List[Path], directories: Set[Path]
    ) -> List[Path]:
        """returns only paths in :paths that are children of one of the paths
        in :directories."""
        return [
            path
            for path in paths
            if PathUtils.is_child_of_directories(path, directories)
        ]

    def non_intersecting_directories(paths: List[Path]) -> Set[Path]:
        """gets a list of paths (directories), and returns a set of directories
        that are non-intersecting, meaning no directory in the set is a parent
        of another directory in the set (i.e: parent directories "swallow"
        their subdirectories)."""
        output_paths = set()
        for candidate in paths:
            if set(candidate.parents) & output_paths:
                # the next candidate is covered by a parent which is already in output -> SKIP
                # or the next candidate is already in the list
                continue
            for out_path in list(output_paths):
                # the next candidate can displace a child from the output
                if candidate in list(out_path.parents):
                    output_paths.remove(out_path)
            output_paths.add(candidate)
        return output_paths

    def sort_paths_according_to_explicit_sorting(
        unsorted_paths: List[Path], explicit_sorting: List[Path]
    ) -> List[Path]:
        """the way this sorting works, is assuming that explicit_sorting does
        NOT necessarily contains all the paths found in the original list.
        We must ensure that all items in unsorted_paths must also exist
        in the output list.
        """
        unsorted = unsorted_paths.copy()
        sorted_paths: List[Path] = []
        for path in explicit_sorting:
            try:
                # we look for Path objects and not str for normalization of the path
                found_path: Path = unsorted.pop(unsorted.index(path))
                sorted_paths.append(found_path)
            except ValueError:
                continue  # skip, not found in the original list
        # add the remainder to the end of the sorted list
        sorted_paths.extend(unsorted)
        return sorted_paths

    def glob_style_match_path_to_list(path: str, match_paths: List[str]):
        """
        Check if given path matches any of the match_paths either via glob style matching or by being nested under - when the match path ends with "/**"
        return the match path if there's a match, and None otherwise
        """
        # check if any of our ignore paths match the given path
        for match_path in match_paths:
            # if the path is indicated as a parent via "/**" at the end
            if match_path.endswith("/**"):
                # check if the path is under the parent
                if path.startswith(match_path[:-3]):
                    return match_path
            # otherwise check for simple (non-recursive glob matching)
            else:
                path_object = Path(path)
                if path_object.match(match_path):
                    return match_path
        # if no match - this path shouldn't be ignored
        return None
The provided code snippet includes necessary dependencies for implementing the `pubsub_topics_from_directories` function. Write a Python function `def pubsub_topics_from_directories(dirs: List[str]) -> List[str]` to solve the following problem:
converts a list of directories on the policy repository that the client wants to subscribe to into a list of topics. this method also ensures the client only subscribes to non-intersecting directories by deduplicating directories that are descendants of one another.
Here is the function:
def pubsub_topics_from_directories(dirs: List[str]) -> List[str]:
    """Translate subscribed policy-repo directories into pub/sub topics.

    Directories nested inside other requested directories are dropped first
    (a parent subscription already covers them), and the remaining
    non-intersecting set is converted into topic strings.
    """
    requested = [Path(directory) for directory in dirs]
    distinct_dirs = PathUtils.non_intersecting_directories(requested)
    return policy_topics(distinct_dirs)
156,183 | import logging
import sys
from loguru import logger
from .config import opal_common_config
from .logging.filter import ModuleFilter
from .logging.formatter import Formatter
from .logging.intercept import InterceptHandler
from .logging.thirdparty import hijack_uvicorn_logs
from .monitoring.apm import fix_ddtrace_logging
opal_common_config = OpalCommonConfig(prefix="OPAL_")
class ModuleFilter:
    """Log-record filter that accepts/rejects records by module-name prefix.

    ``exclude_list`` holds module-name prefixes to reject; ``include_list``
    holds prefixes to accept even when covered by a broader excluded prefix.

    Usage:
        ModuleFilter(["uvicorn"])  # drop everything whose module starts with "uvicorn"
        ModuleFilter(["uvicorn"], ["uvicorn.access"])  # drop "uvicorn*" except "uvicorn.access*"
    """

    def __init__(
        self, exclude_list: List[str] = None, include_list: List[str] = None
    ) -> None:
        self._exclude_list = exclude_list or []
        self._include_list = include_list or []

    def filter(self, record):
        """Return True when the record should be kept."""
        module_name: str = record["name"]
        # explicit includes win over any exclusion
        if any(module_name.startswith(prefix) for prefix in self._include_list):
            return True
        if any(module_name.startswith(prefix) for prefix in self._exclude_list):
            return False
        return True
class Formatter:
    """Log format provider that also bounds overly long record fields."""

    MAX_FIELD_LEN = 25

    def __init__(self, format_string: str):
        self.fmt = format_string

    def limit_len(self, record, field, length=MAX_FIELD_LEN):
        """Shorten record[field] in place so it fits within ``length`` chars.

        Dotted names are abbreviated to "first...last" before falling back
        to plain truncation with a trailing ellipsis.
        """
        value = record[field]
        if len(value) > length:
            segments = value.split(".")
            if len(segments) > 2:
                value = f"{segments[0]}...{segments[-1]}"
            if len(value) > length:
                value = f"{value[:length - 3]}..."
        record[field] = value

    def format(self, record):
        # keep the (potentially long) logger name bounded before rendering
        self.limit_len(record, "name", 40)
        return self.fmt
class InterceptHandler(logging.Handler):
    """stdlib logging handler that forwards every record to loguru.

    Installed on stdlib loggers so that all standard logging output is
    rendered through the single loguru pipeline.
    """

    def emit(self, record):
        # Get corresponding Loguru level if it exists
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            # unknown/custom level name -> fall back to the numeric level
            level = record.levelno

        # Find caller from where originated the logged message: walk back
        # past frames belonging to the logging module so loguru reports the
        # real call site.  Starting depth of 2 follows the standard loguru
        # interception recipe (skips this emit() plus one internal hop).
        frame, depth = logging.currentframe(), 2
        while frame.f_code.co_filename == logging.__file__:
            if frame.f_back is None:
                break
            frame = frame.f_back
            depth += 1

        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )
def hijack_uvicorn_logs(intercept_handler: logging.Handler):
    """Route all uvicorn loggers through the given intercept handler.

    Uvicorn configures its own handlers, and its loggers propagate to the
    root logger by default -- combined with a root-level InterceptHandler
    this duplicates every message (and "uvicorn.error" confusingly carries
    non-error messages too).  For each uvicorn logger we therefore:
    1) replace all existing handlers with ``intercept_handler`` (loguru), and
    2) disable propagation, so the root logger does not log the message again.
    """
    # pull the logger names straight from uvicorn's own config - if uvicorn
    # ever changes them, we will know.
    from uvicorn.config import LOGGING_CONFIG

    configured_names = list(LOGGING_CONFIG.get("loggers", {}).keys())
    if not configured_names:
        configured_names = ["uvicorn", "uvicorn.access", "uvicorn.error"]

    for name in configured_names:
        uvicorn_logger = logging.getLogger(name)
        uvicorn_logger.handlers = [intercept_handler]
        uvicorn_logger.propagate = False
def fix_ddtrace_logging():
    """Silence the ddtrace logger: raise its level to WARNING and strip all
    of its handlers so its output does not bypass our logging setup."""
    ddtrace_logger = logging.getLogger("ddtrace")
    ddtrace_logger.setLevel(logging.WARNING)
    # Iterate over a COPY of the handler list: removeHandler() mutates
    # logger.handlers, and removing while iterating the live list skips
    # every other handler, leaving some attached.
    for handler in list(ddtrace_logger.handlers):
        ddtrace_logger.removeHandler(handler)
The provided code snippet includes necessary dependencies for implementing the `configure_logs` function. Write a Python function `def configure_logs()` to solve the following problem:
Takeover process logs and create a logger with Loguru according to the configuration.
Here is the function:
def configure_logs():
    """Takeover process logs and create a logger with Loguru according to the
    configuration.

    Order matters here: stdlib logging is intercepted first, then loguru's
    default sink is removed and replaced with sinks built from config
    (stderr, and optionally a log file).
    """
    fix_ddtrace_logging()
    intercept_handler = InterceptHandler()
    formatter = Formatter(opal_common_config.LOG_FORMAT)
    # NOTE(review): the local name `filter` shadows the builtin; harmless here
    filter = ModuleFilter(
        include_list=opal_common_config.LOG_MODULE_INCLUDE_LIST,
        exclude_list=opal_common_config.LOG_MODULE_EXCLUDE_LIST,
    )
    # route ALL stdlib logging through the intercept handler (level 0 = everything)
    logging.basicConfig(handlers=[intercept_handler], level=0, force=True)
    if opal_common_config.LOG_PATCH_UVICORN_LOGS:
        # Monkey patch UVICORN to use our logger
        hijack_uvicorn_logs(intercept_handler)
    # Clean slate
    logger.remove()
    # Logger configuration
    logger.add(
        sys.stderr,
        filter=filter.filter,
        format=formatter.format,
        level=opal_common_config.LOG_LEVEL,
        backtrace=opal_common_config.LOG_TRACEBACK,
        diagnose=opal_common_config.LOG_DIAGNOSE,
        colorize=opal_common_config.LOG_COLORIZE,
        serialize=opal_common_config.LOG_SERIALIZE,
    )
    # log to a file
    if opal_common_config.LOG_TO_FILE:
        logger.add(
            opal_common_config.LOG_FILE_PATH,
            compression=opal_common_config.LOG_FILE_COMPRESSION,
            retention=opal_common_config.LOG_FILE_RETENTION,
            rotation=opal_common_config.LOG_FILE_ROTATION,
            serialize=opal_common_config.LOG_FILE_SERIALIZE,
            level=opal_common_config.LOG_FILE_LEVEL,
        )
156,184 | import logging
import sys
from loguru import logger
from .config import opal_common_config
from .logging.filter import ModuleFilter
from .logging.formatter import Formatter
from .logging.intercept import InterceptHandler
from .logging.thirdparty import hijack_uvicorn_logs
from .monitoring.apm import fix_ddtrace_logging
The provided code snippet includes necessary dependencies for implementing the `get_logger` function. Write a Python function `def get_logger(name="")` to solve the following problem:
backward compatibility to old get_logger.
Here is the function:
def get_logger(name=""):
    """Backward-compatible shim for the legacy ``get_logger`` API.

    The ``name`` argument is accepted but ignored: loguru exposes a single
    global logger object, which is returned for every caller.
    """
    return logger
156,185 | import collections
import glob
import inspect
import os
import sys
class PyFrame(object):
    """Context manager that hands the caller its own stack frame.

    ``with PyFrame() as frame:`` binds ``frame`` to the frame of the code
    that constructed the PyFrame.  On exit the stored frame reference is
    deleted promptly to break the frame/locals reference cycle.
    """

    def __init__(self):
        # f_back of __init__'s own frame == the instantiation site
        self._frame = inspect.currentframe().f_back

    def __enter__(self):
        return self._frame

    def __exit__(self, exc_type, exc_value, traceback):
        del self._frame
The provided code snippet includes necessary dependencies for implementing the `get_caller_module` function. Write a Python function `def get_caller_module(depth=0)` to solve the following problem:
:param depth: stack depth of the caller. 0 == yourself, 1 == your parent :return: the module object of the caller function (in set stack depth)
Here is the function:
def get_caller_module(depth=0):
    """Return the module object of the caller at the given stack depth.

    :param depth: stack depth of the caller. 0 == the direct caller,
        1 == the caller's caller, and so on.
    :return: the module object resolved via sys.modules for that frame
    """
    with PyFrame() as frame:
        for _ in range(depth):
            frame = frame.f_back
        return sys.modules[frame.f_globals["__name__"]]
156,186 | import collections
import glob
import inspect
import os
import sys
class PyFrame(object):
    """Context manager that hands the caller its own stack frame.

    ``with PyFrame() as frame:`` binds ``frame`` to the frame of the code
    that constructed the PyFrame.  On exit the stored frame reference is
    deleted promptly to break the frame/locals reference cycle.
    """

    def __init__(self):
        # f_back of __init__'s own frame == the instantiation site
        self._frame = inspect.currentframe().f_back

    def __enter__(self):
        return self._frame

    def __exit__(self, exc_type, exc_value, traceback):
        del self._frame
def co_to_dict(co):
    """Snapshot the interesting attributes of a code object into a plain dict.

    Keys mirror the code-object attribute names (co_argcount, co_name, ...).
    """
    fields = (
        "co_argcount",
        "co_nlocals",
        "co_stacksize",
        "co_flags",
        "co_consts",
        "co_names",
        "co_varnames",
        "co_filename",
        "co_name",
        "co_firstlineno",
        "co_lnotab",
    )
    return {field: getattr(co, field) for field in fields}
The provided code snippet includes necessary dependencies for implementing the `get_caller` function. Write a Python function `def get_caller(depth=0)` to solve the following problem:
:param depth: stack depth of the caller. 0 == yourself, 1 == your parent :return: the frame object of the caller function (in set stack depth)
Here is the function:
def get_caller(depth=0):
    """Describe the code object of the caller at the given stack depth.

    :param depth: stack depth of the caller. 0 == the direct caller,
        1 == the caller's caller, and so on.
    :return: a dict of code-object attributes (see co_to_dict) for that frame
    """
    with PyFrame() as frame:
        for _ in range(depth):
            frame = frame.f_back
        return co_to_dict(frame.f_code)
156,187 | import collections
import glob
import inspect
import os
import sys
class ObjectUtils(object):
    """Reflection helpers for inspecting objects and modules."""

    # NOTE(review): these methods take no `self` (and
    # get_class_members_who_derive_of receives `cls` explicitly) -- they are
    # presumably @staticmethod/@classmethod in the original module and the
    # decorators were lost in extraction; confirm before use.

    def is_derived_of(obj, possible_parent_class):
        # True when obj is a class whose MRO contains possible_parent_class
        if hasattr(obj, "__bases__"):
            return possible_parent_class in inspect.getmro(obj)
        else:
            return False

    def get_properties(obj):
        # all non-callable, non-dunder members of obj, as a name -> value dict
        def filter(x):
            # NOTE(review): collections.Callable moved to collections.abc in
            # py3.3 and was removed from the collections namespace in py3.10
            # -- this line fails on modern Python; confirm intended target.
            return not isinstance(x, collections.Callable)

        return {
            k: v for k, v in inspect.getmembers(obj, filter) if not k.startswith("__")
        }

    def get_members_who_are_instance_of(obj, class_type):
        # (name, member) pairs for members that are instances of class_type
        def filter(x):
            return isinstance(x, class_type)

        return inspect.getmembers(obj, filter)

    def get_class_members_who_derive_of(cls, obj, parent_class):
        # (name, member) pairs for classes in obj that strictly derive from
        # parent_class (parent_class itself is excluded: its MRO index is 0)
        def filter(x):
            return (
                inspect.isclass(x)
                and cls.is_derived_of(x, parent_class)
                and list(inspect.getmro(x)).index(parent_class) != 0
            )

        return inspect.getmembers(obj, filter)
class Emport(object):
    """A filtered view over an imported module.

    Wraps the original module and exposes a chosen subset of its members as
    attributes, while remembering both the wrapped module and the ordered
    member list.
    """

    def __init__(self, module, members):
        self.__original__ = module
        self._members = []
        for member_name, member_value in members:
            self._members.append(member_value)
            setattr(self, member_name, member_value)

    def get_original_module(self):
        """The unfiltered module object this Emport wraps."""
        return self.__original__

    def get_members_list(self):
        """The filtered members, in registration order."""
        return self._members

    def get_flat_list(self):
        """
        :return: all the members of this Emport (and submodules) as one list
        """
        flattened = []
        for member in self._members:
            # nested Emports (sub-modules) contribute their own flat lists
            if isinstance(member, Emport):
                flattened.extend(member.get_flat_list())
            else:
                flattened.append(member)
        return flattened

    def __repr__(self):
        return "EMPORT - %s" % self.__original__
The provided code snippet includes necessary dependencies for implementing the `emport_by_class` function. Write a Python function `def emport_by_class(from_path, cls, import_items=None)` to solve the following problem:
Wrap __import__ to import modules and filter only classes deriving from the given cls. :param from_path: dot separated package path :param cls: class to filter import contents by :param import_items: the items to import from the package path (can also be ['*']) :return: an Emport object with contents filtered according to given cls
Here is the function:
def emport_by_class(from_path, cls, import_items=None):
    """Import a module and keep only the classes that derive from ``cls``.

    :param from_path: dot separated package path
    :param cls: base class used to filter the imported contents
    :param import_items: names to import from the package path (default ['*'])
    :return: an Emport wrapping the module, filtered according to ``cls``
    """
    module_obj = __import__(
        from_path, globals(), locals(), import_items or ["*"], 0
    )
    kept = ObjectUtils.get_class_members_who_derive_of(module_obj, cls)
    # scan sub-modules too, wrapping any that contain matching classes
    submodules = ObjectUtils.get_members_who_are_instance_of(
        module_obj, module_obj.__class__
    )
    for sub_name, sub_module in submodules:
        sub_matches = ObjectUtils.get_class_members_who_derive_of(sub_module, cls)
        # keep only sub-modules that actually contributed members
        if sub_matches:
            kept.append((sub_name, Emport(sub_module, sub_matches)))
    return Emport(module_obj, kept)
156,188 | import collections
import glob
import inspect
import os
import sys
class ObjectUtils(object):
    """Reflection helpers for inspecting objects and modules."""

    # NOTE(review): these methods take no `self` (and
    # get_class_members_who_derive_of receives `cls` explicitly) -- they are
    # presumably @staticmethod/@classmethod in the original module and the
    # decorators were lost in extraction; confirm before use.

    def is_derived_of(obj, possible_parent_class):
        # True when obj is a class whose MRO contains possible_parent_class
        if hasattr(obj, "__bases__"):
            return possible_parent_class in inspect.getmro(obj)
        else:
            return False

    def get_properties(obj):
        # all non-callable, non-dunder members of obj, as a name -> value dict
        def filter(x):
            # NOTE(review): collections.Callable moved to collections.abc in
            # py3.3 and was removed from the collections namespace in py3.10
            # -- this line fails on modern Python; confirm intended target.
            return not isinstance(x, collections.Callable)

        return {
            k: v for k, v in inspect.getmembers(obj, filter) if not k.startswith("__")
        }

    def get_members_who_are_instance_of(obj, class_type):
        # (name, member) pairs for members that are instances of class_type
        def filter(x):
            return isinstance(x, class_type)

        return inspect.getmembers(obj, filter)

    def get_class_members_who_derive_of(cls, obj, parent_class):
        # (name, member) pairs for classes in obj that strictly derive from
        # parent_class (parent_class itself is excluded: its MRO index is 0)
        def filter(x):
            return (
                inspect.isclass(x)
                and cls.is_derived_of(x, parent_class)
                and list(inspect.getmro(x)).index(parent_class) != 0
            )

        return inspect.getmembers(obj, filter)
The provided code snippet includes necessary dependencies for implementing the `emport_objects_by_class` function. Write a Python function `def emport_objects_by_class(from_path, cls, import_items=None)` to solve the following problem:
Wrap __import__ to import modules and filter only classes deriving from the given cls. Return a flat list of objects without the modules themselves. :param from_path: dot separated package path :param cls: class to filter import contents by :param import_items: the items to import from the package path (can also be ['*']) :return: a flat list of (name, class) pairs filtered according to given cls
Here is the function:
def emport_objects_by_class(from_path, cls, import_items=None):
    """Import a module and return a flat (name, class) list filtered by ``cls``.

    Unlike emport_by_class, sub-modules are not wrapped: matching classes
    from the module itself and from its immediate sub-modules are collected
    into one flat list.

    :param from_path: dot separated package path
    :param cls: base class used to filter the imported contents
    :param import_items: names to import from the package path (default ['*'])
    """
    module_obj = __import__(
        from_path, globals(), locals(), import_items or ["*"], 0
    )
    # direct members of the module itself
    matches = list(ObjectUtils.get_class_members_who_derive_of(module_obj, cls))
    # plus members found in immediate sub-modules
    for _, sub_module in ObjectUtils.get_members_who_are_instance_of(
        module_obj, module_obj.__class__
    ):
        matches.extend(ObjectUtils.get_class_members_who_derive_of(sub_module, cls))
    return matches
156,189 | import collections
import glob
import inspect
import os
import sys
The provided code snippet includes necessary dependencies for implementing the `dynamic_all` function. Write a Python function `def dynamic_all(init_file_path)` to solve the following problem:
return a list of all the py files in a dir usage (in __init__.py file) : from emport import dynamic_all __all__ = dynamic_all(__file__)
Here is the function:
def dynamic_all(init_file_path):
    """List the module names of all py files next to the given __init__ file.

    Usage (inside an ``__init__.py``)::

        from emport import dynamic_all
        __all__ = dynamic_all(__file__)

    Files whose names start with an underscore (including __init__ itself)
    are skipped; both .py sources and other .py* artifacts are considered.
    """
    package_dir = os.path.dirname(init_file_path)
    discovered = set()
    for module_path in glob.glob(os.path.join(package_dir, "*.py*")):
        module_name = os.path.splitext(os.path.basename(module_path))[0]
        if os.path.isfile(module_path) and not module_name.startswith("_"):
            discovered.add(module_name)
    return list(discovered)
156,190 | import os
from types import SimpleNamespace
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
def get_package_metadata():
    """Execute ../__packaging__.py (relative to this setup file) and expose
    its globals as attributes on a SimpleNamespace."""
    namespace = {}
    packaging_path = os.path.join(here, "../__packaging__.py")
    with open(packaging_path) as f:
        exec(f.read(), namespace)
    return SimpleNamespace(**namespace)
156,194 | import os
import sys
import typer
from click.core import Context
from fastapi.applications import FastAPI
from typer.main import Typer
from opal_common.cli.docs import MainTexts
from opal_common.cli.typer_app import get_typer_app
from opal_common.config import opal_common_config
from opal_common.corn_utils import run_gunicorn, run_uvicorn
from opal_server.config import opal_server_config
app = get_typer_app()
def run_gunicorn(app, number_of_workers=None, host=None, port=None, **kwargs):
    """Serve the given app with gunicorn using the uvicorn worker class.

    ``kwargs`` are forwarded verbatim as extra gunicorn settings and may
    override the computed defaults below.
    """
    bind_address = "{}:{}".format(host or "127.0.0.1", port or "8080")
    options = {
        "bind": bind_address,
        "workers": number_of_workers or calc_default_number_of_workers(),
        "worker_class": "uvicorn.workers.UvicornWorker",
    }
    options.update(kwargs)
    GunicornApp(app, options).run()
def run_uvicorn(
app_path, number_of_workers=None, host=None, port=None, reload=False, **kwargs
):
options = {
"host": host or "127.0.0.1",
"port": port or "8080",
"reload": reload,
"workers": number_of_workers or calc_default_number_of_workers(),
}
options.update(kwargs)
import uvicorn
uvicorn.run(app_path, **options)
opal_server_config = OpalServerConfig(prefix="OPAL_")
app = create_app()
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(engine_type: str = typer.Option("uvicron", help="uvicorn or gunicorn"))` to solve the following problem:
Run the server as a daemon.
Here is the function:
def run(engine_type: str = typer.Option("uvicron", help="uvicorn or gunicorn")):
"""Run the server as a daemon."""
typer.echo(f"-- Starting OPAL Server (with {engine_type}) --")
if engine_type == "gunicorn":
app: FastAPI
from opal_server.main import app
run_gunicorn(
app,
opal_server_config.SERVER_WORKER_COUNT,
host=opal_server_config.SERVER_HOST,
port=opal_server_config.SERVER_BIND_PORT,
)
else:
run_uvicorn(
"opal_server.main:app",
workers=opal_server_config.SERVER_WORKER_COUNT,
host=opal_server_config.SERVER_HOST,
port=opal_server_config.SERVER_BIND_PORT,
) | Run the server as a daemon. |
156,195 | import os
import sys
import typer
from click.core import Context
from fastapi.applications import FastAPI
from typer.main import Typer
from opal_common.cli.docs import MainTexts
from opal_common.cli.typer_app import get_typer_app
from opal_common.config import opal_common_config
from opal_common.corn_utils import run_gunicorn, run_uvicorn
from opal_server.config import opal_server_config
opal_common_config = OpalCommonConfig(prefix="OPAL_")
opal_server_config = OpalServerConfig(prefix="OPAL_")
The provided code snippet includes necessary dependencies for implementing the `print_config` function. Write a Python function `def print_config()` to solve the following problem:
To test config values, print the configuration parsed from ENV and CMD.
Here is the function:
def print_config():
"""To test config values, print the configuration parsed from ENV and
CMD."""
typer.echo("Printing configuration values")
typer.echo(str(opal_server_config))
typer.echo(str(opal_common_config)) | To test config values, print the configuration parsed from ENV and CMD. |
156,196 | import os
import sys
import typer
from click.core import Context
from fastapi.applications import FastAPI
from typer.main import Typer
from opal_common.cli.docs import MainTexts
from opal_common.cli.typer_app import get_typer_app
from opal_common.config import opal_common_config
from opal_common.corn_utils import run_gunicorn, run_uvicorn
from opal_server.config import opal_server_config
app = get_typer_app()
class MainTexts:
def __init__(self, first_line, name):
self.header = f"""\b
{first_line}
Open-Policy Administration Layer - {name}\b\f"""
self.docs = f"""\b
Config top level options:
- Use env-vars (same as cmd options) but uppercase
and with "_" instead of "-"; all prefixed with "OPAL_"
- Use command line options as detailed by '--help'
- Use .env or .ini files
\b
Examples:
- opal-{name} --help Detailed help on CLI
- opal-{name} run --help Help on run command
- opal-{name} run --engine-type gunicorn Run {name} with gunicorn
\b
"""
opal_common_config = OpalCommonConfig(prefix="OPAL_")
opal_server_config = OpalServerConfig(prefix="OPAL_")
app = create_app()
def cli():
main_texts = MainTexts("💎 OPAL-SERVER 💎", "server")
def on_start(ctx: Context, **kwargs):
if ctx.invoked_subcommand is None or ctx.invoked_subcommand == "run":
typer.secho(main_texts.header, bold=True, fg=typer.colors.MAGENTA)
if ctx.invoked_subcommand is None:
typer.echo(ctx.get_usage())
typer.echo(main_texts.docs)
opal_server_config.cli(
[opal_common_config], typer_app=app, help=main_texts.docs, on_start=on_start
) | null |
156,197 | from functools import partial
from typing import Any, List, Optional
from fastapi_websocket_pubsub.pub_sub_server import PubSubEndpoint
from opal_common.confi.confi import load_conf_if_none
from opal_common.git.repo_cloner import RepoClonePathFinder
from opal_common.logger import logger
from opal_common.sources.api_policy_source import ApiPolicySource
from opal_common.sources.git_policy_source import GitPolicySource
from opal_common.topics.publisher import TopicPublisher
from opal_server.config import PolicySourceTypes, opal_server_config
from opal_server.policy.watcher.callbacks import publish_changed_directories
from opal_server.policy.watcher.task import BasePolicyWatcherTask, PolicyWatcherTask
from opal_server.scopes.task import ScopesPolicyWatcherTask
def load_conf_if_none(variable, conf):
if variable is None:
return conf
else:
return variable
class RepoClonePathFinder:
"""
We are cloning the policy repo into a unique random subdirectory of a base path.
Args:
base_clone_path (str): parent directory for the repoistory clone
clone_subdirectory_prefix (str): the prefix for the randomized repository dir, or the dir name itself when `use_fixes_path=true`
use_fixed_path (bool): if set, random suffix won't be added to `clone_subdirectory_prefix` (if the path already exists, it would be reused)
This class knows how to such clones, so we can discard previous ones, but also so
that siblings workers (who are not the master who decided where to clone) can also
find the current clone by globing the base dir.
"""
def __init__(
self, base_clone_path: str, clone_subdirectory_prefix: str, use_fixed_path: bool
):
if not base_clone_path:
raise ValueError("base_clone_path cannot be empty!")
if not clone_subdirectory_prefix:
raise ValueError("clone_subdirectory_prefix cannot be empty!")
self._base_clone_path = os.path.expanduser(base_clone_path)
self._clone_subdirectory_prefix = clone_subdirectory_prefix
self._use_fixed_path = use_fixed_path
def _get_randomized_clone_subdirectories(self) -> Generator[str, None, None]:
"""a generator yielding all the randomized subdirectories of the base
clone path that are matching the clone pattern.
Yields:
the next subdirectory matching the pattern
"""
folders_with_pattern = get_filepaths_with_glob(
self._base_clone_path, f"{self._clone_subdirectory_prefix}-*"
)
for folder in folders_with_pattern:
yield folder
def _get_single_existing_random_clone_path(self) -> Optional[str]:
"""searches for the single randomly-suffixed clone subdirectory in
existence.
If found no such subdirectory or if found more than one (multiple matching subdirectories) - will return None.
otherwise: will return the single and only clone.
"""
subdirectories = list(self._get_randomized_clone_subdirectories())
if len(subdirectories) != 1:
return None
return subdirectories[0]
def _generate_randomized_clone_path(self) -> str:
folder_name = f"{self._clone_subdirectory_prefix}-{uuid.uuid4().hex}"
full_local_repo_path = os.path.join(self._base_clone_path, folder_name)
return full_local_repo_path
def _get_fixed_clone_path(self) -> str:
return os.path.join(self._base_clone_path, self._clone_subdirectory_prefix)
def get_clone_path(self) -> Optional[str]:
"""Get the clone path (fixed or randomized) if it exists."""
if self._use_fixed_path:
fixed_path = self._get_fixed_clone_path()
if os.path.exists(fixed_path):
return fixed_path
else:
return None
else:
return self._get_single_existing_random_clone_path()
def create_new_clone_path(self) -> str:
"""
If using a fixed path - simply creates it.
If using a randomized suffix -
takes the base path from server config and create new folder with unique name for the local clone.
The folder name is looks like /<base-path>/<folder-prefix>-<uuid>
If such folders already exist they would be removed.
"""
if self._use_fixed_path:
# When using fixed path - just use old path without cleanup
full_local_repo_path = self._get_fixed_clone_path()
else:
# Remove old randomized subdirectories
for folder in self._get_randomized_clone_subdirectories():
logger.warning(
"Found previous policy repo clone: {folder_name}, removing it to avoid conflicts.",
folder_name=folder,
)
shutil.rmtree(folder)
full_local_repo_path = self._generate_randomized_clone_path()
os.makedirs(full_local_repo_path, exist_ok=True)
return full_local_repo_path
class ApiPolicySource(BasePolicySource):
"""Watches an OPA-like bundle server for changes and can trigger callbacks
when detecting a new bundle.
Checking for changes is done by sending an HTTP GET request to the remote bundle server.
OPAL will check for changes either when triggered a webhook or periodically if configured
to run a polling task.
You can read more on OPA bundles here:
https://www.openpolicyagent.org/docs/latest/management-bundles/
Args:
remote_source_url(str): the base address to request the policy from
local_clone_path(str): path for the local git to manage policies
polling_interval(int): how many seconds need to wait between polling
token (str, optional): auth token to include in connections to bundle server. Defaults to POLICY_BUNDLE_SERVER_TOKEN.
token_id (str, optional): auth token ID to include in connections to bundle server. Defaults to POLICY_BUNDLE_SERVER_TOKEN_ID.
bundle_server_type (PolicyBundleServerType, optional): the type of bundle server
"""
def __init__(
self,
remote_source_url: str,
local_clone_path: str,
polling_interval: int = 0,
token: Optional[str] = None,
token_id: Optional[str] = None,
bundle_server_type: Optional[PolicyBundleServerType] = None,
policy_bundle_path=".",
policy_bundle_git_add_pattern="*",
):
super().__init__(
remote_source_url=remote_source_url,
local_clone_path=local_clone_path,
polling_interval=polling_interval,
)
self.token = token
self.token_id = token_id
self.server_type = bundle_server_type
self.bundle_hash = None
self.etag = None
self.tmp_bundle_path = Path(policy_bundle_path)
self.policy_bundle_git_add_pattern = policy_bundle_git_add_pattern
self.tar_to_git = TarFileToLocalGitExtractor(
self.local_clone_path,
self.tmp_bundle_path,
self.policy_bundle_git_add_pattern,
)
async def get_initial_policy_state_from_remote(self):
"""init remote data to local repo."""
async for attempt in AsyncRetrying(wait=wait_fixed(5)):
with attempt:
try:
await self.fetch_policy_bundle_from_api_source(
self.remote_source_url, self.token
)
self.local_git = self.tar_to_git.create_local_git()
except Exception:
logger.exception(
"Failed to load initial policy from remote API bundle server"
)
raise
async def api_update_policy(self) -> Tuple[bool, str, str]:
async for attempt in AsyncRetrying(wait=wait_fixed(5)):
with attempt:
try:
(
tmp_bundle_path,
prev_version,
current_hash,
) = await self.fetch_policy_bundle_from_api_source(
self.remote_source_url, self.token
)
if tmp_bundle_path and prev_version and current_hash:
commit_msg = f"new version {current_hash}"
(
self.local_git,
prev_commit,
new_commit,
) = self.tar_to_git.extract_bundle_to_local_git(
commit_msg=commit_msg
)
return (
True,
prev_version,
current_hash,
prev_commit,
new_commit,
)
else:
return False, None, current_hash, None, None
except Exception as e:
logger.exception(
f"Failed to update policy from remote API bundle server"
)
raise
def build_auth_headers(self, token=None, path=None):
# if it's a simple HTTP server with a bearer token
if self.server_type == PolicyBundleServerType.HTTP and token is not None:
return tuple_to_dict(get_authorization_header(token))
# if it's an AWS s3 server and we have the token and it's id -
elif (
self.server_type == PolicyBundleServerType.AWS_S3
and token is not None
and self.token_id is not None
):
split_url = urlparse(self.remote_source_url)
host = split_url.netloc
path = split_url.path + "/" + path
return build_aws_rest_auth_headers(self.token_id, token, host, path)
else:
return {}
async def fetch_policy_bundle_from_api_source(
self, url: str, token: Optional[str]
) -> Tuple[Path, BundleHash, BundleHash]:
"""Fetches the bundle. May throw, in which case we retry again. Checks
that the bundle file isn't the same with Etag, if server doesn't have
Etag it checks it with hash on the bundle file.
Read more on Etag here:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
Args:
url(str): the base address to request the bundle.tar.gz file from
token (str, optional): Auth token to include in connections to OPAL server. Defaults to POLICY_BUNDLE_SERVER_TOKEN.
Returns:
Path: path to the bundle file that we just downloaded from the remote API source
BundleHash: previous bundle hash on None if this is the initial bundle file
BundleHash: current bundle hash
"""
path = "bundle.tar.gz"
auth_headers = self.build_auth_headers(token=token, path=path)
etag_headers = (
{"ETag": self.etag, "If-None-Match": self.etag} if self.etag else {}
)
full_url = f"{url}/{path}"
async with aiohttp.ClientSession() as session:
try:
async with session.get(
f"{full_url}",
headers={
"content-type": "application/gzip",
**auth_headers,
**etag_headers,
},
) as response:
if response.status == status.HTTP_404_NOT_FOUND:
logger.warning(
"requested url not found: {full_url}",
full_url=full_url,
)
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"requested url not found: {full_url}",
)
if response.status == status.HTTP_304_NOT_MODIFIED:
logger.info(
"Not modified at: {now}",
now=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
)
return False, None, self.etag
# may throw ValueError
await throw_if_bad_status_code(
response, expected=[status.HTTP_200_OK], logger=logger
)
current_etag = response.headers.get("ETag", None)
response_bytes = await response.read()
tmp_file_path = self.tmp_bundle_path
with open(tmp_file_path, "wb") as file:
file.write(response_bytes)
if not current_etag:
logger.info(
"Etag is turned off, you may want to turn it on at your bundle server"
)
current_bundle_hash = hash_file(tmp_file_path)
logger.info("Bundle hash is {hash}", hash=current_bundle_hash)
if self.bundle_hash == current_bundle_hash:
logger.info(
"No new bundle, hash is: {hash}",
hash=current_bundle_hash,
)
return False, None, current_bundle_hash
else:
logger.info(
"New bundle found, hash is: {hash}",
hash=current_bundle_hash,
)
prev_bundle_hash = self.bundle_hash
self.bundle_hash = current_bundle_hash
return (
tmp_file_path,
prev_bundle_hash,
current_bundle_hash,
)
else:
if (
self.etag == current_etag
): # validate against bad etag implementation
logger.info(
"No new bundle, hash is: {hash}",
hash=current_etag,
)
return False, None, current_etag
prev_etag = self.etag
self.etag = current_etag
return tmp_file_path, prev_etag, current_etag
except (aiohttp.ClientError, HTTPException) as e:
logger.warning("server connection error: {err}", err=repr(e))
raise
except Exception as e:
logger.error("unexpected server connection error: {err}", err=repr(e))
raise
async def check_for_changes(self):
"""Calling this method will trigger an api check to the remote.
If after the request the watcher detects new bundle, it will
call the callbacks registered with _on_new_policy().
"""
logger.info(
"Fetching changes from remote: '{remote}'",
remote=self.remote_source_url,
)
(
has_changes,
prev,
latest,
prev_commit,
new_commit,
) = await self.api_update_policy()
if not has_changes:
logger.info("No new version: current hash is: {head}", head=latest)
else:
logger.info(
"Found new version: old version hash was '{prev_head}', new version hash is '{new_head}'",
prev_head=prev,
new_head=latest,
)
await self._on_new_policy(old=prev_commit, new=new_commit)
class GitPolicySource(BasePolicySource):
"""Watches a git repository for changes and can trigger callbacks when
detecting new commits on the tracked branch.
Checking for changes is done following a git pull from a tracked
remote. The pull can be either triggered by a method (i.e: you can
call it from a webhook) or can be triggered periodically by a polling
task.
Args:
remote_source_url(str): the base address to request the policy from
local_clone_path(str): path for the local git to manage policies
branch_name(str): name of remote branch in git to pull, default to master
ssh_key (str, optional): private ssh key used to gain access to the cloned repo
polling_interval(int): how many seconds need to wait between polling
request_timeout(int): how many seconds need to wait until timeout
"""
def __init__(
self,
remote_source_url: str,
local_clone_path: str,
branch_name: str = "master",
ssh_key: Optional[str] = None,
polling_interval: int = 0,
request_timeout: int = 0,
):
super().__init__(
remote_source_url=remote_source_url,
local_clone_path=local_clone_path,
polling_interval=polling_interval,
)
self._ssh_key = ssh_key
self._cloner = RepoCloner(
remote_source_url,
local_clone_path,
branch_name=branch_name,
ssh_key=self._ssh_key,
clone_timeout=request_timeout,
)
self._branch_name = branch_name
self._tracker = None
async def get_initial_policy_state_from_remote(self):
"""init remote data to local repo."""
try:
try:
# Check if path already contains valid repo
repo = Repo(self._cloner.path)
except:
# If it doesn't - clone it
result = await self._cloner.clone()
repo = result.repo
else:
# If it does - validate remote url is correct and checkout required branch
remote_urls = list(repo.remote().urls)
if not self._cloner.url in remote_urls:
# Don't bother with remove and reclone because this case shouldn't happen on reasobable usage
raise GitFailed(
RuntimeError(
f"Existing repo has wrong remote url: {remote_urls}"
)
)
else:
logger.info(
"SKIPPED cloning policy repo, found existing repo at '{path}' with remotes: {remote_urls})",
path=self._cloner.path,
remote_urls=remote_urls,
)
except GitFailed as e:
await self._on_git_failed(e)
return
self._tracker = BranchTracker(
repo=repo, branch_name=self._branch_name, ssh_key=self._ssh_key
)
async def check_for_changes(self):
"""Calling this method will trigger a git pull from the tracked remote.
If after the pull the watcher detects new commits, it will call
the callbacks registered with _on_new_policy().
"""
logger.info(
"Pulling changes from remote: '{remote}'",
remote=self._tracker.tracked_remote.name,
)
has_changes, prev, latest = self._tracker.pull()
if not has_changes:
logger.info("No new commits: HEAD is at '{head}'", head=latest.hexsha)
else:
logger.info(
"Found new commits: old HEAD was '{prev_head}', new HEAD is '{new_head}'",
prev_head=prev.hexsha,
new_head=latest.hexsha,
)
await self._on_new_policy(old=prev, new=latest)
class TopicPublisher:
"""abstract publisher, base class for client side and server side
publisher."""
def __init__(self):
"""inits the publisher's asyncio tasks list."""
self._tasks: Set[asyncio.Task] = set()
self._tasks_lock = asyncio.Lock()
async def publish(self, topics: TopicList, data: Any = None):
raise NotImplementedError()
async def __aenter__(self):
self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
def start(self):
"""starts the publisher."""
logger.debug("started topic publisher")
async def _add_task(self, task: asyncio.Task):
async with self._tasks_lock:
self._tasks.add(task)
task.add_done_callback(self._cleanup_task)
async def wait(self):
async with self._tasks_lock:
await asyncio.gather(*self._tasks, return_exceptions=True)
self._tasks.clear()
async def stop(self):
"""stops the publisher (cancels any running publishing tasks)"""
logger.debug("stopping topic publisher")
await self.wait()
def _cleanup_task(self, task: asyncio.Task):
try:
self._tasks.remove(task)
except KeyError:
...
class PolicySourceTypes(str, Enum):
Git = "GIT"
Api = "API"
opal_server_config = OpalServerConfig(prefix="OPAL_")
async def publish_changed_directories(
old_commit: Commit,
new_commit: Commit,
publisher: TopicPublisher,
file_extensions: Optional[List[str]] = None,
bundle_ignore: Optional[List[str]] = None,
):
"""publishes policy topics matching all relevant directories in tracked
repo, prompting the client to ask for *all* contents of these directories
(and not just diffs)."""
notification = await create_policy_update(
old_commit, new_commit, file_extensions, bundle_ignore
)
if notification:
async with publisher:
await publisher.publish(
topics=notification.topics, data=notification.update.dict()
)
class BasePolicyWatcherTask:
"""Manages the asyncio tasks of the policy watcher."""
def __init__(self, pubsub_endpoint: PubSubEndpoint):
self._tasks: List[asyncio.Task] = []
self._should_stop: Optional[asyncio.Event] = None
self._pubsub_endpoint = pubsub_endpoint
self._webhook_tasks: List[asyncio.Task] = []
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
async def _on_webhook(self, topic: Topic, data: Any):
logger.info(f"Webhook listener triggered ({len(self._webhook_tasks)})")
for task in self._webhook_tasks:
if task.done():
# Clean references to finished tasks
self._webhook_tasks.remove(task)
self._webhook_tasks.append(asyncio.create_task(self.trigger(topic, data)))
async def _listen_to_webhook_notifications(self):
# Webhook api route can be hit randomly in all workers, so it publishes a message to the webhook topic.
# This listener, running in the leader's context, would actually trigger the repo pull
async def _subscribe_internal():
logger.info(
"listening on webhook topic: '{topic}'",
topic=opal_server_config.POLICY_REPO_WEBHOOK_TOPIC,
)
await self._pubsub_endpoint.subscribe(
[opal_server_config.POLICY_REPO_WEBHOOK_TOPIC],
self._on_webhook,
)
if self._pubsub_endpoint.broadcaster is not None:
async with self._pubsub_endpoint.broadcaster.get_listening_context():
await _subscribe_internal()
await self._pubsub_endpoint.broadcaster.get_reader_task()
# Stop the watcher if broadcaster disconnects
self.signal_stop()
else:
# If no broadcaster is configured, just subscribe, no need to wait on anything
await _subscribe_internal()
async def start(self):
"""starts the policy watcher and registers a failure callback to
terminate gracefully."""
logger.info("Launching policy watcher")
self._tasks.append(asyncio.create_task(self._listen_to_webhook_notifications()))
self._init_should_stop()
async def stop(self):
"""stops all policy watcher tasks."""
logger.info("Stopping policy watcher")
for task in self._tasks + self._webhook_tasks:
if not task.done():
task.cancel()
await asyncio.gather(*self._tasks, return_exceptions=True)
async def trigger(self, topic: Topic, data: Any):
"""triggers the policy watcher from outside to check for changes (git
pull)"""
raise NotImplementedError()
def wait_until_should_stop(self) -> Coroutine:
"""waits until self.signal_stop() is called on the watcher.
allows us to keep the repo watcher context alive until signalled
to stop from outside.
"""
self._init_should_stop()
return self._should_stop.wait()
def signal_stop(self):
"""signal the repo watcher it should stop."""
self._init_should_stop()
self._should_stop.set()
def _init_should_stop(self):
if self._should_stop is None:
self._should_stop = asyncio.Event()
async def _fail(self, exc: Exception):
"""called when the watcher fails, and stops all tasks gracefully."""
logger.error("policy watcher failed with exception: {err}", err=repr(exc))
self.signal_stop()
# trigger uvicorn graceful shutdown
os.kill(os.getpid(), signal.SIGTERM)
class PolicyWatcherTask(BasePolicyWatcherTask):
def __init__(self, policy_source: BasePolicySource, *args, **kwargs):
self._watcher = policy_source
super().__init__(*args, **kwargs)
async def start(self):
await super().start()
self._watcher.add_on_failure_callback(self._fail)
self._tasks.append(asyncio.create_task(self._watcher.run()))
async def stop(self):
await self._watcher.stop()
return await super().stop()
async def trigger(self, topic: Topic, data: Any):
"""triggers the policy watcher from outside to check for changes (git
pull)"""
await self._watcher.check_for_changes()
class ScopesPolicyWatcherTask(BasePolicyWatcherTask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._service = ScopesService(
base_dir=Path(opal_server_config.BASE_DIR),
scopes=ScopeRepository(RedisDB(opal_server_config.REDIS_URL)),
pubsub_endpoint=self._pubsub_endpoint,
)
async def start(self):
await super().start()
self._tasks.append(asyncio.create_task(self._service.sync_scopes()))
if opal_server_config.POLICY_REFRESH_INTERVAL > 0:
self._tasks.append(asyncio.create_task(self._periodic_polling()))
async def stop(self):
return await super().stop()
async def _periodic_polling(self):
try:
while True:
await asyncio.sleep(opal_server_config.POLICY_REFRESH_INTERVAL)
logger.info("Periodic sync")
try:
await self._service.sync_scopes(only_poll_updates=True)
except asyncio.CancelledError:
raise
except Exception as e:
logger.exception(f"Periodic sync (sync_scopes) failed")
except asyncio.CancelledError:
logger.info("Periodic sync cancelled")
async def trigger(self, topic: Topic, data: Any):
if data is not None and isinstance(data, dict):
# Refresh single scope
try:
await self._service.sync_scope(
scope_id=data["scope_id"],
force_fetch=data.get("force_fetch", False),
hinted_hash=data.get("hinted_hash"),
req_time=datetime.datetime.now(),
)
except KeyError:
logger.warning(
"Got invalid keyword args for single scope refresh: %s", data
)
else:
# Refresh all scopes
await self._service.sync_scopes()
def preload_scopes():
"""Clone all scopes repositories as part as server startup.
This speeds up the first sync of scopes after workers are
started.
"""
if opal_server_config.SCOPES:
logger.info("Preloading repo clones for scopes")
service = ScopesService(
base_dir=Path(opal_server_config.BASE_DIR),
scopes=ScopeRepository(RedisDB(opal_server_config.REDIS_URL)),
pubsub_endpoint=None,
)
asyncio.run(service.sync_scopes(notify_on_changes=False))
logger.warning("Finished preloading repo clones for scopes.")
The provided code snippet includes necessary dependencies for implementing the `setup_watcher_task` function. Write a Python function `def setup_watcher_task( publisher: TopicPublisher, pubsub_endpoint: PubSubEndpoint, source_type: str = None, remote_source_url: str = None, clone_path_finder: RepoClonePathFinder = None, branch_name: str = None, ssh_key: Optional[str] = None, polling_interval: int = None, request_timeout: int = None, policy_bundle_token: str = None, policy_bundle_token_id: str = None, policy_bundle_server_type: str = None, extensions: Optional[List[str]] = None, bundle_ignore: Optional[List[str]] = None, ) -> BasePolicyWatcherTask` to solve the following problem:
Create a PolicyWatcherTask with Git / API policy source defined by env vars Load all the defaults from config if called without params. Args: publisher(TopicPublisher): server side publisher to publish changes in policy source_type(str): policy source type, can be Git / Api to opa bundle server remote_source_url(str): the base address to request the policy from clone_path_finder(RepoClonePathFinder): from which the local dir path for the repo clone would be retrieved branch_name(str): name of remote branch in git to pull ssh_key (str, optional): private ssh key used to gain access to the cloned repo polling_interval(int): how many seconds need to wait between polling request_timeout(int): how many seconds need to wait until timeout policy_bundle_token(int): auth token to include in connections to OPAL server. Defaults to POLICY_BUNDLE_SERVER_TOKEN. policy_bundle_token_id(int): id token to include in connections to OPAL server. Defaults to POLICY_BUNDLE_SERVER_TOKEN_ID. policy_bundle_server_type (str): type of policy bundle server (HTTP S3). Defaults to POLICY_BUNDLE_SERVER_TYPE extensions(list(str), optional): list of extantions to check when new policy arrive default is FILTER_FILE_EXTENSIONS bundle_ignore(list(str), optional): list of glob paths to use for excluding files from bundle default is OPA_BUNDLE_IGNORE
Here is the function:
def setup_watcher_task(
publisher: TopicPublisher,
pubsub_endpoint: PubSubEndpoint,
source_type: str = None,
remote_source_url: str = None,
clone_path_finder: RepoClonePathFinder = None,
branch_name: str = None,
ssh_key: Optional[str] = None,
polling_interval: int = None,
request_timeout: int = None,
policy_bundle_token: str = None,
policy_bundle_token_id: str = None,
policy_bundle_server_type: str = None,
extensions: Optional[List[str]] = None,
bundle_ignore: Optional[List[str]] = None,
) -> BasePolicyWatcherTask:
"""Create a PolicyWatcherTask with Git / API policy source defined by env
vars Load all the defaults from config if called without params.
Args:
publisher(TopicPublisher): server side publisher to publish changes in policy
source_type(str): policy source type, can be Git / Api to opa bundle server
remote_source_url(str): the base address to request the policy from
clone_path_finder(RepoClonePathFinder): from which the local dir path for the repo clone would be retrieved
branch_name(str): name of remote branch in git to pull
ssh_key (str, optional): private ssh key used to gain access to the cloned repo
polling_interval(int): how many seconds need to wait between polling
request_timeout(int): how many seconds need to wait until timeout
policy_bundle_token(int): auth token to include in connections to OPAL server. Defaults to POLICY_BUNDLE_SERVER_TOKEN.
policy_bundle_token_id(int): id token to include in connections to OPAL server. Defaults to POLICY_BUNDLE_SERVER_TOKEN_ID.
policy_bundle_server_type (str): type of policy bundle server (HTTP S3). Defaults to POLICY_BUNDLE_SERVER_TYPE
extensions(list(str), optional): list of extantions to check when new policy arrive default is FILTER_FILE_EXTENSIONS
bundle_ignore(list(str), optional): list of glob paths to use for excluding files from bundle default is OPA_BUNDLE_IGNORE
"""
if opal_server_config.SCOPES:
return ScopesPolicyWatcherTask(pubsub_endpoint)
# load defaults
source_type = load_conf_if_none(source_type, opal_server_config.POLICY_SOURCE_TYPE)
clone_path_finder = load_conf_if_none(
clone_path_finder,
RepoClonePathFinder(
base_clone_path=opal_server_config.POLICY_REPO_CLONE_PATH,
clone_subdirectory_prefix=opal_server_config.POLICY_REPO_CLONE_FOLDER_PREFIX,
use_fixed_path=opal_server_config.POLICY_REPO_REUSE_CLONE_PATH,
),
)
clone_path = (
clone_path_finder.get_clone_path() or clone_path_finder.create_new_clone_path()
)
logger.info(f"Policy repo will be cloned to: {clone_path}")
branch_name = load_conf_if_none(
branch_name, opal_server_config.POLICY_REPO_MAIN_BRANCH
)
ssh_key = load_conf_if_none(ssh_key, opal_server_config.POLICY_REPO_SSH_KEY)
polling_interval = load_conf_if_none(
polling_interval, opal_server_config.POLICY_REPO_POLLING_INTERVAL
)
request_timeout = load_conf_if_none(
request_timeout, opal_server_config.POLICY_REPO_CLONE_TIMEOUT
)
policy_bundle_token = load_conf_if_none(
policy_bundle_token, opal_server_config.POLICY_BUNDLE_SERVER_TOKEN
)
extensions = load_conf_if_none(
extensions, opal_server_config.FILTER_FILE_EXTENSIONS
)
bundle_ignore = load_conf_if_none(bundle_ignore, opal_server_config.BUNDLE_IGNORE)
if source_type == PolicySourceTypes.Git:
remote_source_url = load_conf_if_none(
remote_source_url, opal_server_config.POLICY_REPO_URL
)
if remote_source_url is None:
logger.warning(
"POLICY_REPO_URL is unset but repo watcher is enabled! disabling watcher."
)
watcher = GitPolicySource(
remote_source_url=remote_source_url,
local_clone_path=clone_path,
branch_name=branch_name,
ssh_key=ssh_key,
polling_interval=polling_interval,
request_timeout=request_timeout,
)
elif source_type == PolicySourceTypes.Api:
remote_source_url = load_conf_if_none(
remote_source_url, opal_server_config.POLICY_BUNDLE_URL
)
if remote_source_url is None:
logger.warning(
"POLICY_BUNDLE_URL is unset but policy watcher is enabled! disabling watcher."
)
policy_bundle_token_id = load_conf_if_none(
policy_bundle_token_id, opal_server_config.POLICY_BUNDLE_SERVER_TOKEN_ID
)
policy_bundle_server_type = load_conf_if_none(
policy_bundle_server_type, opal_server_config.POLICY_BUNDLE_SERVER_TYPE
)
watcher = ApiPolicySource(
remote_source_url=remote_source_url,
local_clone_path=clone_path,
polling_interval=polling_interval,
token=policy_bundle_token,
token_id=policy_bundle_token_id,
bundle_server_type=policy_bundle_server_type,
policy_bundle_path=opal_server_config.POLICY_BUNDLE_TMP_PATH,
policy_bundle_git_add_pattern=opal_server_config.POLICY_BUNDLE_GIT_ADD_PATTERN,
)
else:
raise ValueError("Unknown value for OPAL_POLICY_SOURCE_TYPE")
watcher.add_on_new_policy_callback(
partial(
publish_changed_directories,
publisher=publisher,
file_extensions=extensions,
bundle_ignore=bundle_ignore,
)
)
return PolicyWatcherTask(watcher, pubsub_endpoint) | Create a PolicyWatcherTask with Git / API policy source defined by env vars Load all the defaults from config if called without params. Args: publisher(TopicPublisher): server side publisher to publish changes in policy source_type(str): policy source type, can be Git / Api to opa bundle server remote_source_url(str): the base address to request the policy from clone_path_finder(RepoClonePathFinder): from which the local dir path for the repo clone would be retrieved branch_name(str): name of remote branch in git to pull ssh_key (str, optional): private ssh key used to gain access to the cloned repo polling_interval(int): how many seconds need to wait between polling request_timeout(int): how many seconds need to wait until timeout policy_bundle_token(int): auth token to include in connections to OPAL server. Defaults to POLICY_BUNDLE_SERVER_TOKEN. policy_bundle_token_id(int): id token to include in connections to OPAL server. Defaults to POLICY_BUNDLE_SERVER_TOKEN_ID. policy_bundle_server_type (str): type of policy bundle server (HTTP S3). Defaults to POLICY_BUNDLE_SERVER_TYPE extensions(list(str), optional): list of extantions to check when new policy arrive default is FILTER_FILE_EXTENSIONS bundle_ignore(list(str), optional): list of glob paths to use for excluding files from bundle default is OPA_BUNDLE_IGNORE |
156,198 | from typing import Callable, List
from urllib.parse import SplitResult, urlparse
from fastapi import APIRouter, Depends, Request, status
from fastapi_websocket_pubsub.pub_sub_server import PubSubEndpoint
from opal_common.authentication.deps import JWTAuthenticator
from opal_common.logger import logger
from opal_common.schemas.webhook import GitWebhookRequestParams
from opal_server.config import PolicySourceTypes, opal_server_config
from opal_server.policy.webhook.deps import (
GitChanges,
extracted_git_changes,
validate_git_secret_or_throw,
)
def get_webhook_router(
    route_dependencies: List[Depends],
    git_changes: Depends,
    source_type: PolicySourceTypes,
    publish: Callable,
    webhook_config: GitWebhookRequestParams = opal_server_config.POLICY_REPO_WEBHOOK_PARAMS,
):
    """Build the router exposing POST /webhook.

    Args:
        route_dependencies: auth dependencies guarding the route (git secret
            validation or JWT auth, chosen by the caller).
        git_changes: dependency extracting urls/branch/names from the payload.
        source_type: Git repo source or API bundle-server source.
        publish: async callable used to notify the policy watcher topic.
        webhook_config: per-provider description of where the event lives.
    """
    if webhook_config is None:
        webhook_config = opal_server_config.POLICY_REPO_WEBHOOK_PARAMS

    router = APIRouter()

    # fix: the decorator line was missing; its arguments ("/webhook",
    # status_code=..., dependencies=...) were orphaned, making the module
    # unparseable and leaving the endpoint unregistered.
    @router.post(
        "/webhook",
        status_code=status.HTTP_200_OK,
        dependencies=route_dependencies,
    )
    async def trigger_webhook(request: Request, git_changes: GitChanges = git_changes):
        # TODO: breaking change: change "repo_url" to "remote_url" in next major
        # fix: initialize `event` so the final "ignored" response can never hit
        # a NameError (previously unbound on some code paths).
        event = "unknown"
        if source_type == PolicySourceTypes.Git:
            # look at values extracted from request
            urls = git_changes.urls
            branch = git_changes.branch
            names = git_changes.names

            # Enforce branch matching (webhook to config) if turned on via config
            if (
                opal_server_config.POLICY_REPO_WEBHOOK_ENFORCE_BRANCH
                and opal_server_config.POLICY_REPO_MAIN_BRANCH != branch
            ):
                logger.warning(
                    "Git Webhook ignored - POLICY_REPO_WEBHOOK_ENFORCE_BRANCH is enabled, and POLICY_REPO_MAIN_BRANCH is `{tracking}` but received webhook for a different branch ({branch})",
                    tracking=opal_server_config.POLICY_REPO_MAIN_BRANCH,
                    branch=branch,
                )
                return None

            # parse event from header
            if webhook_config.event_header_name is not None:
                event = request.headers.get(webhook_config.event_header_name, "ping")
            # parse event from request body
            elif webhook_config.event_request_key is not None:
                payload = await request.json()
                event = payload.get(webhook_config.event_request_key, "ping")
            else:
                logger.error(
                    "Webhook config is missing both event_request_key and event_header_name. Must have at least one."
                )
                # fix: `event` was previously left unbound here, raising a
                # NameError below; degrade to a harmless "ping" instead.
                event = "ping"

            policy_repo_url = opal_server_config.POLICY_REPO_URL

            # Check if the URL we are tracking is mentioned in the webhook
            if policy_repo_url and (
                is_matching_webhook_url(policy_repo_url, urls, names)
                or not webhook_config.match_sender_url
            ):
                logger.info(
                    "triggered webhook on repo: {repo}",
                    repo=opal_server_config.POLICY_REPO_URL,
                    hook_event=event,
                )
                # Check if this it the right event (push)
                if event == webhook_config.push_event_value:
                    # notifies the webhook listener via the pubsub broadcaster
                    await publish(opal_server_config.POLICY_REPO_WEBHOOK_TOPIC)
                    return {
                        "status": "ok",
                        "event": event,
                        "repo_url": opal_server_config.POLICY_REPO_URL,
                    }
            else:
                logger.warning(
                    "Got an unexpected webhook not matching the tracked repo ({repo}) - with these URLS: {urls} and those names: {names}.",
                    repo=opal_server_config.POLICY_REPO_URL,
                    urls=urls,
                    names=names,
                    hook_event=event,
                )
        elif source_type == PolicySourceTypes.Api:
            logger.info("Triggered webhook to check API bundle URL")
            await publish(opal_server_config.POLICY_REPO_WEBHOOK_TOPIC)
            return {
                "status": "ok",
                "event": "webhook_trigger",
                "repo_url": opal_server_config.POLICY_BUNDLE_URL,
            }
        return {"status": "ignored", "event": event}

    return router
class JWTAuthenticator(_JWTAuthenticator):
    """Bearer-token authentication for HTTP(S) API endpoints.

    Responds with 401 (raised by verify_logged_in) when a valid JWT is not
    supplied in the Authorization header.
    """

    def __call__(self, authorization: Optional[str] = Header(None)) -> JWTClaims:
        # extract the bearer token, then let the verifier accept or reject it
        return verify_logged_in(self._verifier, get_token_from_header(authorization))
class PolicySourceTypes(str, Enum):
    """Supported origins for policy code: a git repository, or an HTTP
    bundle-server API."""

    Git = "GIT"
    Api = "API"
# module-level singleton: server settings loaded from OPAL_-prefixed env vars
opal_server_config = OpalServerConfig(prefix="OPAL_")
# default webhook-secret validator dependency, built from the config above
validate_git_secret_or_throw = validate_git_secret_or_throw_factory()
async def extracted_git_changes(request: Request) -> GitChanges:
    """Extract affected repo URLs, branch, and full names from a webhook payload.

    Used to make sure that the webhook was triggered on *our* monitored repo.
    Searches the common payload layouts (GitHub, GitLab, Azure DevOps,
    Bitbucket) for the places the affected URL may appear.

    Raises:
        HTTPException(400): when neither a repo url nor a full name was found.
    """
    payload = await request.json()

    ### --- Get branch --- ###
    # GitLab / GitHub style
    ref = payload.get("ref", None)

    # Azure style
    if ref is None:
        ref = payload.get("refUpdates", {}).get("name", None)

    if isinstance(ref, str):
        # remove prefix
        if ref.startswith("refs/heads/"):
            branch = ref[11:]
        else:
            branch = ref
    else:
        branch = None

    ### Get urls ###
    # Github style
    repo_payload = payload.get("repository", {})
    git_url = repo_payload.get("git_url", None)
    ssh_url = repo_payload.get("ssh_url", None)
    clone_url = repo_payload.get("clone_url", None)

    # Gitlab style
    project_payload = payload.get("project", {})
    project_git_http_url = project_payload.get("git_http_url", None)
    project_git_ssh_url = project_payload.get("git_ssh_url", None)
    project_full_name = project_payload.get("path_with_namespace", None)

    # Azure style
    resource_payload = payload.get("resource", {})
    azure_repo_payload = resource_payload.get("repository", {})
    remote_url = azure_repo_payload.get("remoteUrl", None)

    # Bitbucket+Github style for fullname
    full_name = repo_payload.get("full_name", None)

    # additional support for url payload
    # fix: locals were swapped relative to the keys they read (git_ssh_url was
    # stored into `git_http_url` and vice versa); both feed the same set, so
    # behavior is unchanged — only the names were corrected.
    git_ssh_url = repo_payload.get("git_ssh_url", None)
    git_http_url = repo_payload.get("git_http_url", None)
    url = repo_payload.get("url", None)

    # remove duplicates and None.
    # fix: list.remove(None) raised ValueError whenever None was absent
    # (e.g. a payload carrying both full-name variants); set difference is safe.
    urls = list(
        {
            remote_url,
            git_url,
            ssh_url,
            clone_url,
            git_ssh_url,
            git_http_url,
            url,
            project_git_http_url,
            project_git_ssh_url,
        }
        - {None}
    )

    names = list({project_full_name, full_name} - {None})

    if not urls and not names:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="repo url or full name not found in payload!",
        )

    return GitChanges(urls=urls, branch=branch, names=names)
def init_git_webhook_router(
    pubsub_endpoint: PubSubEndpoint, authenticator: JWTAuthenticator
):
    """Assemble the webhook router, picking the auth dependency and the
    change-extraction dependency based on the configured policy source type."""

    async def dummy_affected_repo_urls(request: Request) -> List[str]:
        # API bundle sources carry no git change info in their webhook payloads
        return []

    source_type = opal_server_config.POLICY_SOURCE_TYPE
    is_api_source = source_type == PolicySourceTypes.Api
    route_dependency = authenticator if is_api_source else validate_git_secret_or_throw
    func_dependency = (
        dummy_affected_repo_urls if is_api_source else extracted_git_changes
    )
    return get_webhook_router(
        [Depends(route_dependency)],
        Depends(func_dependency),
        source_type,
        pubsub_endpoint.publish,
    )
import hashlib
import hmac
import re
from typing import List, Optional
from fastapi import Header, HTTPException, Request, status
from opal_common.schemas.webhook import GitWebhookRequestParams, SecretTypeEnum
from opal_server.config import opal_server_config
from pydantic import BaseModel
# default validator dependency, built once at import time from server config
validate_git_secret_or_throw = validate_git_secret_or_throw_factory()
class SecretTypeEnum(str, Enum):
    """is the passed secret in the webhook a token or a signature on the
    request body."""

    # secret sent verbatim as a token (e.g. GitLab's X-Gitlab-Token header)
    token = "token"
    # secret is an HMAC signature computed over the request body (e.g. GitHub)
    signature = "signature"
class GitWebhookRequestParams(BaseSchema):
    """Describes where, in an incoming git webhook request, each relevant
    datum (secret, event name) lives and how to interpret it.

    Layouts differ per provider (GitHub / GitLab / Azure / Bitbucket), so the
    locations are configurable rather than hard-coded.
    """

    secret_header_name: str = Field(
        ...,
        description="The HTTP header holding the secret",
    )
    secret_type: SecretTypeEnum = Field(
        ...,
        description=SecretTypeEnum.__doc__,
    )
    secret_parsing_regex: str = Field(
        ...,
        description="The regex used to parse out the actual signature from the header. Use '(.*)' for the entire value",
    )
    # exactly one of event_header_name / event_request_key is expected to be
    # set — the consumer checks header first, then the request body key
    event_header_name: typing.Optional[str] = Field(
        default=None,
        description="The HTTP header holding the event information (used instead of event_request_key)",
    )
    event_request_key: typing.Optional[str] = Field(
        default=None,
        description="The JSON object key holding the event information (used instead of event_header_name)",
    )
    push_event_value: str = Field(
        ...,
        description="The event value indicating a Git push",
    )
    match_sender_url: bool = Field(
        True,
        description="Should OPAL verify that the sender url matches the tracked repo URL, and drop the webhook request otherwise?",
    )
# module-level singleton: server settings loaded from OPAL_-prefixed env vars
opal_server_config = OpalServerConfig(prefix="OPAL_")
The provided code snippet includes necessary dependencies for implementing the `validate_git_secret_or_throw_factory` function. Write a Python function `def validate_git_secret_or_throw_factory( webhook_secret: Optional[str] = opal_server_config.POLICY_REPO_WEBHOOK_SECRET, webhook_params: GitWebhookRequestParams = opal_server_config.POLICY_REPO_WEBHOOK_PARAMS, )` to solve the following problem:
Factory function to create a secret-validator dependency according to config. Returns: validate_git_secret_or_throw (async function) Args: webhook_secret (Optional[str], optional): The secret to validate. Defaults to opal_server_config.POLICY_REPO_WEBHOOK_SECRET. webhook_params (GitWebhookRequestParams, optional): The webhook configuration — including how to parse the secret. Defaults to opal_server_config.POLICY_REPO_WEBHOOK_PARAMS.
Here is the function:
def validate_git_secret_or_throw_factory(
    webhook_secret: Optional[str] = opal_server_config.POLICY_REPO_WEBHOOK_SECRET,
    webhook_params: GitWebhookRequestParams = opal_server_config.POLICY_REPO_WEBHOOK_PARAMS,
):
    """Factory function to create a secret-validator dependency according to
    config.

    Returns: validate_git_secret_or_throw (async function)

    Args:
        webhook_secret (Optional[str], optional): The secret to validate.
            Defaults to opal_server_config.POLICY_REPO_WEBHOOK_SECRET.
        webhook_params (GitWebhookRequestParams, optional): The webhook
            configuration — including how to parse the secret. Defaults to
            opal_server_config.POLICY_REPO_WEBHOOK_PARAMS.
    """

    async def validate_git_secret_or_throw(request: Request) -> bool:
        """Authenticate a request from a git service webhook system by
        checking that the request contains a valid signature (i.e: via the
        secret stored on github) or a valid token (as stored in Gitlab).

        Raises:
            HTTPException(401): missing, malformed, or mismatching secret.
        """
        if webhook_secret is None:
            # webhook can be configured without secret (not recommended but quite possible)
            return True

        # get the secret the git service has sent us
        incoming_secret = request.headers.get(webhook_params.secret_header_name, "")

        # parse out the actual secret (Some services like Github add prefixes)
        matches = re.findall(
            webhook_params.secret_parsing_regex,
            incoming_secret,
        )
        incoming_secret = matches[0] if matches else None

        # check we actually got something
        if incoming_secret is None or len(incoming_secret) == 0:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="No secret was provided!",
            )

        # Check secret as signature
        if webhook_params.secret_type == SecretTypeEnum.signature:
            # calculate our signature on the post body
            payload = await request.body()
            our_signature = hmac.new(
                webhook_secret.encode("utf-8"),
                payload,
                hashlib.sha256,
            ).hexdigest()
            # compare signatures on the post body (constant-time)
            provided_signature = incoming_secret
            if not hmac.compare_digest(our_signature, provided_signature):
                raise HTTPException(
                    status_code=status.HTTP_401_UNAUTHORIZED,
                    detail="signatures didn't match!",
                )
        # Check secret as token.
        # fix: the previous `!=` comparison was not constant-time, leaking
        # timing information about the secret; compare_digest closes that.
        elif not hmac.compare_digest(
            incoming_secret.encode("utf-8"), webhook_secret.encode("utf-8")
        ):
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="secret-tokens didn't match!",
            )
        return True

    return validate_git_secret_or_throw
from fastapi_websocket_pubsub.pub_sub_client import PubSubClient, Topic
from opal_common.confi.confi import load_conf_if_none
from opal_common.topics.listener import TopicCallback, TopicListener
from opal_common.utils import get_authorization_header
from opal_server.config import opal_server_config
def load_conf_if_none(variable, conf):
    """Return *conf* when *variable* is None, otherwise return *variable*.

    Falsy-but-set values (0, "", False) are preserved — only None means
    "not provided, fall back to config".
    """
    return conf if variable is None else variable
class TopicCallback(Protocol):
    """Structural type for pub/sub message handlers.

    Implementations take the topic a message arrived on plus its payload,
    and return a coroutine for the listener to await.
    """

    def __call__(self, topic: Topic, data: Any) -> Coroutine:
        ...
class TopicListener:
    """A simple wrapper around a PubSubClient that listens on a topic and runs
    a callback when messages arrive for that topic.

    Provides start() and stop() shortcuts that help treat this client
    as a separate "process" or task that runs in the background.
    Also usable as an async context manager (enter starts, exit stops).
    """

    def __init__(
        self,
        client: PubSubClient,
        server_uri: str,
        topics: TopicList = None,
        callback: TopicCallback = None,
    ):
        """Store the wiring; nothing connects until start() is called.

        Args:
            client (PubSubClient): a configured not-yet-started pub sub client
            server_uri (str): the URI of the pub sub server we subscribe to
            topics (TopicList): the topic(s) we subscribe to
            callback (TopicCallback): the (async) callback to run when a message
                arrives on one of the subscribed topics
        """
        self._client = client
        self._server_uri = server_uri
        self._topics = topics
        self._callback = callback

    async def __aenter__(self):
        # start() is synchronous; the client connects in the background
        self.start()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.stop()

    def start(self):
        """starts the pub/sub client and subscribes to the predefined topic.

        the client will attempt to connect to the pubsub server until
        successful.
        """
        logger.info("started topic listener, topics={topics}", topics=self._topics)
        for topic in self._topics:
            self._client.subscribe(topic, self._callback)
        self._client.start_client(f"{self._server_uri}")

    async def stop(self):
        """stops the pubsub client."""
        await self._client.disconnect()
        # NOTE(review): message has no {topics} placeholder, so `topics` only
        # lands in the log record's extra fields — confirm that is intended.
        logger.info("stopped topic listener", topics=self._topics)

    async def wait_until_done(self):
        """When the listener is used as a context manager, this method waits
        until the client is done (i.e: terminated) to prevent exiting the
        context."""
        return await self._client.wait_until_done()
def get_authorization_header(token: str) -> Tuple[str, str]:
    """Build an HTTP Authorization header pair carrying *token* as a Bearer
    credential, in (header-name, header-value) form."""
    header_value = "Bearer {}".format(token)
    return ("Authorization", header_value)
# module-level singleton: server settings loaded from OPAL_-prefixed env vars
opal_server_config = OpalServerConfig(prefix="OPAL_")
def setup_webhook_listener(
    callback: TopicCallback,
    server_uri: str = None,
    server_token: str = None,
    topic: Topic = "webhook",
) -> TopicListener:
    """Wire up a TopicListener that runs *callback* whenever *topic* is
    published on the OPAL pub/sub server.

    Falls back to the configured local websocket URL / token for any argument
    left as None.
    """
    # resolve defaults from server config for anything the caller omitted
    resolved_uri = load_conf_if_none(server_uri, opal_server_config.OPAL_WS_LOCAL_URL)
    resolved_token = load_conf_if_none(server_token, opal_server_config.OPAL_WS_TOKEN)
    auth_headers = [get_authorization_header(resolved_token)]
    return TopicListener(
        client=PubSubClient(extra_headers=auth_headers),
        server_uri=resolved_uri,
        topics=[topic],
        callback=callback,
    )
import os
from pathlib import Path
from typing import List, Optional
import fastapi.responses
from fastapi import APIRouter, Depends, Header, HTTPException, Query, Response, status
from git import Repo
from opal_common.confi.confi import load_conf_if_none
from opal_common.git.bundle_maker import BundleMaker
from opal_common.git.commit_viewer import CommitViewer
from opal_common.git.repo_cloner import RepoClonePathFinder
from opal_common.logger import logger
from opal_common.schemas.policy import PolicyBundle
from opal_server.config import opal_server_config
from starlette.responses import RedirectResponse
async def get_repo(
    base_clone_path: str = None,
    clone_subdirectory_prefix: str = None,
    use_fixed_path: bool = None,
) -> Repo:
    """FastAPI dependency resolving the local policy-repo clone.

    NOTE(review): the body was elided in this excerpt, leaving a bare `def`
    header (a SyntaxError); the `...` placeholder restores parseability
    without inventing behavior — restore the real body from the original file.
    """
    ...
async def get_input_paths_or_throw(
    repo: Repo = Depends(get_repo),
    paths: Optional[List[str]] = Query(None, alias="path"),
) -> List[Path]:
    """FastAPI dependency turning ?path= query params into repo-relative Paths.

    NOTE(review): the body was elided in this excerpt, leaving a bare `def`
    header (a SyntaxError); the `...` placeholder restores parseability
    without inventing behavior — restore the real body from the original file.
    """
    ...
class BundleMaker:
    """Builds OPA policy bundles (full or diff) out of a git repository.

    NOTE(review): every method body was elided in this excerpt, leaving bare
    `def` headers (SyntaxErrors); the `...` placeholders restore parseability
    without inventing behavior — restore the real bodies from the original file.
    """

    def __init__(
        self,
        repo: Repo,
        in_directories: Set[Path],
        extensions: Optional[List[str]] = None,
        root_manifest_path: str = ".manifest",
        bundle_ignore: Optional[List[str]] = None,
    ):
        ...

    def _get_explicit_manifest(self, viewer: CommitViewer) -> Optional[List[str]]:
        ...

    # NOTE(review): declared without `self`, and `_branch` carries a mutable
    # default ([]) that is shared across calls — likely a nested/static helper
    # in the original source; confirm before relying on this signature.
    def _compile_manifest_file(
        dir: VersionedDirectory,
        manifest_file_name: str = ".manifest",
        _branch: List[str] = [],
    ) -> List[str]:
        ...

    def _sort_manifest(
        self, unsorted_manifest: List[str], explicit_sorting: Optional[List[str]]
    ) -> List[str]:
        ...

    def make_bundle(self, commit: Commit) -> PolicyBundle:
        ...

    def make_diff_bundle(self, old_commit: Commit, new_commit: Commit) -> PolicyBundle:
        ...
# module-level singleton: server settings loaded from OPAL_-prefixed env vars
opal_server_config = OpalServerConfig(prefix="OPAL_")
async def get_policy(
    repo: Repo = Depends(get_repo),
    input_paths: List[Path] = Depends(get_input_paths_or_throw),
    base_hash: Optional[str] = Query(
        None,
        description="hash of previous bundle already downloaded, server will return a diff bundle.",
    ),
):
    """Serve a policy bundle built from the local git clone.

    Returns a full bundle of the repo HEAD, or — when the client supplies a
    `base_hash` it already holds — a diff bundle with only the changes since
    that commit.

    NOTE(review): the route decorator is not visible in this excerpt; this
    coroutine is presumably registered on a router elsewhere — confirm.
    """
    maker = BundleMaker(
        repo,
        in_directories=set(input_paths),
        extensions=opal_server_config.FILTER_FILE_EXTENSIONS,
        root_manifest_path=opal_server_config.POLICY_REPO_MANIFEST_PATH,
        bundle_ignore=opal_server_config.BUNDLE_IGNORE,
    )
    # check if commit exist in the repo
    revision = None
    if base_hash:
        try:
            revision = repo.rev_parse(base_hash)
        except ValueError:
            # gitpython raises BadName (a ValueError subclass) for unknown revs
            logger.warning(f"base_hash {base_hash} not exist in the repo")
    if revision is None:
        # unknown or missing base: fall back to a full bundle of HEAD
        return maker.make_bundle(repo.head.commit)

    try:
        old_commit = repo.commit(base_hash)
        return maker.make_diff_bundle(old_commit, repo.head.commit)
    except ValueError:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"commit with hash {base_hash} was not found in the policy repo!",
        )
app = create_app()
class OpalServer:
    """OPAL server application: pub/sub hub, policy watcher, and REST API."""

    def __init__(
        self,
        init_policy_watcher: bool = None,
        policy_remote_url: str = None,
        init_publisher: bool = None,
        data_sources_config: Optional[ServerDataSourceConfig] = None,
        broadcaster_uri: str = None,
        signer: Optional[JWTSigner] = None,
        enable_jwks_endpoint=True,
        jwks_url: str = None,
        jwks_static_dir: str = None,
        master_token: str = None,
        loadlimit_notation: str = None,
    ) -> None:
        """
        Args:
            policy_remote_url (str, optional): the url of the repo watched by policy watcher.
            init_publisher (bool, optional): whether or not to launch a publisher pub/sub client.
                this publisher is used by the server processes to publish data to the client.
            data_sources_config (ServerDataSourceConfig, optional): base data configuration, that opal
                clients should get the data from.
            broadcaster_uri (str, optional): Which server/medium should the PubSub use for broadcasting.
                Defaults to BROADCAST_URI.
            loadlimit_notation (str, optional): Rate limit configuration for opal client connections.
                Defaults to None, in that case no rate limit is enforced

        The server can run in multiple workers (by gunicorn or uvicorn).

        Every worker of the server launches the following internal components:
            publisher (PubSubClient): a client that is used to publish updates to the client.
            data_update_publisher (DataUpdatePublisher): a specialized publisher for data updates.

        Besides the components above, the workers are also deciding among themselves
        on a *leader* worker (the first worker to obtain a file-lock) that also
        launches the following internal components:

            watcher (PolicyWatcherTask): run by the leader, monitors the policy git repository
            by polling on it or by being triggered by the callback subscribed on the "webhook"
            topic. upon being triggered, will detect updates to the policy (new commits) and
            will update the opal client via pubsub.
        """
        # load defaults
        init_publisher: bool = load_conf_if_none(
            init_publisher, opal_server_config.PUBLISHER_ENABLED
        )
        broadcaster_uri: str = load_conf_if_none(
            broadcaster_uri, opal_server_config.BROADCAST_URI
        )
        jwks_url: str = load_conf_if_none(jwks_url, opal_server_config.AUTH_JWKS_URL)
        jwks_static_dir: str = load_conf_if_none(
            jwks_static_dir, opal_server_config.AUTH_JWKS_STATIC_DIR
        )
        master_token: str = load_conf_if_none(
            master_token, opal_server_config.AUTH_MASTER_TOKEN
        )
        self._init_policy_watcher: bool = load_conf_if_none(
            init_policy_watcher, opal_server_config.REPO_WATCHER_ENABLED
        )
        self.loadlimit_notation: str = load_conf_if_none(
            loadlimit_notation, opal_server_config.CLIENT_LOAD_LIMIT_NOTATION
        )
        self._policy_remote_url = policy_remote_url

        self._configure_monitoring()
        metrics.increment("startup")

        self.data_sources_config: ServerDataSourceConfig = (
            data_sources_config
            if data_sources_config is not None
            else opal_server_config.DATA_CONFIG_SOURCES
        )

        self.broadcaster_uri = broadcaster_uri
        self.master_token = master_token

        if signer is not None:
            self.signer = signer
        else:
            self.signer = JWTSigner(
                private_key=opal_server_config.AUTH_PRIVATE_KEY,
                public_key=opal_common_config.AUTH_PUBLIC_KEY,
                algorithm=opal_common_config.AUTH_JWT_ALGORITHM,
                audience=opal_common_config.AUTH_JWT_AUDIENCE,
                issuer=opal_common_config.AUTH_JWT_ISSUER,
            )
        if self.signer.enabled:
            logger.info(
                "OPAL is running in secure mode - will verify API requests with JWT tokens."
            )
        else:
            logger.info(
                "OPAL was not provided with JWT encryption keys, cannot verify api requests!"
            )

        if enable_jwks_endpoint:
            self.jwks_endpoint = JwksStaticEndpoint(
                signer=self.signer, jwks_url=jwks_url, jwks_static_dir=jwks_static_dir
            )
        else:
            self.jwks_endpoint = None

        self.pubsub = PubSub(signer=self.signer, broadcaster_uri=broadcaster_uri)

        self.publisher: Optional[TopicPublisher] = None
        self.broadcast_keepalive: Optional[PeriodicPublisher] = None
        if init_publisher:
            self.publisher = ServerSideTopicPublisher(self.pubsub.endpoint)

            if (
                opal_server_config.BROADCAST_KEEPALIVE_INTERVAL > 0
                and self.broadcaster_uri is not None
            ):
                self.broadcast_keepalive = setup_broadcaster_keepalive_task(
                    self.publisher,
                    time_interval=opal_server_config.BROADCAST_KEEPALIVE_INTERVAL,
                    topic=opal_server_config.BROADCAST_KEEPALIVE_TOPIC,
                )

        if opal_common_config.STATISTICS_ENABLED:
            self.opal_statistics = OpalStatistics(self.pubsub.endpoint)
        else:
            self.opal_statistics = None

        # if stats are enabled, the server workers must be listening on the broadcast
        # channel for their own synchronization, not just for their clients. therefore
        # we need a "global" listening context
        self.broadcast_listening_context: Optional[
            EventBroadcasterContextManager
        ] = None
        if self.broadcaster_uri is not None and opal_common_config.STATISTICS_ENABLED:
            self.broadcast_listening_context = (
                self.pubsub.endpoint.broadcaster.get_listening_context()
            )

        self.watcher: PolicyWatcherTask = None
        self.leadership_lock: Optional[NamedLock] = None

        if opal_server_config.SCOPES:
            self._redis_db = RedisDB(opal_server_config.REDIS_URL)
            self._scopes = ScopeRepository(self._redis_db)
            logger.info("OPAL Scopes: server is connected to scopes repository")

        # init fastapi app
        self.app: FastAPI = self._init_fast_api_app()

    def _init_fast_api_app(self):
        """inits the fastapi app object."""
        app = FastAPI(
            title="Opal Server",
            description="OPAL is an administration layer for Open Policy Agent (OPA), detecting changes"
            + " to both policy and data and pushing live updates to your agents. The opal server creates"
            + " a pub/sub channel clients can subscribe to (i.e: acts as coordinator). The server also"
            + " tracks a git repository (via webhook) for updates to policy (or static data) and accepts"
            + " continuous data update notifications via REST api, which are then pushed to clients.",
            version="0.1.0",
        )
        configure_middleware(app)
        self._configure_api_routes(app)
        self._configure_lifecycle_callbacks(app)
        return app

    def _configure_monitoring(self):
        """sets up logging, APM tracing and statsd metrics."""
        configure_logs()
        apm.configure_apm(opal_server_config.ENABLE_DATADOG_APM, "opal-server")
        metrics.configure_metrics(
            enable_metrics=opal_common_config.ENABLE_METRICS,
            statsd_host=os.environ.get("DD_AGENT_HOST", "localhost"),
            statsd_port=8125,
            namespace="opal",
        )

    def _configure_api_routes(self, app: FastAPI):
        """mounts the api routes on the app object."""
        authenticator = JWTAuthenticator(self.signer)

        data_update_publisher: Optional[DataUpdatePublisher] = None
        if self.publisher is not None:
            data_update_publisher = DataUpdatePublisher(self.publisher)

        # Init api routers with required dependencies
        data_updates_router = init_data_updates_router(
            data_update_publisher, self.data_sources_config, authenticator
        )
        webhook_router = init_git_webhook_router(self.pubsub.endpoint, authenticator)
        security_router = init_security_router(
            self.signer, StaticBearerAuthenticator(self.master_token)
        )
        statistics_router = init_statistics_router(self.opal_statistics)
        loadlimit_router = init_loadlimit_router(self.loadlimit_notation)

        # mount the api routes on the app object
        app.include_router(
            bundles_router,
            tags=["Bundle Server"],
            dependencies=[Depends(authenticator)],
        )
        app.include_router(data_updates_router, tags=["Data Updates"])
        app.include_router(webhook_router, tags=["Github Webhook"])
        app.include_router(security_router, tags=["Security"])
        app.include_router(self.pubsub.pubsub_router, tags=["Pub/Sub"])
        app.include_router(
            self.pubsub.api_router,
            tags=["Pub/Sub"],
            dependencies=[Depends(authenticator)],
        )
        app.include_router(
            statistics_router,
            tags=["Server Statistics"],
            dependencies=[Depends(authenticator)],
        )
        app.include_router(
            loadlimit_router,
            tags=["Client Load Limiting"],
            dependencies=[Depends(authenticator)],
        )

        if opal_server_config.SCOPES:
            app.include_router(
                init_scope_router(self._scopes, authenticator, self.pubsub.endpoint),
                tags=["Scopes"],
                prefix="/scopes",
            )

        if self.jwks_endpoint is not None:
            # mount jwts (static) route
            self.jwks_endpoint.configure_app(app)

        # top level routes (i.e: healthchecks)
        # fix: restored decorator that was stripped — without it the function
        # was defined but never registered as a route.
        @app.get("/healthcheck", include_in_schema=False)
        def healthcheck():
            return {"status": "ok"}

        return app

    def _configure_lifecycle_callbacks(self, app: FastAPI):
        """registers callbacks on app startup and shutdown.

        on app startup we launch our long running processes (async
        tasks) on the event loop. on app shutdown we stop these long
        running tasks.
        """

        # fix: restored decorator that was stripped — without it this handler
        # was never registered with FastAPI.
        @app.on_event("startup")
        async def startup_event():
            logger.info("*** OPAL Server Startup ***")
            try:
                self._task = asyncio.create_task(self.start_server_background_tasks())
            except Exception:
                logger.critical("Exception while starting OPAL")
                traceback.print_exc()
                sys.exit(1)

        # fix: restored decorator (same reason as above).
        @app.on_event("shutdown")
        async def shutdown_event():
            logger.info("triggered shutdown event")
            await self.stop_server_background_tasks()

        return app

    async def start_server_background_tasks(self):
        """starts the background processes (as asyncio tasks) if such are
        configured.

        all workers will start these tasks:
        - publisher: a client that is used to publish updates to the client.

        only the leader worker (first to obtain leadership lock) will start these tasks:
        - (repo) watcher: monitors the policy git repository for changes.
        """
        if self.publisher is not None:
            async with self.publisher:
                if self.opal_statistics is not None:
                    if self.broadcast_listening_context is not None:
                        logger.info(
                            "listening on broadcast channel for statistics events..."
                        )
                        await self.broadcast_listening_context.__aenter__()
                        # if the broadcast channel is closed, we want to restart worker process because statistics can't be reliable anymore
                        self.broadcast_listening_context._event_broadcaster.get_reader_task().add_done_callback(
                            lambda _: self._graceful_shutdown()
                        )
                    asyncio.create_task(self.opal_statistics.run())
                    self.pubsub.endpoint.notifier.register_unsubscribe_event(
                        self.opal_statistics.remove_client
                    )

                # We want only one worker to run repo watchers
                # (otherwise for each new commit, we will publish multiple updates via pub/sub).
                # leadership is determined by the first worker to obtain a lock
                self.leadership_lock = NamedLock(
                    opal_server_config.LEADER_LOCK_FILE_PATH
                )
                async with self.leadership_lock:
                    # only one worker gets here, the others block. in case the leader worker
                    # is terminated, another one will obtain the lock and become leader.
                    logger.info(
                        "leadership lock acquired, leader pid: {pid}",
                        pid=os.getpid(),
                    )
                    if not opal_server_config.SCOPES:
                        # bind data updater publishers to the leader worker
                        asyncio.create_task(
                            DataUpdatePublisher.mount_and_start_polling_updates(
                                self.publisher, opal_server_config.DATA_CONFIG_SOURCES
                            )
                        )
                    else:
                        await load_scopes(self._scopes)

                    if self.broadcast_keepalive is not None:
                        self.broadcast_keepalive.start()
                        if not self._init_policy_watcher:
                            # Wait on keepalive instead to keep leadership lock acquired
                            await self.broadcast_keepalive.wait_until_done()

                    if self._init_policy_watcher:
                        self.watcher = setup_watcher_task(
                            self.publisher, self.pubsub.endpoint
                        )
                        # running the watcher, and waiting until it stops (until self.watcher.signal_stop() is called)
                        async with self.watcher:
                            await self.watcher.wait_until_should_stop()
                        # Worker should restart when watcher stops
                        self._graceful_shutdown()

                if (
                    self.opal_statistics is not None
                    and self.broadcast_listening_context is not None
                ):
                    # fix: __aexit__ requires (exc_type, exc, tb) — calling it
                    # with no arguments raised TypeError on this path.
                    await self.broadcast_listening_context.__aexit__(None, None, None)
                    logger.info(
                        "stopped listening for statistics events on the broadcast channel"
                    )

    async def stop_server_background_tasks(self):
        """stops the background tasks started by start_server_background_tasks."""
        logger.info("stopping background tasks...")

        tasks: List[asyncio.Task] = []

        if self.watcher is not None:
            tasks.append(asyncio.create_task(self.watcher.stop()))
        if self.publisher is not None:
            tasks.append(asyncio.create_task(self.publisher.stop()))
        if self.broadcast_keepalive is not None:
            tasks.append(asyncio.create_task(self.broadcast_keepalive.stop()))

        try:
            await asyncio.gather(*tasks)
        except Exception:
            logger.exception("exception while shutting down background tasks")

    def _graceful_shutdown(self):
        """asks the process manager (gunicorn/uvicorn) to restart this worker."""
        logger.info("Trigger worker graceful shutdown")
        os.kill(os.getpid(), signal.SIGTERM)
def create_app(*args, **kwargs):
    """Instantiate an OpalServer and expose its FastAPI application.

    All arguments are forwarded verbatim to OpalServer's constructor.
    """
    from .server import OpalServer

    return OpalServer(*args, **kwargs).app
import pathlib
from typing import List, Optional, cast
import pygit2
from fastapi import (
APIRouter,
Depends,
Header,
HTTPException,
Path,
Query,
Response,
status,
)
from fastapi.responses import RedirectResponse
from fastapi_websocket_pubsub import PubSubEndpoint
from git import InvalidGitRepositoryError
from opal_common.async_utils import run_sync
from opal_common.authentication.authz import (
require_peer_type,
restrict_optional_topics_to_publish,
)
from opal_common.authentication.casting import cast_private_key
from opal_common.authentication.deps import JWTAuthenticator, get_token_from_header
from opal_common.authentication.types import EncryptionKeyFormat, JWTClaims
from opal_common.authentication.verifier import Unauthorized
from opal_common.logger import logger
from opal_common.monitoring import metrics
from opal_common.schemas.data import (
DataSourceConfig,
DataUpdate,
ServerDataSourceConfig,
)
from opal_common.schemas.policy import PolicyBundle, PolicyUpdateMessageNotification
from opal_common.schemas.policy_source import GitPolicyScopeSource, SSHAuthData
from opal_common.schemas.scopes import Scope
from opal_common.schemas.security import PeerType
from opal_common.topics.publisher import (
ScopedServerSideTopicPublisher,
ServerSideTopicPublisher,
)
from opal_common.urls import set_url_query_param
from opal_server.config import opal_server_config
from opal_server.data.data_update_publisher import DataUpdatePublisher
from opal_server.git_fetcher import GitPolicyFetcher
from opal_server.scopes.scope_repository import ScopeNotFoundError, ScopeRepository
def verify_private_key_or_throw(scope_in: Scope):
    """Validate that an SSH-authenticated scope carries a usable private key.

    Non-SSH auth configurations pass through untouched.

    Raises:
        HTTPException(422): when the key contains no newlines (a sign it was
            mangled in transport), or parses as neither PEM nor OpenSSH.
    """
    # guard clause: only SSH auth data carries a private key to validate
    if not isinstance(scope_in.policy.auth, SSHAuthData):
        return

    auth = cast(SSHAuthData, scope_in.policy.auth)
    # fix: idiomatic `not in` (was `not "\n" in auth.private_key`)
    if "\n" not in auth.private_key:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail={"error": "private key is expected to contain newlines!"},
        )

    is_pem_key = verify_private_key(
        auth.private_key, key_format=EncryptionKeyFormat.pem
    )
    is_ssh_key = verify_private_key(
        auth.private_key, key_format=EncryptionKeyFormat.ssh
    )
    if not (is_pem_key or is_ssh_key):
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail={"error": "private key is invalid"},
        )
async def run_sync(
func: Callable[P_args, T_result], *args: P_args.args, **kwargs: P_args.kwargs
) -> T_result:
"""Shorthand for running a sync function in an executor within an async
context.
For example: def sync_function_that_takes_time_to_run(arg1,
arg2): time.sleep(5) async def async_function():
await run_sync(sync_function_that_takes_time_to_run, 1, arg2=5)
"""
return await asyncio.get_event_loop().run_in_executor(
None, partial(func, *args, **kwargs)
)
def require_peer_type(
    authenticator: JWTAuthenticator, claims: JWTClaims, required_type: PeerType
):
    """Assert that the JWT claims belong to the required peer type.

    No-op when authentication is disabled.

    Raises:
        Unauthorized: when the 'peer_type' claim is missing, unparsable, or
            different from *required_type*.
    """
    if not authenticator.enabled:
        return

    peer_type = claims.get("peer_type", None)
    if peer_type is None:
        raise Unauthorized(description="Missing 'peer_type' claim for OPAL jwt token")
    try:
        # fix: renamed local (was `type`) to stop shadowing the builtin
        claimed_type = PeerType(peer_type)
    except ValueError:
        raise Unauthorized(
            description=f"Invalid 'peer_type' claim for OPAL jwt token: {peer_type}"
        )

    if claimed_type != required_type:
        raise Unauthorized(
            description=f"Incorrect 'peer_type' claim for OPAL jwt token: {str(claimed_type)}, expected: {str(required_type)}"
        )
def restrict_optional_topics_to_publish(
    authenticator: JWTAuthenticator, claims: JWTClaims, update: DataUpdate
):
    """Deny publishing to topics outside the token's 'permitted_topics' claim.

    Skipped entirely when authentication is disabled, or when the claim is
    absent (an absent claim means the token is unrestricted).

    Raises:
        Unauthorized: when any update entry targets a non-permitted topic.
    """
    if not authenticator.enabled:
        return
    if "permitted_topics" not in claims:
        # token does not restrict topics at all
        return
    permitted = claims["permitted_topics"]
    for entry in update.entries:
        unauthorized_topics = set(entry.topics).difference(permitted)
        if unauthorized_topics:
            raise Unauthorized(
                description=f"Invalid 'topics' to publish {unauthorized_topics}"
            )
def get_token_from_header(authorization_header: str) -> Optional[str]:
    """Extract a bearer token from an HTTP Authorization header value.

    Returns None when the header is missing/empty, the scheme is not
    'bearer', or no token follows the scheme. Needed because the fastapi
    built-in oauth2_scheme cannot be used when the token arrives via
    websocket.
    """
    if not authorization_header:
        return None
    scheme, token = get_authorization_scheme_param(authorization_header)
    if token and scheme.lower() == "bearer":
        return token
    return None
class JWTAuthenticator(_JWTAuthenticator):
    """bearer token authentication for http(s) api endpoints.

    throws 401 if a valid jwt is not provided.
    """

    def __call__(self, authorization: Optional[str] = Header(None)) -> JWTClaims:
        # pull the bearer token out of the Authorization header (None if absent)
        token = get_token_from_header(authorization)
        # verify_logged_in validates the token with self._verifier and returns
        # the decoded claims (raising 401 on failure)
        return verify_logged_in(self._verifier, token)
# type alias: decoded JWT claims are an arbitrary str -> Any mapping
JWTClaims = Dict[str, Any]
class Unauthorized(HTTPException):
    """HTTP 401 Unauthorized exception.

    Carries a JSON error body and the WWW-Authenticate: Bearer header, as
    expected by bearer-token clients.
    """

    def __init__(self, description="Bearer token is not valid!", **kwargs):
        # extra keyword args are merged into the JSON error payload
        error_body = {"error": description, **kwargs}
        auth_headers = {"WWW-Authenticate": "Bearer"}
        super().__init__(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=error_body,
            headers=auth_headers,
        )
class DataSourceConfig(BaseModel):
    """Static list of Data Source Entries returned to client.

    Answers this question for the client: from where should i get the
    full picture of data i need? (as opposed to incremental data
    updates)
    """

    # each entry describes one data source, how to fetch from it, and
    # (optionally) how often to poll it
    entries: List[DataSourceEntryWithPollingInterval] = Field(
        [], description="list of data sources and how to fetch from them"
    )
class ServerDataSourceConfig(BaseModel):
    """As its data source configuration, the server can either hold:

    1) A static DataSourceConfig returned to all clients regardless of identity.
    If all clients need the same config, this is the way to go.

    2) A redirect url (external_source_url), to which the opal client will be redirected when requesting
    its DataSourceConfig. The client will issue the same request (with the same headers, including the
    JWT token identifying it) to the url configured. This option is good if each client must receive a
    different base data configuration, for example for a multi-tenant deployment.

    By providing the server that serves external_source_url the value of OPAL_AUTH_PUBLIC_KEY, that server
    can validate the JWT and get its claims, in order to apply authorization and/or other conditions before
    returning the data sources relevant to said client.

    Exactly one of `config` / `external_source_url` must be set (enforced by
    a model validator).
    """

    # option 1: same static config for every client
    config: Optional[DataSourceConfig] = Field(
        None, description="static list of data sources and how to fetch from them"
    )
    # option 2: per-client dynamic config served by an external endpoint
    external_source_url: Optional[AnyHttpUrl] = Field(
        None,
        description="external url to serve data sources dynamically."
        + " if set, the clients will be redirected to this url when requesting to fetch data sources.",
    )
def check_passwords_match(cls, values):
    """Model validator: exactly one of 'config' / 'external_source_url' must
    be provided.

    (Note: the function name is historical and unrelated to passwords.)

    Raises:
        ValueError: when both fields are missing, or both are set.
    """
    static_config = values.get("config")
    external_url = values.get("external_source_url")
    if static_config is None and external_url is None:
        raise ValueError(
            "you must provide one of these fields: config, external_source_url"
        )
    if static_config is not None and external_url is not None:
        raise ValueError(
            "you must provide ONLY ONE of these fields: config, external_source_url"
        )
    return values
class DataUpdate(BaseModel):
    """DataSources used as OPAL-server configuration Data update sent to
    clients."""

    # a UUID to identify this update (used as part of an update's completion callback)
    id: Optional[str] = None
    entries: List[DataSourceEntry] = Field(
        ..., description="list of related updates the OPAL client should perform"
    )
    # NOTE(review): annotated `str` but defaulted to None — presumably should
    # be Optional[str]; left unchanged since pydantic validation depends on it.
    reason: str = Field(None, description="Reason for triggering the update")
    # Configuration for how to notify other services on the status of Update
    callback: UpdateCallback = UpdateCallback(callbacks=[])
class PolicyBundle(BaseSchema):
    """A snapshot (or diff) of the policy repository, served to OPAL clients."""

    # relative paths of the files included in the bundle
    manifest: List[str]
    hash: str = Field(..., description="commit hash (debug version)")
    old_hash: Optional[str] = Field(
        None, description="old commit hash (in diff bundles)"
    )
    data_modules: List[DataModule]
    policy_modules: List[RegoModule]
    # only populated for diff bundles
    deleted_files: Optional[DeletedFiles]
class GitPolicyScopeSource(BasePolicyScopeSource):
    """Git-backed policy source configuration for a scope."""

    branch: str = Field("main", description="Git branch to track")
class Scope(BaseSchema):
    """A single OPAL scope: a policy source plus its data source config."""

    scope_id: str = Field(..., description="Scope ID")
    # NOTE(review): single-armed Union — equivalent to GitPolicyScopeSource
    # alone; presumably kept as Union to ease adding more source types later.
    policy: Union[GitPolicyScopeSource] = Field(..., description="Policy source")
    data: DataSourceConfig = Field(
        DataSourceConfig(entries=[]), description="Data source configuration"
    )
class PeerType(str, Enum):
    """Kind of peer identified by a JWT (stored in the 'peer_type' claim)."""

    client = "client"
    datasource = "datasource"
    listener = "listener"
class ScopedServerSideTopicPublisher(ServerSideTopicPublisher):
    """A topic publisher that namespaces every topic under a scope id.

    Each topic is published as "<scope_id>:<topic>" so different scopes
    sharing the same pub/sub endpoint do not collide.
    """

    def __init__(self, endpoint: PubSubEndpoint, scope_id: str):
        super().__init__(endpoint)
        self._scope_id = scope_id

    async def publish(self, topics: TopicList, data: Any = None):
        # prefix each topic with the scope id before delegating upward
        scoped_topics = [f"{self._scope_id}:{t}" for t in topics]
        logger.info("Publishing to topics: {topics}", topics=scoped_topics)
        await super().publish(scoped_topics, data)
def set_url_query_param(url: str, param_name: str, param_value: str):
    """Given a url, set or replace a query parameter and return the modified
    url.

    >> set_url_query_param('https://api.permit.io/opal/data/config', 'token', 'secret')
    'https://api.permit.io/opal/data/config?token=secret'
    """
    parsed: ParseResult = urlparse(url)
    # existing query params (if any), with the target param set/overwritten
    params = dict(parse_qsl(parsed.query))
    params[param_name] = param_value
    # ParseResult is a namedtuple: _replace builds a copy with the new query
    # string, and geturl() reassembles it (equivalent to urlunparse)
    return parsed._replace(query=urlencode(params)).geturl()
# singleton server configuration, populated from OPAL_-prefixed env vars
opal_server_config = OpalServerConfig(prefix="OPAL_")
class DataUpdatePublisher:
    """Publishes DataUpdate events to OPAL clients over pub/sub, expanding
    every topic into all of its hierarchical sub-topic combinations."""

    def __init__(self, publisher: TopicPublisher) -> None:
        # the underlying pub/sub publisher used to broadcast updates
        self._publisher = publisher

    def get_topic_combos(topic: str) -> List[str]:
        """Get the combinations of sub topics for the given topic e.g.
        "policy_data/users/keys" -> ["policy_data", "policy_data/users",
        "policy_data/users/keys"]

        If a colon (':') is present, only split after the right-most one,
        and prepend the prefix before it to every topic, e.g.
        "data:policy_data/users/keys" -> ["data:policy_data", "data:policy_data/users",
        "data:policy_data/users/keys"]

        Args:
            topic (str): topic string with sub topics delimited by delimiter

        Returns:
            List[str]: The combinations of sub topics for the given topic
        """
        # NOTE(review): defined without `self` — presumably a @staticmethod in
        # the original source (it is invoked below as
        # DataUpdatePublisher.get_topic_combos); confirm against upstream.
        topic_combos = []
        prefix = None
        if PREFIX_DELIMITER in topic:
            # keep everything before the right-most ':' as a fixed prefix
            prefix, topic = topic.rsplit(":", 1)
        sub_topics = topic.split(TOPIC_DELIMITER)
        if sub_topics:
            current_topic = sub_topics[0]
            if prefix:
                topic_combos.append(f"{prefix}{PREFIX_DELIMITER}{current_topic}")
            else:
                topic_combos.append(current_topic)
            if len(sub_topics) > 1:
                # accumulate progressively longer paths: a, a/b, a/b/c, ...
                for sub in sub_topics[1:]:
                    current_topic = f"{current_topic}{TOPIC_DELIMITER}{sub}"
                    if prefix:
                        topic_combos.append(
                            f"{prefix}{PREFIX_DELIMITER}{current_topic}"
                        )
                    else:
                        topic_combos.append(current_topic)
        return topic_combos

    async def publish_data_updates(self, update: DataUpdate):
        """Notify OPAL subscribers of a new data update by topic.

        Args:
            update (DataUpdate): update data-source configuration for subscribers to fetch data from
        """
        all_topic_combos = set()

        # a nicer format of entries to the log
        logged_entries = [
            dict(
                url=entry.url,
                method=entry.save_method,
                path=entry.dst_path or "/",
                inline_data=(entry.data is not None),
                topics=entry.topics,
            )
            for entry in update.entries
        ]

        # Expand the topics for each event to include sub topic combos (e.g. publish 'a/b/c' as 'a' , 'a/b', and 'a/b/c')
        for entry in update.entries:
            topic_combos = []
            if entry.topics:
                for topic in entry.topics:
                    topic_combos.extend(DataUpdatePublisher.get_topic_combos(topic))
                entry.topics = topic_combos  # Update entry with the exhaustive list, so client won't have to expand it again
                all_topic_combos.update(topic_combos)
            else:
                logger.warning(
                    "[{pid}] No topics were provided for the following entry: {entry}",
                    pid=os.getpid(),
                    entry=entry,
                )

        # publish all topics with all their sub combinations
        logger.info(
            "[{pid}] Publishing data update to topics: {topics}, reason: {reason}, entries: {entries}",
            pid=os.getpid(),
            topics=all_topic_combos,
            reason=update.reason,
            entries=logged_entries,
        )
        await self._publisher.publish(
            list(all_topic_combos), update.dict(by_alias=True)
        )

    async def _periodic_update_callback(
        self, update: DataSourceEntryWithPollingInterval
    ):
        """Called for every periodic update based on repeat_every."""
        logger.info(
            "[{pid}] Sending Periodic update: {source}", pid=os.getpid(), source=update
        )
        # Create new publish entry
        return await self.publish_data_updates(
            DataUpdate(reason="Periodic Update", entries=[update])
        )

    def create_polling_updates(self, sources: ServerDataSourceConfig):
        """Build (without starting) a repeating updater coroutine for every
        source entry that declares a periodic_update_interval."""
        # For every entry with a non zero period update interval, bind an interval to it
        updaters = []
        if hasattr(sources, "config") and hasattr(sources.config, "entries"):
            for source in sources.config.entries:
                if (
                    hasattr(source, "periodic_update_interval")
                    and isinstance(source.periodic_update_interval, float)
                    and source.periodic_update_interval is not None
                ):
                    logger.info(
                        "[{pid}] Establishing Period Updates for the following source: {source}",
                        pid=os.getpid(),
                        source=source,
                    )

                    # the current `source` is bound as a default argument to
                    # avoid the late-binding-closure pitfall inside this loop
                    async def bind_for_repeat(bind_source=source):
                        await self._periodic_update_callback(bind_source)

                    updaters.append(
                        repeat_every(
                            seconds=source.periodic_update_interval,
                            wait_first=True,
                            logger=logger,
                        )(bind_for_repeat)
                    )
        return updaters
async def mount_and_start_polling_updates(
    publisher: TopicPublisher, sources: ServerDataSourceConfig
):
    """Create the periodic update tasks for *sources* and run them all
    concurrently until they complete."""
    logger.info("[{pid}] Starting Polling Updates", pid=os.getpid())
    data_publisher = DataUpdatePublisher(publisher)
    updaters = data_publisher.create_polling_updates(sources)
    await asyncio.gather(*(updater() for updater in updaters))
class GitPolicyFetcher(PolicyFetcher):
    """Clones/fetches the git repository backing a policy scope and notifies
    its callbacks when new commits are detected on the tracked branch."""

    # process-wide caches shared by all fetcher instances:
    repo_locks = {}  # source id -> asyncio.Lock serializing access to a clone
    repos = {}  # clone path -> cached pygit2 Repository object
    repos_last_fetched = {}  # source id -> time of the last remote fetch

    def __init__(
        self,
        base_dir: Path,
        scope_id: str,
        source: GitPolicyScopeSource,
        callbacks=PolicyFetcherCallbacks(),
        remote_name: str = "origin",
    ):
        # NOTE(review): the default `PolicyFetcherCallbacks()` instance is
        # created once at def-time and shared across calls — confirm the
        # callbacks object is stateless.
        super().__init__(callbacks)
        self._base_dir = GitPolicyFetcher.base_dir(base_dir)
        self._source = source
        self._auth_callbacks = GitCallback(self._source)
        self._repo_path = GitPolicyFetcher.repo_clone_path(base_dir, self._source)
        self._remote = remote_name
        self._scope_id = scope_id
        logger.debug(
            f"Initializing git fetcher: scope_id={scope_id}, url={source.url}, branch={self._source.branch}, path={GitPolicyFetcher.source_id(source)}"
        )

    async def _get_repo_lock(self):
        """Return the lock serializing access to this source's clone."""
        # # This implementation works across multiple processes/threads, but is not fair (next acquiree is random)
        # locks_dir = self._base_dir / ".locks"
        # await aiofiles.os.makedirs(str(locks_dir), exist_ok=True)
        # return NamedLock(
        #     locks_dir / GitPolicyFetcher.source_id(self._source), attempt_interval=0.1
        # )

        # This implementation works only within the same process/thread, but is fair (next acquiree is the earliest to enter the lock)
        src_id = GitPolicyFetcher.source_id(self._source)
        lock = GitPolicyFetcher.repo_locks[src_id] = GitPolicyFetcher.repo_locks.get(
            src_id, asyncio.Lock()
        )
        return lock

    async def _was_fetched_after(self, t: datetime.datetime):
        # NOTE(review): `self.source_id` is referenced without being called —
        # presumably a @staticmethod upstream, used consistently as a cache key
        # here and in fetch_and_notify_on_changes; confirm against upstream.
        last_fetched = GitPolicyFetcher.repos_last_fetched.get(self.source_id, None)
        if last_fetched is None:
            return False
        return last_fetched > t

    async def fetch_and_notify_on_changes(
        self,
        hinted_hash: Optional[str] = None,
        force_fetch: bool = False,
        req_time: datetime.datetime = None,
    ):
        """makes sure the repo is already fetched and is up to date.

        - if no repo is found, the repo will be cloned.
        - if the repo is found and it is deemed out-of-date, the configured remote will be fetched.
        - if after a fetch new commits are detected, a callback will be triggered.
        - if the hinted commit hash is provided and is already found in the local clone
        we use this hint to avoid an unnecessary fetch.
        """
        repo_lock = await self._get_repo_lock()
        async with repo_lock:
            with tracer.trace(
                "git_policy_fetcher.fetch_and_notify_on_changes",
                resource=self._scope_id,
            ):
                if self._discover_repository(self._repo_path):
                    logger.debug("Repo found at {path}", path=self._repo_path)
                    repo = self._get_valid_repo()
                    if repo is not None:
                        should_fetch = await self._should_fetch(
                            repo,
                            hinted_hash=hinted_hash,
                            force_fetch=force_fetch,
                            req_time=req_time,
                        )
                        if should_fetch:
                            logger.debug(
                                f"Fetching remote (force_fetch={force_fetch}): {self._remote} ({self._source.url})"
                            )
                            # record the fetch time before the (slow) fetch so
                            # concurrent refresh requests can be deduplicated
                            GitPolicyFetcher.repos_last_fetched[
                                self.source_id
                            ] = datetime.datetime.now()
                            # the fetch is blocking git IO — run in an executor
                            await run_sync(
                                repo.remotes[self._remote].fetch,
                                callbacks=self._auth_callbacks,
                            )
                            logger.debug(f"Fetch completed: {self._source.url}")

                        # New commits might be present because of a previous fetch made by another scope
                        await self._notify_on_changes(repo)
                        return
                    else:
                        # repo dir exists but invalid -> we must delete the directory
                        logger.warning(
                            "Deleting invalid repo: {path}", path=self._repo_path
                        )
                        shutil.rmtree(self._repo_path)
                else:
                    logger.info("Repo not found at {path}", path=self._repo_path)

                # fallthrough to clean clone
                await self._clone()

    def _discover_repository(self, path: Path) -> bool:
        # a usable clone must both be discoverable by libgit2 and have a .git dir
        git_path: Path = path / ".git"
        return discover_repository(str(path)) and git_path.exists()

    async def _clone(self):
        """Clone the configured remote into the local clone path (errors are
        logged, not raised)."""
        logger.info(
            "Cloning repo at '{url}' to '{path}'",
            url=self._source.url,
            path=self._repo_path,
        )
        try:
            repo: Repository = await run_sync(
                clone_repository,
                self._source.url,
                str(self._repo_path),
                callbacks=self._auth_callbacks,
            )
        except pygit2.GitError:
            logger.exception(f"Could not clone repo at {self._source.url}")
        else:
            logger.info(f"Clone completed: {self._source.url}")
            await self._notify_on_changes(repo)

    def _get_repo(self) -> Repository:
        # Repository objects are cached per clone path at the class level
        path = str(self._repo_path)
        if path not in GitPolicyFetcher.repos:
            GitPolicyFetcher.repos[path] = Repository(path)
        return GitPolicyFetcher.repos[path]

    def _get_valid_repo(self) -> Optional[Repository]:
        """Return the repo if it opens and matches the configured remote URL,
        otherwise None."""
        try:
            repo = self._get_repo()
            RepoInterface.verify_found_repo_matches_remote(repo, self._source.url)
            return repo
        except pygit2.GitError:
            logger.warning("Invalid repo at: {path}", path=self._repo_path)
            return None

    async def _should_fetch(
        self,
        repo: Repository,
        hinted_hash: Optional[str] = None,
        force_fetch: bool = False,
        req_time: datetime.datetime = None,
    ) -> bool:
        """Decide whether the remote must be fetched (see
        fetch_and_notify_on_changes for the policy)."""
        if force_fetch:
            # a forced fetch may still be skipped if another request already
            # fetched after this request was issued
            if req_time is not None and await self._was_fetched_after(req_time):
                logger.info(
                    "Repo was fetched after refresh request, override force_fetch with False"
                )
            else:
                return True  # must fetch

        if not RepoInterface.has_remote_branch(repo, self._source.branch, self._remote):
            logger.info(
                "Target branch was not found in local clone, re-fetching the remote"
            )
            return True  # missing branch

        if hinted_hash is not None:
            try:
                _ = repo.revparse_single(hinted_hash)
                return False  # hinted commit was found, no need to fetch
            except KeyError:
                logger.info(
                    "Hinted commit hash was not found in local clone, re-fetching the remote"
                )
                return True  # hinted commit was not found

        # by default, we try to avoid re-fetching the repo for performance
        return False

    def local_branch_name(self) -> str:
        # NOTE(review): referenced elsewhere as `self.local_branch_name`
        # without a call — presumably a @property upstream; confirm.
        # Use the scope id as local branch name, so different scopes could track the same remote branch separately
        branch_name_unescaped = f"scopes/{self._scope_id}"
        if reference_is_valid_name(branch_name_unescaped):
            return branch_name_unescaped

        # if scope id can't be used as a gitref (e.g invalid chars), use its hex representation
        return f"scopes/{self._scope_id.encode().hex()}"

    async def _notify_on_changes(self, repo: Repository):
        """Compare the remote branch head with our local tracking ref, invoke
        the on_update callback, then advance the local ref."""
        # Get the latest commit hash of the target branch
        new_revision = RepoInterface.get_commit_hash(
            repo, self._source.branch, self._remote
        )
        if new_revision is None:
            logger.error(f"Did not find target branch on remote: {self._source.branch}")
            return

        # Get the previous commit hash of the target branch
        local_branch = RepoInterface.get_local_branch(repo, self.local_branch_name)
        if local_branch is None:
            # First sync of a new branch (the first synced branch in this repo was set by the clone (see `checkout_branch`))
            old_revision = None
            local_branch = RepoInterface.create_local_branch_ref(
                repo, self.local_branch_name, self._remote, self._source.branch
            )
        else:
            old_revision = local_branch.target.hex

        await self.callbacks.on_update(old_revision, new_revision)
        # Bring forward local branch (a bit like "pull"), so we won't detect changes again
        local_branch.set_target(new_revision)

    def _get_current_branch_head(self) -> str:
        """Return the commit hash at the head of the tracked remote branch.

        Raises:
            ValueError: when the branch head cannot be resolved.
        """
        repo = self._get_repo()
        head_commit_hash = RepoInterface.get_commit_hash(
            repo, self._source.branch, self._remote
        )
        if not head_commit_hash:
            logger.error("Could not find current branch head")
            raise ValueError("Could not find current branch head")
        return head_commit_hash

    def make_bundle(self, base_hash: Optional[str] = None) -> PolicyBundle:
        """Build a policy bundle from the current head; when *base_hash* is
        given (and resolvable) build a diff bundle relative to it instead."""
        repo = Repo(str(self._repo_path))
        bundle_maker = BundleMaker(
            repo,
            {Path(p) for p in self._source.directories},
            extensions=self._source.extensions,
            root_manifest_path=self._source.manifest,
            bundle_ignore=self._source.bundle_ignore,
        )
        current_head_commit = repo.commit(self._get_current_branch_head())

        if not base_hash:
            return bundle_maker.make_bundle(current_head_commit)
        else:
            try:
                base_commit = repo.commit(base_hash)
                return bundle_maker.make_diff_bundle(base_commit, current_head_commit)
            except ValueError:
                # unknown base hash -> fall back to a full bundle
                return bundle_maker.make_bundle(current_head_commit)

    def source_id(source: GitPolicyScopeSource) -> str:
        # NOTE(review): defined without `self` — presumably a @staticmethod
        # upstream; confirm.
        # clones of the same URL are sharded into a fixed number of dirs by branch
        base = hashlib.sha256(source.url.encode("utf-8")).hexdigest()
        index = (
            hashlib.sha256(source.branch.encode("utf-8")).digest()[0]
            % opal_server_config.SCOPES_REPO_CLONES_SHARDS
        )
        return f"{base}-{index}"

    def base_dir(base_dir: Path) -> Path:
        # NOTE(review): presumably a @staticmethod upstream; confirm.
        return base_dir / "git_sources"

    def repo_clone_path(base_dir: Path, source: GitPolicyScopeSource) -> Path:
        # NOTE(review): presumably a @staticmethod upstream; confirm.
        return GitPolicyFetcher.base_dir(base_dir) / GitPolicyFetcher.source_id(source)
class ScopeNotFoundError(Exception):
    """Raised when a requested scope id does not exist in the repository."""

    def __init__(self, id: str):
        # keep the missing scope id for the error message
        self._id = id

    def __str__(self) -> str:
        return "Scope {} not found".format(self._id)
class ScopeRepository:
    """Redis-backed CRUD store for Scope objects, keyed under a fixed prefix."""

    def __init__(self, redis_db: RedisDB):
        self._redis_db = redis_db
        # namespace under which all scope keys live
        self._prefix = "permit.io/Scope"

    def db(self) -> RedisDB:
        return self._redis_db

    async def all(self) -> List[Scope]:
        """Return every stored scope (scans all keys under the prefix)."""
        return [
            Scope.parse_raw(value)
            async for value in self._redis_db.scan(f"{self._prefix}:*")
        ]

    async def get(self, scope_id: str) -> Scope:
        """Fetch one scope by id.

        Raises:
            ScopeNotFoundError: when nothing is stored under this id.
        """
        value = await self._redis_db.get(self._redis_key(scope_id))
        if not value:
            raise ScopeNotFoundError(scope_id)
        return Scope.parse_raw(value)

    async def put(self, scope: Scope):
        """Create or overwrite a scope."""
        await self._redis_db.set(self._redis_key(scope.scope_id), scope)

    async def delete(self, scope_id: str):
        """Delete a scope by id."""
        await self._redis_db.delete(self._redis_key(scope_id))

    def _redis_key(self, scope_id: str):
        # full redis key for a given scope id
        return f"{self._prefix}:{scope_id}"
def init_scope_router(
    scopes: ScopeRepository,
    authenticator: JWTAuthenticator,
    pubsub_endpoint: PubSubEndpoint,
):
    """Build and return the FastAPI router for scope management.

    Exposes CRUD routes for scopes, refresh triggers (fanned out to all
    server replicas over pub/sub), and per-scope policy-bundle and
    data-source-config routes.
    """
    router = APIRouter()

    def _allowed_scoped_authenticator(
        claims: JWTClaims = Depends(authenticator), scope_id: str = Path(...)
    ):
        # per-scope authorization: the token's 'allowed_scopes' claim must
        # include the requested scope id (no-op when auth is disabled)
        if not authenticator.enabled:
            return

        allowed_scopes = claims.get("allowed_scopes")

        if not allowed_scopes or scope_id not in allowed_scopes:
            raise HTTPException(status.HTTP_403_FORBIDDEN)

    @router.put("", status_code=status.HTTP_201_CREATED)
    async def put_scope(
        *,
        force_fetch: bool = Query(
            False,
            description="Whether the policy repo must be fetched from remote",
        ),
        scope_in: Scope,
        claims: JWTClaims = Depends(authenticator),
    ):
        """Create or update a scope, then ask all replicas to sync it."""
        # only 'datasource' peers may create/update scopes
        try:
            require_peer_type(authenticator, claims, PeerType.datasource)
        except Unauthorized as ex:
            logger.error(f"Unauthorized to PUT scope: {repr(ex)}")
            raise

        # reject scopes whose SSH private key is malformed (HTTP 422)
        verify_private_key_or_throw(scope_in)
        await scopes.put(scope_in)

        force_fetch_str = " (force fetch)" if force_fetch else ""
        logger.info(f"Sync scope: {scope_in.scope_id}{force_fetch_str}")

        # All server replicas (leaders) should sync the scope.
        await pubsub_endpoint.publish(
            opal_server_config.POLICY_REPO_WEBHOOK_TOPIC,
            {"scope_id": scope_in.scope_id, "force_fetch": force_fetch},
        )

        return Response(status_code=status.HTTP_201_CREATED)

    @router.get(
        "",
        response_model=List[Scope],
        # auth credentials are stripped from the response
        response_model_exclude={"policy": {"auth"}},
    )
    async def get_all_scopes(*, claims: JWTClaims = Depends(authenticator)):
        """List all scopes (without their auth credentials)."""
        try:
            require_peer_type(authenticator, claims, PeerType.datasource)
        except Unauthorized as ex:
            logger.error(f"Unauthorized to get scopes: {repr(ex)}")
            raise

        return await scopes.all()

    @router.get(
        "/{scope_id}",
        response_model=Scope,
        # auth credentials are stripped from the response
        response_model_exclude={"policy": {"auth"}},
    )
    async def get_scope(*, scope_id: str, claims: JWTClaims = Depends(authenticator)):
        """Fetch one scope by id (404 when missing)."""
        try:
            require_peer_type(authenticator, claims, PeerType.datasource)
        except Unauthorized as ex:
            logger.error(f"Unauthorized to get scope: {repr(ex)}")
            raise

        try:
            scope = await scopes.get(scope_id)
            return scope
        except ScopeNotFoundError:
            raise HTTPException(
                status.HTTP_404_NOT_FOUND, detail=f"No such scope: {scope_id}"
            )

    @router.delete(
        "/{scope_id}",
        status_code=status.HTTP_204_NO_CONTENT,
    )
    async def delete_scope(
        *, scope_id: str, claims: JWTClaims = Depends(authenticator)
    ):
        """Delete a scope by id."""
        try:
            require_peer_type(authenticator, claims, PeerType.datasource)
        except Unauthorized as ex:
            logger.error(f"Unauthorized to delete scope: {repr(ex)}")
            raise

        # TODO: This should also asynchronously clean the repo from the disk (if it's not used by other scopes)
        await scopes.delete(scope_id)

        return Response(status_code=status.HTTP_204_NO_CONTENT)

    @router.post("/{scope_id}/refresh", status_code=status.HTTP_200_OK)
    async def refresh_scope(
        scope_id: str,
        hinted_hash: Optional[str] = Query(
            None,
            description="Commit hash that should exist in the repo. "
            + "If the commit is missing from the local clone, OPAL "
            + "understands it as a hint that the repo should be fetched from remote.",
        ),
        claims: JWTClaims = Depends(authenticator),
    ):
        """Trigger a re-sync of one scope on all server replicas."""
        try:
            require_peer_type(authenticator, claims, PeerType.datasource)
        except Unauthorized as ex:
            # NOTE(review): message says "delete scope" but this is the
            # refresh route — looks copy-pasted; confirm before changing.
            logger.error(f"Unauthorized to delete scope: {repr(ex)}")
            raise

        try:
            _ = await scopes.get(scope_id)
            logger.info(f"Refresh scope: {scope_id}")
            # If the hinted hash is None, we have no way to know whether we should
            # re-fetch the remote, so we force fetch, just in case.
            force_fetch = hinted_hash is None
            # All server replicas (leaders) should sync the scope.
            await pubsub_endpoint.publish(
                opal_server_config.POLICY_REPO_WEBHOOK_TOPIC,
                {
                    "scope_id": scope_id,
                    "force_fetch": force_fetch,
                    "hinted_hash": hinted_hash,
                },
            )
            return Response(status_code=status.HTTP_200_OK)
        except ScopeNotFoundError:
            raise HTTPException(
                status.HTTP_404_NOT_FOUND, detail=f"No such scope: {scope_id}"
            )

    @router.post("/refresh", status_code=status.HTTP_200_OK)
    async def sync_all_scopes(claims: JWTClaims = Depends(authenticator)):
        """sync all scopes."""
        try:
            require_peer_type(authenticator, claims, PeerType.datasource)
        except Unauthorized as ex:
            logger.error(f"Unauthorized to refresh all scopes: {repr(ex)}")
            raise

        # All server replicas (leaders) should sync all scopes.
        # (publishing the topic with no message body triggers a full sync)
        await pubsub_endpoint.publish(opal_server_config.POLICY_REPO_WEBHOOK_TOPIC)
        return Response(status_code=status.HTTP_200_OK)

    @router.get(
        "/{scope_id}/policy",
        response_model=PolicyBundle,
        status_code=status.HTTP_200_OK,
        dependencies=[Depends(_allowed_scoped_authenticator)],
    )
    async def get_scope_policy(
        *,
        scope_id: str = Path(..., title="Scope ID"),
        base_hash: Optional[str] = Query(
            None,
            description="hash of previous bundle already downloaded, server will return a diff bundle.",
        ),
    ):
        """Serve the scope's policy bundle (full, or a diff vs base_hash),
        falling back to the 'default' scope's bundle when the scope or its
        repo is unusable."""
        try:
            scope = await scopes.get(scope_id)
        except ScopeNotFoundError:
            logger.warning(
                "Requested scope {scope_id} not found, returning default scope",
                scope_id=scope_id,
            )
            return await _generate_default_scope_bundle(scope_id)

        if not isinstance(scope.policy, GitPolicyScopeSource):
            raise HTTPException(
                status.HTTP_501_NOT_IMPLEMENTED,
                detail=f"policy source is not yet implemented: {scope_id}",
            )

        fetcher = GitPolicyFetcher(
            pathlib.Path(opal_server_config.BASE_DIR),
            scope.scope_id,
            cast(GitPolicyScopeSource, scope.policy),
        )

        try:
            # bundle building is blocking git/IO work — run it in an executor
            return await run_sync(fetcher.make_bundle, base_hash)
        except (InvalidGitRepositoryError, pygit2.GitError, ValueError):
            logger.warning(
                "Requested scope {scope_id} has invalid repo, returning default scope",
                scope_id=scope_id,
            )
            return await _generate_default_scope_bundle(scope_id)

    async def _generate_default_scope_bundle(scope_id: str) -> PolicyBundle:
        # serve the "default" scope's bundle instead, recording a metrics event
        metrics.event(
            "ScopeNotFound",
            message=f"Scope {scope_id} not found. Serving default scope instead",
            tags={"scope_id": scope_id},
        )

        try:
            scope = await scopes.get("default")
            fetcher = GitPolicyFetcher(
                pathlib.Path(opal_server_config.BASE_DIR),
                scope.scope_id,
                cast(GitPolicyScopeSource, scope.policy),
            )
            return fetcher.make_bundle(None)
        except (
            ScopeNotFoundError,
            InvalidGitRepositoryError,
            pygit2.GitError,
            ValueError,
        ):
            # the default scope is itself missing/broken -> report the
            # originally requested scope as not found
            raise ScopeNotFoundError(scope_id)

    @router.get(
        "/{scope_id}/data",
        response_model=DataSourceConfig,
        status_code=status.HTTP_200_OK,
        dependencies=[Depends(_allowed_scoped_authenticator)],
    )
    async def get_scope_data_config(
        *,
        scope_id: str = Path(..., title="Scope ID"),
        authorization: Optional[str] = Header(None),
    ):
        """Serve the scope's data-source config; unknown scopes fall back to
        the server-wide OPAL_DATA_CONFIG_SOURCES setting."""
        logger.info(
            "Serving source configuration for scope {scope_id}", scope_id=scope_id
        )
        try:
            scope = await scopes.get(scope_id)
            return scope.data
        except ScopeNotFoundError as ex:
            logger.warning(
                "Requested scope {scope_id} not found, returning OPAL_DATA_CONFIG_SOURCES",
                scope_id=scope_id,
            )
            try:
                config: ServerDataSourceConfig = opal_server_config.DATA_CONFIG_SOURCES
                if config.external_source_url:
                    # forward the caller's JWT so the external source can
                    # authorize the redirected request
                    url = str(config.external_source_url)
                    token = get_token_from_header(authorization)
                    redirect_url = set_url_query_param(url, "token", token)
                    return RedirectResponse(url=redirect_url)
                else:
                    return config.config
            except ScopeNotFoundError:
                raise HTTPException(status.HTTP_404_NOT_FOUND, detail=str(ex))

    @router.post("/{scope_id}/data/update")
    async def publish_data_update_event(
        update: DataUpdate,
        claims: JWTClaims = Depends(authenticator),
        scope_id: str = Path(..., description="Scope ID"),
    ):
        """Publish a data update to the scope's clients; every topic is
        namespaced with a 'data:' prefix before publishing."""
        try:
            require_peer_type(authenticator, claims, PeerType.datasource)

            restrict_optional_topics_to_publish(authenticator, claims, update)

            for entry in update.entries:
                entry.topics = [f"data:{topic}" for topic in entry.topics]

            await DataUpdatePublisher(
                ScopedServerSideTopicPublisher(pubsub_endpoint, scope_id)
            ).publish_data_updates(update)
        except Unauthorized as ex:
            logger.error(f"Unauthorized to publish update: {repr(ex)}")
            raise

    return router
156,204 | from opal_common.logger import logger
from opal_common.schemas.policy_source import (
GitPolicyScopeSource,
NoAuthData,
SSHAuthData,
)
from opal_common.schemas.scopes import Scope
from opal_server.config import ServerRole, opal_server_config
from opal_server.scopes.scope_repository import ScopeRepository
async def _load_env_scope(repo: ScopeRepository):
    """Create the backwards-compatible default scope from env configuration.

    When OPAL_POLICY_REPO_URL is set, builds a Scope from the legacy
    single-repo env vars and stores it in *repo*. Does nothing otherwise.
    """
    # backwards compatible opal scope
    if opal_server_config.POLICY_REPO_URL is not None:
        logger.info(
            "Adding default scope from env: {url}",
            url=opal_server_config.POLICY_REPO_URL,
        )
        auth = NoAuthData()

        if opal_server_config.POLICY_REPO_SSH_KEY is not None:
            private_ssh_key = opal_server_config.POLICY_REPO_SSH_KEY
            # env vars may encode multiline keys with '_' in place of newlines;
            # only decode when the key is NOT already multiline — otherwise a
            # legitimate underscore inside the key would be corrupted (same
            # guard as maybe_decode_multiline_key).
            if "\n" not in private_ssh_key:
                private_ssh_key = private_ssh_key.replace("_", "\n")
            if not private_ssh_key.endswith("\n"):
                private_ssh_key += "\n"

            auth = SSHAuthData(username="git", private_key=private_ssh_key)

        scope = Scope(
            scope_id=DEFAULT_SCOPE_ID,
            policy=GitPolicyScopeSource(
                source_type=opal_server_config.POLICY_SOURCE_TYPE.lower(),
                url=opal_server_config.POLICY_REPO_URL,
                manifest=opal_server_config.POLICY_REPO_MANIFEST_PATH,
                branch=opal_server_config.POLICY_REPO_MAIN_BRANCH,
                auth=auth,
            ),
        )

        await repo.put(scope)
class ScopeRepository:
    """Redis-backed store of Scope objects, serialized under a key prefix."""

    def __init__(self, redis_db: RedisDB):
        self._redis_db = redis_db
        # namespace under which all scope keys live
        self._prefix = "permit.io/Scope"

    def db(self) -> RedisDB:
        # accessor for the underlying redis wrapper
        return self._redis_db

    async def all(self) -> List[Scope]:
        """Return every stored scope (scans all keys under the prefix)."""
        scopes = []
        async for value in self._redis_db.scan(f"{self._prefix}:*"):
            scope = Scope.parse_raw(value)
            scopes.append(scope)
        return scopes

    async def get(self, scope_id: str) -> Scope:
        """Fetch one scope by id.

        Raises:
            ScopeNotFoundError: when nothing is stored under this id.
        """
        key = self._redis_key(scope_id)
        value = await self._redis_db.get(key)
        if value:
            return Scope.parse_raw(value)
        else:
            raise ScopeNotFoundError(scope_id)

    async def put(self, scope: Scope):
        """Create or overwrite a scope."""
        key = self._redis_key(scope.scope_id)
        await self._redis_db.set(key, scope)

    async def delete(self, scope_id: str):
        """Delete a scope by id."""
        key = self._redis_key(scope_id)
        await self._redis_db.delete(key)

    def _redis_key(self, scope_id: str):
        # full redis key for a given scope id
        return f"{self._prefix}:{scope_id}"
async def load_scopes(repo: ScopeRepository):
    """Seed the scope repository on the primary server by loading the
    env-defined default scope."""
    logger.info("Server is primary, loading default scope.")
    await _load_env_scope(repo)
156,205 | import datetime
import shutil
from functools import partial
from pathlib import Path
from typing import List, Optional, Set, cast
import git
from ddtrace import tracer
from fastapi_websocket_pubsub import PubSubEndpoint
from opal_common.git.commit_viewer import VersionedFile
from opal_common.logger import logger
from opal_common.schemas.policy import PolicyUpdateMessageNotification
from opal_common.schemas.policy_source import GitPolicyScopeSource
from opal_common.topics.publisher import ScopedServerSideTopicPublisher
from opal_server.git_fetcher import GitPolicyFetcher, PolicyFetcherCallbacks
from opal_server.policy.watcher.callbacks import (
create_policy_update,
create_update_all_directories_in_repo,
)
from opal_server.scopes.scope_repository import Scope, ScopeRepository
class VersionedFile(VersionedNode):
    """Each instance of this class represents *one version* of a file (blob) in
    a git repo (the version of the file for a specific git commit)."""

    def __init__(self, blob: Blob, commit: Commit):
        super().__init__(blob, commit)
        self._blob: Blob = blob

    # `blob` and `stream` are properties: the class's own code accesses them
    # without calling (self.blob.data_stream / self.stream.read()), which
    # would fail if they were plain methods.
    @property
    def blob(self) -> Blob:
        """the blob containing metadata for the file version."""
        return self._blob

    @property
    def stream(self) -> IO:
        """an io stream to the version of the file represented by that
        instance.

        reading that stream will return the contents of the file for
        that specific version (commit).
        """
        return self.blob.data_stream

    def read_bytes(self) -> bytes:
        """returns the contents of the file as a byte array (without
        encoding)."""
        return self.stream.read()

    def read(self, encoding="utf-8") -> str:
        """returns the contents of the file as a string, decoded according to
        the input `encoding`.

        (by default, git usually encodes source files as utf-8).
        """
        return self.read_bytes().decode(encoding=encoding)
The provided code snippet includes the necessary dependencies for implementing the `is_rego_source_file` function. Write a Python function `def is_rego_source_file(f: VersionedFile, extensions: Optional[List[str]] = None) -> bool` to solve the following problem:
Filter for Rego source files or OPA `data.json` files only.
Here is the function:
def is_rego_source_file(
    f: VersionedFile, extensions: Optional[List[str]] = None
) -> bool:
    """filters only rego files or data.json files."""
    REGO = ".rego"
    JSON = ".json"
    OPA_JSON = "data.json"

    if extensions is None:
        extensions = [REGO, JSON]

    suffix = f.path.suffix
    # json files only count as data files when named exactly `data.json`
    if JSON in extensions and suffix == JSON:
        return f.path.name == OPA_JSON
    return suffix in extensions
156,206 | from typing import Optional
from fastapi import APIRouter, Depends, Header, HTTPException, status
from fastapi.responses import RedirectResponse
from opal_common.authentication.authz import (
require_peer_type,
restrict_optional_topics_to_publish,
)
from opal_common.authentication.deps import JWTAuthenticator, get_token_from_header
from opal_common.authentication.types import JWTClaims
from opal_common.authentication.verifier import Unauthorized
from opal_common.logger import logger
from opal_common.schemas.data import (
DataSourceConfig,
DataUpdate,
DataUpdateReport,
ServerDataSourceConfig,
)
from opal_common.schemas.security import PeerType
from opal_common.urls import set_url_query_param
from opal_server.config import opal_server_config
from opal_server.data.data_update_publisher import DataUpdatePublisher
def require_peer_type(
authenticator: JWTAuthenticator, claims: JWTClaims, required_type: PeerType
):
def restrict_optional_topics_to_publish(
authenticator: JWTAuthenticator, claims: JWTClaims, update: DataUpdate
):
def get_token_from_header(authorization_header: str) -> Optional[str]:
class JWTAuthenticator(_JWTAuthenticator):
def __call__(self, authorization: Optional[str] = Header(None)) -> JWTClaims:
JWTClaims = Dict[str, Any]
class Unauthorized(HTTPException):
def __init__(self, description="Bearer token is not valid!", **kwargs):
class DataSourceConfig(BaseModel):
class ServerDataSourceConfig(BaseModel):
def check_passwords_match(cls, values):
class DataUpdate(BaseModel):
class DataUpdateReport(BaseModel):
class PeerType(str, Enum):
def set_url_query_param(url: str, param_name: str, param_value: str):
opal_server_config = OpalServerConfig(prefix="OPAL_")
class DataUpdatePublisher:
def __init__(self, publisher: TopicPublisher) -> None:
def get_topic_combos(topic: str) -> List[str]:
async def publish_data_updates(self, update: DataUpdate):
async def _periodic_update_callback(
self, update: DataSourceEntryWithPollingInterval
):
def create_polling_updates(self, sources: ServerDataSourceConfig):
async def bind_for_repeat(bind_source=source):
async def mount_and_start_polling_updates(
publisher: TopicPublisher, sources: ServerDataSourceConfig
):
def init_data_updates_router(
    data_update_publisher: DataUpdatePublisher,
    data_sources_config: ServerDataSourceConfig,
    authenticator: JWTAuthenticator,
):
    """Build the FastAPI router exposing OPAL's data-update API.

    Routes created:
    - GET  ALL_DATA_ROUTE: fallback "empty" data source (hit only when
      DATA_CONFIG_SOURCES was not configured by the deployer)
    - POST DATA_CALLBACK_DEFAULT_ROUTE: default update-report callback (logs it)
    - GET  DATA_CONFIG_ROUTE: serves (or redirects to) the client's base data config
    - POST DATA_CONFIG_ROUTE: lets authorized data sources publish data updates
    """
    router = APIRouter()

    @router.get(opal_server_config.ALL_DATA_ROUTE)
    async def default_all_data():
        """A fake data source configured to be fetched by the default data
        source config.

        If the user deploying OPAL did not set DATA_CONFIG_SOURCES
        properly, OPAL clients will be hitting this route, which will
        return an empty dataset (empty dict).
        """
        logger.warning(
            "Serving default all-data route, meaning DATA_CONFIG_SOURCES was not configured!"
        )
        return {}

    @router.post(
        opal_server_config.DATA_CALLBACK_DEFAULT_ROUTE,
        dependencies=[Depends(authenticator)],
    )
    async def log_client_update_report(report: DataUpdateReport):
        """A data update callback to be called by the OPAL client after
        completing an update.

        If the user deploying OPAL-client did not set
        OPAL_DEFAULT_UPDATE_CALLBACKS properly, this method will be
        called as the default callback (will simply log the report).
        """
        logger.info(
            "Received update report: {report}",
            # strip the potentially-large (and potentially-sensitive) entry
            # config/data from each report before logging
            report=report.dict(
                exclude={"reports": {"__all__": {"entry": {"config", "data"}}}}
            ),
        )
        return {}  # simply returns 200

    @router.get(
        opal_server_config.DATA_CONFIG_ROUTE,
        response_model=DataSourceConfig,
        responses={
            307: {
                "description": "The data source configuration is available at another location (redirect)"
            },
        },
        dependencies=[Depends(authenticator)],
    )
    async def get_data_sources_config(authorization: Optional[str] = Header(None)):
        """Provides OPAL clients with their base data config, meaning from
        where they should fetch a *complete* picture of the policy data they
        need.

        Clients will use this config to pull all data when they
        initially load and when they are reconnected to server after a
        period of disconnection (in which they cannot receive
        incremental updates).
        """
        token = get_token_from_header(authorization)
        if data_sources_config.config is not None:
            logger.info("Serving source configuration")
            return data_sources_config.config
        elif data_sources_config.external_source_url is not None:
            url = str(data_sources_config.external_source_url)
            # NOTE(review): `token` can be None here (authorization header is
            # Optional), in which case slicing would raise — presumably the
            # authenticator dependency guarantees a token; confirm.
            short_token = token[:5] + "..." + token[-5:]
            logger.info(
                "Source configuration is available at '{url}', redirecting with token={token} (abbrv.)",
                url=url,
                token=short_token,
            )
            # forward the caller's own token to the external source via query param
            redirect_url = set_url_query_param(url, "token", token)
            return RedirectResponse(url=redirect_url)
        else:
            logger.error("pydantic model invalid", model=data_sources_config)
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Did not find a data source configuration!",
            )

    @router.post(opal_server_config.DATA_CONFIG_ROUTE)
    async def publish_data_update_event(
        update: DataUpdate, claims: JWTClaims = Depends(authenticator)
    ):
        """Provides data providers (i.e: one of the backend services owned by
        whomever deployed OPAL) with the ability to push incremental policy
        data updates to OPAL clients.

        Each update contains instructions on:
        - how to fetch the data
        - where should OPAL client store the data in OPA document hierarchy
        - what clients should receive the update (through topics, only clients subscribed to provided topics will be notified)
        """
        try:
            require_peer_type(
                authenticator, claims, PeerType.datasource
            )  # may throw Unauthorized
            restrict_optional_topics_to_publish(
                authenticator, claims, update
            )  # may throw Unauthorized
        except Unauthorized as e:
            logger.error(f"Unauthorized to publish update: {repr(e)}")
            raise
        await data_update_publisher.publish_data_updates(update)
        return {"status": "ok"}

    return router
156,207 | from fastapi import APIRouter, Request
from opal_common.logger import logger
from slowapi import Limiter
The provided code snippet includes necessary dependencies for implementing the `init_loadlimit_router` function. Write a Python function `def init_loadlimit_router(loadlimit_notation: str = None)` to solve the following problem:
initializes the '/loadlimit' route, used to exercise the server's global rate limiting. If a rate-limit notation is configured, requests to the route are rate limited globally (one shared bucket for all clients, not per-client); otherwise the route accepts all requests and simply returns an empty response.
Here is the function:
def init_loadlimit_router(loadlimit_notation: str = None):
    """initializes the '/loadlimit' route, used to exercise the server's
    global rate limiting.

    If a rate-limit notation is provided, requests to the route are
    limited globally (a single shared bucket for all callers, not
    per-client); otherwise the route accepts all requests. The route
    itself does no work and returns an empty (200) response.
    """
    router = APIRouter()
    # We want to globally limit the endpoint, not per client
    limiter = Limiter(key_func=lambda: "global")
    if loadlimit_notation:
        logger.info(f"rate limiting is on, configured limit: {loadlimit_notation}")

        @router.get("/loadlimit")
        @limiter.limit(loadlimit_notation)
        async def loadlimit(request: Request):
            return

    else:
        # no limit configured: same route, without the limiter decorator

        @router.get("/loadlimit")
        async def loadlimit(request: Request):
            return

    return router
156,208 | from datetime import datetime
from fastapi import APIRouter, Depends, HTTPException, status
from opal_common.authentication.deps import StaticBearerAuthenticator
from opal_common.authentication.signer import JWTSigner
from opal_common.logger import logger
from opal_common.schemas.security import AccessToken, AccessTokenRequest, TokenDetails
class StaticBearerAuthenticator:
    """Pre-shared ("static") bearer token authentication for http(s) api
    endpoints.

    Raises 401 unless the request carries the preconfigured token; if no
    token was preconfigured, authentication is effectively disabled.
    """

    def __init__(self, preconfigured_token: Optional[str]):
        self._preconfigured_token = preconfigured_token

    def __call__(self, authorization: Optional[str] = Header(None)):
        expected = self._preconfigured_token
        if expected is None:
            # no token configured -> always allow
            return
        if authorization is None:
            raise Unauthorized(description="Authorization header is required!")
        provided = get_token_from_header(authorization)
        if provided is None or provided != expected:
            raise Unauthorized(
                token=provided, description="unauthorized to access this endpoint!"
            )
class JWTSigner(JWTVerifier):
    """given cryptographic keys, signs and verifies jwt tokens."""

    def __init__(
        self,
        private_key: Optional[PrivateKey],
        public_key: Optional[PublicKey],
        algorithm: JWTAlgorithm,
        audience: str,
        issuer: str,
    ):
        """inits the signer if and only if the keys provided to __init__ were
        generated together and are valid. otherwise will throw.

        JWT signer can be initialized with empty keys (None),
        in which case signer.enabled == False.
        This allows opal to run both in secure mode (with keys, requires jwt authentication)
        and in insecure mode (good for development and running locally).

        Args:
            private_key (PrivateKey): a valid private key or None
            public_key (PublicKey): a valid public key or None
            algorithm (JWTAlgorithm): the jwt algorithm to use
                (possible values: https://pyjwt.readthedocs.io/en/stable/algorithms.html)
            audience (string): the value for the aud claim: https://tools.ietf.org/html/rfc7519#section-4.1.3
            issuer (string): the value for the iss claim: https://tools.ietf.org/html/rfc7519#section-4.1.1
        """
        super().__init__(
            public_key=public_key, algorithm=algorithm, audience=audience, issuer=issuer
        )
        self._private_key = private_key
        # validate the key pair up-front so misconfiguration fails fast
        self._verify_crypto_keys()

    def _verify_crypto_keys(self):
        """verifies whether or not valid crypto keys were provided to the
        signer. if both keys are valid, encodes and decodes a JWT to make sure
        the keys match.

        if both private and public keys are valid and are matching =>
        signer is enabled if both private and public keys are None =>
        signer is disabled (self.enabled == False) if only one key is
        valid/not-None => throws ValueError any other case => throws
        ValueError
        """
        if self._private_key is not None and self._public_key is not None:
            # both keys provided, let's make sure these keys were generated correctly
            # (round-trip: sign with the private key, verify with the public key)
            token = jwt.encode(
                {"some": "payload"}, self._private_key, algorithm=self._algorithm
            )
            try:
                jwt.decode(token, self._public_key, algorithms=[self._algorithm])
            except jwt.PyJWTError as exc:
                logger.info(
                    "JWT Signer key verification failed with error: {err}",
                    err=repr(exc),
                )
                raise InvalidJWTCryptoKeysException(
                    "private key and public key do not match!"
                ) from exc
            # save jwk (used later to embed the key id in signed token headers)
            self._jwk: PyJWK = PyJWK.from_json(
                self.get_jwk(), algorithm=self._algorithm
            )
        elif self._private_key is None and self._public_key is not None:
            raise ValueError(
                "JWT Signer not valid, you provided a public key without a private key!"
            )
        elif self._private_key is not None and self._public_key is None:
            raise ValueError(
                "JWT Signer not valid, you provided a private key without a public key!"
            )
        elif self._private_key is None and self._public_key is None:
            # valid situation, running in dev mode and api security is off
            self._disable()
        else:
            raise ValueError("Invalid JWT Signer input!")

    def sign(
        self, sub: UUID, token_lifetime: timedelta, custom_claims: dict = {}
    ) -> str:
        """Sign and return a JWT for subject `sub`, valid for `token_lifetime`.

        NOTE: the mutable default `custom_claims={}` is safe here because the
        dict is only read (via payload.update), never mutated.
        """
        payload = {}  # NOTE(review): immediately reassigned below; dead assignment
        issued_at = datetime.utcnow()
        expire_at = issued_at + token_lifetime
        # standard registered claims: https://tools.ietf.org/html/rfc7519#section-4.1
        payload = {
            "iat": issued_at,
            "exp": expire_at,
            "aud": self._audience,
            "iss": self._issuer,
            "sub": sub.hex,
        }
        if custom_claims:
            payload.update(custom_claims)
        headers = {}
        if self._jwk.key_id is not None:
            # advertise the key id so verifiers can pick the right JWK
            headers = {"kid": self._jwk.key_id}
        return jwt.encode(
            payload, self._private_key, algorithm=self._algorithm, headers=headers
        )
class AccessTokenRequest(BaseModel):
    """a request to generate an access token to opal server."""

    # unique id for the issued token (auto-generated if not provided)
    id: UUID = Field(default_factory=uuid4)
    type: PeerType = Field(PeerType.client, description=PEER_TYPE_DESCRIPTION)
    ttl: timedelta = Field(timedelta(days=365), description=TTL_DESCRIPTION)
    claims: dict = Field({}, description=CLAIMS_DESCRIPTION)

    # NOTE(review): coerces `type` from str into PeerType — presumably
    # decorated with a pydantic @validator("type", pre=True) in the original
    # source; the decorator appears to have been lost. Confirm against upstream.
    def force_enum(cls, v):
        if isinstance(v, str):
            return PeerType(v)
        if isinstance(v, PeerType):
            return v
        raise ValueError(f"invalid value: {v}")

    class Config:
        use_enum_values = True
        allow_population_by_field_name = True
class TokenDetails(BaseModel):
    """Metadata describing an issued OPAL access token."""

    id: UUID
    type: PeerType = Field(PeerType.client, description=PEER_TYPE_DESCRIPTION)
    # NOTE(review): name reads like a bool, but holds the expiration datetime
    expired: datetime
    claims: dict
class AccessToken(BaseModel):
    """A signed JWT returned to the caller, plus its metadata."""

    token: str
    type: str = "bearer"
    details: Optional[TokenDetails]
def init_security_router(signer: JWTSigner, authenticator: StaticBearerAuthenticator):
    """Build the router exposing POST /token, which mints OPAL access tokens.

    Access to the endpoint itself is guarded by the static master-token
    authenticator; the returned JWT is signed by `signer`.
    """
    router = APIRouter()

    @router.post(
        "/token",
        status_code=status.HTTP_200_OK,
        response_model=AccessToken,
        dependencies=[Depends(authenticator)],
    )
    async def generate_new_access_token(req: AccessTokenRequest):
        # signer is disabled when no crypto keys were configured — cannot mint
        if not signer.enabled:
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail="opal server was not configured with security, cannot generate tokens!",
            )
        # always embed the peer type; caller-supplied claims come on top
        claims = {"peer_type": req.type.value, **req.claims}
        token = signer.sign(sub=req.id, token_lifetime=req.ttl, custom_claims=claims)
        logger.info(f"Generated opal token: peer_type={req.type.value}")
        return AccessToken(
            token=token,
            details=TokenDetails(
                id=req.id,
                type=req.type,
                expired=datetime.utcnow() + req.ttl,
                claims=claims,
            ),
        )

    return router
156,209 | from fastapi_websocket_pubsub import PubSubClient, Topic
from opal_common.confi.confi import load_conf_if_none
from opal_common.topics.publisher import (
ClientSideTopicPublisher,
PeriodicPublisher,
ServerSideTopicPublisher,
TopicPublisher,
)
from opal_common.utils import get_authorization_header
from opal_server.config import opal_server_config
def load_conf_if_none(variable, conf):
class TopicPublisher:
def __init__(self):
async def publish(self, topics: TopicList, data: Any = None):
async def __aenter__(self):
async def __aexit__(self, exc_type, exc, tb):
def start(self):
async def _add_task(self, task: asyncio.Task):
async def wait(self):
async def stop(self):
def _cleanup_task(self, task: asyncio.Task):
class ClientSideTopicPublisher(TopicPublisher):
def __init__(self, client: PubSubClient, server_uri: str):
def start(self):
async def stop(self):
async def wait_until_done(self):
async def publish(self, topics: TopicList, data: Any = None):
async def _publish(self, topics: TopicList, data: Any = None) -> bool:
def get_authorization_header(token: str) -> Tuple[str, str]:
opal_server_config = OpalServerConfig(prefix="OPAL_")
def setup_publisher_task(
    server_uri: str = None,
    server_token: str = None,
) -> TopicPublisher:
    """Build a client-side topic publisher connected to the OPAL server.

    Arguments left as None fall back to the server's own configured local
    websocket url / token (i.e. publishing through the local server).
    """
    uri = load_conf_if_none(
        server_uri,
        opal_server_config.OPAL_WS_LOCAL_URL,
    )
    token = load_conf_if_none(
        server_token,
        opal_server_config.OPAL_WS_TOKEN,
    )
    auth_headers = [get_authorization_header(token)]
    pubsub_client = PubSubClient(extra_headers=auth_headers)
    return ClientSideTopicPublisher(client=pubsub_client, server_uri=uri)
156,210 | from fastapi_websocket_pubsub import PubSubClient, Topic
from opal_common.confi.confi import load_conf_if_none
from opal_common.topics.publisher import (
ClientSideTopicPublisher,
PeriodicPublisher,
ServerSideTopicPublisher,
TopicPublisher,
)
from opal_common.utils import get_authorization_header
from opal_server.config import opal_server_config
class PeriodicPublisher:
    """Wrapper for a task that publishes to topic on fixed interval
    periodically."""

    def __init__(
        self,
        publisher: TopicPublisher,
        time_interval: int,
        topic: Topic,
        message: Any = None,
        task_name: str = "periodic publish task",
    ):
        """inits the publisher.

        Args:
            publisher (TopicPublisher): can publish messages on the pub/sub channel
            time_interval (int): the time interval between publishing consecutive messages
            topic (Topic): the topic to publish on
            message (Any): the message to publish
            task_name (str): human-readable name used in log messages
        """
        self._publisher = publisher
        self._interval = time_interval
        self._topic = topic
        self._message = message
        self._task_name = task_name
        self._task: Optional[asyncio.Task] = None

    async def __aenter__(self):
        self.start()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.stop()

    def start(self):
        """starts the periodic publisher task."""
        # idempotent: a second start() is a no-op (with a warning)
        if self._task is not None:
            logger.warning(f"{self._task_name} already started")
            return

        logger.info(
            f"started {self._task_name}: topic is '{self._topic}', interval is {self._interval} seconds"
        )
        self._task = asyncio.create_task(self._publish_task())

    async def stop(self):
        """stops the publisher (cancels any running publishing tasks)"""
        if self._task is not None:
            self._task.cancel()
            try:
                await self._task
            except asyncio.CancelledError:
                # expected: we just cancelled it
                pass
            self._task = None
            logger.info(f"cancelled {self._task_name} to topic: {self._topic}")

    async def wait_until_done(self):
        # NOTE(review): the publish loop is infinite, so this only returns when
        # the task is cancelled; raises TypeError if start() was never called
        # (self._task is None) — confirm callers always start first.
        await self._task

    async def _publish_task(self):
        # sleep first: the initial publish happens one full interval after start()
        while True:
            await asyncio.sleep(self._interval)
            logger.info(
                f"{self._task_name}: publishing message on topic '{self._topic}', next publish is scheduled in {self._interval} seconds"
            )
            async with self._publisher:
                await self._publisher.publish(topics=[self._topic], data=self._message)
class ServerSideTopicPublisher(TopicPublisher):
    """A simple wrapper around a PubSubEndpoint that exposes publish()."""

    def __init__(self, endpoint: PubSubEndpoint):
        """inits the publisher.

        Args:
            endpoint (PubSubEndpoint): a pub/sub endpoint
        """
        self._endpoint = endpoint
        super().__init__()

    async def _publish_impl(self, topics: TopicList, data: Any = None):
        # NOTE(review): `tracer` is a module-level name not shown in this
        # snippet — presumably an APM tracer (trace each publish); confirm import.
        with tracer.trace("topic_publisher.publish", resource=str(topics)):
            await self._endpoint.publish(topics=topics, data=data)

    async def publish(self, topics: TopicList, data: Any = None):
        # fire-and-forget: schedule the publish and track the task on the base class
        await self._add_task(asyncio.create_task(self._publish_impl(topics, data)))
The provided code snippet includes necessary dependencies for implementing the `setup_broadcaster_keepalive_task` function. Write a Python function `def setup_broadcaster_keepalive_task( publisher: ServerSideTopicPublisher, time_interval: int, topic: Topic = "__broadcast_session_keepalive__", ) -> PeriodicPublisher` to solve the following problem:
a periodic publisher with the intent to trigger messages on the broadcast channel, so that the session to the backbone won't become idle and close on the backbone end.
Here is the function:
def setup_broadcaster_keepalive_task(
    publisher: ServerSideTopicPublisher,
    time_interval: int,
    topic: Topic = "__broadcast_session_keepalive__",
) -> PeriodicPublisher:
    """Create a periodic publisher whose only job is to keep the broadcast
    backbone session alive.

    Publishing on the keepalive topic every `time_interval` seconds prevents
    the session to the backbone from going idle and being closed on the
    backbone end.
    """
    keepalive = PeriodicPublisher(
        publisher,
        time_interval,
        topic,
        task_name="broadcaster keepalive task",
    )
    return keepalive
156,211 | import asyncio
from datetime import datetime
from random import uniform
from typing import Dict, List, Optional, Set
from uuid import uuid4
import pydantic
from fastapi import APIRouter, HTTPException, status
from fastapi_websocket_pubsub.event_notifier import Subscription, TopicList
from fastapi_websocket_pubsub.pub_sub_server import PubSubEndpoint
from opal_common.config import opal_common_config
from opal_common.logger import get_logger
from opal_server.config import opal_server_config
from pydantic import BaseModel, Field
class ServerStats(BaseModel):
    """Snapshot of a single OPAL server worker's connected-clients state."""

    uptime: datetime = Field(..., description="uptime for this opal server worker")
    clients: Dict[str, List[ChannelStats]] = Field(
        ...,
        description="connected opal clients, each client can have multiple subscriptions",
    )
logger = get_logger("opal.statistics")
class OpalStatistics:
    """manage opal server statistics.

    Args:
        endpoint:
            The pub/sub server endpoint that allows us to subscribe to the stats channel on the server side
    """

    def __init__(self, endpoint):
        self._endpoint: PubSubEndpoint = endpoint
        self._uptime = datetime.utcnow()
        # state: Dict[str, List[ChannelStats]]
        # The state is built in this way so it will be easy to understand how much OPAL clients (vs. rpc clients)
        # you have connected to your OPAL server and to help merge client lists between servers.
        # The state is keyed by unique client id (A unique id that each opal client can set in env var `OPAL_CLIENT_STAT_ID`)
        self._state: ServerStats = ServerStats(uptime=self._uptime, clients={})
        # rpc_id_to_client_id:
        # dict to help us get client id without another loop
        self._rpc_id_to_client_id: Dict[str, str] = {}
        # guards mutations of _state / _rpc_id_to_client_id / _received_sync_messages
        self._lock = asyncio.Lock()
        # helps us realize when another server already responded to a sync request
        self._worker_id = uuid4().hex
        self._synced_after_wakeup = asyncio.Event()
        self._received_sync_messages: Set[str] = set()

    # NOTE(review): consumed as `stats.state` (attribute access, no call) by
    # the statistics router — presumably decorated with @property in the
    # original source; the decorator may have been lost. Confirm upstream.
    def state(self) -> ServerStats:
        return self._state

    async def run(self):
        """subscribe to two channels to be able to sync add and delete of
        clients."""
        # worker-to-worker sync channels
        await self._endpoint.subscribe(
            [opal_server_config.STATISTICS_WAKEUP_CHANNEL],
            self._receive_other_worker_wakeup_message,
        )
        await self._endpoint.subscribe(
            [opal_server_config.STATISTICS_STATE_SYNC_CHANNEL],
            self._receive_other_worker_synced_state,
        )
        # client connect/disconnect channels
        await self._endpoint.subscribe(
            [opal_common_config.STATISTICS_ADD_CLIENT_CHANNEL], self._add_client
        )
        await self._endpoint.subscribe(
            [opal_common_config.STATISTICS_REMOVE_CLIENT_CHANNEL],
            self._sync_remove_client,
        )
        # wait before publishing the wakeup message, due to the fact we are
        # counting on the broadcaster to listen and to replicate the message
        # to the other workers / server nodes in the networks.
        # However, since broadcaster is using asyncio.create_task(), there is a
        # race condition that is mitigated by this asyncio.sleep() call.
        await asyncio.sleep(SLEEP_TIME_FOR_BROADCASTER_READER_TO_START)
        # Let all the other opal servers know that new opal server started
        logger.info(f"sending stats wakeup message: {self._worker_id}")
        asyncio.create_task(
            self._endpoint.publish(
                [opal_server_config.STATISTICS_WAKEUP_CHANNEL],
                SyncRequest(requesting_worker_id=self._worker_id).dict(),
            )
        )

    async def _sync_remove_client(self, subscription: Subscription, rpc_id: str):
        """helper function to recall remove client in all servers.

        Args:
            subscription (Subscription): not used, we get it from callbacks.
            rpc_id (str): channel id of rpc channel used as identifier to client id
        """
        # publish=False: this is already a replicated message — avoid republish loops
        await self.remove_client(rpc_id=rpc_id, topics=[], publish=False)

    async def _receive_other_worker_wakeup_message(
        self, subscription: Subscription, sync_request: dict
    ):
        """Callback when new server wakes up and requests our statistics state.

        Sends state only if we have state of our own and another
        response to that request was not already received.
        """
        try:
            request = SyncRequest(**sync_request)
        except pydantic.ValidationError as e:
            logger.warning(
                f"Got invalid statistics sync request from another server, error: {repr(e)}"
            )
            return
        if self._worker_id == request.requesting_worker_id:
            # skip my own requests
            logger.debug(
                f"IGNORING my own stats wakeup message: {request.requesting_worker_id}"
            )
            return
        logger.debug(f"received stats wakeup message: {request.requesting_worker_id}")
        if len(self._state.clients):
            # wait a random time in order to reduce the number of messages sent by all the other opal servers
            await asyncio.sleep(uniform(MIN_TIME_TO_WAIT, MAX_TIME_TO_WAIT))
            # if we didn't get any other message it means that this server is the first one to pass the sleep
            if not request.requesting_worker_id in self._received_sync_messages:
                logger.info(
                    f"[{request.requesting_worker_id}] respond with my own stats"
                )
                asyncio.create_task(
                    self._endpoint.publish(
                        [opal_server_config.STATISTICS_STATE_SYNC_CHANNEL],
                        SyncResponse(
                            requesting_worker_id=request.requesting_worker_id,
                            clients=self._state.clients,
                            rpc_id_to_client_id=self._rpc_id_to_client_id,
                        ).dict(),
                    )
                )

    async def _receive_other_worker_synced_state(
        self, subscription: Subscription, sync_response: dict
    ):
        """Callback when another server sends us it's statistics data as a
        response to a sync request.

        Args:
            subscription (Subscription): not used, we get it from callbacks.
            sync_response (dict): serialized SyncResponse (state) from the remote server
        """
        try:
            response = SyncResponse(**sync_response)
        except pydantic.ValidationError as e:
            logger.warning(
                f"Got invalid statistics sync response from another server, error: {repr(e)}"
            )
            return
        async with self._lock:
            self._received_sync_messages.add(response.requesting_worker_id)
            # update my state only if this server doesn't have a state of its own yet
            if not len(self._state.clients) and not self._synced_after_wakeup.is_set():
                logger.info(f"[{response.requesting_worker_id}] applying server stats")
                self._state.clients = response.clients
                self._rpc_id_to_client_id = response.rpc_id_to_client_id
                self._synced_after_wakeup.set()

    async def _add_client(self, subscription: Subscription, stats_message: dict):
        """add client record to statistics state.

        Args:
            subscription (Subscription): not used, we get it from callbacks.
            stats_message (dict): serialized ChannelStats for the channel;
                rpc_id - channel identifier; client_id - client identifier
        """
        try:
            stats = ChannelStats(**stats_message)
        except pydantic.ValidationError as e:
            logger.warning(
                f"Got invalid statistics message from client, error: {repr(e)}"
            )
            return
        try:
            client_id = stats.client_id
            rpc_id = stats.rpc_id
            logger.info(
                "Set client statistics {client_id} on channel {rpc_id} with {topics}",
                client_id=client_id,
                rpc_id=rpc_id,
                topics=", ".join(stats.topics),
            )
            async with self._lock:
                self._rpc_id_to_client_id[rpc_id] = client_id
                if client_id in self._state.clients:
                    # Limiting the number of channels per client to avoid memory issues if client opens too many channels
                    if (
                        len(self._state.clients[client_id])
                        < opal_server_config.MAX_CHANNELS_PER_CLIENT
                    ):
                        self._state.clients[client_id].append(stats)
                    else:
                        logger.warning(
                            f"Client '{client_id}' reached the maximum number of open RPC channels"
                        )
                else:
                    self._state.clients[client_id] = [stats]
        except Exception as err:
            # best-effort: statistics bookkeeping must never take down the server
            logger.exception("Add client to server statistics failed")

    async def remove_client(self, rpc_id: str, topics: TopicList, publish=True):
        """remove client record from statistics state.

        Args:
            rpc_id (str): channel id of rpc channel used as identifier to client id
            topics (TopicList): not used, we get it from callbacks.
            publish (bool): used to stop republish cycle
        """
        if rpc_id not in self._rpc_id_to_client_id:
            logger.debug(
                f"Statistics.remove_client() got unknown rpc id: {rpc_id} (probably broadcaster)"
            )
            return
        try:
            logger.info("Trying to remove {rpc_id} from statistics", rpc_id=rpc_id)
            client_id = self._rpc_id_to_client_id[rpc_id]
            for index, stats in enumerate(self._state.clients[client_id]):
                if stats.rpc_id == rpc_id:
                    async with self._lock:
                        # remove the stats record matching the removed rpc id
                        del self._state.clients[client_id][index]
                        # remove the connection between rpc and client, once we removed it from state
                        del self._rpc_id_to_client_id[rpc_id]
                        # if no client records left in state remove the client entry
                        if not len(self._state.clients[client_id]):
                            del self._state.clients[client_id]
                    break
        except Exception as err:
            logger.warning(f"Remove client from server statistics failed: {repr(err)}")
        # publish removed client so each server worker and server instance would get it
        if publish:
            logger.info(
                "Publish rpc_id={rpc_id} to be removed from statistics",
                rpc_id=rpc_id,
            )
            asyncio.create_task(
                self._endpoint.publish(
                    [opal_common_config.STATISTICS_REMOVE_CLIENT_CHANNEL],
                    rpc_id,
                )
            )
The provided code snippet includes necessary dependencies for implementing the `init_statistics_router` function. Write a Python function `def init_statistics_router(stats: Optional[OpalStatistics] = None)` to solve the following problem:
initializes a route where a client (or any other network peer) can inquire what opal clients are currently connected to the server and on what topics are they registered. If the OPAL server does not have statistics enabled, the route will return 501 Not Implemented
Here is the function:
def init_statistics_router(stats: Optional[OpalStatistics] = None):
    """initializes a route where a client (or any other network peer) can
    inquire what opal clients are currently connected to the server and on what
    topics are they registered.

    If the OPAL server does not have statistics enabled, the route will
    return 501 Not Implemented
    """
    router = APIRouter()

    @router.get("/statistics", response_model=ServerStats)
    async def get_statistics():
        """Route to serve server statistics."""
        if stats is None:
            # statistics collection is off — tell the caller how to enable it
            raise HTTPException(
                status_code=status.HTTP_501_NOT_IMPLEMENTED,
                detail={
                    "error": "This OPAL server does not have statistics turned on."
                    + " To turn on, set this config var: OPAL_STATISTICS_ENABLED=true"
                },
            )
        logger.info("Serving statistics")
        return stats.state

    return router
156,212 | import os
The provided code snippet includes necessary dependencies for implementing the `get_install_requires` function. Write a Python function `def get_install_requires(here)` to solve the following problem:
Gets the contents of install_requires from text file. Getting the minimum requirements from a text file allows us to pre-install them in docker, speeding up our docker builds and better utilizing the docker layer cache. The requirements in requires.txt are in fact the minimum set of packages you need to run OPAL (and are thus different from a "requirements.txt" file).
Here is the function:
def get_install_requires(here):
    """Gets the contents of install_requires from text file.

    Getting the minimum requirements from a text file allows us to pre-install
    them in docker, speeding up our docker builds and better utilizing the docker layer cache.

    The requirements in requires.txt are in fact the minimum set of packages
    you need to run OPAL (and are thus different from a "requirements.txt" file).

    Args:
        here: directory containing the requires.txt file.

    Returns:
        list of requirement specifier strings, one per non-empty,
        non-comment line of requires.txt.
    """
    with open(os.path.join(here, "requires.txt")) as fp:
        stripped = (line.strip() for line in fp)
        # fix: filter on the *stripped* line — previously blank lines leaked
        # into install_requires as empty strings, and indented comment lines
        # (whitespace before '#') were not filtered out
        return [line for line in stripped if line and not line.startswith("#")]
156,216 | import os
import sys
import typer
from fastapi.applications import FastAPI
from typer.main import Typer
from typer.models import Context
from opal_client.config import opal_client_config
from opal_common.cli.docs import MainTexts
from opal_common.cli.typer_app import get_typer_app
from opal_common.config import opal_common_config
app = get_typer_app()
opal_client_config = OpalClientConfig(prefix="OPAL_")
def run_gunicorn(app, number_of_workers=None, host=None, port=None, **kwargs):
options = {
"bind": "%s:%s" % (host or "127.0.0.1", port or "8080"),
"workers": number_of_workers or calc_default_number_of_workers(),
"worker_class": "uvicorn.workers.UvicornWorker",
}
options.update(kwargs)
GunicornApp(app, options).run()
def run_uvicorn(
app_path, number_of_workers=None, host=None, port=None, reload=False, **kwargs
):
options = {
"host": host or "127.0.0.1",
"port": port or "8080",
"reload": reload,
"workers": number_of_workers or calc_default_number_of_workers(),
}
options.update(kwargs)
import uvicorn
uvicorn.run(app_path, **options)
app = client.app
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(engine_type: str = typer.Option("uvicron", help="uvicorn or gunicorn"))` to solve the following problem:
Run the client as a daemon.
Here is the function:
def run(engine_type: str = typer.Option("uvicorn", help="uvicorn or gunicorn")):
    """Run the client as a daemon.

    Fix: the default engine value was misspelled "uvicron"; harmless at
    runtime (anything other than "gunicorn" falls through to uvicorn) but
    it showed the wrong value in --help. Corrected to "uvicorn".
    """
    typer.echo(f"-- Starting OPAL client (with {engine_type}) --")

    # deferred import: keep CLI startup fast and avoid importing server deps
    # until we actually run
    from opal_common.corn_utils import run_gunicorn, run_uvicorn

    if engine_type == "gunicorn":
        app: FastAPI
        from opal_client.main import app

        run_gunicorn(
            app,
            opal_client_config.CLIENT_API_SERVER_WORKER_COUNT,
            host=opal_client_config.CLIENT_API_SERVER_HOST,
            port=opal_client_config.CLIENT_API_SERVER_PORT,
        )
    else:
        # any other value (including the default) runs under uvicorn
        run_uvicorn(
            "opal_client.main:app",
            workers=opal_client_config.CLIENT_API_SERVER_WORKER_COUNT,
            host=opal_client_config.CLIENT_API_SERVER_HOST,
            port=opal_client_config.CLIENT_API_SERVER_PORT,
        )
156,217 | import os
import sys
import typer
from fastapi.applications import FastAPI
from typer.main import Typer
from typer.models import Context
from opal_client.config import opal_client_config
from opal_common.cli.docs import MainTexts
from opal_common.cli.typer_app import get_typer_app
from opal_common.config import opal_common_config
# Configuration singletons, populated from env vars prefixed with "OPAL_".
opal_client_config = OpalClientConfig(prefix="OPAL_")
opal_common_config = OpalCommonConfig(prefix="OPAL_")
The provided code snippet includes necessary dependencies for implementing the `print_config` function. Write a Python function `def print_config()` to solve the following problem:
To test config values, print the configuration parsed from ENV and CMD.
Here is the function:
def print_config():
    """To test config values, print the configuration parsed from ENV and
    CMD."""
    typer.echo("Printing configuration values")
    # dump both the client-specific and the shared configuration
    for config in (opal_client_config, opal_common_config):
        typer.echo(str(config))
import os
import sys
import typer
from fastapi.applications import FastAPI
from typer.main import Typer
from typer.models import Context
from opal_client.config import opal_client_config
from opal_common.cli.docs import MainTexts
from opal_common.cli.typer_app import get_typer_app
from opal_common.config import opal_common_config
# Typer CLI application onto which commands are registered.
app = get_typer_app()
# Client configuration singleton, populated from "OPAL_"-prefixed env vars.
opal_client_config = OpalClientConfig(prefix="OPAL_")
class MainTexts:
    # Holds the CLI help texts (banner header and docs footer) for an OPAL app.
    # The "\b" / "\f" control characters are click/typer formatting markers
    # (preserve-paragraph / truncate), not stray bytes.
    def __init__(self, first_line, name):
        # first_line: banner title line; name: app name ("client"/"server"),
        # used in the example commands below.
        self.header = f"""\b
{first_line}
Open-Policy Administration Layer - {name}\b\f"""
        self.docs = f"""\b
Config top level options:
 - Use env-vars (same as cmd options) but uppercase
 and with "_" instead of "-"; all prefixed with "OPAL_"
 - Use command line options as detailed by '--help'
 - Use .env or .ini files
\b
Examples:
 - opal-{name} --help Detailed help on CLI
 - opal-{name} run --help Help on run command
 - opal-{name} run --engine-type gunicorn Run {name} with gunicorn
\b
"""
# Shared (client+server) configuration, also read from "OPAL_"-prefixed env vars.
opal_common_config = OpalCommonConfig(prefix="OPAL_")
# FastAPI app exposed for ASGI servers; `client` is defined elsewhere in the file.
app = client.app
def cli():
    """Entry point wiring the OPAL client CLI: banner, usage text and config."""
    texts = MainTexts("OPAL-CLIENT", "client")

    def on_start(ctx: Context, **kwargs):
        # Show the banner when invoked bare or with the "run" subcommand;
        # when invoked bare, also print usage + the docs footer.
        subcommand = ctx.invoked_subcommand
        if subcommand in (None, "run"):
            typer.secho(texts.header, bold=True, fg=typer.colors.MAGENTA)
            if subcommand is None:
                typer.echo(ctx.get_usage())
                typer.echo(texts.docs)

    opal_client_config.cli(
        [opal_common_config],
        typer_app=app,
        help=texts.docs,
        on_start=on_start,
    )
import aiohttp
from fastapi import Response
from fastapi.encoders import jsonable_encoder
def exclude_none_fields(data):
    """Return *data* as a JSON-serializable structure with None fields dropped.

    Uses FastAPI's jsonable_encoder to convert pydantic models (and nested
    containers) to plain JSON-compatible types, excluding fields whose value
    is None (e.g. unset pydantic defaults).
    """
    encoded = jsonable_encoder(data, exclude_none=True)
    return encoded
from fastapi import APIRouter, status
from opal_client.policy.updater import PolicyUpdater
from opal_common.logger import logger
class PolicyUpdater:
    """
    Keeps policy-stores (e.g. OPA) up to date with relevant policy code
    (e.g: rego) and static data (e.g: data.json files like in OPA bundles).
    Uses Pub/Sub to subscribe to specific directories in the policy code
    repository (i.e: git), and fetches bundles containing updated policy code.
    """

    def __init__(
        self,
        token: str = None,
        pubsub_url: str = None,
        subscription_directories: List[str] = None,
        policy_store: BasePolicyStoreClient = None,
        data_fetcher: Optional[DataFetcher] = None,
        callbacks_register: Optional[CallbacksRegister] = None,
        opal_client_id: str = None,
    ):
        """inits the policy updater.

        Args:
            token (str, optional): Auth token to include in connections to OPAL server. Defaults to CLIENT_TOKEN.
            pubsub_url (str, optional): URL for Pub/Sub updates for policy. Defaults to OPAL_SERVER_PUBSUB_URL.
            subscription_directories (List[str], optional): directories in the policy source repo to subscribe to.
                Defaults to POLICY_SUBSCRIPTION_DIRS. every time the directory is updated by a commit we will receive
                a message on its respective topic. we dedups directories with ancestral relation, and will only
                receive one message for each updated file.
            policy_store (BasePolicyStoreClient, optional): Policy store client to use to store policy code. Defaults to DEFAULT_POLICY_STORE.
            data_fetcher (DataFetcher, optional): fetcher used by the callbacks reporter; created if not given.
            callbacks_register (CallbacksRegister, optional): registry of update callbacks; created if not given.
            opal_client_id (str, optional): identifier reported to the server in statistics messages.
        """
        # defaults
        token: str = token or opal_client_config.CLIENT_TOKEN
        pubsub_url: str = pubsub_url or opal_client_config.SERVER_PUBSUB_URL
        self._subscription_directories: List[str] = (
            subscription_directories or opal_client_config.POLICY_SUBSCRIPTION_DIRS
        )
        self._opal_client_id = opal_client_id
        self._scope_id = opal_client_config.SCOPE_ID
        # The policy store we'll save policy modules into (i.e: OPA)
        self._policy_store = policy_store or DEFAULT_POLICY_STORE_GETTER()
        # pub/sub server url and authentication data
        self._server_url = pubsub_url
        self._token = token
        if self._token is None:
            self._extra_headers = None
        else:
            self._extra_headers = [get_authorization_header(self._token)]
        # Pub/Sub topics we subscribe to for policy updates; non-default scopes
        # use a single scoped topic instead of per-directory topics.
        if self._scope_id == "default":
            self._topics = pubsub_topics_from_directories(
                self._subscription_directories
            )
        else:
            self._topics = [f"{self._scope_id}:policy:."]
        # The pub/sub client for data updates
        self._client = None
        # The task running the Pub/Sub subscribing client
        self._subscriber_task = None
        self._policy_update_task = None
        self._stopping = False
        # policy fetcher - fetches policy bundles
        self._policy_fetcher = PolicyFetcher()
        # callbacks on policy changes
        self._data_fetcher = data_fetcher or DataFetcher()
        self._callbacks_register = callbacks_register or CallbacksRegister()
        self._callbacks_reporter = CallbacksReporter(self._callbacks_register)
        self._should_send_reports = (
            opal_client_config.SHOULD_REPORT_ON_DATA_UPDATES or False
        )
        # custom SSL context (for self-signed certificates)
        self._custom_ssl_context = get_custom_ssl_context()
        self._ssl_context_kwargs = (
            {"ssl": self._custom_ssl_context}
            if self._custom_ssl_context is not None
            else {}
        )
        # queue serializing policy-update requests; consumed by handle_policy_updates()
        self._policy_update_queue = asyncio.Queue()
        self._tasks = TasksPool()

    async def __aenter__(self):
        # async context manager support: start on enter, stop on exit
        await self.start()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        if not self._stopping:
            await self.stop()

    async def _update_policy_callback(
        self, data: dict = None, topic: str = "", **kwargs
    ):
        """
        Pub/Sub callback - triggering policy updates
        will run when we get notifications on the policy topic.
        i.e: when the source repository changes (new commits)
        """
        if data is None:
            logger.warning(
                "got policy update message without data, skipping policy update!"
            )
            return
        try:
            message = PolicyUpdateMessage(**data)
        except pydantic.ValidationError as e:
            logger.warning(f"Got invalid policy update message from server: {repr(e)}")
            return
        logger.info(
            "Received policy update: topic={topic}, message={message}",
            topic=topic,
            message=message.dict(),
        )
        # only act on directories we actually subscribed to
        directories = list(
            set(message.changed_directories).intersection(
                set(self._subscription_directories)
            )
        )
        await self.trigger_update_policy(directories)

    async def trigger_update_policy(
        self, directories: List[str] = None, force_full_update: bool = False
    ):
        # enqueue the request; handle_policy_updates() processes it in order
        await self._policy_update_queue.put((directories, force_full_update))

    async def _on_connect(self, client: PubSubClient, channel: RpcChannel):
        """Pub/Sub on_connect callback On connection to backend, whether its
        the first connection, or reconnecting after downtime, refetch the state
        opa needs.

        As long as the connection is alive we know we are in sync with
        the server, when the connection is lost we assume we need to
        start from scratch.
        """
        logger.info("Connected to server")
        await self.trigger_update_policy()
        if opal_common_config.STATISTICS_ENABLED:
            await self._client.wait_until_ready()
            # publish statistics to the server about new connection from client (only if STATISTICS_ENABLED is True, default to False)
            await self._client.publish(
                [opal_common_config.STATISTICS_ADD_CLIENT_CHANNEL],
                data={
                    "topics": self._topics,
                    "client_id": self._opal_client_id,
                    "rpc_id": channel.id,
                },
            )

    async def _on_disconnect(self, channel: RpcChannel):
        """Pub/Sub on_disconnect callback."""
        logger.info("Disconnected from server")

    async def start(self):
        """launches the policy updater."""
        logger.info("Launching policy updater")
        await self._callbacks_reporter.start()
        if self._policy_update_task is None:
            self._policy_update_task = asyncio.create_task(self.handle_policy_updates())
        if self._subscriber_task is None:
            self._subscriber_task = asyncio.create_task(self._subscriber())
            await self._data_fetcher.start()

    async def stop(self):
        """stops the policy updater."""
        self._stopping = True
        logger.info("Stopping policy updater")
        # disconnect from Pub/Sub
        if self._client is not None:
            try:
                await asyncio.wait_for(self._client.disconnect(), timeout=3)
            except asyncio.TimeoutError:
                logger.debug(
                    "Timeout waiting for PolicyUpdater pubsub client to disconnect"
                )
        # stop subscriber task
        if self._subscriber_task is not None:
            logger.debug("Cancelling PolicyUpdater subscriber task")
            self._subscriber_task.cancel()
            try:
                await self._subscriber_task
            except asyncio.CancelledError as exc:
                logger.debug(
                    "PolicyUpdater subscriber task was force-cancelled: {exc}",
                    exc=repr(exc),
                )
            self._subscriber_task = None
            logger.debug("PolicyUpdater subscriber task was cancelled")
        await self._data_fetcher.stop()
        # stop queue handling
        if self._policy_update_task is not None:
            self._policy_update_task.cancel()
            try:
                await self._policy_update_task
            except asyncio.CancelledError:
                pass
            self._policy_update_task = None
        # stop the callbacks reporter
        await self._callbacks_reporter.stop()

    async def wait_until_done(self):
        # block until the subscriber task finishes (i.e. the client is done)
        if self._subscriber_task is not None:
            await self._subscriber_task

    async def _subscriber(self):
        """Coroutine meant to be spunoff with create_task to listen in the
        background for policy update events and pass them to the
        update_policy() callback (which will fetch the relevant policy bundle
        from the server and update the policy store)."""
        logger.info("Subscribing to topics: {topics}", topics=self._topics)
        self._client = PubSubClient(
            topics=self._topics,
            callback=self._update_policy_callback,
            on_connect=[self._on_connect],
            on_disconnect=[self._on_disconnect],
            extra_headers=self._extra_headers,
            keep_alive=opal_client_config.KEEP_ALIVE_INTERVAL,
            server_uri=self._server_url,
            **self._ssl_context_kwargs,
        )
        async with self._client:
            await self._client.wait_until_done()

    async def update_policy(
        self,
        directories: List[str],
        force_full_update: bool,
    ):
        """fetches policy (code, e.g: rego) from backend and stores it in the
        policy store.

        Args:
            policy_store (BasePolicyStoreClient, optional): Policy store client to use to store policy code.
            directories (List[str], optional): specific source directories we want.
            force_full_update (bool, optional): if true, ignore stored hash and fetch full policy bundle.
        """
        directories = (
            directories
            if directories is not None
            else default_subscribed_policy_directories()
        )
        if force_full_update:
            logger.info("full update was forced (ignoring stored hash if exists)")
            base_hash = None
        else:
            base_hash = await self._policy_store.get_policy_version()
        if base_hash is None:
            logger.info("Refetching policy code (full bundle)")
        else:
            logger.info(
                "Refetching policy code (delta bundle), base hash: '{base_hash}'",
                base_hash=base_hash,
            )
        bundle_error = None
        bundle = None
        bundle_succeeded = True
        try:
            bundle: Optional[
                PolicyBundle
            ] = await self._policy_fetcher.fetch_policy_bundle(
                directories, base_hash=base_hash
            )
            if bundle:
                if bundle.old_hash is None:
                    # full bundle (no diff base)
                    logger.info(
                        "Got policy bundle with {rego_files} rego files, {data_files} data files, commit hash: '{commit_hash}'",
                        rego_files=len(bundle.policy_modules),
                        data_files=len(bundle.data_modules),
                        commit_hash=bundle.hash,
                        manifest=bundle.manifest,
                    )
                else:
                    # delta bundle (diff against old_hash)
                    deleted_files = (
                        None
                        if bundle.deleted_files is None
                        else bundle.deleted_files.dict()
                    )
                    logger.info(
                        "got policy bundle (delta): '{diff_against_hash}' -> '{commit_hash}', manifest: {manifest}, deleted: {deleted}",
                        commit_hash=bundle.hash,
                        diff_against_hash=bundle.old_hash,
                        manifest=bundle.manifest,
                        deleted=deleted_files,
                    )
        except Exception as err:
            bundle_error = repr(err)
            bundle_succeeded = False
        bundle_hash = None if bundle is None else bundle.hash
        # store policy bundle in OPA cache
        # We wrap our interaction with the policy store with a transaction, so that
        # if the write-op fails, we will mark the transaction as failed.
        async with self._policy_store.transaction_context(
            bundle_hash, transaction_type=TransactionType.policy
        ) as store_transaction:
            store_transaction._update_remote_status(
                url=self._policy_fetcher.policy_endpoint_url,
                status=bundle_succeeded,
                error=bundle_error,
            )
            if bundle:
                await store_transaction.set_policies(bundle)
        # if we got here, we did not throw during the transaction
        if self._should_send_reports:
            # spin off reporting (no need to wait on it)
            # NOTE(review): bundle may be None here (fetch failed or no delta);
            # bundle.hash would raise AttributeError — confirm intended guard.
            report = DataUpdateReport(policy_hash=bundle.hash, reports=[])
            self._tasks.add_task(
                self._callbacks_reporter.report_update_results(report)
            )

    async def handle_policy_updates(self):
        # long-running consumer of the policy-update queue; one update at a time,
        # in the order they were enqueued.
        while True:
            try:
                directories, force_full_update = await self._policy_update_queue.get()
                await self.update_policy(directories, force_full_update)
            except asyncio.CancelledError:
                logger.debug("PolicyUpdater policy update task was cancelled")
                break
            except Exception:
                logger.exception("Failed to update policy")

    # NOTE(review): the two accessors below look like they were @property in the
    # original source (decorators may have been lost in extraction) — confirm.
    def topics(self) -> List[str]:
        return self._topics

    def callbacks_reporter(self) -> CallbacksReporter:
        return self._callbacks_reporter
def init_policy_router(policy_updater: PolicyUpdater):
    """Build the policy API router.

    Exposes POST /policy-updater/trigger, which forces a full policy refetch
    (ignoring any stored bundle hash) via the given updater.
    """
    router = APIRouter()

    @router.post("/policy-updater/trigger", status_code=status.HTTP_200_OK)
    async def trigger_policy_update():
        logger.info("triggered policy update from api")
        # force_full_update=True: ignore the stored hash, fetch the full bundle
        await policy_updater.trigger_update_policy(force_full_update=True)
        return {"status": "ok"}

    return router
from pathlib import Path
from typing import List
from opal_client.config import opal_client_config
from opal_common.paths import PathUtils
opal_client_config = OpalClientConfig(prefix="OPAL_")
class PathUtils:
    # Collection of pure path-manipulation helpers, used as static functions
    # (called on the class, never on an instance).
    # NOTE(review): the methods take no `self` and carry no @staticmethod
    # decorator here — decorators may have been lost in extraction; confirm
    # against the original source.
    def intermediate_directories(paths: List[Path]) -> List[Path]:
        """returns the set of all parent directories for a list of paths.

        i.e: calculate all partial paths that are directories.
        """
        directories = set()
        for path in paths:
            directories.update(path.parents)
        # NOTE(review): `sorted_list_from_set` is referenced unqualified here;
        # it is not defined in this file's visible scope — likely a sibling
        # helper on this class in the original source. Confirm.
        return sorted_list_from_set(directories)

    def is_child_of_directories(path: Path, directories: Set[Path]) -> bool:
        """whether the input path is a child of one of the input
        directories."""
        # non-empty intersection between `directories` and the path's ancestors
        return bool(directories & set(path.parents))

    def filter_children_paths_of_directories(
        paths: List[Path], directories: Set[Path]
    ) -> List[Path]:
        """returns only paths in :paths that are children of one of the paths
        in :directories."""
        return [
            path
            for path in paths
            if PathUtils.is_child_of_directories(path, directories)
        ]

    def non_intersecting_directories(paths: List[Path]) -> Set[Path]:
        """gets a list of paths (directories), and returns a set of directories
        that are non-intersecting, meaning no directory in the set is a parent
        of another directory in the set (i.e: parent directories "swallow"
        their subdirectories)."""
        output_paths = set()
        for candidate in paths:
            if set(candidate.parents) & output_paths:
                # the next candidate is covered by a parent which is already in output -> SKIP
                # or the next candidate is already in the list
                continue
            for out_path in list(output_paths):
                # the next candidate can displace a child from the output
                if candidate in list(out_path.parents):
                    output_paths.remove(out_path)
            output_paths.add(candidate)
        return output_paths

    def sort_paths_according_to_explicit_sorting(
        unsorted_paths: List[Path], explicit_sorting: List[Path]
    ) -> List[Path]:
        """the way this sorting works, is assuming that explicit_sorting does
        NOT necessarily contains all the paths found in the original list.

        We must ensure that all items in unsorted_paths must also exist
        in the output list.
        """
        unsorted = unsorted_paths.copy()
        sorted_paths: List[Path] = []
        for path in explicit_sorting:
            try:
                # we look for Path objects and not str for normalization of the path
                found_path: Path = unsorted.pop(unsorted.index(path))
                sorted_paths.append(found_path)
            except ValueError:
                continue  # skip, not found in the original list
        # add the remainder to the end of the sorted list
        sorted_paths.extend(unsorted)
        return sorted_paths

    def glob_style_match_path_to_list(path: str, match_paths: List[str]):
        """
        Check if given path matches any of the match_paths either via glob style matching or by being nested under - when the match path ends with "/**"
        return the match path if there's a match, and None otherwise
        """
        # check if any of our ignore paths match the given path
        for match_path in match_paths:
            # if the path is indicated as a parent via "/**" at the end
            if match_path.endswith("/**"):
                # check if the path is under the parent
                if path.startswith(match_path[:-3]):
                    return match_path
            # otherwise check for simple (non-recursive glob matching)
            else:
                path_object = Path(path)
                if path_object.match(match_path):
                    return match_path
        # if no match - this path shouldn't be ignored
        return None
The provided code snippet includes the dependencies needed to implement the `default_subscribed_policy_directories` function. Write a Python function `def default_subscribed_policy_directories() -> List[str]` that solves the following problem:
Wraps the configured value of POLICY_SUBSCRIPTION_DIRS, deduplicating intersecting directories.
Here is the function:
def default_subscribed_policy_directories() -> List[str]:
    """wraps the configured value of POLICY_SUBSCRIPTION_DIRS, but dedups
    intersecting dirs."""
    # parse the configured directory strings into Path objects, then drop any
    # directory already covered by a configured ancestor
    configured_dirs = [Path(d) for d in opal_client_config.POLICY_SUBSCRIPTION_DIRS]
    deduped_dirs = PathUtils.non_intersecting_directories(configured_dirs)
    return [str(d) for d in deduped_dirs]
from typing import List, Optional
import aiohttp
from fastapi import HTTPException, status
from opal_client.config import opal_client_config
from opal_client.logger import logger
from opal_common.schemas.policy import PolicyBundle
from opal_common.security.sslcontext import get_custom_ssl_context
from opal_common.utils import (
get_authorization_header,
throw_if_bad_status_code,
tuple_to_dict,
)
from pydantic import ValidationError
from tenacity import retry, stop, wait
class PolicyBundle(BaseSchema):
    # Pydantic schema for a policy bundle fetched from the OPAL server.
    # list of file paths included in the bundle
    manifest: List[str]
    hash: str = Field(..., description="commit hash (debug version)")
    # present only for delta bundles (diff against a previously-fetched commit)
    old_hash: Optional[str] = Field(
        None, description="old commit hash (in diff bundles)"
    )
    # static data files (e.g. data.json) included in the bundle
    data_modules: List[DataModule]
    # policy code files (e.g. rego) included in the bundle
    policy_modules: List[RegoModule]
    # files removed since old_hash (delta bundles only)
    deleted_files: Optional[DeletedFiles]
def force_valid_bundle(bundle) -> PolicyBundle:
    """Parse a raw bundle dict into a PolicyBundle.

    Logs a warning and re-raises if the server returned an invalid bundle.
    """
    try:
        parsed = PolicyBundle(**bundle)
    except ValidationError as e:
        logger.warning(
            "server returned invalid bundle: {err}", bundle=bundle, err=repr(e)
        )
        raise
    return parsed
from typing import List
from fastapi import APIRouter, Depends, HTTPException, Response, status
from opal_client.callbacks.register import CallbacksRegister
from opal_client.config import opal_client_config
from opal_common.authentication.authz import require_peer_type
from opal_common.authentication.deps import JWTAuthenticator
from opal_common.authentication.types import JWTClaims
from opal_common.authentication.verifier import Unauthorized
from opal_common.logger import logger
from opal_common.schemas.data import CallbackEntry
from opal_common.schemas.security import PeerType
from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR
class CallbacksRegister:
    """A store for callbacks to other services, invoked on OPA state changes.

    Every time OPAL client successfully finishes a transaction to update
    OPA state, all the callbacks in this register will be called.
    """

    def __init__(
        self, initial_callbacks: Optional[List[Union[str, CallbackConfig]]] = None
    ) -> None:
        # maps a key (caller-provided or derived from url+config hash) to the
        # stored (url, config) tuple
        self._callbacks: Dict[str, CallbackConfig] = {}
        if initial_callbacks is not None:
            self._load_initial_callbacks(initial_callbacks)
        logger.info("Callbacks register loaded")

    def _load_initial_callbacks(
        self, initial_callbacks: List[Union[str, CallbackConfig]]
    ) -> None:
        # normalize (url-only entries get the default config), then register
        # each under its content-derived hash key
        normalized_callbacks = self.normalize_callbacks(initial_callbacks)
        for callback in normalized_callbacks:
            url, config = callback
            key = self.calc_hash(url, config)
            self._register(key, url, config)

    def normalize_callbacks(
        self, callbacks: List[Union[str, CallbackConfig]]
    ) -> List[CallbackConfig]:
        # Turn each entry into a (url, config) tuple; bare url strings get the
        # configured default callback config. Unsupported entry types are
        # logged and dropped.
        normalized_callbacks = []
        for callback in callbacks:
            if isinstance(callback, str):
                url = callback
                config = cast(
                    HttpFetcherConfig, opal_client_config.DEFAULT_UPDATE_CALLBACK_CONFIG
                )
                normalized_callbacks.append((url, config))
                continue
            elif isinstance(callback, tuple):
                normalized_callbacks.append(callback)
                continue
            logger.warning(
                f"Unsupported type for callback config: {type(callback).__name__}"
            )
        return normalized_callbacks

    def _register(self, key: str, url: str, config: HttpFetcherConfig):
        self._callbacks[key] = (url, config)

    def calc_hash(self, url: str, config: HttpFetcherConfig) -> str:
        """gets a unique hash key from a callback url and config."""
        # sha256 over the url bytes followed by the config's json serialization
        m = hashlib.sha256()
        m.update(url.encode())
        m.update(config.json().encode())
        return m.hexdigest()

    def get(self, key: str) -> Optional[CallbackEntry]:
        """gets a registered callback by its key, or None if no such key found
        in register."""
        callback = self._callbacks.get(key, None)
        if callback is None:
            return None
        (url, config) = callback
        return CallbackEntry(key=key, url=url, config=config)

    def put(
        self,
        url: str,
        config: Optional[HttpFetcherConfig] = None,
        key: Optional[str] = None,
    ) -> str:
        """puts a callback in the register.

        if no config is provided, the default callback config will be
        used. if no key is provided, the key will be calculated by
        hashing the url and config.
        """
        default_config = opal_client_config.DEFAULT_UPDATE_CALLBACK_CONFIG
        # the configured default may be a plain dict (e.g. loaded from env)
        if isinstance(default_config, dict):
            default_config = HttpFetcherConfig(**default_config)
        callback_config = config or default_config
        auto_key = self.calc_hash(url, callback_config)
        callback_key = key or auto_key
        # if the same callback is already registered with another key - remove that callback.
        # there is no point in calling the same callback twice.
        self.remove(auto_key)
        # register the callback under the intended key (auto-generated or provided)
        self._register(callback_key, url, callback_config)
        return callback_key

    def remove(self, key: str):
        """removes a callback from the register, if exists."""
        if key in self._callbacks:
            del self._callbacks[key]

    def all(self) -> Generator[CallbackEntry, None, None]:
        """a generator yielding all the callback configs currently registered.

        Yields:
            the next callback config found
        """
        for key, (url, config) in iter(self._callbacks.items()):
            yield CallbackEntry(key=key, url=url, config=config)
def require_peer_type(
    authenticator: JWTAuthenticator, claims: JWTClaims, required_type: PeerType
):
    """Assert the JWT claims carry the required OPAL peer type.

    No-op when the authenticator is disabled. Raises Unauthorized (HTTP 401)
    when the 'peer_type' claim is missing, not a valid PeerType value, or
    differs from *required_type*.
    """
    if not authenticator.enabled:
        return
    peer_type = claims.get("peer_type", None)
    if peer_type is None:
        raise Unauthorized(description="Missing 'peer_type' claim for OPAL jwt token")
    try:
        # renamed from `type`, which shadowed the builtin
        claimed_type = PeerType(peer_type)
    except ValueError:
        raise Unauthorized(
            description=f"Invalid 'peer_type' claim for OPAL jwt token: {peer_type}"
        )
    if claimed_type != required_type:
        raise Unauthorized(
            description=f"Incorrect 'peer_type' claim for OPAL jwt token: {str(claimed_type)}, expected: {str(required_type)}"
        )
class JWTAuthenticator(_JWTAuthenticator):
    """bearer token authentication for http(s) api endpoints.

    throws 401 if a valid jwt is not valid is not provided.
    """

    def __call__(self, authorization: Optional[str] = Header(None)) -> JWTClaims:
        # FastAPI dependency entry point: pull the bearer token out of the
        # Authorization header and verify it; returns the decoded claims,
        # raising 401 on a missing/invalid token.
        token = get_token_from_header(authorization)
        return verify_logged_in(self._verifier, token)
JWTClaims = Dict[str, Any]
class Unauthorized(HTTPException):
    """HTTP 401 Unauthorized exception."""

    def __init__(self, description="Bearer token is not valid!", **kwargs):
        # extra kwargs are merged into the JSON error detail; the
        # WWW-Authenticate header signals bearer-token auth to the client
        super().__init__(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail={"error": description, **kwargs},
            headers={"WWW-Authenticate": "Bearer"},
        )
class CallbackEntry(BaseModel):
    """an entry in the callbacks register.

    this schema is used by the callbacks api
    """

    # unique key identifying the callback in the register (optional)
    key: Optional[str] = Field(
        None, description="unique id to identify this callback (optional)"
    )
    # target url invoked on OPA state updates
    url: str = Field(..., description="http/https url to call back on update")
    # optional per-callback HTTP settings (method, headers, etc.)
    config: Optional[HttpFetcherConfig] = Field(
        None,
        description="optional http config for the target url (i.e: http method, headers, etc)",
    )
class PeerType(str, Enum):
    # Roles a peer can claim in an OPAL JWT ('peer_type' claim).
    client = "client"
    datasource = "datasource"
    listener = "listener"
def init_callbacks_api(authenticator: JWTAuthenticator, register: CallbacksRegister):
    """Build the /callbacks CRUD router over the given register.

    Every route requires a valid JWT whose peer_type is 'listener'.
    """

    async def require_listener_token(claims: JWTClaims = Depends(authenticator)):
        try:
            require_peer_type(
                authenticator, claims, PeerType.listener
            )  # may throw Unauthorized
        except Unauthorized as e:
            logger.error(f"Unauthorized to publish update: {repr(e)}")
            raise

    # all the methods in this router requires a valid JWT token with peer_type == listener
    router = APIRouter(
        prefix="/callbacks", dependencies=[Depends(require_listener_token)]
    )

    @router.get("", response_model=List[CallbackEntry])
    async def list_callbacks():
        """list all the callbacks currently registered by OPAL client."""
        return list(register.all())

    @router.get("/{key}", response_model=CallbackEntry)
    async def get_callback_by_key(key: str):
        """get a callback by its key (if such callback is indeed
        registered)."""
        callback = register.get(key)
        if callback is None:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="no callback found with this key",
            )
        return callback

    @router.post("", response_model=CallbackEntry)
    async def register_callback(entry: CallbackEntry):
        """register a new callback by OPAL client, to be called on OPA state
        updates."""
        saved_key = register.put(url=entry.url, config=entry.config, key=entry.key)
        saved_entry = register.get(saved_key)
        if saved_entry is None:
            raise HTTPException(
                status_code=HTTP_500_INTERNAL_SERVER_ERROR,
                detail="could not register callback",
            )
        return saved_entry

    # renamed from a duplicate `get_callback_by_key` definition, which
    # shadowed the GET handler's name (F811 redefinition).
    @router.delete("/{key}", status_code=status.HTTP_204_NO_CONTENT)
    async def delete_callback_by_key(key: str):
        """unregisters a callback identified by its key (if such callback is
        indeed registered)."""
        callback = register.get(key)
        if callback is None:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="no callback found with this key",
            )
        register.remove(key)
        return Response(status_code=status.HTTP_204_NO_CONTENT)

    return router
from typing import Optional
from fastapi import APIRouter, HTTPException, status
from opal_client.data.updater import DataUpdater
from opal_common.logger import logger
class DataUpdater:
def __init__(
self,
token: str = None,
pubsub_url: str = None,
data_sources_config_url: str = None,
fetch_on_connect: bool = True,
data_topics: List[str] = None,
policy_store: BasePolicyStoreClient = None,
should_send_reports=None,
data_fetcher: Optional[DataFetcher] = None,
callbacks_register: Optional[CallbacksRegister] = None,
opal_client_id: str = None,
shard_id: Optional[str] = None,
):
"""Keeps policy-stores (e.g. OPA) up to date with relevant data Obtains
data configuration on startup from OPAL-server Uses Pub/Sub to
subscribe to data update events, and fetches (using FetchingEngine)
data from sources.
Args:
token (str, optional): Auth token to include in connections to OPAL server. Defaults to CLIENT_TOKEN.
pubsub_url (str, optional): URL for Pub/Sub updates for data. Defaults to OPAL_SERVER_PUBSUB_URL.
data_sources_config_url (str, optional): URL to retrieve base data configuration. Defaults to DEFAULT_DATA_SOURCES_CONFIG_URL.
fetch_on_connect (bool, optional): Should the update fetch basic data immediately upon connection/reconnection. Defaults to True.
data_topics (List[str], optional): Topics of data to fetch and subscribe to. Defaults to DATA_TOPICS.
policy_store (BasePolicyStoreClient, optional): Policy store client to use to store data. Defaults to DEFAULT_POLICY_STORE.
"""
# Defaults
token: str = token or opal_client_config.CLIENT_TOKEN
pubsub_url: str = pubsub_url or opal_client_config.SERVER_PUBSUB_URL
self._scope_id = opal_client_config.SCOPE_ID
self._data_topics = (
data_topics if data_topics is not None else opal_client_config.DATA_TOPICS
)
if self._scope_id == "default":
data_sources_config_url: str = (
data_sources_config_url
or opal_client_config.DEFAULT_DATA_SOURCES_CONFIG_URL
)
else:
data_sources_config_url = (
f"{opal_client_config.SERVER_URL}/scopes/{self._scope_id}/data"
)
self._data_topics = [
f"{self._scope_id}:data:{topic}" for topic in self._data_topics
]
# Should the client use the default data source to fetch on connect
self._fetch_on_connect = fetch_on_connect
# The policy store we'll save data updates into
self._policy_store = policy_store or DEFAULT_POLICY_STORE_GETTER()
self._should_send_reports = (
should_send_reports
if should_send_reports is not None
else opal_client_config.SHOULD_REPORT_ON_DATA_UPDATES
)
# The pub/sub client for data updates
self._client = None
# The task running the Pub/Sub subscribing client
self._subscriber_task = None
# Data fetcher
self._data_fetcher = data_fetcher or DataFetcher()
self._callbacks_register = callbacks_register or CallbacksRegister()
self._callbacks_reporter = CallbacksReporter(
self._callbacks_register,
)
self._token = token
self._shard_id = shard_id
self._server_url = pubsub_url
self._data_sources_config_url = data_sources_config_url
self._opal_client_id = opal_client_id
self._extra_headers = []
if self._token is not None:
self._extra_headers.append(get_authorization_header(self._token))
if self._shard_id is not None:
self._extra_headers.append(("X-Shard-ID", self._shard_id))
if len(self._extra_headers) == 0:
self._extra_headers = None
self._stopping = False
# custom SSL context (for self-signed certificates)
self._custom_ssl_context = get_custom_ssl_context()
self._ssl_context_kwargs = (
{"ssl": self._custom_ssl_context}
if self._custom_ssl_context is not None
else {}
)
self._updates_storing_queue = TakeANumberQueue(logger)
self._tasks = TasksPool()
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
"""Context handler to terminate internal tasks."""
if not self._stopping:
await self.stop()
async def _update_policy_data_callback(self, data: dict = None, topic=""):
"""
Pub/Sub callback - triggering data updates
will run when we get notifications on the policy_data topic.
i.e: when new roles are added, changes to permissions, etc.
"""
if data is not None:
reason = data.get("reason", "")
else:
reason = "Periodic update"
logger.info("Updating policy data, reason: {reason}", reason=reason)
update = DataUpdate.parse_obj(data)
await self.trigger_data_update(update)
async def trigger_data_update(self, update: DataUpdate):
# make sure the id has a unique id for tracking
if update.id is None:
update.id = uuid.uuid4().hex
logger.info("Triggering data update with id: {id}", id=update.id)
# Fetching should be concurrent, but storing should be done in the original order
store_queue_number = await self._updates_storing_queue.take_a_number()
self._tasks.add_task(self._update_policy_data(update, store_queue_number))
async def get_policy_data_config(self, url: str = None) -> DataSourceConfig:
"""
Get the configuration for
Args:
url: the URL to query for the config, Defaults to self._data_sources_config_url
Returns:
DataSourceConfig: the data sources config
"""
if url is None:
url = self._data_sources_config_url
logger.info("Getting data-sources configuration from '{source}'", source=url)
try:
async with ClientSession(headers=self._extra_headers) as session:
response = await session.get(url, **self._ssl_context_kwargs)
if response.status == 200:
return DataSourceConfig.parse_obj(await response.json())
else:
error_details = await response.json()
raise ClientError(
f"Fetch data sources failed with status code {response.status}, error: {error_details}"
)
except:
logger.exception(f"Failed to load data sources config")
raise
async def get_base_policy_data(
    self, config_url: str = None, data_fetch_reason="Initial load"
):
    """Load data into the policy store according to the data source's
    config provided in the config URL.

    Args:
        config_url (str, optional): URL to retrieve data sources config from. Defaults to None ( self._data_sources_config_url).
        data_fetch_reason (str, optional): Reason to log for the update operation. Defaults to "Initial load".
    """
    logger.info(
        "Performing data configuration, reason: {reason}", reason=data_fetch_reason
    )
    sources_config = await self.get_policy_data_config(url=config_url)
    # translate config to a data update and run it through the normal path
    entries = sources_config.entries
    update = DataUpdate(reason=data_fetch_reason, entries=entries)
    await self.trigger_data_update(update)
async def on_connect(self, client: PubSubClient, channel: RpcChannel):
    """Pub/Sub on_connect callback. On connection to backend, whether it's
    the first connection, or reconnecting after downtime, refetch the state
    opa needs.

    As long as the connection is alive we know we are in sync with
    the server; when the connection is lost we assume we need to
    start from scratch.
    """
    logger.info("Connected to server")
    if self._fetch_on_connect:
        await self.get_base_policy_data()
    if opal_common_config.STATISTICS_ENABLED:
        await self._client.wait_until_ready()
        # publish statistics to the server about new connection from client (only if STATISTICS_ENABLED is True, default to False)
        await self._client.publish(
            [opal_common_config.STATISTICS_ADD_CLIENT_CHANNEL],
            data={
                "topics": self._data_topics,
                "client_id": self._opal_client_id,
                "rpc_id": channel.id,
            },
        )
async def on_disconnect(self, channel: RpcChannel):
    # Pub/Sub on_disconnect callback - only logs; reconnect/resync is
    # handled by on_connect when the connection is re-established.
    logger.info("Disconnected from server")
async def start(self):
    """Start the callbacks reporter, the ordered storing queue, the
    background pub/sub subscriber task and the data fetcher."""
    logger.info("Launching data updater")
    await self._callbacks_reporter.start()
    await self._updates_storing_queue.start_queue_handling(
        self._store_fetched_update
    )
    if self._subscriber_task is None:
        self._subscriber_task = asyncio.create_task(self._subscriber())
        await self._data_fetcher.start()
async def _subscriber(self):
    """Coroutine meant to be spun off with create_task to listen in the
    background for data events and pass them to the data_fetcher."""
    logger.info("Subscribing to topics: {topics}", topics=self._data_topics)
    self._client = PubSubClient(
        self._data_topics,
        self._update_policy_data_callback,
        methods_class=TenantAwareRpcEventClientMethods,
        on_connect=[self.on_connect],
        extra_headers=self._extra_headers,
        keep_alive=opal_client_config.KEEP_ALIVE_INTERVAL,
        server_uri=self._server_url,
        **self._ssl_context_kwargs,
    )
    async with self._client:
        # block here to keep the task (and the connection) alive
        await self._client.wait_until_done()
async def stop(self):
    """Gracefully shut down the updater: pub/sub client, subscriber task,
    data fetcher, storing queue and callbacks reporter - in that order."""
    self._stopping = True
    logger.info("Stopping data updater")
    # disconnect from Pub/Sub (bounded wait so shutdown can't hang)
    if self._client is not None:
        try:
            await asyncio.wait_for(self._client.disconnect(), timeout=3)
        except asyncio.TimeoutError:
            logger.debug(
                "Timeout waiting for DataUpdater pubsub client to disconnect"
            )
    # stop subscriber task
    if self._subscriber_task is not None:
        logger.debug("Cancelling DataUpdater subscriber task")
        self._subscriber_task.cancel()
        try:
            # awaiting lets the task observe the cancellation
            await self._subscriber_task
        except asyncio.CancelledError as exc:
            logger.debug(
                "DataUpdater subscriber task was force-cancelled: {exc}",
                exc=repr(exc),
            )
        self._subscriber_task = None
        logger.debug("DataUpdater subscriber task was cancelled")
    # stop the data fetcher
    logger.debug("Stopping data fetcher")
    await self._data_fetcher.stop()
    # stop queue handling
    await self._updates_storing_queue.stop_queue_handling()
    # stop the callbacks reporter
    await self._callbacks_reporter.stop()
async def wait_until_done(self):
    # Block until the background subscriber task finishes (if running).
    if self._subscriber_task is not None:
        await self._subscriber_task
def calc_hash(data):
    """Calculate a hash (sha256) over the given data.

    Non-string data is first serialized to JSON; strings are hashed
    directly (encoded as utf-8).

    Returns:
        the hash of the given data (as a hexdigit string) or '' on failure to process.
    """
    try:
        serialized = (
            data
            if isinstance(data, str)
            else json.dumps(data, default=pydantic_encoder)
        )
        return hashlib.sha256(serialized.encode("utf-8")).hexdigest()
    except:
        logger.exception("Failed to calculate hash for data {data}", data=data)
        return ""
async def _update_policy_data(
    self,
    update: DataUpdate,
    store_queue_number: TakeANumberQueue.Number,
):
    """fetches policy data (policy configuration) from backend and updates
    it into policy-store (i.e. OPA)"""
    if update is None:
        return
    # types / defaults
    urls: List[Tuple[str, FetcherConfig, Optional[JsonableValue]]] = None
    entries: List[DataSourceEntry] = []
    # if we have an actual specification for the update
    # (always true here due to the early return above)
    if update is not None:
        # Check each entry's topics to only process entries designated to us
        entries = [
            entry
            for entry in update.entries
            if entry.topics
            and not set(entry.topics).isdisjoint(set(self._data_topics))
        ]
        urls = []
        for entry in entries:
            config = entry.config
            # propagate the shard id to the fetcher via a request header
            if self._shard_id is not None:
                headers = config.get("headers", {})
                headers.update({"X-Shard-ID": self._shard_id})
                config["headers"] = headers
            urls.append((entry.url, config, entry.data))
        if len(entries) > 0:
            logger.info("Fetching policy data", urls=repr(urls))
        else:
            logger.warning(
                "None of the update's entries are designated to subscribed topics"
            )
    # Urls may be None - handle_urls has a default for None
    policy_data_with_urls = await self._data_fetcher.handle_urls(urls)
    # hand the fetched results to the ordered storing queue (see trigger_data_update)
    store_queue_number.put((update, entries, policy_data_with_urls))
async def _store_fetched_update(self, update_item):
    """Store previously-fetched update results into the policy store,
    inside a single transaction, and report per-entry results to any
    registered callbacks."""
    (update, entries, policy_data_with_urls) = update_item
    # track the result of each url in order to report back
    reports: List[DataEntryReport] = []
    # Save the data from the update
    # We wrap our interaction with the policy store with a transaction
    async with self._policy_store.transaction_context(
        update.id, transaction_type=TransactionType.data
    ) as store_transaction:
        # for intellisense treat store_transaction as a PolicyStoreClient (which it proxies)
        store_transaction: BasePolicyStoreClient
        error_content = None
        # NOTE(review): zip_longest assumes policy_data_with_urls is at
        # least as long as entries; a missing fetch result would fail to
        # unpack - presumably guaranteed by handle_urls; confirm upstream.
        for (url, fetch_config, result), entry in itertools.zip_longest(
            policy_data_with_urls, entries
        ):
            fetched_data_successfully = True
            if isinstance(result, Exception):
                fetched_data_successfully = False
                logger.error(
                    "Failed to fetch url {url}, got exception: {exc}",
                    url=url,
                    exc=result,
                )
            if isinstance(
                result, aiohttp.ClientResponse
            ) and is_http_error_response(
                result
            ):  # error responses
                fetched_data_successfully = False
                try:
                    error_content = await result.json()
                    logger.error(
                        "Failed to fetch url {url}, got response code {status} with error: {error}",
                        url=url,
                        status=result.status,
                        error=error_content,
                    )
                except json.JSONDecodeError:
                    # body was not valid JSON - log the raw text instead
                    error_content = await result.text()
                    logger.error(
                        "Failed to decode response from url:{url}, got response code {status} with response: {error}",
                        url=url,
                        status=result.status,
                        error=error_content,
                    )
            store_transaction._update_remote_status(
                url=url,
                status=fetched_data_successfully,
                error=str(error_content),
            )
            if fetched_data_successfully:
                # get path to store the URL data (default mode (None) is as "" - i.e. as all the data at root)
                policy_store_path = "" if entry is None else entry.dst_path
                # None is not valid - use "" (protect from misconfig)
                if policy_store_path is None:
                    policy_store_path = ""
                # fix opa_path (if not empty must start with "/" to be nested under data)
                if policy_store_path != "" and not policy_store_path.startswith(
                    "/"
                ):
                    policy_store_path = f"/{policy_store_path}"
                policy_data = result
                # Create a report on the data-fetching
                report = DataEntryReport(
                    entry=entry, hash=self.calc_hash(policy_data), fetched=True
                )
                try:
                    if (
                        opal_client_config.SPLIT_ROOT_DATA
                        and policy_store_path in ("/", "")
                        and isinstance(policy_data, dict)
                    ):
                        # root-level dicts can be split key-by-key so they
                        # don't clobber data from other sources
                        await self._set_split_policy_data(
                            store_transaction,
                            url=url,
                            save_method=entry.save_method,
                            data=policy_data,
                        )
                    else:
                        await self._set_policy_data(
                            store_transaction,
                            url=url,
                            path=policy_store_path,
                            save_method=entry.save_method,
                            data=policy_data,
                        )
                    # No exception - we were able to save to the policy-store
                    report.saved = True
                    # save the report for the entry
                    reports.append(report)
                except Exception:
                    logger.exception("Failed to save data update to policy-store")
                    # we failed to save to policy-store
                    report.saved = False
                    # save the report for the entry
                    reports.append(report)
                    # re-raise so the context manager will be aware of the failure
                    raise
            else:
                report = DataEntryReport(entry=entry, fetched=False, saved=False)
                # save the report for the entry
                reports.append(report)
    # should we send a report to defined callbackers?
    if self._should_send_reports:
        # spin off reporting (no need to wait on it)
        whole_report = DataUpdateReport(update_id=update.id, reports=reports)
        extra_callbacks = self._callbacks_register.normalize_callbacks(
            update.callback.callbacks
        )
        self._tasks.add_task(
            self._callbacks_reporter.report_update_results(
                whole_report, extra_callbacks
            )
        )
async def _set_split_policy_data(
    self, tx, url: str, save_method: str, data: Dict[str, Any]
):
    """Split data writes to root ("/") path, so they won't overwrite other
    sources."""
    logger.info("Splitting root data to {n} keys", n=len(data))
    # write each top-level key under its own path instead of replacing root
    for prefix, obj in data.items():
        await self._set_policy_data(
            tx, url=url, path=f"/{prefix}", save_method=save_method, data=obj
        )
async def _set_policy_data(
    self, tx, url: str, path: str, save_method: str, data: JsonableValue
):
    # Write a single fetched document into the policy store via the given
    # transaction: PUT = full replace at path, otherwise JSON-patch.
    logger.info(
        "Saving fetched data to policy-store: source url='{url}', destination path='{path}'",
        url=url,
        path=path or "/",
    )
    if save_method == "PUT":
        await tx.set_policy_data(data, path=path)
    else:
        await tx.patch_policy_data(data, path=path)
def callbacks_reporter(self) -> CallbacksReporter:
    # Accessor for the internal reporter. NOTE(review): presumably a
    # @property upstream - decorators are not visible in this view.
    return self._callbacks_reporter
def init_data_router(data_updater: Optional[DataUpdater]):
    """Build the FastAPI router exposing a manual data-update trigger.

    Returns 503 when no DataUpdater was configured.
    """
    router = APIRouter()

    @router.post("/data-updater/trigger", status_code=status.HTTP_200_OK)
    async def trigger_policy_data_update():
        logger.info("triggered policy data update from api")
        # guard clause: updates are unavailable without an updater instance
        if not data_updater:
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail="Data Updater is currently disabled. Dynamic data updates are not available.",
            )
        await data_updater.get_base_policy_data(
            data_fetch_reason="request from sdk"
        )
        return {"status": "ok"}

    return router
156,225 | import asyncio
import os
import signal
import time
from typing import Callable, Coroutine, List, Optional
import psutil
from opal_client.config import EngineLogFormat
from opal_client.engine.logger import log_engine_output_opa, log_engine_output_simple
from opal_client.engine.options import CedarServerOptions, OpaServerOptions
from opal_client.logger import logger
from tenacity import retry, wait_random_exponential
AsyncCallback = Callable[[], Coroutine]
The provided code snippet includes necessary dependencies for implementing the `wait_until_process_is_up` function. Write a Python function `async def wait_until_process_is_up( process_pid: int, callback: Optional[AsyncCallback], wait_interval: float = 0.1, timeout: Optional[float] = None, )` to solve the following problem:
waits until the pid of the process exists, then optionally runs a callback. optionally receives a timeout to give up.
Here is the function:
async def wait_until_process_is_up(
    process_pid: int,
    callback: Optional[AsyncCallback],
    wait_interval: float = 0.1,
    timeout: Optional[float] = None,
):
    """waits until the pid of the process exists, then optionally runs a
    callback.

    Args:
        process_pid: pid to poll for existence (via psutil.pid_exists).
        callback: optional coroutine factory invoked once polling ends.
        wait_interval: seconds to sleep between polls.
        timeout: optional number of seconds after which to give up waiting.

    Note: matching the original behavior, the callback is still invoked
    even when the wait ends due to the timeout.
    """
    start_time = time.time()
    while not psutil.pid_exists(process_pid):
        # Bug fix: elapsed time is now - start. The original computed
        # `start_time - time.time()`, which is always <= 0, so the timeout
        # could never trigger.
        if timeout is not None and time.time() - start_time > timeout:
            break
        await asyncio.sleep(wait_interval)
    if callback is not None:
        await callback()
156,226 | import asyncio
import json
import logging
from enum import Enum
from typing import Optional
from opal_client.config import EngineLogFormat
from opal_client.logger import logger
def logging_level_from_string(level: str) -> int:
    """Translate a case-insensitive level name to the stdlib int level.

    Unknown names fall back to logging.INFO (logger.log() requires an int
    logging level).
    """
    level_by_name = {
        "info": logging.INFO,
        "critical": logging.CRITICAL,
        "fatal": logging.FATAL,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "warn": logging.WARNING,
        "debug": logging.DEBUG,
    }
    return level_by_name.get(level.lower(), logging.INFO)
def log_event_name(level: str, msg: Optional[str]) -> bool:
    """Log only the (padded) event name; returns whether anything was logged."""
    if msg is None:
        return False
    logger.log(level, "{msg: <20}", msg=msg)
    return True
def log_formatted_http_details(level: str, msg: Optional[str], log_line: dict) -> bool:
    """Log the event name plus extracted HTTP method/path (and response
    status when present); returns False when required fields are missing.

    Note: pops the HTTP keys out of log_line so callers logging the
    remainder won't repeat them.
    """
    method: Optional[str] = log_line.pop("req_method", None)
    path: Optional[str] = log_line.pop("req_path", None)
    status: Optional[int] = log_line.pop("resp_status", None)
    if msg is None or method is None or path is None:
        return False
    if status is None:
        logger.opt(colors=True).log(
            level,
            "{msg: <20} <fg #999>{method} {path}</>",
            msg=msg,
            method=method,
            path=path,
        )
    else:
        logger.opt(colors=True).log(
            level,
            "{msg: <20} <fg #999>{method} {path} -> {status}</>",
            msg=msg,
            method=method,
            path=path,
            status=status,
        )
    return True
def log_entire_dict(level: str, msg: Optional[str], log_line: dict):
    """Log the whole structured log line (serialized to JSON when possible),
    optionally prefixed with the padded event name.

    Returns:
        True - something is always logged.
    """
    # renamed from `format` to avoid shadowing the builtin
    if msg is None:
        template = "<fg #999>{log_line}</>"
    else:
        template = "{msg: <20} <fg #bfbfbf>{log_line}</>"
    try:
        log_line = json.dumps(log_line)  # should be ok, originated in json
    except (TypeError, ValueError):
        # narrowed from a bare `except:`; json.dumps only raises these.
        # fall back to logging the dict's repr
        pass
    logger.opt(colors=True).log(level, template, msg=msg, log_line=log_line)
    return True
class EngineLogFormat(str, Enum):
    # Verbosity levels for piping the policy engine's own log output
    # through OPAL's logger.
    NONE = "none"  # no opa logs are piped
    MINIMAL = "minimal"  # only the event name is logged
    HTTP = "http"  # tries to extract http method, path and response status code
    FULL = "full"  # logs the entire data dict returned
async def log_engine_output_opa(
    line: bytes, logs_format: EngineLogFormat = EngineLogFormat.NONE
):
    """Parse one line of OPA's JSON log output and re-log it through
    OPAL's logger at the requested verbosity."""
    if logs_format == EngineLogFormat.NONE:
        return
    try:
        log_line = json.loads(line)
        # OPA emits string levels; translate to an int and back to the
        # stdlib level *name* (the logger accepts names here - presumably
        # loguru, given the `.opt(colors=True)` usage in sibling helpers)
        level = logging.getLevelName(
            logging_level_from_string(log_line.pop("level", "info"))
        )
        msg = log_line.pop("msg", None)
        logged = False
        if logs_format == EngineLogFormat.MINIMAL:
            logged = log_event_name(level, msg)
        elif logs_format == EngineLogFormat.HTTP:
            logged = log_formatted_http_details(level, msg, log_line)
        # always fall back to log the entire line
        if not logged or logs_format == EngineLogFormat.FULL:
            log_entire_dict(level, msg, log_line)
    except json.JSONDecodeError:
        # line was not JSON - log it raw
        logger.info(line)
156,227 | import asyncio
import json
import logging
from enum import Enum
from typing import Optional
from opal_client.config import EngineLogFormat
from opal_client.logger import logger
async def log_engine_output_simple(line: bytes):
    # Best-effort decode of one line of engine output; on decode failure
    # the raw bytes object is logged as-is.
    try:
        line = line.decode().strip()
    except UnicodeDecodeError:
        ...
    logger.info(line)
156,228 | from fastapi import APIRouter, Depends
from opal_client.config import opal_client_config
from opal_client.policy_store.schemas import PolicyStoreAuth, PolicyStoreDetails
from opal_common.authentication.authz import require_peer_type
from opal_common.authentication.deps import JWTAuthenticator
from opal_common.authentication.types import JWTClaims
from opal_common.authentication.verifier import Unauthorized
from opal_common.logger import logger
from opal_common.schemas.security import PeerType
opal_client_config = OpalClientConfig(prefix="OPAL_")
class PolicyStoreAuth(Enum):
    # Authentication schemes supported against the policy store.
    NONE = "none"
    TOKEN = "token"
    OAUTH = "oauth"
    TLS = "tls"
class PolicyStoreDetails(BaseModel):
    """
    represents a policy store endpoint - contains the policy store's:
    - location (url)
    - type
    - credentials
    """

    type: PolicyStoreTypes = Field(
        PolicyStoreTypes.OPA,
        description="the type of policy store, currently only OPA is officially supported",
    )
    url: str = Field(
        ...,
        description="the url that OPA can be found in. if localhost is the host - "
        "it means OPA is on the same hostname as OPAL client.",
    )
    token: Optional[str] = Field(
        None, description="optional access token required by the policy store"
    )
    auth_type: PolicyStoreAuth = Field(
        PolicyStoreAuth.NONE,
        description="the type of authentication is supported for the policy store.",
    )
    oauth_client_id: Optional[str] = Field(
        None, description="optional OAuth client id required by the policy store"
    )
    oauth_client_secret: Optional[str] = Field(
        None, description="optional OAuth client secret required by the policy store"
    )
    oauth_server: Optional[str] = Field(
        None, description="optional OAuth server required by the policy store"
    )

    def force_enum(cls, v):
        # Coerce strings into PolicyStoreTypes; pass enum values through.
        # NOTE(review): takes `cls` but no decorator is visible in this view -
        # presumably a pydantic @validator("type") upstream; confirm.
        if isinstance(v, str):
            return PolicyStoreTypes(v)
        if isinstance(v, PolicyStoreTypes):
            return v
        raise ValueError(f"invalid value: {v}")

    class Config:
        # serialize enum members by their value; allow population by field name
        use_enum_values = True
        allow_population_by_field_name = True
def require_peer_type(
    authenticator: JWTAuthenticator, claims: JWTClaims, required_type: PeerType
):
    """Authorize a request by the 'peer_type' claim of its JWT.

    No-op when the authenticator is disabled.

    Raises:
        Unauthorized: if the claim is missing, not a valid PeerType, or
            does not match *required_type*.
    """
    if not authenticator.enabled:
        return
    peer_type = claims.get("peer_type", None)
    if peer_type is None:
        raise Unauthorized(description="Missing 'peer_type' claim for OPAL jwt token")
    try:
        # renamed local from `type` to avoid shadowing the builtin
        actual_type = PeerType(peer_type)
    except ValueError:
        raise Unauthorized(
            description=f"Invalid 'peer_type' claim for OPAL jwt token: {peer_type}"
        )
    if actual_type != required_type:
        raise Unauthorized(
            description=f"Incorrect 'peer_type' claim for OPAL jwt token: {str(actual_type)}, expected: {str(required_type)}"
        )
class JWTAuthenticator(_JWTAuthenticator):
    """bearer token authentication for http(s) api endpoints.

    throws 401 if a valid jwt is not provided.
    """

    def __call__(self, authorization: Optional[str] = Header(None)) -> JWTClaims:
        # FastAPI dependency: extract the bearer token from the
        # Authorization header and verify it, returning the decoded claims.
        token = get_token_from_header(authorization)
        return verify_logged_in(self._verifier, token)
JWTClaims = Dict[str, Any]
class Unauthorized(HTTPException):
    """HTTP 401 Unauthorized exception."""

    def __init__(self, description="Bearer token is not valid!", **kwargs):
        # Standard 401 with a WWW-Authenticate challenge header; any extra
        # kwargs are merged into the JSON error detail alongside `error`.
        super().__init__(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail={"error": description, **kwargs},
            headers={"WWW-Authenticate": "Bearer"},
        )
class PeerType(str, Enum):
    # Roles a peer may carry in its JWT when talking to the OPAL server.
    client = "client"
    datasource = "datasource"
    listener = "listener"
def init_policy_store_router(authenticator: JWTAuthenticator):
    """Build the router exposing the policy store connection details
    (url, credentials) to authorized listener-type peers only."""
    router = APIRouter()

    @router.get(
        "/policy-store/config",
        response_model=PolicyStoreDetails,
        response_model_exclude_none=True,
    )
    async def get_policy_store_details(claims: JWTClaims = Depends(authenticator)):
        try:
            require_peer_type(
                authenticator, claims, PeerType.listener
            )  # may throw Unauthorized
        except Unauthorized as e:
            logger.error(f"Unauthorized to publish update: {repr(e)}")
            raise
        # empty config values are normalized to None so they are excluded
        # from the response (response_model_exclude_none=True)
        return PolicyStoreDetails(
            url=opal_client_config.POLICY_STORE_URL,
            token=opal_client_config.POLICY_STORE_AUTH_TOKEN or None,
            auth_type=opal_client_config.POLICY_STORE_AUTH_TYPE or PolicyStoreAuth.NONE,
            oauth_client_id=opal_client_config.POLICY_STORE_AUTH_OAUTH_CLIENT_ID
            or None,
            oauth_client_secret=opal_client_config.POLICY_STORE_AUTH_OAUTH_CLIENT_SECRET
            or None,
            oauth_server=opal_client_config.POLICY_STORE_AUTH_OAUTH_SERVER or None,
        )

    return router
156,229 | import asyncio
import functools
import json
import ssl
import time
from typing import Any, Awaitable, Callable, Dict, List, Optional, Set
from urllib.parse import urlencode
import aiohttp
import dpath
import jsonpatch
from aiofiles.threadpool.text import AsyncTextIOWrapper
from fastapi import Response, status
from opal_client.config import opal_client_config
from opal_client.logger import logger
from opal_client.policy_store.base_policy_store_client import (
BasePolicyStoreClient,
JsonableValue,
)
from opal_client.policy_store.schemas import PolicyStoreAuth
from opal_client.utils import exclude_none_fields, proxy_response
from opal_common.engine.parsing import get_rego_package
from opal_common.git.bundle_utils import BundleUtils
from opal_common.paths import PathUtils
from opal_common.schemas.policy import DataModule, PolicyBundle, RegoModule
from opal_common.schemas.store import JSONPatchAction, StoreTransaction, TransactionType
from pydantic import BaseModel
from tenacity import RetryError, retry
class PathUtils:
    """Utility helpers for manipulating collections of filesystem-like paths.

    NOTE(review): methods take no `self` - presumably decorated with
    @staticmethod upstream; decorators are not visible in this view.
    """

    def intermediate_directories(paths: List[Path]) -> List[Path]:
        """returns the set of all parent directories for a list of paths.

        i.e: calculate all partial paths that are directories.
        """
        directories = set()
        for path in paths:
            directories.update(path.parents)
        # NOTE(review): `sorted_list_from_set` is not defined in this view -
        # presumably a module-level helper; confirm upstream.
        return sorted_list_from_set(directories)

    def is_child_of_directories(path: Path, directories: Set[Path]) -> bool:
        """whether the input path is a child of one of the input
        directories."""
        return bool(directories & set(path.parents))

    def filter_children_paths_of_directories(
        paths: List[Path], directories: Set[Path]
    ) -> List[Path]:
        """returns only paths in :paths that are children of one of the paths
        in :directories."""
        return [
            path
            for path in paths
            if PathUtils.is_child_of_directories(path, directories)
        ]

    def non_intersecting_directories(paths: List[Path]) -> Set[Path]:
        """gets a list of paths (directories), and returns a set of directories
        that are non-intersecting, meaning no directory in the set is a parent
        of another directory in the set (i.e: parent directories "swallow"
        their subdirectories)."""
        output_paths = set()
        for candidate in paths:
            if set(candidate.parents) & output_paths:
                # the next candidate is covered by a parent which is already in output -> SKIP
                # or the next candidate is already in the list
                continue
            for out_path in list(output_paths):
                # the next candidate can displace a child from the output
                if candidate in list(out_path.parents):
                    output_paths.remove(out_path)
            output_paths.add(candidate)
        return output_paths

    def sort_paths_according_to_explicit_sorting(
        unsorted_paths: List[Path], explicit_sorting: List[Path]
    ) -> List[Path]:
        """the way this sorting works, is assuming that explicit_sorting does
        NOT necessarily contain all the paths found in the original list.

        We must ensure that all items in unsorted_paths must also exist
        in the output list.
        """
        unsorted = unsorted_paths.copy()
        sorted_paths: List[Path] = []
        for path in explicit_sorting:
            try:
                # we look for Path objects and not str for normalization of the path
                found_path: Path = unsorted.pop(unsorted.index(path))
                sorted_paths.append(found_path)
            except ValueError:
                continue  # skip, not found in the original list
        # add the remainder to the end of the sorted list
        sorted_paths.extend(unsorted)
        return sorted_paths

    def glob_style_match_path_to_list(path: str, match_paths: List[str]):
        """
        Check if given path matches any of the match_paths either via glob style matching or by being nested under - when the match path ends with "/**"
        return the match path if there's a match, and None otherwise
        """
        # check if any of our ignore paths match the given path
        for match_path in match_paths:
            # if the path is indicated as a parent via "/**" at the end
            if match_path.endswith("/**"):
                # check if the path is under the parent
                if path.startswith(match_path[:-3]):
                    return match_path
            # otherwise check for simple (non-recursive glob matching)
            else:
                path_object = Path(path)
                if path_object.match(match_path):
                    return match_path
        # if no match - this path shouldn't be ignored
        return None
The provided code snippet includes necessary dependencies for implementing the `should_ignore_path` function. Write a Python function `def should_ignore_path(path, ignore_paths)` to solve the following problem:
Helper function to check if the policy-store should ignore the given path.
Here is the function:
def should_ignore_path(path, ignore_paths):
    """Return True when the policy-store should ignore the given path
    (i.e. it matches one of the ignore patterns)."""
    matched = PathUtils.glob_style_match_path_to_list(path, ignore_paths)
    return matched is not None
156,230 | import asyncio
import functools
import json
import ssl
import time
from typing import Any, Awaitable, Callable, Dict, List, Optional, Set
from urllib.parse import urlencode
import aiohttp
import dpath
import jsonpatch
from aiofiles.threadpool.text import AsyncTextIOWrapper
from fastapi import Response, status
from opal_client.config import opal_client_config
from opal_client.logger import logger
from opal_client.policy_store.base_policy_store_client import (
BasePolicyStoreClient,
JsonableValue,
)
from opal_client.policy_store.schemas import PolicyStoreAuth
from opal_client.utils import exclude_none_fields, proxy_response
from opal_common.engine.parsing import get_rego_package
from opal_common.git.bundle_utils import BundleUtils
from opal_common.paths import PathUtils
from opal_common.schemas.policy import DataModule, PolicyBundle, RegoModule
from opal_common.schemas.store import JSONPatchAction, StoreTransaction, TransactionType
from pydantic import BaseModel
from tenacity import RetryError, retry
def fail_silently(fallback=None):
    """Decorator factory for async functions: swallow aiohttp client errors.

    Args:
        fallback: value returned when the wrapped coroutine raises
            aiohttp.ClientError (defaults to None).
    """

    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            try:
                return await func(*args, **kwargs)
            except aiohttp.ClientError:
                # removed the unused `as e` binding; deliberately silent
                # (see function name) - network failures degrade to fallback
                return fallback

        return wrapper

    return decorator
156,231 | import asyncio
import functools
import json
import ssl
import time
from typing import Any, Awaitable, Callable, Dict, List, Optional, Set
from urllib.parse import urlencode
import aiohttp
import dpath
import jsonpatch
from aiofiles.threadpool.text import AsyncTextIOWrapper
from fastapi import Response, status
from opal_client.config import opal_client_config
from opal_client.logger import logger
from opal_client.policy_store.base_policy_store_client import (
BasePolicyStoreClient,
JsonableValue,
)
from opal_client.policy_store.schemas import PolicyStoreAuth
from opal_client.utils import exclude_none_fields, proxy_response
from opal_common.engine.parsing import get_rego_package
from opal_common.git.bundle_utils import BundleUtils
from opal_common.paths import PathUtils
from opal_common.schemas.policy import DataModule, PolicyBundle, RegoModule
from opal_common.schemas.store import JSONPatchAction, StoreTransaction, TransactionType
from pydantic import BaseModel
from tenacity import RetryError, retry
The provided code snippet includes necessary dependencies for implementing the `affects_transaction` function. Write a Python function `def affects_transaction(func)` to solve the following problem:
mark a method as write (affecting state of transaction) for transaction log.
Here is the function:
def affects_transaction(func):
    """mark a method as write (affecting state of transaction) for transaction
    log."""
    func.affects_transaction = True
    return func
156,232 | import asyncio
import functools
import json
import ssl
import time
from typing import Any, Awaitable, Callable, Dict, List, Optional, Set
from urllib.parse import urlencode
import aiohttp
import dpath
import jsonpatch
from aiofiles.threadpool.text import AsyncTextIOWrapper
from fastapi import Response, status
from opal_client.config import opal_client_config
from opal_client.logger import logger
from opal_client.policy_store.base_policy_store_client import (
BasePolicyStoreClient,
JsonableValue,
)
from opal_client.policy_store.schemas import PolicyStoreAuth
from opal_client.utils import exclude_none_fields, proxy_response
from opal_common.engine.parsing import get_rego_package
from opal_common.git.bundle_utils import BundleUtils
from opal_common.paths import PathUtils
from opal_common.schemas.policy import DataModule, PolicyBundle, RegoModule
from opal_common.schemas.store import JSONPatchAction, StoreTransaction, TransactionType
from pydantic import BaseModel
from tenacity import RetryError, retry
async def proxy_response(response: aiohttp.ClientResponse) -> Response:
    # Convert an aiohttp client response into a FastAPI Response, copying
    # body, status code and headers (media type is forced to JSON).
    content = await response.text()
    return Response(
        content=content,
        status_code=response.status,
        headers=dict(response.headers),
        media_type="application/json",
    )
The provided code snippet includes necessary dependencies for implementing the `proxy_response_unless_invalid` function. Write a Python function `async def proxy_response_unless_invalid( raw_response: aiohttp.ClientResponse, accepted_status_codes: List[int] ) -> Response` to solve the following problem:
throws value error if the http response received has an unexpected status code.
Here is the function:
async def proxy_response_unless_invalid(
    raw_response: aiohttp.ClientResponse, accepted_status_codes: List[int]
) -> Response:
    """throws value error if the http response received has an unexpected
    status code."""
    response = await proxy_response(raw_response)
    if response.status_code not in accepted_status_codes:
        try:
            # NOTE(review): the body was already read by proxy_response();
            # this relies on aiohttp caching the payload. Also, .json()
            # typically raises aiohttp.ContentTypeError rather than
            # json.JSONDecodeError on non-JSON bodies - confirm the
            # intended exception here.
            error = await raw_response.json()
        except json.JSONDecodeError:
            error = ""
        raise ValueError(
            "OPA Client: unexpected status code: {}, error: {}".format(
                response.status_code, error
            )
        )
    return response
156,233 | import os
from opal_common.logger import logger
The provided code snippet includes necessary dependencies for implementing the `post_fork` function. Write a Python function `def post_fork(server, worker)` to solve the following problem:
this hook takes effect if we are using gunicorn to run OPAL.
Here is the function:
def post_fork(server, worker):
    """this hook takes effect if we are using gunicorn to run OPAL."""
    # intentionally a no-op placeholder for gunicorn's post_fork hook
    pass
156,234 | import os
from opal_common.logger import logger
def when_ready(server):
    # Gunicorn `when_ready` hook: pre-load scopes on the server process.
    # The import is done lazily so the hook degrades to a no-op when the
    # opal_server package is not installed (i.e. client-only deployments).
    try:
        import opal_server.scopes.task
    except ImportError:
        # Not opal server
        return
    opal_server.scopes.task.ScopesPolicyWatcherTask.preload_scopes()
    logger.warning("Finished pre loading scopes...")
156,235 | from termcolor import colored
The provided code snippet includes necessary dependencies for implementing the `error` function. Write a Python function `def error(message: str, show_emoji: bool = True) -> None` to solve the following problem:
Prints an error message. Args: message (str): The error message show_emoji (bool): Whether to show the emoji Returns: None
Here is the function:
def error(message: str, show_emoji: bool = True) -> None:
    """Print *message* to stdout in red, optionally prefixed with the
    error emoji.

    Args:
        message (str): The error message
        show_emoji (bool): Whether to show the emoji
    """
    prefix = "❌" if show_emoji else ""
    text = colored(f"{prefix} {message}", "red")
    print(text)
156,236 | from termcolor import colored
The provided code snippet includes necessary dependencies for implementing the `success` function. Write a Python function `def success(message: str, show_emoji: bool = True) -> None` to solve the following problem:
Prints a success message. Args: message (str): The success message show_emoji (bool): Whether to show the emoji Returns: None
Here is the function:
def success(message: str, show_emoji: bool = True) -> None:
    """Print *message* to stdout in green, optionally prefixed with the
    success emoji.

    Args:
        message (str): The success message
        show_emoji (bool): Whether to show the emoji
    """
    prefix = "✅" if show_emoji else ""
    text = colored(f"{prefix} {message}", "green")
    print(text)
156,237 | from termcolor import colored
The provided code snippet includes necessary dependencies for implementing the `info` function. Write a Python function `def info(message: str, show_emoji: bool = True) -> None` to solve the following problem:
Prints an info message. Args: message (str): The info message show_emoji (bool): Whether to show the emoji Returns: None
Here is the function:
def info(message: str, show_emoji: bool = True) -> None:
    """Print *message* to stdout in magenta, optionally prefixed with the
    info emoji.

    Args:
        message (str): The info message
        show_emoji (bool): Whether to show the emoji
    """
    prefix = "ℹ️" if show_emoji else ""
    text = colored(f"{prefix} {message}", "magenta")
    print(text)
156,238 | from termcolor import colored
The provided code snippet includes necessary dependencies for implementing the `warning` function. Write a Python function `def warning(message: str, show_emoji: bool = True) -> None` to solve the following problem:
Prints a warning message. Args: message (str): The warning message show_emoji (bool): Whether to show the emoji Returns: None
Here is the function:
def warning(message: str, show_emoji: bool = True) -> None:
    """
    Prints a warning message.

    Args:
        message (str): The warning message
        show_emoji (bool): Whether to show the emoji

    Returns:
        None
    """
    prefix = "⚠️" if show_emoji else ""
    print(colored(f"{prefix} {message}", "yellow"))
156,239 | from termcolor import colored
The provided code snippet includes necessary dependencies for implementing the `question` function. Write a Python function `def question(message: str, show_emoji: bool = True) -> str` to solve the following problem:
Prints a question message and returns the user's input. Args: message (str): The question message show_emoji (bool): Whether to show the emoji Returns: user_input (str): The user's input
Here is the function:
def question(message: str, show_emoji: bool = True) -> str:
    """
    Prints a question message and returns the user's input.

    Args:
        message (str): The question message
        show_emoji (bool): Whether to show the emoji

    Returns:
        user_input (str): The user's input
    """
    prefix = "❓" if show_emoji else ""
    prompt = colored(f"{prefix} {message}", "magenta")
    return input(prompt)
156,240 | import os
import random
import zipfile
import requests
import platform
from status import *
from config import *
The provided code snippet includes necessary dependencies for implementing the `close_running_selenium_instances` function. Write a Python function `def close_running_selenium_instances() -> None` to solve the following problem:
Closes any running Selenium instances. Returns: None
Here is the function:
def close_running_selenium_instances() -> None:
    """
    Closes any running Selenium instances.

    Selenium drives Firefox here, so every Firefox process is terminated
    using the platform-appropriate kill command.

    Returns:
        None
    """
    try:
        info(" => Closing running Selenium instances...")
        # Pick the kill command for the current OS, then run it once.
        kill_command = (
            "taskkill /f /im firefox.exe"
            if platform.system() == "Windows"
            else "pkill firefox"
        )
        os.system(kill_command)
        success(" => Closed running Selenium instances.")
    except Exception as e:
        error(f"Error occurred while closing running Selenium instances: {str(e)}")
156,241 | import os
import random
import zipfile
import requests
import platform
from status import *
from config import *
The provided code snippet includes necessary dependencies for implementing the `build_url` function. Write a Python function `def build_url(youtube_video_id: str) -> str` to solve the following problem:
Builds the URL to the YouTube video. Args: youtube_video_id (str): The YouTube video ID. Returns: url (str): The URL to the YouTube video.
Here is the function:
def build_url(youtube_video_id: str) -> str:
    """
    Builds the URL to the YouTube video.

    Args:
        youtube_video_id (str): The YouTube video ID.

    Returns:
        url (str): The URL to the YouTube video.
    """
    base = "https://www.youtube.com/watch"
    return f"{base}?v={youtube_video_id}"
156,242 | import os
import random
import zipfile
import requests
import platform
from status import *
from config import *
The provided code snippet includes necessary dependencies for implementing the `rem_temp_files` function. Write a Python function `def rem_temp_files() -> None` to solve the following problem:
Removes temporary files in the `.mp` directory. Returns: None
Here is the function:
def rem_temp_files() -> None:
    """
    Removes temporary files in the `.mp` directory.

    `.json` entries are kept (they hold persisted state), and any
    sub-directories are skipped instead of crashing the cleanup:
    `os.remove()` raises on directories, so only regular files are deleted.

    Returns:
        None
    """
    # Path to the `.mp` directory
    mp_dir = os.path.join(ROOT_DIR, ".mp")

    for entry in os.listdir(mp_dir):
        entry_path = os.path.join(mp_dir, entry)
        # Delete only non-JSON regular files; leave directories untouched.
        if not entry.endswith(".json") and os.path.isfile(entry_path):
            os.remove(entry_path)
156,243 | import os
import random
import zipfile
import requests
import platform
from status import *
from config import *
The provided code snippet includes necessary dependencies for implementing the `fetch_songs` function. Write a Python function `def fetch_songs() -> None` to solve the following problem:
Downloads songs into songs/ directory to use with generated videos. Returns: None
Here is the function:
def fetch_songs() -> None:
    """
    Downloads songs into songs/ directory to use with generated videos.

    Downloads a zip archive (from the configured URL, or a default one),
    extracts it into the Songs directory, and deletes the archive. Does
    nothing if the Songs directory already exists.

    Returns:
        None
    """
    try:
        info(" => Fetching songs...")

        files_dir = os.path.join(ROOT_DIR, "Songs")
        if os.path.exists(files_dir):
            # Songs were fetched on a previous run; nothing to do.
            return
        os.mkdir(files_dir)
        if get_verbose():
            info(f" => Created directory: {files_dir}")

        # Fetch the archive, falling back to the default URL when none is configured.
        zip_url = get_zip_url() or "https://filebin.net/bb9ewdtckolsf3sg/drive-download-20240209T180019Z-001.zip"
        response = requests.get(zip_url)

        zip_path = os.path.join(files_dir, "songs.zip")
        with open(zip_path, "wb") as archive_file:
            archive_file.write(response.content)

        # Extract the songs next to the archive, then drop the archive itself.
        with zipfile.ZipFile(zip_path, "r") as archive:
            archive.extractall(files_dir)
        os.remove(zip_path)

        success(" => Downloaded Songs to ../Songs.")
    except Exception as e:
        error(f"Error occurred while fetching songs: {str(e)}")
156,244 | import os
import random
import zipfile
import requests
import platform
from status import *
from config import *
The provided code snippet includes necessary dependencies for implementing the `choose_random_song` function. Write a Python function `def choose_random_song() -> str` to solve the following problem:
Chooses a random song from the songs/ directory. Returns: str: The path to the chosen song.
Here is the function:
def choose_random_song() -> str:
    """
    Chooses a random song from the songs/ directory.

    Returns:
        str: The path to the chosen song.
    """
    try:
        songs_dir = os.path.join(ROOT_DIR, "Songs")
        picked = random.choice(os.listdir(songs_dir))
        success(f" => Chose song: {picked}")
        return os.path.join(songs_dir, picked)
    except Exception as e:
        error(f"Error occurred while choosing random song: {str(e)}")
156,245 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
def get_verbose() -> bool:
    """
    Gets the verbose flag from the config file.

    Returns:
        verbose (bool): The verbose flag
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["verbose"]
The provided code snippet includes necessary dependencies for implementing the `assert_folder_structure` function. Write a Python function `def assert_folder_structure() -> None` to solve the following problem:
Make sure that the necessary folder structure is present. Returns: None
Here is the function:
def assert_folder_structure() -> None:
    """
    Make sure that the necessary folder structure is present.

    Currently this only guarantees the `.mp` state directory exists.

    Returns:
        None
    """
    mp_dir = os.path.join(ROOT_DIR, ".mp")
    # Create the .mp folder if it is missing.
    if not os.path.exists(mp_dir):
        if get_verbose():
            print(colored(f"=> Creating .mp folder at {mp_dir}", "green"))
        os.makedirs(mp_dir)
156,246 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_first_time_running` function. Write a Python function `def get_first_time_running() -> bool` to solve the following problem:
Checks if the program is running for the first time by checking if .mp folder exists. Returns: exists (bool): True if the program is running for the first time, False otherwise
Here is the function:
def get_first_time_running() -> bool:
    """
    Checks if the program is running for the first time by checking if .mp folder exists.

    Returns:
        exists (bool): True if the program is running for the first time, False otherwise
    """
    mp_dir = os.path.join(ROOT_DIR, ".mp")
    return not os.path.exists(mp_dir)
156,247 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_email_credentials` function. Write a Python function `def get_email_credentials() -> dict` to solve the following problem:
Gets the email credentials from the config file. Returns: credentials (dict): The email credentials
Here is the function:
def get_email_credentials() -> dict:
    """
    Gets the email credentials from the config file.

    Returns:
        credentials (dict): The email credentials
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["email"]
156,248 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_firefox_profile_path` function. Write a Python function `def get_firefox_profile_path() -> str` to solve the following problem:
Gets the path to the Firefox profile. Returns: path (str): The path to the Firefox profile
Here is the function:
def get_firefox_profile_path() -> str:
    """
    Gets the path to the Firefox profile.

    Returns:
        path (str): The path to the Firefox profile
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["firefox_profile"]
156,249 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_headless` function. Write a Python function `def get_headless() -> bool` to solve the following problem:
Gets the headless flag from the config file. Returns: headless (bool): The headless flag
Here is the function:
def get_headless() -> bool:
    """
    Gets the headless flag from the config file.

    Returns:
        headless (bool): The headless flag
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["headless"]
156,250 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_model` function. Write a Python function `def get_model() -> str` to solve the following problem:
Gets the model from the config file. Returns: model (str): The model
Here is the function:
def get_model() -> str:
    """
    Gets the model from the config file.

    Returns:
        model (str): The model (stored under the "llm" key)
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["llm"]
156,251 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_twitter_language` function. Write a Python function `def get_twitter_language() -> str` to solve the following problem:
Gets the Twitter language from the config file. Returns: language (str): The Twitter language
Here is the function:
def get_twitter_language() -> str:
    """
    Gets the Twitter language from the config file.

    Returns:
        language (str): The Twitter language
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["twitter_language"]
156,252 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_image_model` function. Write a Python function `def get_image_model() -> str` to solve the following problem:
Gets the image model from the config file. Returns: model (str): The image model
Here is the function:
def get_image_model() -> str:
    """
    Gets the image model from the config file.

    Returns:
        model (str): The image model
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["image_model"]
156,253 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_threads` function. Write a Python function `def get_threads() -> int` to solve the following problem:
Gets the amount of threads to use for example when writing to a file with MoviePy. Returns: threads (int): Amount of threads
Here is the function:
def get_threads() -> int:
    """
    Gets the amount of threads to use for example when writing to a file with MoviePy.

    Returns:
        threads (int): Amount of threads
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["threads"]
156,254 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_image_prompt_llm` function. Write a Python function `def get_image_prompt_llm() -> str` to solve the following problem:
Gets the image prompt for LLM from the config file. Returns: prompt (str): The image prompt
Here is the function:
def get_image_prompt_llm() -> str:
    """
    Gets the image prompt for LLM from the config file.

    Returns:
        prompt (str): The image prompt
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["image_prompt_llm"]
156,255 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_zip_url` function. Write a Python function `def get_zip_url() -> str` to solve the following problem:
Gets the URL to the zip file containing the songs. Returns: url (str): The URL to the zip file
Here is the function:
def get_zip_url() -> str:
    """
    Gets the URL to the zip file containing the songs.

    Returns:
        url (str): The URL to the zip file
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["zip_url"]
156,256 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_is_for_kids` function. Write a Python function `def get_is_for_kids() -> bool` to solve the following problem:
Gets the is for kids flag from the config file. Returns: is_for_kids (bool): The is for kids flag
Here is the function:
def get_is_for_kids() -> bool:
    """
    Gets the is for kids flag from the config file.

    Returns:
        is_for_kids (bool): The is for kids flag
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["is_for_kids"]
156,257 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_google_maps_scraper_zip_url` function. Write a Python function `def get_google_maps_scraper_zip_url() -> str` to solve the following problem:
Gets the URL to the zip file containing the Google Maps scraper. Returns: url (str): The URL to the zip file
Here is the function:
def get_google_maps_scraper_zip_url() -> str:
    """
    Gets the URL to the zip file containing the Google Maps scraper.

    Returns:
        url (str): The URL to the zip file
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["google_maps_scraper"]
156,258 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_google_maps_scraper_niche` function. Write a Python function `def get_google_maps_scraper_niche() -> str` to solve the following problem:
Gets the niche for the Google Maps scraper. Returns: niche (str): The niche
Here is the function:
def get_google_maps_scraper_niche() -> str:
    """
    Gets the niche for the Google Maps scraper.

    Returns:
        niche (str): The niche
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["google_maps_scraper_niche"]
156,259 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_scraper_timeout` function. Write a Python function `def get_scraper_timeout() -> int` to solve the following problem:
Gets the timeout for the scraper. Returns: timeout (int): The timeout
Here is the function:
def get_scraper_timeout() -> int:
    """
    Gets the timeout for the scraper.

    Returns:
        timeout (int): The timeout, defaulting to 300 when the configured
            value is falsy (e.g. null or 0)
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        timeout = json.load(config_file)["scraper_timeout"]
    return timeout or 300
156,260 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_outreach_message_subject` function. Write a Python function `def get_outreach_message_subject() -> str` to solve the following problem:
Gets the outreach message subject. Returns: subject (str): The outreach message subject
Here is the function:
def get_outreach_message_subject() -> str:
    """
    Gets the outreach message subject.

    Returns:
        subject (str): The outreach message subject
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["outreach_message_subject"]
156,261 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_outreach_message_body_file` function. Write a Python function `def get_outreach_message_body_file() -> str` to solve the following problem:
Gets the outreach message body file. Returns: file (str): The outreach message body file
Here is the function:
def get_outreach_message_body_file() -> str:
    """
    Gets the outreach message body file.

    Returns:
        file (str): The outreach message body file
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["outreach_message_body_file"]
156,262 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_assemblyai_api_key` function. Write a Python function `def get_assemblyai_api_key() -> str` to solve the following problem:
Gets the AssemblyAI API key. Returns: key (str): The AssemblyAI API key
Here is the function:
def get_assemblyai_api_key() -> str:
    """
    Gets the AssemblyAI API key.

    Returns:
        key (str): The AssemblyAI API key
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["assembly_ai_api_key"]
156,263 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
The provided code snippet includes necessary dependencies for implementing the `equalize_subtitles` function. Write a Python function `def equalize_subtitles(srt_path: str, max_chars: int = 10) -> None` to solve the following problem:
Equalizes the subtitles in a SRT file. Args: srt_path (str): The path to the SRT file max_chars (int): The maximum amount of characters in a subtitle Returns: None
Here is the function:
def equalize_subtitles(srt_path: str, max_chars: int = 10) -> None:
    """
    Equalizes the subtitles in a SRT file.

    Args:
        srt_path (str): The path to the SRT file
        max_chars (int): The maximum amount of characters in a subtitle

    Returns:
        None
    """
    # Source and destination are the same path: the file is rewritten in place.
    srt_equalizer.equalize_srt_file(
        srt_path,
        srt_path,
        max_chars,
    )
156,264 | import os
import sys
import json
import srt_equalizer
from termcolor import colored
ROOT_DIR = os.path.dirname(sys.path[0])
The provided code snippet includes necessary dependencies for implementing the `get_font` function. Write a Python function `def get_font() -> str` to solve the following problem:
Gets the font from the config file. Returns: font (str): The font
Here is the function:
def get_font() -> str:
    """
    Gets the font from the config file.

    Returns:
        font (str): The font
    """
    config_path = os.path.join(ROOT_DIR, "config.json")
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    return config["font"]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.