code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def set_autoscaling_override(request):
    """Set a temporary autoscaling override for a service/instance.

    This endpoint creates or updates a ConfigMap entry with override information
    including expiration time. The override will be applied by the autoscaler.

    Required parameters:
    - service: The service name
    - instance: The instance name
    - min_instances: The minimum number of instances to enforce
    - expire_after: unix timestamp after which the override is no longer valid

    :raises ApiFailure: 501 for non-kubernetes instances, 400 for invalid or
        missing parameters
    """
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    cluster = settings.cluster
    soa_dir = settings.soa_dir
    # Deployment data isn't needed to inspect autoscaling config, so skip loading it.
    instance_config = get_instance_config(
        service, instance, cluster, soa_dir, load_deployments=False
    )
    if not isinstance(instance_config, KubernetesDeploymentConfig):
        error_message = (
            f"Autoscaling is not supported for {service}.{instance} because instance type is not "
            f"kubernetes."
        )
        raise ApiFailure(error_message, 501)
    json_body = request.swagger_data.get("json_body", {})
    min_instances_override = json_body.get("min_instances")
    expire_after = json_body.get("expire_after")
    if not isinstance(min_instances_override, int) or min_instances_override < 1:
        raise ApiFailure("min_instances must be a positive integer", 400)
    if not expire_after:
        raise ApiFailure("expire_after is required", 400)
    max_instances = instance_config.get_max_instances()
    if max_instances is None:
        raise ApiFailure(f"Autoscaling is not enabled for {service}.{instance}", 400)
    if max_instances < min_instances_override:
        raise ApiFailure(
            f"min_instances ({min_instances_override}) cannot be greater than max_instances ({max_instances})",
            400,
        )
    configmap, created = get_or_create_autoscaling_overrides_configmap()
    if created:
        log.info("Created new autoscaling overrides ConfigMap")
    # i dunno why this is necessary, but a newly created configmap doesn't have a data field
    # even when we set it in the create call
    if not configmap.data:
        configmap.data = {}
    override_data = {
        "min_instances": min_instances_override,
        "created_at": datetime.now(timezone.utc).isoformat(),
        # NOTE: we may want to also allow setting a max_instances override in the future, but if we do that
        # we'd probably want to force folks to either set one or both and share the same expiration time
        "expire_after": expire_after,
    }
    service_instance = f"{service}.{instance}"
    # ConfigMap values are strings, so each entry is a JSON-serialized dict of overrides.
    existing_overrides = (
        json.loads(configmap.data[service_instance])
        if service_instance in configmap.data
        else {}
    )
    # New override data wins over any previously stored keys.
    merged_overrides = {**existing_overrides, **override_data}
    serialized_overrides = json.dumps(merged_overrides)
    patch_namespaced_configmap(
        name=AUTOSCALING_OVERRIDES_CONFIGMAP_NAME,
        namespace=AUTOSCALING_OVERRIDES_CONFIGMAP_NAMESPACE,
        # this should only update the single entry for the $service.$instance key
        # ain't k8s grand?
        body={"data": {service_instance: serialized_overrides}},
        kube_client=settings.kubernetes_client,
    )
    response_body = {
        "service": service,
        "instance": instance,
        "cluster": cluster,
        "min_instances": min_instances_override,
        "expire_after": expire_after,
        "status": "SUCCESS",
    }
    # NOTE: this is an HTTP 202 since actually updating the HPA happens asynchronously
    # through setup_kubernetes_job
    # XXX: should we try to patch things here as well?
    return Response(json_body=response_body, status_code=202)
|
Set a temporary autoscaling override for a service/instance.
This endpoint creates or updates a ConfigMap entry with override information
including expiration time. The override will be applied by the autoscaler.
Required parameters:
- service: The service name
- instance: The instance name
- min_instances: The minimum number of instances to enforce
- expires_after: unix timestamp after which the override is no longer valid
|
set_autoscaling_override
|
python
|
Yelp/paasta
|
paasta_tools/api/views/autoscaler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/api/views/autoscaler.py
|
Apache-2.0
|
def api_failure_response(exc, request):
    """Build an HTTP error response for a 'hard' API failure.

    The API stops on 'hard' errors and reports them through this view, while
    'soft' errors keep producing results and are accumulated in a 'message'
    field of the normal output instead.

    :param exc: exception carrying ``msg`` (error text) and ``err`` (HTTP status)
    :param request: the incoming request (unused; part of the view signature)
    :return: a Response whose body is the error message and whose status
        comes from ``exc.err``
    """
    log.error(exc.msg)
    error_response = Response("ERROR: %s" % exc.msg)
    error_response.status_int = exc.err
    return error_response
|
Construct an HTTP response with an error status code. This happens when
the API service has to stop on a 'hard' error. In contrast, the API service
continues to produce results on a 'soft' error. It will place a 'message'
field in the output. Multiple 'soft' errors are concatenated in the same
'message' field when errors happen in the same hierarchy.
|
api_failure_response
|
python
|
Yelp/paasta
|
paasta_tools/api/views/exception.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/api/views/exception.py
|
Apache-2.0
|
def window_historical_load(historical_load, window_begin, window_end):
    """Return the (timestamp, value) datapoints from historical_load whose
    timestamps lie within [window_begin, window_end], inclusive on both ends."""
    return [
        (ts, load)
        for ts, load in historical_load
        if window_begin <= ts <= window_end
    ]
|
Filter historical_load down to just the datapoints lying between times window_begin and window_end, inclusive.
|
window_historical_load
|
python
|
Yelp/paasta
|
paasta_tools/autoscaling/forecasting.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/autoscaling/forecasting.py
|
Apache-2.0
|
def moving_average_forecast_policy(
    historical_load,
    moving_average_window_seconds=DEFAULT_UWSGI_AUTOSCALING_MOVING_AVERAGE_WINDOW,
    **kwargs,
):
    """Forecast load as the plain arithmetic mean of every datapoint inside the
    trailing moving-average window; all datapoints are weighted equally."""
    recent_values = [
        load
        for _, load in trailing_window_historical_load(
            historical_load, moving_average_window_seconds
        )
    ]
    return sum(recent_values) / len(recent_values)
|
Does a simple average of all historical load data points within the moving average window. Weights all data
points within the window equally.
|
moving_average_forecast_policy
|
python
|
Yelp/paasta
|
paasta_tools/autoscaling/forecasting.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/autoscaling/forecasting.py
|
Apache-2.0
|
def linreg_forecast_policy(
    historical_load,
    linreg_window_seconds,
    linreg_extrapolation_seconds,
    linreg_default_slope=0,
    **kwargs,
):
    """Fit a least-squares line to the load within the last linreg_window_seconds
    and extrapolate it into the future.

    The fitted line is evaluated at (now + delta) for every delta in
    linreg_extrapolation_seconds and the largest prediction is returned. (With
    linear extrapolation the maximum always falls on the first or last delta,
    so more than two deltas never add information.)

    :param linreg_window_seconds: Consider all data from this many seconds ago until now.
    :param linreg_extrapolation_seconds: A list of floats representing a number of seconds in the future at which to
                                         predict the load. The highest prediction will be returned.
    :param linreg_default_slope: If there is only one data point within the window, the equation for slope is undefined,
                                 so we use this value (expressed in load/second) for prediction instead. Default is
                                 0.
    """
    window = trailing_window_historical_load(historical_load, linreg_window_seconds)
    times = [ts for ts, _ in window]
    loads = [load for _, load in window]
    mean_time = sum(times) / len(times)
    mean_load = sum(loads) / len(loads)
    if len(window) > 1:
        # Standard least-squares slope: covariance(time, load) / variance(time).
        covariance = sum((ts - mean_time) * (load - mean_load) for ts, load in window)
        variance = sum((ts - mean_time) ** 2 for ts in times)
        slope = covariance / variance
    else:
        slope = linreg_default_slope
    intercept = mean_load - slope * mean_time
    # A bare number is accepted as a convenience and treated as a one-item list.
    if isinstance(linreg_extrapolation_seconds, (int, float)):
        linreg_extrapolation_seconds = [linreg_extrapolation_seconds]
    # "now" is the timestamp of the most recent datapoint.
    now, _ = historical_load[-1]
    return max(
        slope * (now + delta) + intercept for delta in linreg_extrapolation_seconds
    )
|
Does a linear regression on the load data within the last linreg_window_seconds. For every time delta in
linreg_extrapolation_seconds, forecasts the value at that time delta from now, and returns the maximum of these
predicted values. (With linear extrapolation, it doesn't make sense to forecast at more than two points, as the max
load will always be at the first or last time delta.)
:param linreg_window_seconds: Consider all data from this many seconds ago until now.
:param linreg_extrapolation_seconds: A list of floats representing a number of seconds in the future at which to
predict the load. The highest prediction will be returned.
:param linreg_default_slope: If there is only one data point within the window, the equation for slope is undefined,
so we use this value (expressed in load/second) for prediction instead. Default is
0.
|
linreg_forecast_policy
|
python
|
Yelp/paasta
|
paasta_tools/autoscaling/forecasting.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/autoscaling/forecasting.py
|
Apache-2.0
|
def get_sso_auth_token(paasta_apis: bool = False) -> str:
    """Generate an authentication token for the calling user from the Single
    Sign On provider.

    :param bool paasta_apis: authenticate for PaaSTA APIs rather than service APIs
    :return: the cached-or-fresh JWT for the selected OIDC client id
    """
    system_config = load_system_paasta_config()
    if paasta_apis:
        client_id = system_config.get_api_auth_sso_oidc_client_id()
    else:
        client_id = system_config.get_service_auth_sso_oidc_client_id()
    return get_and_cache_jwt_default(client_id)
|
Generate an authentication token for the calling user from the Single Sign On provider
:param bool paasta_apis: authenticate for PaaSTA APIs
|
get_sso_auth_token
|
python
|
Yelp/paasta
|
paasta_tools/cli/authentication.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/authentication.py
|
Apache-2.0
|
def load_method(module_name, method_name):
    """Import *module_name* and return its attribute named *method_name*.

    :param module_name: dotted module path as a string
    :param method_name: attribute (typically a function) name as a string
    :return: the resolved attribute
    """
    imported_module = __import__(module_name, fromlist=[method_name])
    return getattr(imported_module, method_name)
|
Return a function given a module and method name.
:param module_name: a string
:param method_name: a string
:return: a function
|
load_method
|
python
|
Yelp/paasta
|
paasta_tools/cli/cli.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py
|
Apache-2.0
|
def add_subparser(command, subparsers):
    """Register the subparser for one paasta client command.

    Every paasta client command module (paasta_tools.cli.cmds.<command>) must
    implement a function named add_subparser; this resolves that function and
    invokes it so each command wires itself into the top-level parser. That
    keeps argcomplete/argparse integration modular per command.

    :param command: a simple string - e.g. 'list'
    :param subparsers: an ArgumentParser object"""
    cmd_module_name = "paasta_tools.cli.cmds.%s" % command
    register = load_method(cmd_module_name, "add_subparser")
    register(subparsers)
|
Given a command name, paasta_cmd, execute the add_subparser method
implemented in paasta_cmd.py.
Each paasta client command must implement a method called add_subparser.
This allows the client to dynamically add subparsers to its subparser, which
provides the benefits of argcomplete/argparse but gets it done in a modular
fashion.
:param command: a simple string - e.g. 'list'
:param subparsers: an ArgumentParser object
|
add_subparser
|
python
|
Yelp/paasta
|
paasta_tools/cli/cli.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py
|
Apache-2.0
|
def get_argparser(commands=None):
    """Create and return argument parser for a set of subcommands.

    :param commands: Union[None, List[str]] If `commands` argument is `None`,
        add full parsers for all subcommands, if `commands` is empty list -
        add thin parsers for all subcommands, otherwise - add full parsers for
        subcommands in the argument.
    :return: the configured top-level ArgumentParser
    """
    parser = PrintsHelpOnErrorArgumentParser(
        description=(
            "The PaaSTA command line tool. The 'paasta' command is the entry point "
            "to multiple subcommands, see below.\n\n"
            "You can see more help for individual commands by appending them with '--help', "
            "for example, 'paasta status --help' or see the man page with 'man paasta status'."
        ),
        epilog=(
            "The 'paasta' command line tool is designed to be used by humans, and therefore has "
            "command line completion for almost all options and uses pretty formatting when "
            "possible."
        ),
        # Suppressing usage prevents it from being printed twice upon print_help
        usage=argparse.SUPPRESS,
    )
    # http://stackoverflow.com/a/8521644/812183
    parser.add_argument(
        "-V",
        "--version",
        action="version",
        version=f"paasta-tools {paasta_tools.__version__}",
    )
    subparsers = parser.add_subparsers(dest="command", metavar="")
    subparsers.required = True
    # Adding a separate help subparser allows us to respond to "help" without --help
    # (fix: this literal had a stray f-prefix with no placeholders)
    help_parser = subparsers.add_parser(
        "help", help="run `paasta <subcommand> -h` for help"
    )
    help_parser.set_defaults(command=None)
    # Build a list of subcommands to add them in alphabetical order later
    command_choices: List[Tuple[str, Any]] = []
    if commands is None:
        # Full parsers for every bundled subcommand module.
        for command in sorted(modules_in_pkg(cmds)):
            command_choices.append(
                (command, (add_subparser, [command, subparsers], {}))
            )
    elif commands:
        # Full parsers only for the explicitly requested subcommands.
        for command in commands:
            if command not in PAASTA_SUBCOMMANDS:
                # could be external subcommand
                continue
            command_choices.append(
                (
                    command,
                    (add_subparser, [PAASTA_SUBCOMMANDS[command], subparsers], {}),
                )
            )
    else:
        # Thin, help-less parsers for all subcommands: cheap to build, enough
        # to discover which command was requested on a first parse pass.
        for command in PAASTA_SUBCOMMANDS.keys():
            command_choices.append(
                (
                    command,
                    (subparsers.add_parser, [command], dict(help="", add_help=False)),
                )
            )
        for command in list_external_commands():
            command_choices.append(
                (command, (subparsers.add_parser, [command], dict(help="")))
            )
    # Register subcommands alphabetically by name.
    for (_, (fn, args, kwds)) in sorted(command_choices, key=lambda e: e[0]):
        fn(*args, **kwds)
    return parser
|
Create and return argument parser for a set of subcommands.
:param commands: Union[None, List[str]] If `commands` argument is `None`,
add full parsers for all subcommands, if `commands` is empty list -
add thin parsers for all subcommands, otherwise - add full parsers for
subcommands in the argument.
|
get_argparser
|
python
|
Yelp/paasta
|
paasta_tools/cli/cli.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py
|
Apache-2.0
|
def parse_args(argv):
    """Initialize autocompletion and configure the argument parser.

    A first pass with thin parsers (``commands=[]``) discovers which
    subcommand was requested; a second pass builds the full parser for only
    that subcommand before the final parse.

    :return: a tuple of (the argparse.Namespace mapping parameter names to
        the inputs from sys.argv, the parser used to produce it)
    """
    parser = get_argparser(commands=[])
    argcomplete.autocomplete(parser)
    known_args, _ = parser.parse_known_args(argv)
    if known_args.command:
        parser = get_argparser(commands=[known_args.command])
        argcomplete.autocomplete(parser)
    return parser.parse_args(argv), parser
|
Initialize autocompletion and configure the argument parser.
:return: an argparse.Namespace object mapping parameter names to the inputs
from sys.argv
|
parse_args
|
python
|
Yelp/paasta
|
paasta_tools/cli/cli.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py
|
Apache-2.0
|
def main(argv=None):
    """Entry point for a paasta call.

    Reads args from sys.argv (or *argv*), dispatches to the matching command
    in paasta_cli/cmds, and exits the process with that command's return code
    (130 on keyboard interrupt, 0 when only help was shown).
    """
    logging.basicConfig()
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    # External commands are exec'd out early so we never attempt to parse
    # their "foreign" arguments, which would cause a stack trace.
    if calling_external_command():
        exec_subcommand(sys.argv)
    try:
        parsed_args, arg_parser = parse_args(argv)
        if parsed_args.command is None:
            # Bare `paasta` / `paasta help`: print usage and exit cleanly.
            arg_parser.print_help()
            exit_code = 0
        else:
            exit_code = parsed_args.command(parsed_args)
    except KeyboardInterrupt:
        exit_code = 130  # 128 + SIGINT(2), the conventional interrupt status
    sys.exit(exit_code)
|
Perform a paasta call. Read args from sys.argv and pass parsed args onto
appropriate command in paasta_cli/cmds directory.
Ensure we kill any child pids before we quit
|
main
|
python
|
Yelp/paasta
|
paasta_tools/cli/cli.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cli.py
|
Apache-2.0
|
def make_copyfile_symlink_aware():
    """Temporarily patch shutil so file copies preserve symlinks.

    cookiecutter doesn't respect symlinks at all, and at Yelp symlinks are
    used to reduce duplication in the soa configs; while this context is
    active, shutil.copyfile and shutil.copymode default to
    ``follow_symlinks=False``. The originals are always restored on exit.
    (Maybe cookie-cutter will accept a symlink-aware PR?)
    """
    original_copyfile = shutil.copyfile
    original_copymode = shutil.copymode

    def copyfile_no_follow(*args, **kwargs):
        kwargs.setdefault("follow_symlinks", False)
        original_copyfile(*args, **kwargs)

    def copymode_no_follow(*args, **kwargs):
        kwargs.setdefault("follow_symlinks", False)
        original_copymode(*args, **kwargs)

    shutil.copyfile = copyfile_no_follow
    shutil.copymode = copymode_no_follow
    try:
        yield
    finally:
        # Restore even if the body raised.
        shutil.copyfile = original_copyfile
        shutil.copymode = original_copymode
|
The reasoning behind this monkeypatch is that cookiecutter doesn't
respect symlinks at all, and at Yelp we use symlinks to reduce duplication
in the soa configs. Maybe cookie-cutter will accept a symlink-aware PR?
|
make_copyfile_symlink_aware
|
python
|
Yelp/paasta
|
paasta_tools/cli/fsm_cmd.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/fsm_cmd.py
|
Apache-2.0
|
def is_file_in_dir(file_name, path):
    """Recursively search *path* for a file matching *file_name*.

    :param file_name: an fnmatch-style file-name pattern to look for
    :param path: a string path to walk
    :return: the full path of the first match, or False when nothing matches
    """
    for root, _, filenames in os.walk(path):
        matched = fnmatch.filter(filenames, file_name)
        if matched:
            return os.path.join(root, matched[0])
    return False
|
Recursively search path for file_name.
:param file_name: a string of a file name to find
:param path: a string path
:param file_ext: a string of a file extension
:return: a boolean
|
is_file_in_dir
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def validate_service_name(service, soa_dir=DEFAULT_SOA_DIR):
    """Check that a directory named *service* exists under *soa_dir*.

    :param service: name of the service you wish to check exists
    :param soa_dir: directory to look for service names
    :return: True when the service directory exists
    :raises NoSuchService: when *service* is falsy or has no directory
    """
    service_path = os.path.join(soa_dir, service) if service else None
    if service_path is None or not os.path.isdir(service_path):
        raise NoSuchService(service)
    return True
|
Determine whether directory named service exists in the provided soa_dir
:param service: a string of the name of the service you wish to check exists
:param soa_dir: directory to look for service names
:return : boolean True
:raises: NoSuchService exception
|
validate_service_name
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def list_paasta_services(soa_dir: str = DEFAULT_SOA_DIR):
    """Return the services (in list_services order) that have at least one
    service.instance, which indicates the service is on PaaSTA."""
    return [
        service
        for service in list_services(soa_dir)
        if list_all_instances_for_service(service, soa_dir=soa_dir)
    ]
|
Returns a sorted list of services that happen to have at
least one service.instance, which indicates it is on PaaSTA
|
list_paasta_services
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def list_service_instances(soa_dir: str = DEFAULT_SOA_DIR):
    """Return composed service<SPACER>instance job ids for every instance of
    every service under *soa_dir*."""
    return [
        compose_job_id(service, instance)
        for service in list_services(soa_dir)
        for instance in list_all_instances_for_service(
            service=service, soa_dir=soa_dir
        )
    ]
|
Returns a sorted list of service<SPACER>instance names
|
list_service_instances
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def list_instances(**kwargs):
    """Return a sorted list of all possible instance names for tab completion.

    We try to guess which service is being operated on (via
    guess_service_name); when that guess is not a valid service, instances of
    *all* services are offered instead.
    """
    guessed_service = guess_service_name()
    try:
        validate_service_name(guessed_service)
        instance_names: Set[str] = set(list_all_instances_for_service(guessed_service))
    except NoSuchService:
        instance_names = {
            instance
            for service in list_services()
            for instance in list_all_instances_for_service(service)
        }
    return sorted(instance_names)
|
Returns a sorted list of all possible instance names
for tab completion. We try to guess what service you might be
operating on, otherwise we just provide *all* of them
|
list_instances
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def calculate_remote_masters(
    cluster: str, system_paasta_config: SystemPaastaConfig
) -> Tuple[List[str], Optional[str]]:
    """Given a cluster, do a DNS lookup of that cluster (which
    happens to point, eventually, to the Mesos masters in that cluster).
    Return IPs of those Mesos masters.

    :return: a tuple of (list of master IPs, error output); the output is
        None on success, and on DNS failure the IP list is empty and the
        output describes the error.
    """
    cluster_fqdn = system_paasta_config.get_cluster_fqdn_format().format(
        cluster=cluster
    )
    try:
        # gethostbyname_ex returns (hostname, aliaslist, ipaddrlist)
        _, _, ips = socket.gethostbyname_ex(cluster_fqdn)
        output = None
    except socket.gaierror as e:
        output = f"ERROR while doing DNS lookup of {cluster_fqdn}:\n{e.strerror}\n "
        ips = []
    return (ips, output)
|
Given a cluster, do a DNS lookup of that cluster (which
happens to point, eventually, to the Mesos masters in that cluster).
Return IPs of those Mesos masters.
|
calculate_remote_masters
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def check_ssh_on_master(master, timeout=10):
    """Given a master, attempt to ssh to the master and run a simple command
    with sudo to verify that ssh and sudo work properly. Return a tuple of the
    success status (True or False) and any output from attempting the check.

    :param master: hostname to ssh to
    :param timeout: seconds before the check command is killed
    """
    check_command = "ssh -A -n -o StrictHostKeyChecking=no %s /bin/true" % master
    rc, output = _run(check_command, timeout=timeout)
    if rc == 0:
        return (True, None)
    # BUG FIX: these used to be independent `if` statements, so the trailing
    # `else` bound only to the `rc == -9` check and the "unknown failure"
    # branch clobbered the ssh-specific (255) and sudo-specific (1) messages.
    if rc == 255:  # ssh error
        reason = "Return code was %d which probably means an ssh failure." % rc
        hint = "HINT: Are you allowed to ssh to this machine %s?" % master
    elif rc == 1:  # sudo error
        reason = "Return code was %d which probably means a sudo failure." % rc
        hint = "HINT: Is your ssh agent forwarded? (ssh-add -l)"
    elif rc == -9:  # timeout error
        reason = (
            "Return code was %d which probably means ssh took too long and timed out."
            % rc
        )
        hint = "HINT: Is there network latency? Try running somewhere closer to the cluster."
    else:  # unknown error
        reason = "Return code was %d which is an unknown failure." % rc
        hint = "HINT: Talk to #paasta and pastebin this output"
    output = (
        "ERROR cannot run check command %(check_command)s\n"
        "%(reason)s\n"
        "%(hint)s\n"
        "Output from check command: %(output)s"
        % {
            "check_command": check_command,
            "reason": reason,
            "hint": hint,
            "output": output,
        }
    )
    return (False, output)
|
Given a master, attempt to ssh to the master and run a simple command
with sudo to verify that ssh and sudo work properly. Return a tuple of the
success status (True or False) and any output from attempting the check.
|
check_ssh_on_master
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def run_on_master(
    cluster,
    system_paasta_config,
    cmd_parts,
    timeout=None,
    err_code=-1,
    graceful_exit=False,
    stdin=None,
):
    """Find connectable master for :cluster: and :system_paasta_config: args and
    invoke command from :cmd_parts:, wrapping it in ssh call.

    :returns (exit code, output)

    :param cluster: cluster to find master in
    :param system_paasta_config: system configuration to lookup master data
    :param cmd_parts: passed into paasta_tools.utils._run as command along with
        ssh bits
    :param timeout: see paasta_tools.utils._run documentation (default: None)
    :param err_code: code to return along with error message when something goes
        wrong (default: -1)
    :param graceful_exit: wrap command in a bash script that waits for input and
        kills the original command; trap SIGINT and send newline into stdin
    :param stdin: stdin passed to _run (overridden when graceful_exit is set)
    """
    try:
        master = connectable_master(cluster, system_paasta_config)
    except NoMasterError as e:
        return (err_code, str(e))
    if graceful_exit:
        # Signals don't travel over ssh, kill process when anything lands on stdin instead
        # The procedure here is:
        # 1. send process to background and capture it's pid
        # 2. wait for stdin with timeout in a loop, exit when original process finished
        # 3. kill original process if loop finished (something on stdin)
        cmd_parts.append(
            "& p=$!; "
            + "while ! read -t1; do ! kill -0 $p 2>/dev/null && kill $$; done; "
            + "kill $p; wait"
        )
        stdin = subprocess.PIPE
        stdin_interrupt = True
        # setsid puts the remote-control wrapper in its own process group
        popen_kwargs = {"preexec_fn": os.setsid}
    else:
        stdin_interrupt = False
        popen_kwargs = {}
    # The whole user command is shell-quoted and run under sudo on the master.
    cmd_parts = [
        "ssh",
        "-q",
        "-t",
        "-t",
        "-A",
        master,
        "sudo /bin/bash -c %s" % quote(" ".join(cmd_parts)),
    ]
    log.debug("Running %s" % " ".join(cmd_parts))
    return _run(
        cmd_parts,
        timeout=timeout,
        stream=True,
        stdin=stdin,
        stdin_interrupt=stdin_interrupt,
        popen_kwargs=popen_kwargs,
    )
|
Find connectable master for :cluster: and :system_paasta_config: args and
invoke command from :cmd_parts:, wrapping it in ssh call.
:returns (exit code, output)
:param cluster: cluster to find master in
:param system_paasta_config: system configuration to lookup master data
:param cmd_parts: passed into paasta_tools.utils._run as command along with
ssh bits
:param timeout: see paasta_tools.utils._run documentation (default: None)
:param err_code: code to return along with error message when something goes
wrong (default: -1)
:param graceful_exit: wrap command in a bash script that waits for input and
kills the original command; trap SIGINT and send newline into stdin
|
run_on_master
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def figure_out_service_name(args, soa_dir=DEFAULT_SOA_DIR):
    """Resolve the service name from args (falling back to guess_service_name)
    and validate it; prints the error and exits the program when invalid."""
    service_name = args.service or guess_service_name()
    try:
        validate_service_name(service_name, soa_dir=soa_dir)
    except NoSuchService as service_not_found:
        print(service_not_found)
        exit(1)
    return service_name
|
Figures out and validates the input service name
|
figure_out_service_name
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def get_jenkins_build_output_url():
    """Return the URL for the Jenkins job's console output, or None when the
    BUILD_URL environment variable is not available."""
    base_url = os.environ.get("BUILD_URL")
    return base_url + "console" if base_url else base_url
|
Returns the URL for Jenkins job's output.
Returns None if it's not available.
|
get_jenkins_build_output_url
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def get_instance_config(
    service: str,
    instance: str,
    cluster: str,
    soa_dir: str = DEFAULT_SOA_DIR,
    load_deployments: bool = False,
    instance_type: Optional[str] = None,
) -> InstanceConfig:
    """Load the InstanceConfig object for a service instance, whatever its
    instance type. (kubernetes)

    :param instance_type: when None, the type is discovered from soa-configs
        via validate_service_instance
    :raises NotImplementedError: when the instance type has no config loader
    """
    resolved_type = (
        validate_service_instance(
            service=service, instance=instance, cluster=cluster, soa_dir=soa_dir
        )
        if instance_type is None
        else instance_type
    )
    loader = INSTANCE_TYPE_HANDLERS[resolved_type].loader
    if loader is None:
        raise NotImplementedError(
            "instance is %s of type %s which is not supported by paasta"
            % (instance, resolved_type)
        )
    return loader(
        service=service,
        instance=instance,
        cluster=cluster,
        load_deployments=load_deployments,
        soa_dir=soa_dir,
    )
|
Returns the InstanceConfig object for whatever type of instance
it is. (kubernetes)
|
get_instance_config
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def extract_tags(paasta_tag: str) -> Mapping[str, str]:
    """Parse a paasta git tag ref into its named components.

    Returns a dict with keys deploy_group, image_version, tstamp and tag,
    or an empty dict when the ref does not look like a paasta tag.
    """
    pattern = (
        r"^refs/tags/(?:paasta-){1,2}(?P<deploy_group>[a-zA-Z0-9._-]+)"
        r"(?:\+(?P<image_version>.*)){0,1}-(?P<tstamp>\d{8}T\d{6})-(?P<tag>.*?)$"
    )
    match = re.match(pattern, paasta_tag)
    return {} if match is None else match.groupdict()
|
Returns a dictionary containing information from a git tag
|
extract_tags
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def validate_given_deploy_groups(
    all_deploy_groups: Collection[str], args_deploy_groups: Collection[str]
) -> Tuple[Set[str], Set[str]]:
    """Split the requested deploy groups into known and unknown ones.

    :param all_deploy_groups: deploy groups actually belonging to a service
    :param args_deploy_groups: the desired deploy groups
    :returns: a tuple (valid, invalid) where valid holds the groups present in
        both collections and invalid holds those only in args_deploy_groups
    """
    requested = set(args_deploy_groups)
    known = set(all_deploy_groups)
    valid_deploy_groups: Set[str] = requested & known
    invalid_deploy_groups: Set[str] = requested - known
    return valid_deploy_groups, invalid_deploy_groups
|
Given two lists of deploy groups, return the intersection and difference between them.
:param all_deploy_groups: instances actually belonging to a service
:param args_deploy_groups: the desired instances
:returns: a tuple with (common, difference) indicating deploy groups common in both
lists and those only in args_deploy_groups
|
validate_given_deploy_groups
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def pick_random_port(service_name):
    """Return a reserved ephemeral port.

    The preferred port is derived from a hash of the service name and the
    current user, so repeated calls for the same service tend to get the same
    port when it is still free.
    """
    seed = f"{service_name},{getpass.getuser()}".encode("utf8")
    digest = int(hashlib.sha1(seed).hexdigest(), 16)
    # Map the digest onto the 33000-57999 range.
    preferred_port = 33000 + (digest % 25000)
    return ephemeral_port_reserve.reserve("0.0.0.0", preferred_port)
|
Return a random port.
Tries to return the same port for the same service each time, when
possible.
|
pick_random_port
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def trigger_deploys(
    service: str,
    system_config: Optional["SystemPaastaConfig"] = None,
) -> None:
    """Connects to the deploymentsd watcher on sysgit, which is an extremely simple
    service that listens for a service string and then generates a service deployment

    :param service: service name to request a deployment for
    :param system_config: optional pre-loaded system config; loaded on demand
        when not provided
    """
    logline = f"Notifying soa-configs primary to generate a deployment for {service}"
    _log(service=service, line=logline, component="deploy", level="event")
    if not system_config:
        system_config = load_system_paasta_config()
    server = system_config.get_git_repo_config("yelpsoa-configs").get(
        "deploy_server",
        DEFAULT_SOA_CONFIGS_GIT_URL,
    )
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # The watcher's protocol is just a newline-terminated service name.
        client.connect((server, 5049))
        client.send(f"{service}\n".encode("utf-8"))
    finally:
        client.close()
|
Connects to the deploymentsd watcher on sysgit, which is an extremely simple
service that listens for a service string and then generates a service deployment
|
trigger_deploys
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def verify_instances(
    args_instances: str,
    service: str,
    clusters: Sequence[str],
    soa_dir: str = DEFAULT_SOA_DIR,
) -> Sequence[str]:
    """Verify that a list of instances specified by user is correct for this service.

    :param args_instances: comma-separated instance names
    :param service: the service name
    :param clusters: a list of clusters
    :param soa_dir: the SOA configuration directory to read from
    :returns: the instance names that could not be matched (empty when all
        instances are valid)
    """
    requested = args_instances.split(",")
    known: Set[str] = list_all_instances_for_service(
        service, clusters=clusters, soa_dir=soa_dir
    )
    unknown: Sequence[str] = [name for name in requested if name not in known]
    if not unknown:
        return unknown
    # Retry with suffixes stripped, since some instance types other than Tron
    # (i.e. Flink instances) carry a ".suffix" on the instance name.
    stripped = [name.split(".")[0] for name in requested]
    unknown = [name for name in stripped if name not in known]
    if unknown:
        close_matches: List[str] = []
        for name in unknown:
            close_matches.extend(
                difflib.get_close_matches(name, known, n=5, cutoff=0.5)
            )  # type: ignore
        close_matches = list(set(close_matches))
        if clusters:
            message = "{} doesn't have any instances matching {} on {}.".format(
                service,
                ", ".join(sorted(unknown)),
                ", ".join(sorted(clusters)),
            )
        else:
            message = "{} doesn't have any instances matching {}.".format(
                service, ", ".join(sorted(unknown))
            )
        print(PaastaColors.red(message))
        if close_matches:
            print("Did you mean any of these?")
            for name in sorted(close_matches):
                print("  %s" % name)
    return unknown
|
Verify that a list of instances specified by user is correct for this service.
:param args_instances: a list of instances.
:param service: the service name
:param cluster: a list of clusters
:returns: a list of instances specified in args_instances without any exclusions.
|
verify_instances
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def run_interactive_cli(
    cmd: str, shell: str = "/bin/bash", term: str = "xterm-256color"
):
    """Runs interactive command in a pseudo terminal, handling terminal size management

    :param str cmd: shell command
    :param str shell: shell utility to use as wrapper
    :param str term: terminal type
    :raises ValueError: if the shell cannot be resolved to an executable path
    """
    cols, rows = shutil.get_terminal_size()
    if not os.path.isabs(shell):
        resolved = shutil.which(shell)
        # shutil.which returns None when the binary is not on PATH; fail with
        # a clear message instead of crashing inside pty.spawn with None.
        if resolved is None:
            raise ValueError(f"Could not find shell executable: {shell}")
        shell = resolved
    wrapped_cmd = (
        f"export SHELL={shell};"
        f"export TERM={term};"
        f"stty columns {cols} rows {rows};"
        f"exec {cmd}"
    )
    pty.spawn([shell, "-c", wrapped_cmd])
|
Runs interactive command in a pseudo terminal, handling terminal size management
:param str cmd: shell command
:param str shell: shell utility to use as wrapper
:param str term: terminal type
|
run_interactive_cli
|
python
|
Yelp/paasta
|
paasta_tools/cli/utils.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/utils.py
|
Apache-2.0
|
def parse_duration_to_seconds(duration: str) -> Optional[int]:
    """Parse a duration string like '3h' or '30m' into seconds.

    Args:
        duration: A string representing a duration (e.g., "3h", "30m", "1d")

    Returns:
        The duration in seconds, or None if parsing failed or the input
        was empty
    """
    if not duration:
        return None
    # timeparse itself returns None for strings it cannot interpret.
    return timeparse(duration)
|
Parse a duration string like '3h' or '30m' into seconds.
Args:
duration: A string representing a duration (e.g., "3h", "30m", "1d")
Returns:
The duration in seconds, or None if parsing failed
|
parse_duration_to_seconds
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/autoscale.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/autoscale.py
|
Apache-2.0
|
def deploy_check(service_path):
    """Report whether deploy.yaml exists in the given service directory.

    :param service_path: path to a directory expected to contain deploy.yaml
    """
    message = (
        PaastaCheckMessages.DEPLOY_YAML_FOUND
        if is_file_in_dir("deploy.yaml", service_path)
        else PaastaCheckMessages.DEPLOY_YAML_MISSING
    )
    print(message)
|
Check whether deploy.yaml exists in service directory. Prints success or
error message.
:param service_path: path to a directory containing deploy.yaml
|
deploy_check
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def docker_check():
    """Report whether a Dockerfile exists in the current working directory."""
    message = (
        PaastaCheckMessages.DOCKERFILE_FOUND
        if is_file_in_dir("Dockerfile", os.getcwd())
        else PaastaCheckMessages.DOCKERFILE_MISSING
    )
    print(message)
|
Check whether Dockerfile exists in service directory, and is valid.
Prints suitable message depending on outcome
|
docker_check
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def makefile_responds_to(target):
    """Return True if the local Makefile knows about *target*.

    Runs ``make --question <target>``; per
    http://www.gnu.org/software/make/manual/make.html#index-exit-status-of-make,
    exit status 0 means up to date, 1 means not up to date, and 2 means error
    (e.g. unknown target).
    """
    exit_status, _ = _run(["make", "--question", target], timeout=5)
    return exit_status != 2
|
Runs `make --question <target>` to detect if a makefile responds to the
specified target.
|
makefile_responds_to
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def makefile_check():
    """Detect whether a Makefile exists and run sanity checks against it to
    ensure it is paasta-ready, printing one message per check."""
    makefile_path = is_file_in_dir("Makefile", os.getcwd())
    if not makefile_path:
        print(PaastaCheckMessages.MAKEFILE_MISSING)
        return
    print(PaastaCheckMessages.MAKEFILE_FOUND)
    print(
        PaastaCheckMessages.MAKEFILE_HAS_A_TAB
        if makefile_has_a_tab(makefile_path)
        else PaastaCheckMessages.MAKEFILE_HAS_NO_TABS
    )
    print(
        PaastaCheckMessages.MAKEFILE_HAS_DOCKER_TAG
        if makefile_has_docker_tag(makefile_path)
        else PaastaCheckMessages.MAKEFILE_HAS_NO_DOCKER_TAG
    )
    # Each required make target maps to its success/failure message pair.
    target_checks = [
        (
            "cook-image",
            PaastaCheckMessages.MAKEFILE_RESPONDS_BUILD_IMAGE,
            PaastaCheckMessages.MAKEFILE_RESPONDS_BUILD_IMAGE_FAIL,
        ),
        (
            "itest",
            PaastaCheckMessages.MAKEFILE_RESPONDS_ITEST,
            PaastaCheckMessages.MAKEFILE_RESPONDS_ITEST_FAIL,
        ),
        (
            "test",
            PaastaCheckMessages.MAKEFILE_RESPONDS_TEST,
            PaastaCheckMessages.MAKEFILE_RESPONDS_TEST_FAIL,
        ),
    ]
    for target, ok_msg, fail_msg in target_checks:
        print(ok_msg if makefile_responds_to(target) else fail_msg)
|
Detects if you have a makefile and runs some sanity tests against
it to ensure it is paasta-ready
|
makefile_check
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def get_deploy_groups_used_by_framework(instance_type, service, soa_dir):
    """This is a kind of funny function that gets all the instances for specified
    service and framework, and massages it into a form that matches up with what
    deploy.yaml's steps look like. This is only so we can compare it 1-1
    with what deploy.yaml has for linting.

    :param instance_type: one of the entries in utils.INSTANCE_TYPES
    :param service: the service name
    :param soa_dir: The SOA configuration directory to read from
    :returns: a set of deploy group names used by the service (falsy values
        such as None are filtered out)
    """
    deploy_groups = set()
    for cluster in list_clusters(service, soa_dir):
        for _, instance in get_service_instance_list(
            service=service,
            cluster=cluster,
            instance_type=instance_type,
            soa_dir=soa_dir,
        ):
            try:
                config = get_instance_config(
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    soa_dir=soa_dir,
                    load_deployments=False,
                    instance_type=instance_type,
                )
                deploy_groups.add(config.get_deploy_group())
            except NotImplementedError:
                # Some instance types don't support deploy groups; skip them.
                pass
    return set(filter(None, deploy_groups))
|
This is a kind of funny function that gets all the instances for specified
service and framework, and massages it into a form that matches up with what
deploy.yaml's steps look like. This is only so we can compare it 1-1
with what deploy.yaml has for linting.
:param instance_type: one of the entries in utils.INSTANCE_TYPES
:param service: the service name
:param soa_dir: The SOA configuration directory to read from
:returns: a set of deploy group names used by the service.
|
get_deploy_groups_used_by_framework
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def deployments_check(service, soa_dir):
    """Checks for consistency between deploy.yaml and the kubernetes/etc yamls.

    :param service: the service name
    :param soa_dir: the SOA configuration directory to read from
    :returns: True if every deploy group in deploy.yaml is used by some
        instance and every instance's deploy group appears in deploy.yaml,
        False otherwise
    """
    the_return = True
    pipeline_deploy_groups = get_pipeline_deploy_groups(
        service=service, soa_dir=soa_dir
    )
    framework_deploy_groups = {}
    # Deploy groups declared in deploy.yaml that no instance references yet;
    # entries are subtracted as each instance type is examined below.
    in_deploy_not_frameworks = set(pipeline_deploy_groups)
    for it in INSTANCE_TYPES:
        framework_deploy_groups[it] = get_deploy_groups_used_by_framework(
            it, service, soa_dir
        )
        # Deploy groups used by this instance type but missing from deploy.yaml.
        in_framework_not_deploy = set(framework_deploy_groups[it]) - set(
            pipeline_deploy_groups
        )
        in_deploy_not_frameworks -= set(framework_deploy_groups[it])
        if len(in_framework_not_deploy) > 0:
            print(
                "{} There are some instance(s) you have asked to run in {} that".format(
                    x_mark(), it
                )
            )
            print("  do not have a corresponding entry in deploy.yaml:")
            print("  %s" % PaastaColors.bold(", ".join(in_framework_not_deploy)))
            print("  You should probably configure these to use a 'deploy_group' or")
            print(
                "  add entries to deploy.yaml for them so they are deployed to those clusters."
            )
            the_return = False
    if len(in_deploy_not_frameworks) > 0:
        print(
            "%s There are some instance(s) in deploy.yaml that are not referenced"
            % x_mark()
        )
        print("  by any instance:")
        print("  %s" % PaastaColors.bold((", ".join(in_deploy_not_frameworks))))
        print(
            "  You should probably delete these deploy.yaml entries if they are unused."
        )
        the_return = False
    if the_return is True:
        print(success("All entries in deploy.yaml correspond to a paasta instance"))
        for it in INSTANCE_TYPES:
            if len(framework_deploy_groups[it]) > 0:
                print(
                    success(
                        "All %s instances have a corresponding deploy.yaml entry" % it
                    )
                )
    return the_return
|
Checks for consistency between deploy.yaml and the kubernetes/etc yamls
|
deployments_check
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def sensu_check(service, service_path, soa_dir):
    """Check whether monitoring.yaml exists in the service directory and that
    a monitoring team is declared, printing the outcome.

    :param service: name of service currently being examined
    :param service_path: path to location of monitoring.yaml file
    :param soa_dir: the SOA configuration directory to read from
    """
    if not is_file_in_dir("monitoring.yaml", service_path):
        print(PaastaCheckMessages.SENSU_MONITORING_MISSING)
        return
    print(PaastaCheckMessages.SENSU_MONITORING_FOUND)
    team = get_team(service=service, overrides={}, soa_dir=soa_dir)
    if team is None:
        print(PaastaCheckMessages.SENSU_TEAM_MISSING)
    else:
        print(PaastaCheckMessages.sensu_team_found(team))
|
Check whether monitoring.yaml exists in service directory,
and that the team name is declared.
:param service: name of service currently being examined
:param service_path: path to location of monitoring.yaml file
|
sensu_check
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def service_dir_check(service, soa_dir):
    """Report whether the service's directory exists under *soa_dir*.

    :param service: string of service name we wish to inspect
    :param soa_dir: the SOA configuration root directory
    """
    try:
        validate_service_name(service, soa_dir)
    except NoSuchService:
        print(PaastaCheckMessages.service_dir_missing(service, soa_dir))
    else:
        print(PaastaCheckMessages.service_dir_found(service, soa_dir))
|
Check whether directory service exists in /nail/etc/services
:param service: string of service name we wish to inspect
|
service_dir_check
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def smartstack_check(service, service_path, soa_dir):
    """Check whether smartstack.yaml exists in service directory, and the proxy
    ports are declared. Print appropriate message depending on outcome.

    :param service: name of service currently being examined
    :param service_path: path to location of smartstack.yaml file
    :param soa_dir: the SOA configuration directory to read from
    """
    if not is_file_in_dir("smartstack.yaml", service_path):
        return
    print(PaastaCheckMessages.SMARTSTACK_YAML_FOUND)
    # Fetch the namespaces once; the original code fetched the same list twice.
    namespaces = get_all_namespaces_for_service(
        service=service, soa_dir=soa_dir, full_name=False
    )
    if len(namespaces) > 0:
        for namespace, config in namespaces:
            if "proxy_port" in config:
                print(
                    PaastaCheckMessages.smartstack_port_found(
                        namespace, config.get("proxy_port")
                    )
                )
            else:
                print(PaastaCheckMessages.SMARTSTACK_PORT_MISSING)
    else:
        print(PaastaCheckMessages.SMARTSTACK_PORT_MISSING)
|
Check whether smartstack.yaml exists in service directory, and the proxy
ports are declared. Print appropriate message depending on outcome.
:param service: name of service currently being examined
:param service_path: path to location of smartstack.yaml file
|
smartstack_check
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def paasta_check(args):
    """Analyze the service in the PWD to determine if it is paasta ready.

    :param args: argparse.Namespace obj created from sys.args by cli
    """
    soa_dir = args.yelpsoa_config_root
    service = figure_out_service_name(args, soa_dir)
    service_path = os.path.join(soa_dir, service)
    # Run every readiness check in order; each one prints its own results.
    checks = [
        lambda: service_dir_check(service, soa_dir),
        lambda: deploy_check(service_path),
        lambda: deploy_has_security_check(service, soa_dir),
        lambda: git_repo_check(service, soa_dir),
        docker_check,
        makefile_check,
        lambda: deployments_check(service, soa_dir),
        lambda: sensu_check(service, service_path, soa_dir),
        lambda: smartstack_check(service, service_path, soa_dir),
        lambda: paasta_validate_soa_configs(service, service_path),
    ]
    for check in checks:
        check()
|
Analyze the service in the PWD to determine if it is paasta ready
:param args: argparse.Namespace obj created from sys.args by cli
|
paasta_check
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/check.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/check.py
|
Apache-2.0
|
def paasta_list(args):
    """Print a list of Yelp services currently running, one per line.

    :param args: argparse.Namespace obj created from sys.args by cli
    """
    # Pick the listing function matching the requested scope.
    if args.print_instances:
        lister = list_service_instances
    elif args.all:
        lister = list_services
    else:
        lister = list_paasta_services
    for name in lister(args.soa_dir):
        print(name)
|
Print a list of Yelp services currently running
:param args: argparse.Namespace obj created from sys.args by cli
|
paasta_list
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/list.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/list.py
|
Apache-2.0
|
def perform_http_healthcheck(url, timeout):
    """Perform an HTTP GET healthcheck against ``url``.

    :param url: the healthcheck url
    :param timeout: timeout in seconds
    :returns: a (bool, reason string) tuple; True if the healthcheck succeeds
        within the number of seconds specified by timeout, False otherwise
    """
    try:
        with Timeout(seconds=timeout):
            try:
                # verify=False: the target may present a self-signed or
                # mismatched certificate when hit directly by IP/port.
                res = requests.get(url, verify=False)
            except requests.ConnectionError:
                return (False, "http request failed: connection failed")
    except TimeoutError:
        return (False, "http request timed out after %d seconds" % timeout)
    if "content-type" in res.headers and "," in res.headers["content-type"]:
        print(
            PaastaColors.yellow(
                "Multiple content-type headers detected in response."
                " The Mesos healthcheck system will treat this as a failure!"
            )
        )
        # Deliberately reported as a failure even though the request
        # succeeded, to mirror Mesos's behavior described above.
        return (False, "http request succeeded, code %d" % res.status_code)
    # check if response code is valid per https://mesosphere.github.io/marathon/docs/health-checks.html
    elif res.status_code >= 200 and res.status_code < 400:
        return (True, "http request succeeded, code %d" % res.status_code)
    else:
        return (False, "http request failed, code %s" % str(res.status_code))
|
Returns true if healthcheck on url succeeds, false otherwise
:param url: the healthcheck url
:param timeout: timeout in seconds
:returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise
|
perform_http_healthcheck
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def perform_tcp_healthcheck(url, timeout):
    """Returns true if successfully connects to host and port, false otherwise.

    :param url: the healthcheck url (in the form tcp://host:port)
    :param timeout: timeout in seconds
    :returns: a (bool, reason string) tuple; True if the connection succeeds
        within the number of seconds specified by timeout
    """
    url_elem = urlparse(url)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        # connect_ex returns an errno instead of raising on connection failure
        result = sock.connect_ex((url_elem.hostname, url_elem.port))
    finally:
        # make sure the socket is released even if e.g. name resolution raises
        sock.close()
    if result == 0:
        return (True, "tcp connection succeeded")
    else:
        return (False, "%s (timeout %d seconds)" % (os.strerror(result), timeout))
|
Returns true if successfully connects to host and port, false otherwise
:param url: the healthcheck url (in the form tcp://host:port)
:param timeout: timeout in seconds
:returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise
|
perform_tcp_healthcheck
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def perform_cmd_healthcheck(docker_client, container_id, command, timeout):
    """Run *command* inside the container and report whether it exited 0.

    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param command: command to execute
    :param timeout: timeout in seconds
    :returns: a (bool, output) tuple; True when the command exits with code 0
    """
    output, exit_code = execute_in_container(
        docker_client, container_id, command, timeout
    )
    return (exit_code == 0, output)
|
Returns true if return code of command is 0 when executed inside container, false otherwise
:param docker_client: Docker client object
:param container_id: Docker container id
:param command: command to execute
:param timeout: timeout in seconds
:returns: True if command exits with return code 0, false otherwise
|
perform_cmd_healthcheck
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def run_healthcheck_on_container(
    docker_client, container_id, healthcheck_mode, healthcheck_data, timeout
):
    """Perform a single healthcheck against a container.

    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
    :param healthcheck_data: a URL when healthcheck_mode is 'http[s]' or 'tcp', a command if healthcheck_mode is 'cmd'
    :param timeout: timeout in seconds for individual check
    :returns: a tuple of (bool, output string)
    """
    if healthcheck_mode == "cmd":
        return perform_cmd_healthcheck(
            docker_client, container_id, healthcheck_data, timeout
        )
    if healthcheck_mode in ("http", "https"):
        return perform_http_healthcheck(healthcheck_data, timeout)
    if healthcheck_mode == "tcp":
        return perform_tcp_healthcheck(healthcheck_data, timeout)
    # Unknown mode: there is nothing sensible to check, so bail out.
    print(
        PaastaColors.yellow(
            "Healthcheck mode '%s' is not currently supported!" % healthcheck_mode
        )
    )
    sys.exit(1)
|
Performs healthcheck on a container
:param container_id: Docker container id
:param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
:param healthcheck_data: a URL when healthcheck_mode is 'http[s]' or 'tcp', a command if healthcheck_mode is 'cmd'
:param timeout: timeout in seconds for individual check
:returns: a tuple of (bool, output string)
|
run_healthcheck_on_container
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def simulate_healthcheck_on_service(
    instance_config,
    docker_client,
    container_id,
    healthcheck_mode,
    healthcheck_data,
    healthcheck_enabled,
):
    """Simulates Marathon-style healthcheck on given service if healthcheck is enabled.

    :param instance_config: service manifest
    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
    :param healthcheck_data: url or command for the healthcheck, per healthcheck_mode
    :param healthcheck_enabled: boolean
    :returns: healthcheck_passed: boolean
    """
    healthcheck_link = PaastaColors.cyan(healthcheck_data)
    if healthcheck_enabled:
        grace_period = instance_config.get_healthcheck_grace_period_seconds()
        timeout = instance_config.get_healthcheck_timeout_seconds()
        interval = instance_config.get_healthcheck_interval_seconds()
        max_failures = instance_config.get_healthcheck_max_consecutive_failures()
        print(
            "\nStarting health check via %s (waiting %s seconds before "
            "considering failures due to grace period):"
            % (healthcheck_link, grace_period)
        )
        # silently start performing health checks until grace period ends or first check succeeds
        graceperiod_end_time = time.time() + grace_period
        after_grace_period_attempts = 0
        # Flag shared with the log-streaming thread below; flipped to False
        # once healthchecking finishes so the thread stops echoing logs.
        healthchecking = True
        def _stream_docker_logs(container_id, generator):
            while healthchecking:
                try:
                    # the generator will block until another log line is available
                    log_line = next(generator).decode("utf-8").rstrip("\n")
                    if healthchecking:
                        print(f"container [{container_id[:12]}]: {log_line}")
                    else:
                        # stop streaming at first opportunity, since generator.close()
                        # cant be used until the container is dead
                        break
                except StopIteration:  # natural end of logs
                    break
        docker_logs_generator = docker_client.logs(
            container_id, stderr=True, stream=True
        )
        # Daemon thread: it must not keep the process alive after we return.
        threading.Thread(
            target=_stream_docker_logs,
            daemon=True,
            args=(container_id, docker_logs_generator),
        ).start()
        while True:
            # First inspect the container for early exits
            container_state = docker_client.inspect_container(container_id)
            if not container_state["State"]["Running"]:
                print(
                    PaastaColors.red(
                        "Container exited with code {}".format(
                            container_state["State"]["ExitCode"]
                        )
                    )
                )
                healthcheck_passed = False
                break
            healthcheck_passed, healthcheck_output = run_healthcheck_on_container(
                docker_client, container_id, healthcheck_mode, healthcheck_data, timeout
            )
            # Yay, we passed the healthcheck
            if healthcheck_passed:
                print(
                    "{}'{}' (via {})".format(
                        PaastaColors.green("Healthcheck succeeded!: "),
                        healthcheck_output,
                        healthcheck_link,
                    )
                )
                break
            # Otherwise, print why we failed
            if time.time() < graceperiod_end_time:
                color = PaastaColors.grey
                msg = "(disregarded due to grace period)"
                extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})"
            else:
                # If we've exceeded the grace period, we start incrementing attempts
                after_grace_period_attempts += 1
                color = PaastaColors.red
                msg = "(Attempt {} of {})".format(
                    after_grace_period_attempts, max_failures
                )
                extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})"
            print("{}{}".format(color(f"Healthcheck failed! {msg}"), extra_msg))
            if after_grace_period_attempts == max_failures:
                break
            time.sleep(interval)
        healthchecking = False  # end docker logs stream
    else:
        print(
            "\nPaaSTA would have healthchecked your service via\n%s" % healthcheck_link
        )
        healthcheck_passed = True
    return healthcheck_passed
|
Simulates Marathon-style healthcheck on given service if healthcheck is enabled
:param instance_config: service manifest
:param docker_client: Docker client object
:param container_id: Docker container id
:param healthcheck_data: tuple url to healthcheck
:param healthcheck_enabled: boolean
:returns: healthcheck_passed: boolean
|
simulate_healthcheck_on_service
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def docker_pull_image(docker_url):
    """Pull an image via ``docker pull``. Uses the actual pull command instead of the python
    bindings due to the docker auth/registry transition. Once we are past Docker 1.6
    we can use better credential management, but for now this function assumes the
    user running the command has already been authorized for the registry"""
    print(
        "Please wait while the image (%s) is pulled (times out after 30m)..."
        % docker_url,
        file=sys.stderr,
    )
    timeout_msg = f"Timed out pulling docker image from {docker_url}"
    # stdin is redirected to /dev/null so the pull cannot prompt interactively.
    with Timeout(seconds=1800, error_message=timeout_msg), open(
        os.devnull, mode="wb"
    ) as devnull:
        exit_code, _ = _run("docker pull %s" % docker_url, stream=True, stdin=devnull)
    if exit_code != 0:
        print(
            "\nPull failed. Are you authorized to run docker commands?",
            file=sys.stderr,
        )
        sys.exit(exit_code)
|
Pull an image via ``docker pull``. Uses the actual pull command instead of the python
bindings due to the docker auth/registry transition. Once we are past Docker 1.6
we can use better credential management, but for now this function assumes the
user running the command has already been authorized for the registry
|
docker_pull_image
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def get_container_id(docker_client, container_name):
    """Find the id of the running container identifiable by *container_name*.

    :param docker_client: Docker client object
    :param container_name: the name the container was started with
    :returns: the container id
    :raises LostContainerException: if no running container has that name
    """
    entries = docker_client.containers(all=False)
    target = "/%s" % container_name
    for entry in entries:
        if target in entry.get("Names", []):
            return entry.get("Id")
    raise LostContainerException(
        "Can't find the container I just launched so I can't do anything else.\n"
        "Try docker 'ps --all | grep %s' to see where it went.\n"
        "Here were all the containers:\n"
        "%s" % (container_name, entries)
    )
|
Use 'docker_client' to find the container we started, identifiable by
its 'container_name'. If we can't find the id, raise
LostContainerException.
|
get_container_id
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def get_local_run_environment_vars(instance_config, port0, framework):
    """Build the env-var dict simulating what a paasta service would see
    running in a container.

    :param instance_config: the service instance config
    :param port0: unused, kept for interface compatibility
    :param framework: unused, kept for interface compatibility
    :returns: dict of environment variable names to values
    """
    fqdn = socket.getfqdn()
    image = instance_config.get_docker_image()
    if image == "":
        # In a local_run environment, the docker_image may not be available
        # so we can fall-back to the injected DOCKER_TAG per the paasta contract
        image = os.environ["DOCKER_TAG"]
    return {
        "HOST": fqdn,
        "PAASTA_DOCKER_IMAGE": image,
        "PAASTA_LAUNCHED_BY": get_possible_launched_by_user_variable_from_env(),
        "PAASTA_HOST": fqdn,
        # Kubernetes instances remove PAASTA_CLUSTER, so we need to re-add it ourselves
        "PAASTA_CLUSTER": instance_config.get_cluster(),
    }
|
Returns a dictionary of environment variables to simulate what would be available to
a paasta service running in a container
|
get_local_run_environment_vars
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def assume_aws_role(
    instance_config: InstanceConfig,
    service: str,
    assume_role_arn: str,
    assume_pod_identity: bool,
    use_okta_role: bool,
    aws_account: str,
) -> AWSSessionCreds:
    """Runs AWS cli to assume into the correct role, then extract and return the ENV variables from that session.

    :param instance_config: instance config; its iam_role is used as the pod identity
    :param service: the service name (currently unused in the body)
    :param assume_role_arn: explicit role ARN to assume; overrides the instance's iam_role
    :param assume_pod_identity: if True, require a pod identity to be present
    :param use_okta_role: if True, assume only the okta account role (no second hop)
    :param aws_account: account name passed to aws-okta's ``-a`` flag
    :returns: dict of AWS credential environment variables
    :raises SystemExit: on missing pod identity, bad arguments, or aws-okta failure
    """
    pod_identity = instance_config.get_iam_role()
    if assume_role_arn:
        # An explicitly requested role ARN takes precedence over the
        # instance's configured iam_role.
        pod_identity = assume_role_arn
    if assume_pod_identity and not pod_identity:
        print(
            f"Error: --assume-pod-identity passed but no pod identity was found for this instance ({instance_config.instance})",
            file=sys.stderr,
        )
        sys.exit(1)
    if pod_identity and (assume_pod_identity or assume_role_arn):
        print(
            "Calling aws-okta to assume role {} using account {}".format(
                pod_identity, aws_account
            )
        )
    elif use_okta_role:
        print(f"Calling aws-okta using account {aws_account}")
    elif "AWS_ROLE_ARN" in os.environ and "AWS_WEB_IDENTITY_TOKEN_FILE" in os.environ:
        # Get a session using the current pod identity
        print(
            f"Found Pod Identity token in env. Assuming into role {os.environ['AWS_ROLE_ARN']}."
        )
        boto_session = boto3.Session()
        credentials = boto_session.get_credentials()
        assumed_creds_dict: AWSSessionCreds = {
            "AWS_ACCESS_KEY_ID": credentials.access_key,
            "AWS_SECRET_ACCESS_KEY": credentials.secret_key,
            "AWS_SESSION_TOKEN": credentials.token,
            # AWS_SECURITY_TOKEN is the legacy alias for the session token
            "AWS_SECURITY_TOKEN": credentials.token,
        }
        return assumed_creds_dict
    else:
        # use_okta_role, assume_pod_identity, and assume_role are all empty, and there's no
        # pod identity (web identity token) in the env. This shouldn't happen
        print(
            "Error: assume_aws_role called without required arguments and no pod identity env",
            file=sys.stderr,
        )
        sys.exit(1)
    # local-run will sometimes run as root - make sure that we get the actual
    # users AWS credentials instead of looking for non-existent root AWS
    # credentials
    if os.getuid() == 0:
        aws_okta_cmd = [
            "sudo",
            "-u",
            get_username(),
            f"HOME=/nail/home/{get_username()}",
            "aws-okta",
            "-a",
            aws_account,
            "-o",
            "json",
        ]
    else:
        aws_okta_cmd = ["aws-okta", "-a", aws_account, "-o", "json"]
    cmd = subprocess.run(aws_okta_cmd, stdout=subprocess.PIPE)
    if cmd.returncode != 0:
        print(
            "Error calling aws-okta. Remove --assume-pod-identity to run without pod identity role",
            file=sys.stderr,
        )
        sys.exit(1)
    cmd_output = json.loads(cmd.stdout.decode("utf-8"))
    if not use_okta_role:
        # Second hop: use the okta session to STS-assume the pod identity role.
        boto_session = boto3.Session(
            aws_access_key_id=cmd_output["AccessKeyId"],
            aws_secret_access_key=cmd_output["SecretAccessKey"],
            aws_session_token=cmd_output["SessionToken"],
        )
        sts_client = boto_session.client("sts")
        assumed_role = sts_client.assume_role(
            RoleArn=pod_identity, RoleSessionName=f"{get_username()}-local-run"
        )
        # The contents of "Credentials" key from assume_role is the same as from aws-okta
        cmd_output = assumed_role["Credentials"]
    creds_dict: AWSSessionCreds = {
        "AWS_ACCESS_KEY_ID": cmd_output["AccessKeyId"],
        "AWS_SECRET_ACCESS_KEY": cmd_output["SecretAccessKey"],
        "AWS_SESSION_TOKEN": cmd_output["SessionToken"],
        "AWS_SECURITY_TOKEN": cmd_output["SessionToken"],
    }
    return creds_dict
|
Runs AWS cli to assume into the correct role, then extract and return the ENV variables from that session
|
assume_aws_role
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def run_docker_container(
    docker_client,
    service,
    instance,
    docker_url,
    volumes,
    interactive,
    command,
    healthcheck,
    healthcheck_only,
    user_port,
    instance_config,
    secret_provider_name,
    soa_dir=DEFAULT_SOA_DIR,
    dry_run=False,
    json_dict=False,
    framework=None,
    secret_provider_kwargs=None,
    skip_secrets=False,
    assume_pod_identity=False,
    assume_role_arn="",
    use_okta_role=False,
    assume_role_aws_account: Optional[str] = None,
    use_service_auth_token: bool = False,
    use_sso_service_auth_token: bool = False,
):
    """docker-py has issues running a container with a TTY attached, so for
    consistency we execute 'docker run' directly in both interactive and
    non-interactive modes.

    In non-interactive mode when the run is complete, stop the container and
    remove it (with docker-py).

    :param docker_client: docker-py client, used to inspect and clean up the
        container in non-interactive mode
    :param docker_url: the image to run
    :param volumes: list of "host:container:mode" bind strings; extended in
        place with one entry per decrypted secret volume
    :param healthcheck: simulate the instance's healthcheck against the
        running container
    :param healthcheck_only: exit right after the simulated healthcheck
    :param user_port: explicit host port to bind; otherwise a random free
        port is picked
    :param secret_provider_kwargs: extra kwargs for the secret provider
        (defaults to an empty dict)
    :return: the container's exit code, or 0 for dry runs and exec'd
        interactive runs
    """
    # NOTE: secret_provider_kwargs used to be a mutable default argument
    # ({}), which is shared across calls; use the None sentinel instead.
    if secret_provider_kwargs is None:
        secret_provider_kwargs = {}
    if user_port:
        if check_if_port_free(user_port):
            chosen_port = user_port
        else:
            print(
                PaastaColors.red(
                    "The chosen port is already in use!\n"
                    "Try specifying another one, or omit (--port|-o) and paasta will find a free one for you"
                ),
                file=sys.stderr,
            )
            sys.exit(1)
    else:
        chosen_port = pick_random_port(service)
    environment = instance_config.get_env()
    secret_volumes = {}  # type: ignore
    if not skip_secrets:
        # if secrets_for_owner_team enabled in yelpsoa for service, fetch
        # secrets straight from kubernetes; otherwise decrypt them locally
        # via the configured secret provider.
        if is_secrets_for_teams_enabled(service, soa_dir):
            try:
                kube_client = KubeClient(
                    config_file=KUBE_CONFIG_USER_PATH, context=instance_config.cluster
                )
                secret_environment = get_kubernetes_secret_env_variables(
                    kube_client, environment, service, instance_config.get_namespace()
                )
                secret_volumes = get_kubernetes_secret_volumes(
                    kube_client,
                    instance_config.get_secret_volumes(),
                    service,
                    instance_config.get_namespace(),
                )
            except Exception as e:
                print(
                    f"Failed to retrieve kubernetes secrets with {e.__class__.__name__}: {e}"
                )
                print(
                    "If you don't need the secrets for local-run, you can add --skip-secrets"
                )
                sys.exit(1)
        else:
            try:
                secret_environment = decrypt_secret_environment_variables(
                    secret_provider_name=secret_provider_name,
                    environment=environment,
                    soa_dir=soa_dir,
                    service_name=instance_config.get_service(),
                    cluster_name=instance_config.cluster,
                    secret_provider_kwargs=secret_provider_kwargs,
                )
                secret_volumes = decrypt_secret_volumes(
                    secret_provider_name=secret_provider_name,
                    secret_volumes_config=instance_config.get_secret_volumes(),
                    soa_dir=soa_dir,
                    service_name=instance_config.get_service(),
                    cluster_name=instance_config.cluster,
                    secret_provider_kwargs=secret_provider_kwargs,
                )
            except Exception as e:
                print(f"Failed to decrypt secrets with {e.__class__.__name__}: {e}")
                print(
                    "If you don't need the secrets for local-run, you can add --skip-secrets"
                )
                sys.exit(1)
        environment.update(secret_environment)
    # Acquire AWS credentials when the user asked for a role, or a web
    # identity token is already present in the environment.
    if (
        assume_role_arn
        or assume_pod_identity
        or use_okta_role
        or "AWS_WEB_IDENTITY_TOKEN_FILE" in os.environ
    ):
        aws_creds = assume_aws_role(
            instance_config,
            service,
            assume_role_arn,
            assume_pod_identity,
            use_okta_role,
            assume_role_aws_account,
        )
        environment.update(aws_creds)
    if use_service_auth_token:
        environment["YELP_SVC_AUTHZ_TOKEN"] = get_service_auth_token()
    elif use_sso_service_auth_token:
        environment["YELP_SVC_AUTHZ_TOKEN"] = get_sso_auth_token()
    local_run_environment = get_local_run_environment_vars(
        instance_config=instance_config, port0=chosen_port, framework=framework
    )
    environment.update(local_run_environment)
    net = instance_config.get_net()
    memory = instance_config.get_mem()
    container_name = get_container_name()
    docker_params = instance_config.format_docker_parameters()
    healthcheck_mode, healthcheck_data = get_healthcheck_for_instance(
        service, instance, instance_config, chosen_port, soa_dir=soa_dir
    )
    if healthcheck_mode is None:
        container_port = None
        interactive = True
    elif not user_port and not healthcheck and not healthcheck_only:
        container_port = None
    else:
        try:
            container_port = instance_config.get_container_port()
        except AttributeError:
            container_port = None
    simulate_healthcheck = (
        healthcheck_only or healthcheck
    ) and healthcheck_mode is not None
    # Write every secret-volume's contents to a temp file and bind-mount it
    # read-only into the container.
    for container_mount_path, secret_content in secret_volumes.items():
        # NOTE(review): tempfile.mktemp only generates a name, so there is a
        # small race before makedirs; kept as-is for permission compatibility
        # with in-container users, but tempfile.mkdtemp would be safer.
        temp_secret_folder = tempfile.mktemp(dir=os.environ.get("TMPDIR", "/nail/tmp"))
        os.makedirs(temp_secret_folder, exist_ok=True)
        temp_secret_filename = os.path.join(temp_secret_folder, str(uuid.uuid4()))
        # write the secret contents
        # Permissions will automatically be set to readable by "users" group
        # TODO: Make this readable only by "nobody" user? What about other non-standard users that people sometimes use inside the container?
        # -rw-r--r-- 1 dpopes users 3.2K Nov 28 19:16 854bdbad-30b8-4681-ae4e-854cb28075c5
        try:
            # First try to write the file as a string
            # This is for text like config files
            with open(temp_secret_filename, "w") as f:
                f.write(secret_content)
        except TypeError:
            # If that fails, try to write it as bytes
            # This is for binary files like TLS keys
            with open(temp_secret_filename, "wb") as fb:
                fb.write(secret_content)
        # Append this to the list of volumes passed to docker run
        volumes.append(f"{temp_secret_filename}:{container_mount_path}:ro")
    docker_run_args = dict(
        memory=memory,
        chosen_port=chosen_port,
        container_port=container_port,
        container_name=container_name,
        volumes=volumes,
        env=environment,
        interactive=interactive,
        detach=simulate_healthcheck,
        docker_hash=docker_url,
        command=command,
        net=net,
        docker_params=docker_params,
    )
    docker_run_cmd = get_docker_run_cmd(**docker_run_args)
    joined_docker_run_cmd = " ".join(docker_run_cmd)
    if dry_run:
        if json_dict:
            print(json.dumps(docker_run_args))
        else:
            print(json.dumps(docker_run_cmd))
        return 0
    else:
        print("Running docker command:\n%s" % PaastaColors.grey(joined_docker_run_cmd))
    merged_env = {**os.environ, **environment}
    if interactive or not simulate_healthcheck:
        # NOTE: This immediately replaces us with the docker run cmd. Docker
        # run knows how to clean up the running container in this situation.
        wrapper_path = shutil.which("paasta_docker_wrapper")
        # To properly simulate mesos, we pop the PATH, which is not available to
        # The executor. pop with a default so a PATH-less environment doesn't
        # raise KeyError.
        merged_env.pop("PATH", None)
        execlpe(wrapper_path, *docker_run_cmd, merged_env)
        # For testing, when execlpe is patched out and doesn't replace us, we
        # still want to bail out.
        return 0
    container_started = False
    container_id = None
    try:
        (returncode, output) = _run(docker_run_cmd, env=merged_env)
        if returncode != 0:
            # BUG FIX: these adjacent string literals concatenate into ONE
            # argument, so the old sep="\n" had no effect and the message
            # printed without any line breaks. Embed the newlines instead.
            print(
                "Failure trying to start your container!\n"
                "Returncode: %d\n"
                "Output:\n"
                "%s\n"
                "\n"
                "Fix that problem and try again.\n"
                "http://y/paasta-troubleshooting" % (returncode, output),
            )
            # Container failed to start so no need to cleanup; just bail.
            sys.exit(1)
        container_started = True
        container_id = get_container_id(docker_client, container_name)
        print("Found our container running with CID %s" % container_id)
        if simulate_healthcheck:
            healthcheck_result = simulate_healthcheck_on_service(
                instance_config=instance_config,
                docker_client=docker_client,
                container_id=container_id,
                healthcheck_mode=healthcheck_mode,
                healthcheck_data=healthcheck_data,
                healthcheck_enabled=healthcheck,
            )

        def _output_exit_code():
            returncode = docker_client.inspect_container(container_id)["State"][
                "ExitCode"
            ]
            # (fixed stray ")" that used to trail the exit code)
            print(f"Container exited: {returncode}")

        if healthcheck_only:
            if container_started:
                _output_exit_code()
                _cleanup_container(docker_client, container_id)
            if healthcheck_mode is None:
                print(
                    "--healthcheck-only, but no healthcheck is defined for this instance!"
                )
                sys.exit(1)
            elif healthcheck_result is True:
                sys.exit(0)
            else:
                sys.exit(1)
        running = docker_client.inspect_container(container_id)["State"]["Running"]
        if running:
            print("Your service is now running! Tailing stdout and stderr:")
            for line in docker_client.logs(
                container_id,
                stderr=True,
                stream=True,
            ):
                # writing to sys.stdout.buffer lets us write the raw bytes we
                # get from the docker client without having to convert them to
                # a utf-8 string
                sys.stdout.buffer.write(line)
                sys.stdout.flush()
        else:
            _output_exit_code()
            returncode = 3
    except KeyboardInterrupt:
        returncode = 3
    # Cleanup if the container exits on its own or interrupted.
    if container_started:
        returncode = docker_client.inspect_container(container_id)["State"]["ExitCode"]
        _cleanup_container(docker_client, container_id)
    return returncode
|
docker-py has issues running a container with a TTY attached, so for
consistency we execute 'docker run' directly in both interactive and
non-interactive modes.
In non-interactive mode when the run is complete, stop the container and
remove it (with docker-py).
|
run_docker_container
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def format_command_for_type(command, instance_type, date):
    """Format ``command`` appropriately for the given instance type.

    For "tron" instances, time variables in the command are interpolated
    against ``date``; for every other instance type the command is returned
    unchanged. (The old docstring incorrectly claimed a *function* was
    returned — the formatted command string is returned directly.)

    :param command: the raw command string from the instance config
    :param instance_type: the paasta instance type (e.g. "tron", "adhoc")
    :param date: the datetime used to interpolate time variables
    :return: the (possibly interpolated) command string
    """
    if instance_type == "tron":
        return parse_time_variables(command, date)
    return command
|
Given an instance_type, return a function that appropriately formats
the command to be run.
|
format_command_for_type
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def configure_and_run_docker_container(
    docker_client,
    docker_url,
    docker_sha,
    service,
    instance,
    cluster,
    system_paasta_config,
    args,
    assume_role_aws_account,
    pull_image=False,
    dry_run=False,
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.

    :param docker_client: docker-py client handed through to run_docker_container
    :param docker_url: explicit image URL; when None it is resolved from the
        instance's deployments info
    :param docker_sha: explicit git sha; when set, branch_dict is overridden so
        the matching locally-built image is used regardless of deployments.json
    :param args: the parsed local-run CLI namespace (cmd, healthcheck flags,
        ports, vault options, ...)
    :param pull_image: docker-pull the image before running
    :return: 1 on any configuration error (bad instance, no deployments, no
        image); otherwise run_docker_container's return value
    """
    if instance is None and args.healthcheck_only:
        print("With --healthcheck-only, --instance MUST be provided!", file=sys.stderr)
        return 1
    if instance is None and not sys.stdin.isatty():
        print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1
    soa_dir = args.yelpsoa_config_root
    volumes = args.volumes
    # Deployments info is only needed when we must resolve the image ourselves
    # (no explicit sha, and either no explicit URL or a pull was requested).
    load_deployments = (docker_url is None or pull_image) and not docker_sha
    interactive = args.interactive
    try:
        if instance is None:
            # No instance given: fall back to an interactive "adhoc" shell
            # config for the service.
            instance_type = "adhoc"
            instance = "interactive"
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(
                service, instance, cluster, soa_dir
            )
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s. "
                "You can generate this by running: "
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
                % {"soa_dir": soa_dir, "service": service}
            ),
            sep="\n",
            file=sys.stderr,
        )
        return 1
    # An explicit sha wins over whatever deployments.json would have chosen.
    if docker_sha is not None:
        instance_config.branch_dict = {
            "git_sha": docker_sha,
            "docker_image": build_docker_image_name(service=service, sha=docker_sha),
            "desired_state": "start",
            "force_bounce": None,
        }
    if docker_url is None:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            if instance_config.get_deploy_group() is None:
                print(
                    PaastaColors.red(
                        f"Error: {service}.{instance} has no 'deploy_group' set. Please set one so "
                        "the proper image can be used to run for this service."
                    ),
                    sep="",
                    file=sys.stderr,
                )
            else:
                print(
                    PaastaColors.red(
                        "Error: No sha has been marked for deployment for the %s deploy group.\n"
                        "Please ensure this service has either run through a jenkins pipeline "
                        "or paasta mark-for-deployment has been run for %s\n"
                        % (instance_config.get_deploy_group(), service)
                    ),
                    sep="",
                    file=sys.stderr,
                )
            return 1
    if pull_image:
        docker_pull_image(docker_url)
    # Mount every configured volume whose host path actually exists; warn and
    # skip the rest so a missing path doesn't abort the whole run.
    for volume in instance_config.get_volumes(
        system_paasta_config.get_volumes(),
    ):
        if os.path.exists(volume["hostPath"]):
            volumes.append(
                "{}:{}:{}".format(
                    volume["hostPath"], volume["containerPath"], volume["mode"].lower()
                )
            )
        else:
            print(
                PaastaColors.yellow(
                    "Warning: Path %s does not exist on this host. Skipping this binding."
                    % volume["hostPath"]
                ),
                file=sys.stderr,
            )
    # Command precedence: interactive default shell > --cmd flag > the
    # instance's configured cmd (time-interpolated for tron) > its args.
    if interactive is True and args.cmd is None:
        command = "bash"
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command = format_command_for_type(
                command=command_from_config, instance_type=instance_type, date=args.date
            )
        else:
            command = instance_config.get_args()
    secret_provider_kwargs = {
        "vault_cluster_config": system_paasta_config.get_vault_cluster_config(),
        "vault_auth_method": args.vault_auth_method,
        "vault_token_file": args.vault_token_file,
    }
    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_url=docker_url,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
        secret_provider_name=system_paasta_config.get_secret_provider_name(),
        secret_provider_kwargs=secret_provider_kwargs,
        skip_secrets=args.skip_secrets,
        assume_pod_identity=args.assume_pod_identity,
        assume_role_arn=args.assume_role_arn,
        assume_role_aws_account=assume_role_aws_account,
        use_okta_role=args.use_okta_role,
        use_service_auth_token=args.use_service_auth_token,
        use_sso_service_auth_token=args.use_sso_service_auth_token,
    )
|
Run Docker container by image hash with args set in command line.
Function prints the output of run command in stdout.
|
configure_and_run_docker_container
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/local_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/local_run.py
|
Apache-2.0
|
def build_component_descriptions(components: Mapping[str, Mapping[str, Any]]) -> str:
    """Render a help line for every log component.

    Each component appears on its own line as "<colored name>: <help text>",
    using the component's own ``color`` callable to colorize its name.
    """
    return "\n".join(
        " {}: {}".format(attrs["color"](name), attrs["help"])
        for name, attrs in components.items()
    )
|
Returns a colored description string for every log component
based on its help attribute
|
build_component_descriptions
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def check_timestamp_in_range(
    timestamp: datetime.datetime,
    start_time: datetime.datetime,
    end_time: datetime.datetime,
) -> bool:
    """Tell whether ``timestamp`` falls strictly between two bounds.

    Vacuously True when the timestamp or either bound is None. A naive
    timestamp is localized to UTC before the comparison so it can be
    compared against timezone-aware bounds.

    :param timestamp: The timestamp to check
    :param start_time: The start of the interval (exclusive)
    :param end_time: The end of the interval (exclusive)
    :return: True if timestamp lies inside the open interval, False otherwise
    """
    if timestamp is None or start_time is None or end_time is None:
        return True
    aware = timestamp if timestamp.tzinfo is not None else pytz.utc.localize(timestamp)
    return start_time < aware < end_time
|
A convenience function to check if a datetime.datetime timestamp is within the given start and end times,
returns true if start_time or end_time is None
:param timestamp: The timestamp to check
:param start_time: The start of the interval
:param end_time: The end of the interval
:return: True if timestamp is within start_time and end_time range, False otherwise
|
check_timestamp_in_range
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def paasta_log_line_passes_filter(
    line: str,
    levels: Sequence[str],
    service: str,
    components: Iterable[str],
    clusters: Sequence[str],
    instances: List[str],
    pods: Iterable[str] = None,
    start_time: datetime.datetime = None,
    end_time: datetime.datetime = None,
) -> bool:
    """Given a (JSON-formatted) log line, return True if the line should be
    displayed given the provided levels, components, and clusters; return False
    otherwise.

    NOTE: Pods are optional as services that use Mesos do not operate with pods.
    """
    try:
        parsed_line = json.loads(line)
    except ValueError:
        log.debug("Trouble parsing line as json. Skipping. Line: %r" % line)
        return False
    # Guard-clause form of the original compound condition; evaluation order
    # (and therefore short-circuiting) is preserved.
    if instances is not None and parsed_line.get("instance") not in instances:
        return False
    level = parsed_line.get("level")
    if level is not None and level not in levels:
        return False
    if parsed_line.get("component") not in components:
        return False
    line_cluster = parsed_line.get("cluster")
    if line_cluster not in clusters and line_cluster != ANY_CLUSTER:
        return False
    timestamp = isodate.parse_datetime(parsed_line.get("timestamp"))
    return check_timestamp_in_range(timestamp, start_time, end_time)
|
Given a (JSON-formatted) log line, return True if the line should be
displayed given the provided levels, components, and clusters; return False
otherwise.
NOTE: Pods are optional as services that use Mesos do not operate with pods.
|
paasta_log_line_passes_filter
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def extract_utc_timestamp_from_log_line(line: str) -> datetime.datetime:
    """
    Extracts the timestamp from a log line of the format "<timestamp> <other data>"
    and returns a UTC datetime object, or None if the line could not be parsed.
    """
    # Extract ISO 8601 date per http://www.pelagodesign.com/blog/2009/05/20/iso-8601-date-validation-that-doesnt-suck/
    iso_re = (
        r"^([\+-]?\d{4}(?!\d{2}\b))((-?)((0[1-9]|1[0-2])(\3([12]\d|0[1-9]|3[01]))?|W([0-4]\d|5[0-2])(-?[1-7])?|"
        r"(00[1-9]|0[1-9]\d|[12]\d{2}|3([0-5]\d|6[1-6])))([T\s]((([01]\d|2[0-3])((:?)[0-5]\d)?|24\:?00)([\.,]\d+"
        r"(?!:))?)?(\17[0-5]\d([\.,]\d+)?)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?)?)? "
    )
    match = re.match(iso_re, line)
    if match is None:
        # Could not parse line
        return None
    parsed = isodate.parse_datetime(match.group(0).strip())
    return datetime_convert_timezone(parsed, parsed.tzinfo, tz.tzutc())
|
Extracts the timestamp from a log line of the format "<timestamp> <other data>" and returns a UTC datetime object
or None if it could not parse the line
|
extract_utc_timestamp_from_log_line
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def print_log(
    line: str,
    requested_levels: Sequence[str],
    raw_mode: bool = False,
    strip_headers: bool = False,
) -> None:
    """Print a single log line to stdout.

    Mostly a stub to ease testing. Eventually this may do some formatting or
    something.

    :param line: the raw (JSON) log line
    :param requested_levels: levels the user asked for (forwarded to the prettifier)
    :param raw_mode: emit the line verbatim instead of prettifying it
    :param strip_headers: forwarded to prettify_log_line to drop the component header
    """
    if raw_mode:
        # Suppress our own trailing newline since scribereader already
        # attached one. BUG FIX: this used end=" ", which still suppressed the
        # newline but leaked a stray space onto the start of the next line.
        print(line, end="", flush=True)
    else:
        print(
            prettify_log_line(line, requested_levels, strip_headers),
            flush=True,
        )
|
Mostly a stub to ease testing. Eventually this may do some formatting or
something.
|
print_log
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def prettify_timestamp(timestamp: str) -> str:
    """Returns more human-friendly form of 'timestamp' without microseconds and
    in local time.

    The input is an ISO-8601 timestamp *string* (as found in the JSON log
    lines — see prettify_log_line, which passes parsed_line["timestamp"]); the
    old ``datetime.datetime`` annotation was wrong, since the value is parsed
    here with isodate.parse_datetime.
    """
    dt = isodate.parse_datetime(timestamp)
    pretty_timestamp = datetime_from_utc_to_local(dt)
    return pretty_timestamp.strftime("%Y-%m-%d %H:%M:%S")
|
Returns more human-friendly form of 'timestamp' without microseconds and
in local time.
|
prettify_timestamp
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def prettify_log_line(
    line: str, requested_levels: Sequence[str], strip_headers: bool
) -> str:
    """Format a JSON log line into a human-readable string.

    Expects the line to be JSON with the usual paasta log keys; falls back to
    an explanatory message when the line is not valid JSON or is missing an
    expected key.
    """
    try:
        parsed_line = json.loads(line)
    except ValueError:
        log.debug("Trouble parsing line as json. Skipping. Line: %r" % line)
        return "Invalid JSON: %s" % line
    try:
        fields = {
            "timestamp": prettify_timestamp(parsed_line["timestamp"]),
            "message": parsed_line["message"],
        }
        if strip_headers:
            return "%(timestamp)s %(message)s" % fields
        fields["component"] = prettify_component(parsed_line["component"])
        return "%(timestamp)s %(component)s - %(message)s" % fields
    except KeyError:
        log.debug(
            "JSON parsed correctly but was missing a key. Skipping. Line: %r" % line
        )
        return "JSON missing keys: %s" % line
|
Given a line from the log, which is expected to be JSON and have all the
things we expect, return a pretty formatted string containing relevant values.
|
prettify_log_line
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def register_log_reader(name):
    """Return a class decorator that records a log reader implementation
    under ``name`` so get_log_reader_classes can look it up later."""

    def register(log_reader_class):
        _log_reader_classes[name] = log_reader_class
        return log_reader_class

    return register
|
Returns a decorator that registers a log reader class at a given name
so get_log_reader_classes can find it.
|
register_log_reader
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def run_code_over_scribe_envs(
    self,
    clusters: Sequence[str],
    components: Iterable[str],
    callback: Callable[..., None],
) -> None:
    """Iterates over the scribe environments for a given set of clusters and
    components, executing ``callback`` for each component.

    :param clusters: The set of clusters
    :param components: The set of components
    :param callback: Called with (components, stream_info, scribe_env, cluster);
        cluster is only set for per_cluster components
    """
    envs: Set[str] = set()
    for cluster in clusters:
        envs.update(self.determine_scribereader_envs(components, cluster))
    log.debug("Connect to these scribe envs to tail scribe logs: %s" % envs)
    # These components all get grouped in one call for backwards compatibility.
    grouped = {"build", "deploy", "monitoring"}
    wants_grouped = any(component in components for component in grouped)
    per_component = set(components) - grouped
    for env in envs:
        if wants_grouped:
            callback(components, self.get_stream_info("default"), env, cluster=None)
        for component in per_component:
            info = self.get_stream_info(component)
            if info.per_cluster:
                for cluster in clusters:
                    callback([component], info, env, cluster=cluster)
            else:
                callback([component], info, env, cluster=None)
|
Iterates over the scribe environments for a given set of clusters and components, executing
functions for each component
:param clusters: The set of clusters
:param components: The set of components
:param callback: The callback function. Gets called with (component_name, stream_info, scribe_env, cluster)
The cluster field will only be set if the component is set to per_cluster
|
run_code_over_scribe_envs
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def tail_logs(
    self,
    service: str,
    levels: Sequence[str],
    components: Iterable[str],
    clusters: Sequence[str],
    instances: List[str],
    pods: Iterable[str] = None,
    raw_mode: bool = False,
    strip_headers: bool = False,
) -> None:
    """Sergeant function for spawning off all the right log tailing functions.

    NOTE: This function spawns concurrent processes and doesn't necessarily
    worry about cleaning them up! That's because we expect to just exit the
    main process when this function returns (as main() does). Someone calling
    this function directly with something like "while True: tail_paasta_logs()"
    may be very sad.
    NOTE: We try pretty hard to suppress KeyboardInterrupts to prevent big
    useless stack traces, but it turns out to be non-trivial and we fail ~10%
    of the time. We decided we could live with it and we're shipping this to
    see how it fares in real world testing.
    Here are some things we read about this problem:
    * http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
    * http://jtushman.github.io/blog/2014/01/14/python-%7C-multiprocessing-and-interrupts/
    * http://bryceboe.com/2010/08/26/python-multiprocessing-and-keyboardinterrupt/
    We could also try harder to terminate processes from more places. We could
    use process.join() to ensure things have a chance to die. We punted these
    things.
    It's possible this whole multiprocessing strategy is wrong-headed. If you
    are reading this code to curse whoever wrote it, see discussion in
    PAASTA-214 and https://reviewboard.yelpcorp.com/r/87320/ and feel free to
    implement one of the other options.

    :param service: service whose logs to tail
    :param levels: log levels to pass through the filter
    :param components: log components to tail
    :param clusters: clusters to tail logs for
    :param instances: instances to filter on
    :param pods: optional pod names to filter on
    :param raw_mode: print lines verbatim instead of prettifying them
    :param strip_headers: drop the component header when prettifying
    """
    # Matching lines from every tailer process are funneled through this
    # queue to the main process, which prints them.
    queue: Queue = Queue()
    spawned_processes = []

    # Spawns one scribe_tail process per (stream, env) combination; invoked
    # by run_code_over_scribe_envs for each component/env pair.
    def callback(
        components: Iterable[str],
        stream_info: ScribeComponentStreamInfo,
        scribe_env: str,
        cluster: str,
    ) -> None:
        kw = {
            "scribe_env": scribe_env,
            "service": service,
            "levels": levels,
            "components": components,
            "clusters": clusters,
            "instances": instances,
            "pods": pods,
            "queue": queue,
            "filter_fn": stream_info.filter_fn,
        }
        if stream_info.per_cluster:
            kw["stream_name"] = stream_info.stream_name_fn(service, cluster)
            kw["clusters"] = [cluster]
        else:
            kw["stream_name"] = stream_info.stream_name_fn(service)
        log.debug(
            "Running the equivalent of 'scribereader {} {} {}'".format(
                self.get_scribereader_selector(scribe_env),
                scribe_env,
                kw["stream_name"],
            )
        )
        process = Process(target=self.scribe_tail, kwargs=kw)
        spawned_processes.append(process)
        process.start()

    self.run_code_over_scribe_envs(
        clusters=clusters, components=components, callback=callback
    )
    # Pull things off the queue and output them. If any thread dies we are no
    # longer presenting the user with the full picture so we quit.
    #
    # This is convenient for testing, where a fake scribe_tail() can emit a
    # fake log and exit. Without the thread aliveness check, we would just sit
    # here forever even though the threads doing the tailing are all gone.
    #
    # NOTE: A noisy tailer in one scribe_env (such that the queue never gets
    # empty) will prevent us from ever noticing that another tailer has died.
    while True:
        try:
            # This is a blocking call with a timeout for a couple reasons:
            #
            # * If the queue is empty and we get_nowait(), we loop very tightly
            # and accomplish nothing.
            #
            # * Testing revealed a race condition where print_log() is called
            # and even prints its message, but this action isn't recorded on
            # the patched-in print_log(). This resulted in test flakes. A short
            # timeout seems to soothe this behavior: running this test 10 times
            # with a timeout of 0.0 resulted in 2 failures; running it with a
            # timeout of 0.1 resulted in 0 failures.
            #
            # * There's a race where thread1 emits its log line and exits
            # before thread2 has a chance to do anything, causing us to bail
            # out via the Queue Empty and thread aliveness check.
            #
            # We've decided to live with this for now and see if it's really a
            # problem. The threads in test code exit pretty much immediately
            # and a short timeout has been enough to ensure correct behavior
            # there, so IRL with longer start-up times for each thread this
            # will surely be fine.
            #
            # UPDATE: Actually this is leading to a test failure rate of about
            # 1/10 even with timeout of 1s. I'm adding a sleep to the threads
            # in test code to smooth this out, then pulling the trigger on
            # moving that test to integration land where it belongs.
            line = queue.get(block=True, timeout=0.1)
            print_log(line, levels, raw_mode, strip_headers)
        except Empty:
            try:
                # If there's nothing in the queue, take this opportunity to make
                # sure all the tailers are still running.
                running_processes = [tt.is_alive() for tt in spawned_processes]
                if not running_processes or not all(running_processes):
                    log.warning(
                        "Quitting because I expected %d log tailers to be alive but only %d are alive."
                        % (len(spawned_processes), running_processes.count(True))
                    )
                    for process in spawned_processes:
                        if process.is_alive():
                            process.terminate()
                    break
            except KeyboardInterrupt:
                # Die peacefully rather than printing N threads worth of stack
                # traces.
                #
                # This extra nested catch is because it's pretty easy to be in
                # the above try block when the user hits Ctrl-C which otherwise
                # dumps a stack trace.
                log.warning("Terminating.")
                break
        except KeyboardInterrupt:
            # Die peacefully rather than printing N threads worth of stack
            # traces.
            log.warning("Terminating.")
            break
|
Sergeant function for spawning off all the right log tailing functions.
NOTE: This function spawns concurrent processes and doesn't necessarily
worry about cleaning them up! That's because we expect to just exit the
main process when this function returns (as main() does). Someone calling
this function directly with something like "while True: tail_paasta_logs()"
may be very sad.
NOTE: We try pretty hard to suppress KeyboardInterrupts to prevent big
useless stack traces, but it turns out to be non-trivial and we fail ~10%
of the time. We decided we could live with it and we're shipping this to
see how it fares in real world testing.
Here are some things we read about this problem:
* http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
* http://jtushman.github.io/blog/2014/01/14/python-%7C-multiprocessing-and-interrupts/
* http://bryceboe.com/2010/08/26/python-multiprocessing-and-keyboardinterrupt/
We could also try harder to terminate processes from more places. We could
use process.join() to ensure things have a chance to die. We punted these
things.
It's possible this whole multiprocessing strategy is wrong-headed. If you
are reading this code to curse whoever wrote it, see discussion in
PAASTA-214 and https://reviewboard.yelpcorp.com/r/87320/ and feel free to
implement one of the other options.
|
tail_logs
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def scribe_tail(
    self,
    scribe_env: str,
    stream_name: str,
    service: str,
    levels: Sequence[str],
    components: Iterable[str],
    clusters: Sequence[str],
    instances: List[str],
    pods: Iterable[str],
    queue: Queue,
    filter_fn: Callable,
    parse_fn: Callable = None,
) -> None:
    """Tail one scribe stream in one environment, pushing matching lines
    onto ``queue``.

    This code is designed to run in a thread as spawned by tail_paasta_logs().
    """
    try:
        log.debug(f"Going to tail {stream_name} scribe stream in {scribe_env}")
        locations = scribe_env_to_locations(scribe_env)
        host, port = scribereader.get_tail_host_and_port(**locations)
        for raw_line in scribereader.get_stream_tailer(stream_name, host, port):
            candidate = parse_fn(raw_line, clusters, service) if parse_fn else raw_line
            if filter_fn(
                candidate, levels, service, components, clusters, instances, pods
            ):
                queue.put(candidate)
    except KeyboardInterrupt:
        # Die peacefully rather than printing N threads worth of stack
        # traces.
        pass
    except StreamTailerSetupError as e:
        if "No data in stream" not in str(e):
            raise
        log.warning(f"Scribe stream {stream_name} is empty on {scribe_env}")
        log.warning(
            "Don't Panic! This may or may not be a problem depending on if you expect there to be"
        )
        log.warning("output within this stream.")
        # Enter a wait so the process isn't considered dead.
        # This is just a large number, since apparently some python interpreters
        # don't like being passed sys.maxsize.
        sleep(2**16)
|
Creates a scribetailer for a particular environment.
When it encounters a line that it should report, it sticks it into the
provided queue.
This code is designed to run in a thread as spawned by tail_paasta_logs().
|
scribe_tail
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def determine_scribereader_envs(
    self, components: Iterable[str], cluster: str
) -> Set[str]:
    """Work out which scribereader environments must be connected to for a
    set of components in a given cluster.

    Some components always live in certain environments regardless of the
    cluster, and some cluster names do not map 1:1 onto scribe environment
    names, so that resolution happens here.
    """
    envs: Set[str] = set()
    for component in components:
        component_conf = LOG_COMPONENTS[component]
        # A component's explicit 'source_env' wins; otherwise fall back to
        # the scribe env associated with the cluster.
        envs.add(
            component_conf.get("source_env", self.cluster_to_scribe_env(cluster))
        )
        envs.update(component_conf.get("additional_source_envs", []))
    return envs
|
Returns a list of environments that scribereader needs to connect
to based on a given list of components and the cluster involved.
Some components are in certain environments, regardless of the cluster.
Some clusters do not match up with the scribe environment names, so
we figure that out here
|
determine_scribereader_envs
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def cluster_to_scribe_env(self, cluster: str) -> str:
    """Look up the scribe environment associated with a given paasta cluster.

    Scribe has its own "environment" key, which doesn't always map 1:1 with
    our cluster names, so a manual mapping is maintained. That mapping is
    deployed as a config file via puppet as part of the public config
    deployed to every server. Exits the process if the cluster is unknown.
    """
    env = self.cluster_map.get(cluster)
    if env is not None:
        return env
    print("I don't know where scribe logs for %s live?" % cluster)
    sys.exit(1)
|
Looks up the particular scribe env associated with a given paasta cluster.
Scribe has its own "environment" key, which doesn't always map 1:1 with our
cluster names, so we have to maintain a manual mapping.
This mapping is deployed as a config file via puppet as part of the public
config deployed to every server.
|
cluster_to_scribe_env
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def scribe_env_to_locations(scribe_env: str) -> Mapping[str, Any]:
    """Converts a scribe environment to a dictionary of locations. The
    return value is meant to be used as kwargs for
    `scribereader.get_tail_host_and_port`.

    :param scribe_env: the scribe environment name to classify
    :return: dict with keys "ecosystem", "region" and "superregion"; exactly
        one key holds scribe_env and the other two are None
    """
    locations = {"ecosystem": None, "region": None, "superregion": None}
    if scribe_env in scribereader.PROD_REGIONS:
        locations["region"] = scribe_env
    elif scribe_env in scribereader.PROD_SUPERREGIONS:
        locations["superregion"] = scribe_env
    else:  # non-prod envs are expressed as ecosystems
        locations["ecosystem"] = scribe_env
    return locations
|
Converts a scribe environment to a dictionary of locations. The
return value is meant to be used as kwargs for `scribereader.get_tail_host_and_port`.
|
scribe_env_to_locations
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def generate_start_end_time(
    from_string: str = "30m", to_string: str = None
) -> Tuple[datetime.datetime, datetime.datetime]:
    """Parses the --from and --to command line arguments to create python
    datetime objects representing the start and end times for log retrieval.

    :param from_string: The --from argument; either a pytimeparse duration
        (e.g. "30m") or an ISO-8601 timestamp. Defaults to 30 minutes ago.
    :param to_string: The --to argument, same formats; defaults to the time
        right now
    :return: A tuple (start_time, end_time) of timezone-aware UTC datetimes
        specifying the interval of log retrieval
    :raises ValueError: if either argument cannot be parsed, or if the
        resulting start time is after the end time
    """
    # Sample "now" exactly once so both endpoints are computed relative to
    # the same instant (previously two utcnow() calls could skew the window).
    now = datetime.datetime.utcnow()
    if to_string is None:
        end_time = now
    else:
        # Try parsing as a natural time duration first; if that fails move
        # on to parsing as an ISO-8601 timestamp
        to_duration = timeparse(to_string)
        if to_duration is not None:
            end_time = now - datetime.timedelta(seconds=to_duration)
        else:
            end_time = isodate.parse_datetime(to_string)
            if not end_time:
                raise ValueError(
                    "--to argument not in ISO8601 format and not a valid pytimeparse duration"
                )
    from_duration = timeparse(from_string)
    if from_duration is not None:
        start_time = now - datetime.timedelta(seconds=from_duration)
    else:
        start_time = isodate.parse_datetime(from_string)
        if not start_time:
            raise ValueError(
                "--from argument not in ISO8601 format and not a valid pytimeparse duration"
            )
    # Convert the timestamps to something timezone aware
    start_time = pytz.utc.localize(start_time)
    end_time = pytz.utc.localize(end_time)
    if start_time > end_time:
        raise ValueError("Start time bigger than end time")
    return start_time, end_time
|
Parses the --from and --to command line arguments to create python
datetime objects representing the start and end times for log retrieval
:param from_string: The --from argument, defaults to 30 minutes
:param to_string: The --to argument, defaults to the time right now
:return: A tuple containing start_time, end_time, which specify the interval of log retrieval
|
generate_start_end_time
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def paasta_logs(args: argparse.Namespace) -> int:
    """Print the logs for a Paasta service.

    Dispatches to the appropriate log reader method based on the filtering
    arguments supplied: tail mode, last-N-lines, line offset, or time range.

    :param args: argparse.Namespace obj created from sys.args by cli
    :return: 0 on success, 1 on invalid arguments
    """
    soa_dir = args.soa_dir
    service = figure_out_service_name(args, soa_dir)
    clusters = args.cluster
    # NOTE(review): the split-length check permits up to two comma-separated
    # instances even though the error message says "one instance" — confirm
    # whether > 1 was intended.
    if (
        args.cluster is None
        or args.instance is None
        or len(args.instance.split(",")) > 2
    ):
        print(
            PaastaColors.red("You must specify one cluster and one instance."),
            file=sys.stderr,
        )
        return 1
    if verify_instances(args.instance, service, clusters, soa_dir):
        return 1
    instance = args.instance
    if args.pods is None:
        pods = None
    else:
        pods = args.pods.split(",")
    # "app_output" is shorthand for both stdout and stderr components.
    components = args.components
    if "app_output" in args.components:
        components.remove("app_output")
        components.add("stdout")
        components.add("stderr")
    if args.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)
    levels = [DEFAULT_LOGLEVEL, "debug"]
    log.debug(f"Going to get logs for {service} on cluster {clusters}")
    log_reader = get_log_reader(components)
    if not validate_filtering_args(args, log_reader):
        return 1
    # They haven't specified what kind of filtering they want, decide for them
    if args.line_count is None and args.time_from is None and not args.tail:
        return pick_default_log_mode(
            args, log_reader, service, levels, components, clusters, instance, pods
        )
    if args.tail:
        print(
            PaastaColors.cyan("Tailing logs and applying filters..."), file=sys.stderr
        )
        log_reader.tail_logs(
            service=service,
            levels=levels,
            components=components,
            clusters=clusters,
            instances=[instance],
            pods=pods,
            raw_mode=args.raw_mode,
            strip_headers=args.strip_headers,
        )
        return 0
    # If the logger doesn't support offsetting the number of lines by a particular line number
    # there is no point in distinguishing between a positive/negative number of lines since it
    # can only get the last N lines
    if not log_reader.SUPPORTS_LINE_OFFSET and args.line_count is not None:
        args.line_count = abs(args.line_count)
    # Handle line based filtering
    if args.line_count is not None and args.line_offset is None:
        log_reader.print_last_n_logs(
            service=service,
            line_count=args.line_count,
            levels=levels,
            components=components,
            clusters=clusters,
            instances=[instance],
            pods=pods,
            raw_mode=args.raw_mode,
            strip_headers=args.strip_headers,
        )
        return 0
    elif args.line_count is not None and args.line_offset is not None:
        log_reader.print_logs_by_offset(
            service=service,
            line_count=args.line_count,
            line_offset=args.line_offset,
            levels=levels,
            components=components,
            clusters=clusters,
            instances=[instance],
            pods=pods,
            raw_mode=args.raw_mode,
            strip_headers=args.strip_headers,
        )
        return 0
    # Handle time based filtering
    try:
        start_time, end_time = generate_start_end_time(args.time_from, args.time_to)
    except ValueError as e:
        print(PaastaColors.red(str(e)), file=sys.stderr)
        return 1
    log_reader.print_logs_by_time(
        service=service,
        start_time=start_time,
        end_time=end_time,
        levels=levels,
        components=components,
        clusters=clusters,
        instances=[instance],
        pods=pods,
        raw_mode=args.raw_mode,
        strip_headers=args.strip_headers,
    )
    return 0
|
Print the logs for a Paasta service.
:param args: argparse.Namespace obj created from sys.args by cli
|
paasta_logs
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/logs.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/logs.py
|
Apache-2.0
|
def can_run_metric_watcher_threads(
    service: str,
    soa_dir: str,
) -> bool:
    """
    Cannot run slo and metric watcher threads together for now.
    SLO Watcher Threads take precedence over metric watcher threads.
    Metric Watcher Threads can run if there are no SLOs available.
    """
    has_slo_files = bool(
        get_files_of_type_in_dir(file_type="slo", service=service, soa_dir=soa_dir)
    )
    has_rollback_files = bool(
        get_files_of_type_in_dir(
            file_type="rollback", service=service, soa_dir=soa_dir
        )
    )
    # Metric watchers may only run when rollback configs exist and no SLOs do.
    return has_rollback_files and not has_slo_files
|
Cannot run slo and metric watcher threads together for now.
SLO Watcher Threads take precedence over metric watcher threads.
Metric Watcher Threads can run if there are no SLOs available.
|
can_run_metric_watcher_threads
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/mark_for_deployment.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/mark_for_deployment.py
|
Apache-2.0
|
def get_slack_channel(self) -> str:
    """Safely get some slack channel to post to. Defaults to ``DEFAULT_SLACK_CHANNEL``.
    Currently only uses the first slack channel available, and doesn't support
    multi-channel notifications."""
    if not self.deploy_info.get("slack_notify", True):
        return DEFAULT_SLACK_CHANNEL
    try:
        channel = self.deploy_info.get("slack_channels")[0]
        # Nightly jenkins builds will often re-deploy master. This causes Slack noise that wasn't present before
        # the auto-rollbacks work.
        if self.deployment_version == self.old_deployment_version:
            print(
                f"Rollback image matches rollforward image: {self.deployment_version}, "
                f"Sending slack notifications to {DEFAULT_SLACK_CHANNEL} instead of {channel}."
            )
            return DEFAULT_SLACK_CHANNEL
        return channel
    except (IndexError, AttributeError, TypeError):
        # Missing/empty/None "slack_channels" — fall back to the default.
        return DEFAULT_SLACK_CHANNEL
|
Safely get some slack channel to post to. Defaults to ``DEFAULT_SLACK_CHANNEL``.
Currently only uses the first slack channel available, and doesn't support
multi-channel notifications.
|
get_slack_channel
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/mark_for_deployment.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/mark_for_deployment.py
|
Apache-2.0
|
def paasta_pause_service_autoscaler(args):
    """With a given cluster and duration, pauses the paasta service autoscaler
    in that cluster for duration minutes"""
    # Refuse overly long pauses unless the user explicitly forces them.
    if args.duration > MAX_PAUSE_DURATION and not args.force:
        print(
            "Specified duration: {d} longer than max: {m}".format(
                d=args.duration, m=MAX_PAUSE_DURATION
            )
        )
        print("If you are really sure, run again with --force")
        return 3
    if args.info:
        return get_service_autoscale_pause_time(args.cluster)
    if args.resume:
        return_code = delete_service_autoscale_pause_time(args.cluster)
        _log_audit(action="resume-service-autoscaler", cluster=args.cluster)
        return return_code
    minutes = args.duration
    return_code = update_service_autoscale_pause_time(args.cluster, minutes)
    _log_audit(
        action="pause-service-autoscaler",
        action_details={"duration": minutes},
        cluster=args.cluster,
    )
    return return_code
|
With a given cluster and duration, pauses the paasta service autoscaler
in that cluster for duration minutes
|
paasta_pause_service_autoscaler
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/pause_service_autoscaler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/pause_service_autoscaler.py
|
Apache-2.0
|
def is_docker_image_already_in_registry(service: str, soa_dir: str, sha: str, image_version: Optional[str] = None) -> bool:  # type: ignore
    """Verifies that docker image exists in the paasta registry.
    :param service: name of the service
    :param sha: git sha
    :returns: True, False or raises requests.exceptions.RequestException
    """
    registry_uri = get_service_docker_registry(service, soa_dir)
    repository, tag = build_docker_image_name(service, sha, image_version).split(":", 1)
    creds = read_docker_registry_creds(registry_uri)
    uri = f"{registry_uri}/v2/{repository}/manifests/{tag}"
    with requests.Session() as session:
        try:
            url = "https://" + uri
            if creds[0] is None:
                response = session.head(url, timeout=30)
            else:
                response = session.head(url, auth=creds, timeout=30)
        except SSLError:
            # Without auth creds, fall back to trying plain http; with creds
            # we never downgrade, so re-raise.
            if creds[0] is not None:
                raise
            url = "http://" + uri
            response = session.head(url, timeout=30)
    if response.status_code == 200:
        return True
    if response.status_code == 404:
        return False  # No Such Repository Error
    response.raise_for_status()
|
Verifies that docker image exists in the paasta registry.
:param service: name of the service
:param sha: git sha
:returns: True, False or raises requests.exceptions.RequestException
|
is_docker_image_already_in_registry
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/push_to_registry.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/push_to_registry.py
|
Apache-2.0
|
def get_versions_for_service(
    service: str, deploy_groups: Collection[str], soa_dir: str
) -> Mapping[DeploymentVersion, Tuple[str, str]]:
    """Returns a dictionary of 2-tuples of the form (timestamp, deploy_group) for each version tuple of (deploy sha, image_version)"""
    if service is None:
        return {}
    git_url = get_git_url(service=service, soa_dir=soa_dir)
    all_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
    deploy_groups, _ = validate_given_deploy_groups(all_deploy_groups, deploy_groups)
    previously_deployed_versions: Dict[DeploymentVersion, Tuple[str, str]] = {}
    for ref, sha in list_remote_refs(git_url).items():
        tags = extract_tags(ref)
        try:
            deploy_group = tags["deploy_group"]
            tstamp = tags["tstamp"]
            image_version = tags["image_version"]
        except KeyError:
            # Ref doesn't carry the expected tag fields; skip it.
            continue
        if deploy_group not in deploy_groups:
            continue
        # Filter and dedup by keeping the most recent timestamp per version.
        # Note that all strings are greater than ''
        version = DeploymentVersion(sha=sha, image_version=image_version)
        tstamp_so_far = previously_deployed_versions.get(version, ("all", ""))[1]
        if tstamp > tstamp_so_far:
            previously_deployed_versions[version] = (tstamp, deploy_group)
    return previously_deployed_versions
|
Returns a dictionary of 2-tuples of the form (timestamp, deploy_group) for each version tuple of (deploy sha, image_version)
|
get_versions_for_service
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/rollback.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/rollback.py
|
Apache-2.0
|
def paasta_rollback(args: argparse.Namespace) -> int:
    """Call mark_for_deployment with rollback parameters
    :param args: contains all the arguments passed onto the script: service,
    deploy groups and sha. These arguments will be verified and passed onto
    mark_for_deployment.
    :return: 0 if every mark_for_deployment succeeded, non-zero otherwise
    """
    soa_dir = args.soa_dir
    service = figure_out_service_name(args, soa_dir)
    deploy_info = get_deploy_info(service=service, soa_dir=args.soa_dir)
    if not can_user_deploy_service(deploy_info, service):
        return 1
    git_url = get_git_url(service, soa_dir)
    # Either roll back every deploy group, or only the ones listed on the CLI.
    if args.all_deploy_groups:
        given_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
    else:
        given_deploy_groups = {
            deploy_group
            for deploy_group in args.deploy_groups.split(",")
            if deploy_group
        }
    all_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
    deploy_groups, invalid = validate_given_deploy_groups(
        all_deploy_groups, given_deploy_groups
    )
    if len(invalid) > 0:
        print(
            PaastaColors.yellow(
                "These deploy groups are not valid and will be skipped: %s.\n"
                % (",").join(invalid)
            )
        )
    if len(deploy_groups) == 0 and not args.all_deploy_groups:
        print(
            PaastaColors.red(
                "ERROR: No valid deploy groups specified for %s.\n Use the flag -a to rollback all valid deploy groups for this service"
                % (service)
            )
        )
        return 1
    versions = get_versions_for_service(service, deploy_groups, soa_dir)
    commit = args.commit
    image_version = args.image_version
    new_version = DeploymentVersion(sha=commit, image_version=image_version)
    if not commit:
        print("Please specify a commit to mark for rollback (-k, --commit).")
        list_previous_versions(
            service, deploy_groups, bool(given_deploy_groups), versions
        )
        return 1
    elif new_version not in versions and not args.force:
        # Guard against rolling back to a version that was never deployed.
        print(
            PaastaColors.red(
                f"This version {new_version} has never been deployed before."
            )
        )
        print("Please double check it or use --force to skip this verification.\n")
        list_previous_versions(
            service, deploy_groups, bool(given_deploy_groups), versions
        )
        return 1
    try:
        validate_full_git_sha(args.commit)
    except argparse.ArgumentTypeError as e:
        print(PaastaColors.red(f"Error: {e}"))
        return 1
    # TODO: Add similar check for when image_version is empty and no-commit redeploys is enforced for requested deploy_group
    returncode = 0
    for deploy_group in deploy_groups:
        rolled_back_from = get_currently_deployed_version(service, deploy_group)
        # Accumulate failures bitwise so any non-zero m-f-d marks the run failed.
        returncode |= mark_for_deployment(
            git_url=git_url,
            service=service,
            deploy_group=deploy_group,
            commit=commit,
            image_version=image_version,
        )
        # we could also gate this by the return code from m-f-d, but we probably care more about someone wanting to
        # rollback than we care about if the underlying machinery was successfully able to complete the request
        if rolled_back_from != new_version:
            audit_action_details = {
                "rolled_back_from": str(rolled_back_from),
                "rolled_back_to": str(new_version),
                "rollback_type": RollbackTypes.USER_INITIATED_ROLLBACK.value,
                "deploy_group": deploy_group,
            }
            _log_audit(
                action="rollback", action_details=audit_action_details, service=service
            )
    # NOTE(review): rolled_back_from here is whatever the LAST deploy group's
    # previous version was — the warning below assumes a single deploy group;
    # confirm this is intended for multi-group rollbacks.
    if returncode == 0:
        print(
            PaastaColors.yellow(
                f"WARNING: You MUST manually revert changes in Git! Use 'git revert {rolled_back_from.sha}', and go through the normal push process. "
            )
        )
        print(
            PaastaColors.yellow(
                f"WARNING: Failing to do so means that Jenkins will redeploy the latest code on the next scheduled build!"
            )
        )
    return returncode
|
Call mark_for_deployment with rollback parameters
:param args: contains all the arguments passed onto the script: service,
deploy groups and sha. These arguments will be verified and passed onto
mark_for_deployment.
|
paasta_rollback
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/rollback.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/rollback.py
|
Apache-2.0
|
def _add_and_update_args(parser: argparse.ArgumentParser) -> None:
    """common args for `add` and `update`.

    Registers the secret-value input options (plaintext arg or stdin), the
    required secret name, the optional cluster list, and the cross-env
    duplication motivation on the given subparser.
    """
    # Secret value may be supplied directly on the command line...
    parser.add_argument(
        "-p",
        "--plain-text",
        required=False,
        type=str,
        help="Optionally specify the secret as a command line argument",
    )
    # ...or piped in on stdin.
    parser.add_argument(
        "-i",
        "--stdin",
        required=False,
        action="store_true",
        default=False,
        help="Optionally pass the plaintext from stdin",
    )
    parser.add_argument(
        "--cross-env-motivation",
        required=False,
        type=str,
        help=(
            "Provide motivation in case the same value is being duplicated "
            "across multiple runtime environments when adding or updating a secret"
        ),
        metavar="MOTIVATION",
    )
    # check_secret_name validates the name format at parse time.
    parser.add_argument(
        "-n",
        "--secret-name",
        type=check_secret_name,
        required=True,
        help="The name of the secret to create/update, "
        "this is the name you will reference in your "
        "services yaml files and should "
        "be unique per service.",
    )
    # `.completer` comes from argcomplete; mypy doesn't know the attribute.
    parser.add_argument(  # type: ignore
        "-c",
        "--clusters",
        help="A comma-separated list of clusters to create secrets for. "
        "Note: this is translated to ecosystems because Vault is run "
        "at an ecosystem level. As a result you can only have different "
        "secrets per ecosystem. (it is not possible for example to encrypt "
        "a different value for pnw-prod vs nova-prod. "
        "Defaults to all clusters in which the service runs. "
        "For example: --clusters pnw-prod,nova-prod ",
    ).completer = lazy_choices_completer(list_clusters)
|
common args for `add` and `update`.
|
_add_and_update_args
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/secret.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/secret.py
|
Apache-2.0
|
def get_docker_image(
    args: argparse.Namespace, instance_config: InstanceConfig
) -> Optional[str]:
    """
    Since the Docker image digest used to launch the Spark cluster is obtained by inspecting local
    Docker images, we need to ensure that the Docker image exists locally or is pulled in all scenarios.
    """
    if args.build:
        # docker image is built locally then pushed
        return build_and_push_docker_image(args)
    if args.image:
        docker_url = args.image
    else:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            print(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s\n"
                    % (instance_config.get_deploy_group(), args.service)
                ),
                sep="",
                file=sys.stderr,
            )
            return None
    print(
        "Please wait while the image (%s) is pulled (times out after 5m)..."
        % docker_url,
        file=sys.stderr,
    )
    # Need sudo for credentials when pulling images from paasta docker registry (docker-paasta.yelpcorp.com)
    # However, in CI env, we can't connect to docker via root and we can pull with user `jenkins`
    cmd_prefix = "" if "CI" in os.environ else "sudo -H "
    retcode, _ = _run(f"{cmd_prefix}docker pull {docker_url}", stream=True, timeout=300)
    if retcode == 0:
        return docker_url
    print(
        "\nPull failed. Are you authorized to run docker commands?",
        file=sys.stderr,
    )
    return None
|
Since the Docker image digest used to launch the Spark cluster is obtained by inspecting local
Docker images, we need to ensure that the Docker image exists locally or is pulled in all scenarios.
|
get_docker_image
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/spark_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py
|
Apache-2.0
|
def get_spark_env(
    args: argparse.Namespace,
    spark_conf_str: str,
    aws_creds: Tuple[Optional[str], Optional[str], Optional[str]],
    ui_port: str,
    system_paasta_config: SystemPaastaConfig,
) -> Dict[str, str]:
    """Create the env config dict to configure on the docker container.

    :param args: parsed spark-run CLI args; cmd, aws_region, work_dir,
        spark_args and get_eks_token_via_iam_user are consulted
    :param spark_conf_str: pre-rendered spark options, exported as SPARK_OPTS
    :param aws_creds: (access_key, secret_key, session_token) tuple; any may
        be None
    :param ui_port: port for the Spark history-server UI
    :return: environment variable name -> value mapping for the container
    """
    spark_env = {}
    access_key, secret_key, session_token = aws_creds
    if access_key:
        spark_env["AWS_ACCESS_KEY_ID"] = access_key
        spark_env["AWS_SECRET_ACCESS_KEY"] = secret_key
        if session_token is not None:
            spark_env["AWS_SESSION_TOKEN"] = session_token
    spark_env["AWS_DEFAULT_REGION"] = args.aws_region
    spark_env["PAASTA_LAUNCHED_BY"] = get_possible_launched_by_user_variable_from_env()
    spark_env["PAASTA_INSTANCE_TYPE"] = "spark"
    # Run spark (and mesos framework) as root.
    spark_env["SPARK_USER"] = "root"
    spark_env["SPARK_OPTS"] = spark_conf_str
    # Default configs to start the jupyter notebook server
    if args.cmd == "jupyter-lab":
        spark_env["JUPYTER_RUNTIME_DIR"] = "/source/.jupyter"
        spark_env["JUPYTER_DATA_DIR"] = "/source/.jupyter"
        spark_env["JUPYTER_CONFIG_DIR"] = "/source/.jupyter"
    elif args.cmd == "history-server":
        # work_dir is host:container — logs live on the container side.
        dirs = args.work_dir.split(":")
        spark_env["SPARK_LOG_DIR"] = dirs[1]
        if not args.spark_args or not args.spark_args.startswith(
            "spark.history.fs.logDirectory"
        ):
            print(
                "history-server requires spark.history.fs.logDirectory in spark-args",
                file=sys.stderr,
            )
            sys.exit(1)
        spark_env["SPARK_HISTORY_OPTS"] = (
            f"-D{args.spark_args} " f"-Dspark.history.ui.port={ui_port}"
        )
        spark_env["SPARK_DAEMON_CLASSPATH"] = "/opt/spark/extra_jars/*"
        spark_env["SPARK_NO_DAEMONIZE"] = "true"
    if args.get_eks_token_via_iam_user:
        with open(SPARK_DRIVER_IAM_USER) as f:
            config = ConfigParser()
            config.read_file(f)
        # these env variables are consumed by a script specified in the spark kubeconfig - and which will result in a tightly-scoped IAM identity being used for EKS cluster access
        spark_env["GET_EKS_TOKEN_AWS_ACCESS_KEY_ID"] = config["default"][
            "aws_access_key_id"
        ]
        spark_env["GET_EKS_TOKEN_AWS_SECRET_ACCESS_KEY"] = config["default"][
            "aws_secret_access_key"
        ]
        spark_env["KUBECONFIG"] = system_paasta_config.get_spark_iam_user_kubeconfig()
    else:
        spark_env["KUBECONFIG"] = system_paasta_config.get_spark_kubeconfig()
    return spark_env
|
Create the env config dict to configure on the docker container
|
get_spark_env
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/spark_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py
|
Apache-2.0
|
def _calculate_docker_shared_memory_size(shm_size: Optional[str]) -> str:
"""In Order of preference:
1. Argument: --docker-shm-size
3. Default
"""
if shm_size:
return shm_size
return DEFAULT_DOCKER_SHM_SIZE
|
In order of preference:
1. Argument: --docker-shm-size
2. Default
|
_calculate_docker_shared_memory_size
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/spark_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py
|
Apache-2.0
|
def build_and_push_docker_image(args: argparse.Namespace) -> Optional[str]:
    """
    Build an image if the default Spark service image is not preferred.
    The image needs to be pushed to a registry for the Spark executors
    to pull.

    :param args: parsed spark-run CLI args; uses service, yelpsoa_config_root
        and docker_registry
    :return: image URL pinned to its remote digest, or None if the build,
        tag, or push step fails
    :raises ValueError: if the digest cannot be parsed from the push output
    :raises NoDockerImageError: if the post-push re-pull fails
    """
    if not makefile_responds_to("cook-image"):
        print(
            "A local Makefile with a 'cook-image' target is required for --build",
            file=sys.stderr,
        )
        return None
    # Respect a caller-provided DOCKER_TAG, otherwise derive one per-user.
    default_tag = "{}-{}".format(DEFAULT_SPARK_DOCKER_IMAGE_PREFIX, get_username())
    docker_tag = os.environ.get("DOCKER_TAG", default_tag)
    os.environ["DOCKER_TAG"] = docker_tag
    cook_return = paasta_cook_image(
        args=None, service=args.service, soa_dir=args.yelpsoa_config_root
    )
    if cook_return != 0:
        return None
    registry_uri = args.docker_registry or _get_adhoc_docker_registry(
        service=args.service,
        soa_dir=args.yelpsoa_config_root,
    )
    docker_url = f"{registry_uri}/{docker_tag}"
    command = f"docker tag {docker_tag} {docker_url}"
    print(PaastaColors.grey(command))
    retcode, _ = _run(command, stream=True)
    if retcode != 0:
        return None
    # Pushing to a non-default registry requires credentials, hence sudo.
    if registry_uri != DEFAULT_SPARK_DOCKER_REGISTRY:
        command = "sudo -H docker push %s" % docker_url
    else:
        command = "docker push %s" % docker_url
    print(PaastaColors.grey(command))
    retcode, output = _run(command, stream=False)
    if retcode != 0:
        return None
    # With unprivileged docker, the digest on the remote registry may not match the digest
    # in the local environment. Because of this, we have to parse the digest message from the
    # server response and use downstream when launching spark executors
    # Output from `docker push` with unprivileged docker looks like
    # Using default tag: latest
    # The push refers to repository [docker-dev.yelpcorp.com/paasta-spark-run-dpopes:latest]
    # latest: digest: sha256:0a43aa65174a400bd280d48d460b73eb49b0ded4072c9e173f919543bf693557
    # With privileged docker, the last line has an extra "size: 123"
    # latest: digest: sha256:0a43aa65174a400bd280d48d460b73eb49b0ded4072c9e173f919543bf693557 size: 52
    digest_line = output.split("\n")[-1]
    digest_match = re.match(r"[^:]*: [^:]*: (?P<digest>[^\s]*)", digest_line)
    if not digest_match:
        raise ValueError(f"Could not determine digest from output: {output}")
    digest = digest_match.group("digest")
    image_url = f"{docker_url}@{digest}"
    # If the local digest doesn't match the remote digest AND the registry is
    # non-default (which requires requires authentication, and consequently sudo),
    # downstream `docker run` commands will fail trying to authenticate.
    # To work around this, we can proactively `sudo docker pull` here so that
    # the image exists locally and can be `docker run` without sudo
    if registry_uri != DEFAULT_SPARK_DOCKER_REGISTRY:
        command = f"sudo -H docker pull {image_url}"
        print(PaastaColors.grey(command))
        retcode, output = _run(command, stream=False)
        if retcode != 0:
            raise NoDockerImageError(f"Could not pull {image_url}: {output}")
    return image_url
|
Build an image if the default Spark service image is not preferred.
The image needs to be pushed to a registry for the Spark executors
to pull.
|
build_and_push_docker_image
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/spark_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py
|
Apache-2.0
|
def update_args_from_tronfig(args: argparse.Namespace) -> Optional[Dict[str, str]]:
    """
    Load and check the following config fields from the provided Tronfig.
    - executor
    - pool
    - iam_role
    - iam_role_provider
    - force_spark_resource_configs
    - max_runtime
    - command
    - env
    - spark_args

    Mutates ``args`` in place, overwriting CLI args with their Tronfig
    equivalents (except iam_role when --aws-profile is given).

    Returns: environment variables dictionary or None if failed.
    """
    action_dict = parse_tronfig(args.tronfig, args.job_id)
    if action_dict is None:
        print(
            PaastaColors.red(f"Unable to get configs from job-id: {args.job_id}"),
            file=sys.stderr,
        )
        return None
    # executor === spark
    if action_dict.get("executor", "") != "spark":
        print(
            PaastaColors.red("Invalid Tronfig: executor should be 'spark'"),
            file=sys.stderr,
        )
        return None
    # iam_role / aws_profile
    if (
        "iam_role" in action_dict
        and action_dict.get("iam_role_provider", "aws") != "aws"
    ):
        print(
            PaastaColors.red("Invalid Tronfig: iam_role_provider should be 'aws'"),
            file=sys.stderr,
        )
        return None
    # Other args: map Tronfig YAML fields to spark-run CLI args
    fields_to_args = {
        "pool": "pool",
        "iam_role": "assume_aws_role",
        "force_spark_resource_configs": "force_spark_resource_configs",
        "max_runtime": "timeout_job_runtime",
        "command": "cmd",
        "spark_args": "spark_args",
    }
    for field_name, arg_name in fields_to_args.items():
        if field_name in action_dict:
            value = action_dict[field_name]
            # Convert spark_args values from dict to a string "k1=v1 k2=v2"
            if field_name == "spark_args":
                value = " ".join([f"{k}={v}" for k, v in dict(value).items()])
            # Beautify for printing
            arg_name_str = (f"--{arg_name.replace('_', '-')}").ljust(31, " ")
            # Only load iam_role value if --aws-profile is not set
            if field_name == "iam_role" and args.aws_profile is not None:
                print(
                    PaastaColors.yellow(
                        f"Ignoring Tronfig: `{field_name} : {value}`, since `--aws-profile` is provided. "
                        f"We are giving higher priority to `--aws-profile` in case of paasta spark-run adhoc runs."
                    ),
                )
                continue
            if hasattr(args, arg_name):
                print(
                    PaastaColors.yellow(
                        f"Overwriting args with Tronfig: {arg_name_str} => {field_name} : {value}"
                    ),
                )
            setattr(args, arg_name, value)
    # env (currently paasta spark-run does not support Spark driver secrets environment variables)
    return action_dict.get("env", dict())
|
Load and check the following config fields from the provided Tronfig.
- executor
- pool
- iam_role
- iam_role_provider
- force_spark_resource_configs
- max_runtime
- command
- env
- spark_args
Returns: environment variables dictionary or None if failed.
|
update_args_from_tronfig
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/spark_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/spark_run.py
|
Apache-2.0
|
def make_mutate_refs_func(service_config, force_bounce, desired_state):
    """Create a function that will inform send_pack that we want to create tags
    corresponding to the set of branches passed, with the given force_bounce
    and desired_state parameters. These tags will point at the current tip of
    the branch they associate with.
    dulwich's send_pack wants a function that takes a dictionary of ref name
    to sha and returns a modified version of that dictionary. send_pack will
    then diff what is returned versus what was passed in, and inform the remote
    git repo of our desires."""
    def mutate_refs(refs):
        # Point the new state tag at the tip of the current deployment tag.
        deploy_group = service_config.get_deploy_group()
        _, head_sha, _ = get_latest_deployment_tag(refs, deploy_group)
        new_tag = format_tag(
            service_config.get_branch(), force_bounce, desired_state
        )
        refs[new_tag] = head_sha
        return refs
    return mutate_refs
|
Create a function that will inform send_pack that we want to create tags
corresponding to the set of branches passed, with the given force_bounce
and desired_state parameters. These tags will point at the current tip of
the branch they associate with.
dulwich's send_pack wants a function that takes a dictionary of ref name
to sha and returns a modified version of that dictionary. send_pack will
then diff what is returned versus what was passed in, and inform the remote
git repo of our desires.
|
make_mutate_refs_func
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/start_stop_restart.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/start_stop_restart.py
|
Apache-2.0
|
def paasta_start_or_stop(args, desired_state):
    """Requests a change of state to start or stop given branches of a service.

    :param args: parsed CLI args (service/instance/cluster/owner filters, soa_dir)
    :param desired_state: the state being requested for each matched instance
    :returns: 0 on success; 1 (or an API error status code) on failure
    """
    soa_dir = args.soa_dir
    pargs = apply_args_filters(args)
    if len(pargs) == 0:
        # Nothing matched the given filters.
        return 1
    affected_services = {
        s for service_list in pargs.values() for s in service_list.keys()
    }
    # Acting on more than one service at once is unusual enough to require an
    # explicit interactive confirmation; non-interactive runs are refused.
    if len(affected_services) > 1:
        print(
            PaastaColors.red("Warning: trying to start/stop/restart multiple services:")
        )
        for cluster, services_instances in pargs.items():
            print("Cluster %s:" % cluster)
            for service, instances in services_instances.items():
                print(" Service %s:" % service)
                print(" Instances %s" % ",".join(instances.keys()))
        if sys.stdin.isatty():
            confirm = choice.Binary("Are you sure you want to continue?", False).ask()
        else:
            confirm = False
        if not confirm:
            print()
            print("exiting")
            return 1
    if not all(
        [
            can_user_deploy_service(get_deploy_info(service, soa_dir), service)
            for service in affected_services
        ]
    ):
        print(PaastaColors.red("Exiting due to missing deploy permissions"))
        return 1
    invalid_deploy_groups = []
    kubernetes_message_printed = False
    affected_flinks = []
    # When the selection wasn't fully pinned down (no explicit clusters or
    # instances), ask for confirmation before fanning out.
    if args.clusters is None or args.instances is None:
        if confirm_to_continue(pargs.items(), desired_state) is False:
            print()
            print("exiting")
            return 1
    for cluster, services_instances in pargs.items():
        for service, instances in services_instances.items():
            for instance in instances.keys():
                service_config = get_instance_config(
                    service=service,
                    cluster=cluster,
                    instance=instance,
                    soa_dir=soa_dir,
                    load_deployments=False,
                )
                # Flink instances are handled separately below via the PaaSTA
                # API instead of git refs.
                if isinstance(service_config, FlinkDeploymentConfig):
                    affected_flinks.append(service_config)
                    continue
                try:
                    remote_refs = get_remote_refs(service, soa_dir)
                except remote_git.LSRemoteException as e:
                    msg = (
                        "Error talking to the git server: %s\n"
                        "This PaaSTA command requires access to the git server to operate.\n"
                        "The git server may be down or not reachable from here.\n"
                        "Try again from somewhere where the git server can be reached, "
                        "like your developer environment."
                    ) % str(e)
                    print(msg)
                    return 1
                deploy_group = service_config.get_deploy_group()
                (deploy_tag, _, _) = get_latest_deployment_tag(
                    remote_refs, deploy_group
                )
                if deploy_tag not in remote_refs:
                    # Never deployed to this deploy group; can't change state.
                    invalid_deploy_groups.append(deploy_group)
                else:
                    # force_bounce timestamps make the resulting tag unique so
                    # repeated start/stop requests produce distinct refs.
                    force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
                    if (
                        isinstance(service_config, KubernetesDeploymentConfig)
                        and not kubernetes_message_printed
                    ):
                        print_kubernetes_message(desired_state)
                        kubernetes_message_printed = True
                    issue_state_change_for_service(
                        service_config=service_config,
                        force_bounce=force_bounce,
                        desired_state=desired_state,
                    )
    return_val = 0
    # TODO: Refactor to discover if set_state is available for given
    # instance_type in API
    if affected_flinks:
        print_flink_message(desired_state)
        system_paasta_config = load_system_paasta_config()
        for service_config in affected_flinks:
            cluster = service_config.cluster
            service = service_config.service
            instance = service_config.instance
            is_eks = isinstance(service_config, FlinkEksDeploymentConfig)
            client = get_paasta_oapi_client(
                cluster=get_paasta_oapi_api_clustername(cluster=cluster, is_eks=is_eks),
                system_paasta_config=system_paasta_config,
            )
            if not client:
                print("Cannot get a paasta-api client")
                exit(1)
            try:
                client.service.instance_set_state(
                    service=service,
                    instance=instance,
                    desired_state=desired_state,
                )
            except client.api_error as exc:
                print(exc.reason)
                return exc.status
        # NOTE(review): redundant — return_val is already 0 at this point.
        return_val = 0
    if invalid_deploy_groups:
        print(f"No deploy tags found for {', '.join(invalid_deploy_groups)}.")
        # NOTE(review): `service` here is whatever the last loop iteration
        # assigned, which may not be the service the invalid deploy groups
        # belong to — the message can name the wrong service.
        print(f"Has {service} been deployed there yet?")
        return_val = 1
    return return_val
|
Requests a change of state to start or stop given branches of a service.
|
paasta_start_or_stop
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/start_stop_restart.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/start_stop_restart.py
|
Apache-2.0
|
def get_actual_deployments(
    service: str, soa_dir: str
) -> Mapping[str, DeploymentVersion]:
    """Given a service, return a dict of instances->DeploymentVersions"""
    loader = PaastaServiceConfigLoader(service=service, soa_dir=soa_dir)
    deployments: Dict[str, DeploymentVersion] = {}
    for cluster in list_clusters(service=service, soa_dir=soa_dir):
        for type_class in DEPLOYMENT_INSTANCE_CONFIG:
            for conf in loader.instance_configs(
                cluster=cluster, instance_type_class=type_class
            ):
                # Keys are "cluster.instance" namespaces.
                deployments[
                    f"{cluster}.{conf.instance}"
                ] = get_deployment_version_from_dockerurl(conf.get_docker_image())
    if not deployments:
        print(
            f"Warning: it looks like {service} has not been deployed anywhere yet!",
            file=sys.stderr,
        )
    return deployments
|
Given a service, return a dict of instances->DeploymentVersions
|
get_actual_deployments
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/status.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py
|
Apache-2.0
|
def find_instance_types(status: Any) -> List[str]:
    """
    find_instance_types finds the instance types from the status api response.
    It iterates over all instance types registered in `INSTANCE_TYPE_WRITERS`.

    :param status: paasta api status object
    :return: the list of matching instance types
    """
    # An instance type is "present" when the status response carries a
    # non-None entry for it.
    return [
        instance_type
        for instance_type in INSTANCE_TYPE_WRITERS
        if status.get(instance_type) is not None
    ]
|
find_instance_types finds the instance types from the status api response.
It iterates over all instance types registered in `INSTANCE_TYPE_WRITERS`.
:param status: paasta api status object
:return: the list of matching instance types
|
find_instance_types
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/status.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py
|
Apache-2.0
|
def report_status_for_cluster(
    service: str,
    cluster: str,
    deploy_pipeline: Sequence[str],
    actual_deployments: Mapping[str, DeploymentVersion],
    instance_whitelist: Mapping[str, Type[InstanceConfig]],
    system_paasta_config: SystemPaastaConfig,
    lock: Lock,
    verbose: int = 0,
    new: bool = False,
    all_namespaces: bool = False,
) -> Tuple[int, Sequence[str]]:
    """With a given service and cluster, prints the status of the instances
    in that cluster

    :param service: service name to report on
    :param cluster: cluster to inspect
    :param deploy_pipeline: sequence of "cluster.instance" namespaces
    :param actual_deployments: mapping of namespace -> deployed version
    :param instance_whitelist: mapping of instance name -> config class
    :param lock: lock shared with other report threads to serialize output
    :returns: tuple of (return code, list of output lines)
    """
    output = ["", "service: %s" % service, "cluster: %s" % cluster]
    deployed_instances = []
    instances = [
        (instance, instance_config_class)
        for instance, instance_config_class in instance_whitelist.items()
        if instance_config_class in ALLOWED_INSTANCE_CONFIG
    ]
    # BUGFIX: `instances` holds (name, config_class) tuples, so testing a bare
    # instance name with `instance not in instances` could never match and the
    # whitelist filter below always skipped. Compare against the names instead.
    instance_names = {instance for instance, _ in instances}
    # Tron instances are not present in the deploy pipeline, so treat them as
    # seen by default to avoid error messages
    seen_instances = [
        instance
        for instance, instance_config_class in instance_whitelist.items()
        if instance_config_class == TronActionConfig
    ]
    for namespace in deploy_pipeline:
        cluster_in_pipeline, instance = namespace.split(".")
        seen_instances.append(instance)
        if cluster_in_pipeline != cluster:
            continue
        if instance_names and instance not in instance_names:
            continue
        # Case: service deployed to cluster.instance
        if namespace in actual_deployments:
            deployed_instances.append(instance)
        # Case: flink instances don't use `deployments.json`
        elif instance_whitelist.get(instance) == FlinkDeploymentConfig:
            deployed_instances.append(instance)
        # Case: service NOT deployed to cluster.instance
        else:
            output.append(" instance: %s" % PaastaColors.red(instance))
            output.append(" Git sha: None (not deployed yet)")
    return_code = 0
    return_codes = []
    for deployed_instance, instance_config_class in instances:
        return_codes.append(
            paasta_status_on_api_endpoint(
                cluster=cluster,
                service=service,
                instance=deployed_instance,
                system_paasta_config=system_paasta_config,
                lock=lock,
                verbose=verbose,
                new=new,
                all_namespaces=all_namespaces,
                is_eks=(instance_config_class in EKS_DEPLOYMENT_CONFIGS),
            )
        )
    if any(return_codes):
        return_code = 1
    # Warn about whitelist entries that matched nothing (likely typos).
    output.append(
        report_invalid_whitelist_values(
            whitelist=[instance[0] for instance in instances],
            items=seen_instances,
            item_type="instance",
        )
    )
    return return_code, output
|
With a given service and cluster, prints the status of the instances
in that cluster
|
report_status_for_cluster
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/status.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py
|
Apache-2.0
|
def report_invalid_whitelist_values(
    whitelist: Iterable[str], items: Sequence[str], item_type: str
) -> str:
    """Warns the user if there are entries in ``whitelist`` which don't
    correspond to any item in ``items``. Helps highlight typos.
    """
    if whitelist is None:
        return ""
    # Collect whitelist entries that matched nothing.
    bogus_entries = [entry for entry in whitelist if entry not in items]
    if not bogus_entries:
        return ""
    return (
        "\n" "Warning: This service does not have any %s matching these names:\n%s"
    ) % (item_type, ",".join(bogus_entries))
|
Warns the user if there are entries in ``whitelist`` which don't
correspond to any item in ``items``. Helps highlight typos.
|
report_invalid_whitelist_values
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/status.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py
|
Apache-2.0
|
def get_filters(
    args,
) -> Sequence[Callable[[InstanceConfig], bool]]:
    """Figures out which filters to apply from an args object, and returns them

    :param args: args object
    :returns: list of functions that take an instance config and returns if the instance conf matches the filter
    """
    predicates: List[Callable[[InstanceConfig], bool]] = []

    if args.service:
        predicates.append(lambda conf: conf.get_service() in args.service.split(","))
    if args.clusters:
        predicates.append(lambda conf: conf.get_cluster() in args.clusters.split(","))
    if args.instances:
        predicates.append(
            lambda conf: conf.get_instance() in args.instances.split(",")
        )
    if args.deploy_group:
        predicates.append(
            lambda conf: conf.get_deploy_group() in args.deploy_group.split(",")
        )

    if args.registration:
        normalized_regs = normalize_registrations(
            service=args.service, registrations=args.registration.split(",")
        )

        def matches_registration(conf):
            # Not every config class exposes registrations; treat those as
            # having none.
            conf_regs = (
                conf.get_registrations()
                if hasattr(conf, "get_registrations")
                else []
            )
            return any(reg in normalized_regs for reg in conf_regs)

        predicates.append(matches_registration)

    if args.owner:
        owners = args.owner.split(",")

        def matches_owner(conf):
            # If the instance owner is None, check the service owner, else
            # check the instance owner.
            team = conf.get_team()
            if team is None:
                team = get_team(
                    overrides={}, service=conf.get_service(), soa_dir=args.soa_dir
                )
            return team in owners

        predicates.append(matches_owner)

    return predicates
|
Figures out which filters to apply from an args object, and returns them
:param args: args object
:returns: list of functions that take an instance config and returns if the instance conf matches the filter
|
get_filters
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/status.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py
|
Apache-2.0
|
def apply_args_filters(
    args,
) -> Mapping[str, Mapping[str, Mapping[str, Type[InstanceConfig]]]]:
    """
    Take an args object and returns the dict of cluster:service:instances
    Currently, will filter by clusters, instances, services, and deploy_groups
    If no instances are found, will print a message and try to find matching instances
    for each service

    :param args: args object containing attributes to filter by
    :returns: Dict of dicts, in format {cluster_name: {service_name: {instance1, instance2}}}
    """
    clusters_services_instances: DefaultDict[
        str, DefaultDict[str, Dict[str, Type[InstanceConfig]]]
    ] = defaultdict(lambda: defaultdict(dict))
    if args.service_instance:
        # Shorthand "service.instance" notation is mutually exclusive with the
        # explicit -s / -i options.
        if args.service or args.instances:
            print(
                PaastaColors.red(
                    f"Invalid command. Do not include optional arguments -s or -i "
                    f"when using shorthand notation."
                )
            )
            return clusters_services_instances
        if "." in args.service_instance:
            # Splits into service and the rest (instance may contain dots).
            args.service, args.instances = args.service_instance.split(".", 1)
        else:
            print(PaastaColors.red(f'Use a "." to separate service and instance name'))
            return clusters_services_instances
    if args.service:
        try:
            validate_service_name(args.service, soa_dir=args.soa_dir)
        except NoSuchService:
            print(PaastaColors.red(f'The service "{args.service}" does not exist.'))
            all_services = list_services(soa_dir=args.soa_dir)
            # Suggest close matches to help the user catch typos.
            suggestions = difflib.get_close_matches(
                args.service, all_services, n=5, cutoff=0.5
            )
            if suggestions:
                print(PaastaColors.red(f"Did you mean any of these?"))
                for suggestion in suggestions:
                    print(PaastaColors.red(f" {suggestion}"))
            return clusters_services_instances
        all_services = [args.service]
    else:
        args.service = None
        all_services = list_services(soa_dir=args.soa_dir)
    if args.service is None and args.owner is None:
        # No service or owner filter: try to infer the service from context.
        args.service = figure_out_service_name(args, soa_dir=args.soa_dir)
    if args.clusters:
        clusters = args.clusters.split(",")
    else:
        clusters = list_clusters()
    if args.instances:
        instances = args.instances.split(",")
    else:
        instances = None
    filters = get_filters(args)
    i_count = 0
    for service in all_services:
        if args.service and service != args.service:
            continue
        for instance_conf in get_instance_configs_for_service(
            service, soa_dir=args.soa_dir, clusters=clusters, instances=instances
        ):
            # An instance is included only when it passes every filter.
            if all([f(instance_conf) for f in filters]):
                cluster_service = clusters_services_instances[
                    instance_conf.get_cluster()
                ][service]
                cluster_service[instance_conf.get_instance()] = instance_conf.__class__
                i_count += 1
    if i_count == 0 and args.service and args.instances:
        # Nothing matched: report which requested instances don't exist.
        for service in args.service.split(","):
            verify_instances(args.instances, service, clusters)
    return clusters_services_instances
|
Take an args object and returns the dict of cluster:service:instances
Currently, will filter by clusters, instances, services, and deploy_groups
If no instances are found, will print a message and try to find matching instances
for each service
:param args: args object containing attributes to filter by
:returns: Dict of dicts, in format {cluster_name: {service_name: {instance1, instance2}}}
|
apply_args_filters
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/status.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py
|
Apache-2.0
|
def paasta_status(args) -> int:
    """Print the status of a Yelp service running on PaaSTA.

    :param args: argparse.Namespace obj created from sys.args by cli
    :returns: the max return code across all per-cluster status reports
    """
    soa_dir = args.soa_dir
    system_paasta_config = load_system_paasta_config()
    return_codes = [0]
    lock = Lock()
    tasks = []
    clusters_services_instances = apply_args_filters(args)
    for cluster, service_instances in clusters_services_instances.items():
        for service, instances in service_instances.items():
            # Flink instances don't appear in deployments.json, so a
            # flink-only selection skips the deployments lookup entirely.
            all_flink = all((i in FLINK_DEPLOYMENT_CONFIGS) for i in instances.values())
            actual_deployments: Mapping[str, DeploymentVersion]
            if all_flink:
                actual_deployments = {}
            else:
                actual_deployments = get_actual_deployments(service, soa_dir)
            if all_flink or actual_deployments:
                deploy_pipeline = list(get_planned_deployments(service, soa_dir))
                new = _use_new_paasta_status(args, system_paasta_config)
                # Queue (function, kwargs) pairs to run concurrently below.
                tasks.append(
                    (
                        report_status_for_cluster,
                        dict(
                            service=service,
                            cluster=cluster,
                            deploy_pipeline=deploy_pipeline,
                            actual_deployments=actual_deployments,
                            instance_whitelist=instances,
                            system_paasta_config=system_paasta_config,
                            lock=lock,
                            verbose=args.verbose,
                            new=new,
                            all_namespaces=args.all_namespaces,
                        ),
                    )
                )
            else:
                print(missing_deployments_message(service))
                return_codes.append(1)
    with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
        tasks = [executor.submit(t[0], **t[1]) for t in tasks]  # type: ignore
        try:
            for future in concurrent.futures.as_completed(tasks):  # type: ignore
                return_code, output = future.result()
                return_codes.append(return_code)
        except KeyboardInterrupt:
            # ideally we wouldn't need to reach into `ThreadPoolExecutor`
            # internals, but so far this is the best way to stop all these
            # threads until a public interface is added
            executor._threads.clear()  # type: ignore
            concurrent.futures.thread._threads_queues.clear()  # type: ignore
            raise KeyboardInterrupt
    return max(return_codes)
|
Print the status of a Yelp service running on PaaSTA.
:param args: argparse.Namespace obj created from sys.args by cli
|
paasta_status
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/status.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py
|
Apache-2.0
|
def _backend_report(
    normal_instance_count: int, up_backends: int, system_name: BackendType
) -> str:
    """Given that a service is in smartstack, this returns a human readable
    report of the up backends"""
    # TODO: Take into account a configurable threshold, PAASTA-1102
    is_critical, ratio = is_under_replicated(
        num_available=up_backends,
        expected_count=normal_instance_count,
        crit_threshold=50,
    )
    if is_critical:
        colorize = PaastaColors.red
        status = colorize("Critical")
        count = colorize("(%d/%d, %d%%)" % (up_backends, normal_instance_count, ratio))
    else:
        colorize = PaastaColors.green
        status = colorize("Healthy")
        count = colorize("(%d/%d)" % (up_backends, normal_instance_count))
    up_string = PaastaColors.bold("UP")
    return f"{status} - in {system_name} with {count} total backends {up_string} in this namespace."
|
Given that a service is in smartstack, this returns a human readable
report of the up backends
|
_backend_report
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/status.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/status.py
|
Apache-2.0
|
def get_schema_validator(file_type: str) -> Draft4Validator:
    """Get the correct schema to use for validation

    :param file_type: what schema type should we validate against
    """
    schema_path = f"schemas/{file_type}_schema.json"
    autoscaling_path = "schemas/autoscaling_schema.json"
    # NOTE: pkgutil.get_data returns None when the resource is missing, which
    # would surface here as an AttributeError on .decode().
    schema = pkgutil.get_data("paasta_tools.cli", schema_path).decode()
    autoscaling_ref = pkgutil.get_data("paasta_tools.cli", autoscaling_path).decode()
    # This bit of code loads the base schemas and any relevant "referenced" schemas
    # into a shared "store" -- so that you can reference the shared schema without
    # having to find the exact right path on disk in your schema file. If you want
    # to reference one schema from another, you still have to include a
    # {"$ref": "<schema_id>#field"} section in your JsonSchema
    #
    # (see https://python-jsonschema.readthedocs.io/en/v2.6.0/references/ and this
    # stack overflow answer https://stackoverflow.com/a/65150457 for details)
    #
    # Also note that this functionality has changed significantly in modern versions
    # of python-jsonschema, so if we ever update we'll need to do some work here.
    base_schema = json.loads(schema)
    autoscaling_schema = json.loads(autoscaling_ref)
    store = {
        "base": base_schema,
        # Reuse the already-parsed schema instead of re-parsing the JSON.
        autoscaling_schema["$id"]: autoscaling_schema,
    }
    resolver = RefResolver.from_schema(base_schema, store=store)
    return Draft4Validator(
        # Reuse base_schema: it is the same document the resolver was built
        # from, so there is no need to json.loads() the string a second time.
        base_schema,
        resolver=resolver,
        format_checker=FormatChecker(),
    )
|
Get the correct schema to use for validation
:param file_type: what schema type should we validate against
|
get_schema_validator
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def validate_rollback_bounds(
    config: Dict[str, List[ConditionConfig]], file_loc: str
) -> bool:
    """
    Ensure that at least one of upper_bound or lower_bound is set (and set to non-null values)
    """
    # A query is invalid when both bounds are missing/null (falsy).
    errors = [
        f"{file_loc}:{source}: {query['query']} needs one of lower_bound OR upper_bound set."
        for source, queries in config.items()
        for query in queries
        if not (query.get("lower_bound") or query.get("upper_bound"))
    ]
    for error in errors:
        print(
            failure(error, link=""),  # TODO: point to actual docs once they exist
        )
    return not errors
|
Ensure that at least one of upper_bound or lower_bound is set (and set to non-null values)
|
validate_rollback_bounds
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def validate_schema(file_path: str, file_type: str) -> bool:
    """Check if the specified config file has a valid schema

    :param file_path: path to file to validate
    :param file_type: what schema type should we validate against
    """
    try:
        validator = get_schema_validator(file_type)
    except Exception as e:
        print(f"{SCHEMA_ERROR}: {file_type}, error: {e!r}")
        return False
    config_file_object = get_config_file_dict(file_path)
    try:
        validator.validate(config_file_object)
        # Extra, non-jsonschema checks for specific file types.
        if file_type in K8S_TYPES and not validate_instance_names(
            config_file_object, file_path
        ):
            return False
        if file_type == "rollback" and not validate_rollback_bounds(
            config_file_object["conditions"],
            file_path,
        ):
            return False
    except ValidationError:
        print(f"{SCHEMA_INVALID}: {file_path}")
        errors = validator.iter_errors(config_file_object)
        print(" Validation Message: %s" % exceptions.best_match(errors).message)
        return False
    except Exception as e:
        print(f"{SCHEMA_ERROR}: {file_type}, error: {e!r}")
        return False
    # All checks passed.
    print(f"{SCHEMA_VALID}: {os.path.basename(file_path)}")
    return True
|
Check if the specified config file has a valid schema
:param file_path: path to file to validate
:param file_type: what schema type should we validate against
|
validate_schema
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def validate_all_schemas(service_path: str) -> bool:
    """Finds all recognized config files in service directory,
    and validates their schema.

    :param service_path: path to location of configuration files
    """
    all_valid = True
    pattern = os.path.join(service_path, "**/*.yaml")
    for file_name in glob(pattern, recursive=True):
        if os.path.islink(file_name):
            # Skip symlinks; their targets get validated directly.
            continue
        relative_name = os.path.relpath(file_name, start=service_path)
        for file_type in SCHEMA_TYPES:
            if relative_name.startswith(file_type):
                if not validate_schema(file_name, file_type):
                    all_valid = False
    return all_valid
|
Finds all recognized config files in service directory,
and validates their schema.
:param service_path: path to location of configuration files
|
validate_all_schemas
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def check_service_path(service_path):
    """Check that the specified path exists and has yaml files

    :param service_path: Path to directory that should contain yaml files
    """
    docs_link = "http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html"
    if not service_path or not os.path.isdir(service_path):
        print(failure("%s is not a directory" % service_path, docs_link))
        return False
    if not glob(os.path.join(service_path, "*.yaml")):
        print(failure("%s does not contain any .yaml files" % service_path, docs_link))
        return False
    return True
|
Check that the specified path exists and has yaml files
:param service_path: Path to directory that should contain yaml files
|
check_service_path
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def get_service_path(service, soa_dir):
    """Determine the path of the directory containing the conf files

    :param service: Name of service
    :param soa_dir: Directory containing soa configs for all services
    """
    if service:
        return os.path.join(soa_dir, service)
    # Without a service name we can only guess when run from inside the
    # soa-configs directory itself (cwd == soa_dir).
    if soa_dir == os.getcwd():
        return os.getcwd()
    print(UNKNOWN_SERVICE)
    return None
|
Determine the path of the directory containing the conf files
:param service: Name of service
:param soa_dir: Directory containing soa configs for all services
|
get_service_path
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def path_to_soa_dir_service(service_path):
    """Split a service_path into its soa_dir and service name components"""
    # The service is the final path component; everything before it is soa_dir.
    return os.path.dirname(service_path), os.path.basename(service_path)
|
Split a service_path into its soa_dir and service name components
|
path_to_soa_dir_service
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def validate_unique_instance_names(service_path):
    """Check that the service does not use the same instance name more than once"""
    soa_dir, service = path_to_soa_dir_service(service_path)
    all_unique = True
    for cluster in list_clusters(service, soa_dir):
        # Each service_instance is a (service, instance, ...) tuple; we only
        # care about the instance name at index 1.
        names = [
            pair[1]
            for pair in get_service_instance_list(
                service=service, cluster=cluster, soa_dir=soa_dir
            )
        ]
        duplicates = [name for name, count in Counter(names).items() if count > 1]
        if duplicates:
            all_unique = False
            print(duplicate_instance_names_message(service, cluster, duplicates))
        else:
            print(no_duplicate_instance_names_message(service, cluster))
    return all_unique
|
Check that the service does not use the same instance name more than once
|
validate_unique_instance_names
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def validate_autoscaling_configs(service_path: str) -> bool:
    """Validate new autoscaling configurations that are not validated by jsonschema for the service of interest.

    :param service_path: Path to directory containing soa conf yaml files for service
    :returns: True if every autoscaled instance passed validation, False otherwise
    """
    soa_dir, service = path_to_soa_dir_service(service_path)
    returncode = True
    link = ""
    # Services on this list may override CPU settings without the usual
    # acknowledgement comment.
    skip_cpu_override_validation_list = (
        load_system_paasta_config().get_skip_cpu_override_validation_services()
    )
    for cluster in list_clusters(service, soa_dir):
        for instance, instance_config in load_all_instance_configs_for_service(
            service=service, cluster=cluster, soa_dir=soa_dir
        ):
            # Only kubernetes-style workloads have autoscaling to validate.
            if instance_config.get_instance_type() not in K8S_TYPES:
                continue
            instance_config = cast(LongRunningServiceConfig, instance_config)
            if (
                # instance_config is an `InstanceConfig` object, which doesn't have an `is_autoscaling_enabled()`
                # method, but by asserting that the type is in K8S_TYPES, we know we're dealing with either
                # a KubernetesDeploymentConfig or an EksDeploymentConfig, so the cast is safe.
                instance_config.is_autoscaling_enabled()
                # we should eventually make the python templates add the override comment
                # to the corresponding YAML line, but until then we just opt these out of that validation
                and __is_templated(
                    service,
                    soa_dir,
                    cluster,
                    workload=instance_config.get_instance_type(),
                )
                is False
            ):
                autoscaling_params = instance_config.get_autoscaling_params()
                should_skip_cpu_override_validation = (
                    service in skip_cpu_override_validation_list
                )
                seen_provider_types: Set[str] = set()
                configured_provider_count = len(autoscaling_params["metrics_providers"])
                for metrics_provider in autoscaling_params["metrics_providers"]:
                    try:
                        # Generic validation of the config
                        _validate_autoscaling_config(metrics_provider)
                        # Multi-metrics specific validation:
                        # 1. Bespoke policies cannot use multi-metrics scaling
                        # 2. Can't set the same metrics provider multiple times
                        if (
                            metrics_provider.get("decision_policy") == "bespoke"
                            and configured_provider_count > 1
                        ):
                            raise AutoscalingValidationError(
                                f"cannot use bespoke autoscaling with HPA autoscaling"
                            )
                        if metrics_provider["type"] in seen_provider_types:
                            raise AutoscalingValidationError(
                                f"cannot set the same metrics provider multiple times: {metrics_provider['type']}"
                            )
                        seen_provider_types.add(metrics_provider["type"])
                        # Metrics-provider specific validations
                        if metrics_provider["type"] == METRICS_PROVIDER_ACTIVE_REQUESTS:
                            _validate_active_requests_autoscaling_configs(
                                instance_config, metrics_provider
                            )
                        elif metrics_provider["type"] == METRICS_PROVIDER_PROMQL:
                            _validate_arbitrary_promql_autoscaling_configs(
                                metrics_provider
                            )
                        elif (
                            metrics_provider["type"] == METRICS_PROVIDER_CPU
                            # to enable kew autoscaling we just set a decision policy of "bespoke", but
                            # the metrics_provider is (confusingly) left as "cpu"
                            and metrics_provider.get("decision_policy") != "bespoke"
                            and not should_skip_cpu_override_validation
                        ):
                            # Do some extra validation below: we don't abstract that into the above function
                            # call because it needs a lot of extra information
                            # we need access to the comments, so we need to read the config with ruamel to be able
                            # to actually get them in a "nice" automated fashion
                            config = get_config_file_dict(
                                os.path.join(
                                    soa_dir,
                                    service,
                                    f"{instance_config.get_instance_type()}-{cluster}.yaml",
                                ),
                                use_ruamel=True,
                            )
                            if config[instance].get("cpus") is None:
                                # If we're using multiple scaling metrics and one of them is CPU, we must
                                # opt out of CPU autotuning
                                if configured_provider_count > 1:
                                    link = "y/override-cpu-autotune"
                                    raise AutoscalingValidationError(
                                        "using CPU-based scaling with multiple scaling metrics requires explicit "
                                        "'cpus' setting; see the following link for more info:"
                                    )
                                # cpu autoscaled, but using autotuned values - can skip
                                continue
                            cpu_comment = _get_comments_for_key(
                                data=config[instance], key="cpus"
                            )
                            # we could probably have a separate error message if there's a comment that doesn't match
                            # the ack pattern, but that seems like overkill - especially for something that could cause
                            # a DAR if people aren't being careful.
                            if (
                                cpu_comment is None
                                or re.search(
                                    pattern=OVERRIDE_CPU_AUTOTUNE_ACK_PATTERN,
                                    string=cpu_comment,
                                )
                                is None
                            ):
                                link = "y/override-cpu-autotune"
                                raise AutoscalingValidationError(
                                    f"CPU override detected for a CPU-autoscaled instance; "
                                    "see the following link for next steps:"
                                )
                    except AutoscalingValidationError as e:
                        # Keep validating the remaining providers/instances;
                        # just record the failure and print it.
                        returncode = False
                        print(
                            failure(
                                msg=f"Autoscaling validation failed for {service}.{instance} in {cluster}: {str(e)}",
                                link=link,
                            )
                        )
    return returncode
|
Validate new autoscaling configurations that are not validated by jsonschema for the service of interest.
:param service_path: Path to directory containing soa conf yaml files for service
|
validate_autoscaling_configs
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.