code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def paasta_validate_soa_configs(
    service: str, service_path: str, verbose: bool = False
) -> bool:
    """Run every soa-config validation check for a service.

    :param service: name of the service being validated
    :param service_path: Path to directory containing soa conf yaml files for service
    :param verbose: forwarded to checks that support extra output (tron)
    :return: True only if every check passes
    """
    if not check_service_path(service_path):
        return False
    if not validate_service_name(service):
        return False
    checks: List[Callable[[str], bool]] = [
        validate_all_schemas,
        partial(validate_tron, verbose=verbose),
        validate_paasta_objects,
        validate_unique_instance_names,
        validate_autoscaling_configs,
        validate_secrets,
        validate_min_max_instances,
        validate_cpu_burst,
    ]
    # Run every check eagerly (a list, not a generator) so a failing check
    # does not short-circuit the remaining ones; all() then reduces the
    # collected results.
    results = [check(service_path) for check in checks]
    return all(results)
|
Analyze the service in service_path to determine if the conf files are valid
:param service_path: Path to directory containing soa conf yaml files for service
|
paasta_validate_soa_configs
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def paasta_validate(args):
    """Resolve the service and its soa-configs path from CLI args, then validate.

    :param args: argparse.Namespace obj created from sys.args by cli
    :return: 1 on validation failure, None (treated as success) otherwise
    """
    service_path = get_service_path(args.service, args.yelpsoa_config_root)
    service = args.service or guess_service_name()
    ok = paasta_validate_soa_configs(service, service_path, args.verbose)
    if not ok:
        return 1
|
Generate a service_path from the provided args and call paasta_validate_soa_configs
:param args: argparse.Namespace obj created from sys.args by cli
|
paasta_validate
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/validate.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/validate.py
|
Apache-2.0
|
def get_latest_marked_version(
    git_url: str, deploy_group: str
) -> Optional[DeploymentVersion]:
    """Return the latest marked for deployment version or None"""
    # TODO: correct this function for new tag format
    remote_refs = list_remote_refs(git_url)
    _, sha, image_version = get_latest_deployment_tag(remote_refs, deploy_group)
    if not sha:
        # We did not find a ref for this deploy group
        return None
    return DeploymentVersion(sha=sha, image_version=image_version)
|
Return the latest marked for deployment version or None
|
get_latest_marked_version
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/wait_for_deployment.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/wait_for_deployment.py
|
Apache-2.0
|
def validate_version_is_latest(
    version: DeploymentVersion, git_url: str, deploy_group: str, service: str
):
    """Verify if the requested version is the latest marked for deployment.
    Raise exception when the provided version is not the latest
    marked for deployment in 'deploy_group' for 'service'.
    """
    try:
        latest_marked = get_latest_marked_version(git_url, deploy_group)
    except LSRemoteException as err:
        # Best effort: if the git server is unreachable, warn and assume the
        # version is marked rather than aborting the wait.
        print(
            "Error talking to the git server: {}\n"
            "It is not possible to verify that {} is marked for deployment in {}, "
            "but I assume that it is marked and will continue waiting..".format(
                err, version, deploy_group
            )
        )
        return
    if latest_marked is None:
        raise VersionError(
            "ERROR: Nothing is marked for deployment "
            "in {} for {}".format(deploy_group, service)
        )
    if version != latest_marked:
        raise VersionError(
            "ERROR: The latest version marked for "
            "deployment in {} is {}".format(deploy_group, latest_marked)
        )
|
Verify if the requested version is the latest marked for deployment.
Raise exception when the provided version is not the latest
marked for deployment in 'deploy_group' for 'service'.
|
validate_version_is_latest
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/wait_for_deployment.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/wait_for_deployment.py
|
Apache-2.0
|
def validate_deploy_group(deploy_group: str, service: str, soa_dir: str):
    """Validate deploy_group.

    Raise DeployGroupError if the specified deploy group is not used anywhere
    in the service's soa-configs.

    :param deploy_group: deploy group name to validate
    :param service: service whose soa-configs are consulted
    :param soa_dir: root directory of soa-configs
    """
    in_use_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
    _, invalid_deploy_groups = validate_given_deploy_groups(
        in_use_deploy_groups, [deploy_group]
    )
    # Truthiness instead of the brittle `len(...) == 1`: we only pass one
    # deploy group today, but this stays correct if that ever changes.
    if invalid_deploy_groups:
        raise DeployGroupError(
            "ERROR: These deploy groups are not currently "
            "used anywhere: {}.\n"
            "You probably need one of these in-use deploy "
            "groups?:\n {}".format(
                ",".join(invalid_deploy_groups), ",".join(in_use_deploy_groups)
            )
        )
|
Validate deploy_group.
Raise exception if the specified deploy group is not used anywhere.
|
validate_deploy_group
|
python
|
Yelp/paasta
|
paasta_tools/cli/cmds/wait_for_deployment.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/cmds/wait_for_deployment.py
|
Apache-2.0
|
def _get_smartstack_proxy_ports_from_file(root, file):
    """Given a root and file (as from os.walk), return the set of smartstack
    proxy ports declared in that file.  Returns an empty set when the file
    declares no smartstack proxy_port.

    Note: despite the historical docstring, this returns a *set* of ports,
    not a single "highest" port.
    """
    ports = set()
    with open(os.path.join(root, file)) as f:
        # An empty yaml file parses to None; treat it as "no config" instead
        # of crashing on the membership test / .keys() below.
        data = yaml.safe_load(f) or {}
    if file.endswith("service.yaml") and "smartstack" in data:
        # Specifying this in service.yaml is old and deprecated and doesn't
        # support multiple namespaces.
        ports = {int(data["smartstack"].get("proxy_port", 0))}
    elif file.endswith("smartstack.yaml"):
        for namespace in data.keys():
            # NOTE(review): unlike the service.yaml branch, values here are
            # not coerced with int() — presumably already ints in yaml.
            ports.add(data[namespace].get("proxy_port", 0))
    return ports
|
Given a root and file (as from os.walk), attempt to return the highest
smartstack proxy port number (int) from that file. Returns 0 if there is no
smartstack proxy_port.
|
_get_smartstack_proxy_ports_from_file
|
python
|
Yelp/paasta
|
paasta_tools/cli/fsm/autosuggest.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/fsm/autosuggest.py
|
Apache-2.0
|
def suggest_smartstack_proxy_port(
    yelpsoa_config_root, range_min=19000, range_max=21000
):
    """Pick a random available port in the 19000-21000 block.

    Walks yelpsoa_config_root collecting every proxy_port already claimed in
    smartstack.yaml (and legacy service.yaml) files, removes those plus the
    ports listed in /etc/services, and returns a random remaining port.

    :raises Exception: when every port in the range is already taken
    """
    available_proxy_ports = set(range(range_min, range_max + 1))
    for root, dirs, files in os.walk(yelpsoa_config_root):
        for f in files:
            # Also scan legacy service.yaml files: the helper explicitly
            # supports the deprecated single-namespace form, and skipping
            # them could suggest a port that is actually in use.
            if f.endswith(("smartstack.yaml", "service.yaml")):
                try:
                    used_ports = _get_smartstack_proxy_ports_from_file(root, f)
                    available_proxy_ports.difference_update(used_ports)
                except Exception:
                    # Best effort: an unparseable file shouldn't block the
                    # suggestion entirely.
                    pass
    available_proxy_ports.difference_update(get_inuse_ports_from_etc_services())
    try:
        return random.choice(list(available_proxy_ports))
    except IndexError:
        raise Exception(
            f"There are no more ports available in the range [{range_min}, {range_max}]"
        )
|
Pick a random available port in the 19000-21000 block
|
suggest_smartstack_proxy_port
|
python
|
Yelp/paasta
|
paasta_tools/cli/fsm/autosuggest.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/cli/fsm/autosuggest.py
|
Apache-2.0
|
def get_deploy_durations_from_file(filename):
    """Parse a paasta deploy-event log file into per-instance deploy durations.

    filename: path to a file to be parsed for datetime data
    The expected input is a paasta service log for the deploy events
    The way I've been fetching them is by running 'internal logreader command' | grep deploy | grep event > filename

    :return: dict of instance name -> list of timedeltas, one per completed deploy
    """
    # Context manager so the handle is closed even if a line fails to parse
    # (the original opened the file and never closed it).
    with open(filename, "r") as file_object:
        data = sorted(
            (json.loads(line.rstrip("\n")) for line in file_object),
            key=lambda x: get_datetime_from_ts(x["timestamp"]),
        )
    timedeltas = defaultdict(list)
    last_time = dict()
    instance_bitvector = defaultdict(bool)  # "deploy in progress" flag; defaults to False
    for datum in data:
        time = get_datetime_from_ts(datum["timestamp"])
        instance = datum["instance"]
        if "in progress" in datum["message"] and not instance_bitvector[instance]:
            # First "in progress" event for this instance starts the clock.
            instance_bitvector[instance] = True
            last_time[instance] = time
        elif "finishing" in datum["message"]:
            # A "finishing" event closes the window and records the duration.
            # NOTE(review): a "finishing" with no prior "in progress" raises
            # KeyError, same as the original — confirm logs can't start mid-deploy.
            instance_bitvector[instance] = False
            timedeltas[instance].append(time - last_time[instance])
    return timedeltas
|
filename: path to a file to be parsed for datetime data
The expected input is a paasta service log for the deploy events
The way I've been fetching them is by running 'internal logreader command' | grep deploy | grep event > filename
|
get_deploy_durations_from_file
|
python
|
Yelp/paasta
|
paasta_tools/contrib/bounce_log_latency_parser.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/bounce_log_latency_parser.py
|
Apache-2.0
|
def host_to_ip(host: str, fallback: str) -> str:
    """Try to resolve a host to an IP with a fallback.
    Because DNS resolution is relatively slow and can't be easily performed
    using asyncio, we cheat a little and use a regex for well-formed hostnames
    to try to guess the IP without doing real resolution.
    A fallback is needed because in some cases the nerve registration does not
    match an actual hostname (e.g. "prod-db15" or "prod-splunk-master").
    """
    patterns = (
        r"^(\d+)-(\d+)-(\d+)-(\d+)-",
        r"^ip-(\d+)-(\d+)-(\d+)-(\d+)",
    )
    for pattern in patterns:
        guessed = re.match(pattern, host)
        if guessed:
            return ".".join(guessed.groups())
    # Neither hostname pattern matched: fall back to real (slow) DNS lookup.
    try:
        return socket.gethostbyname(host)
    except socket.gaierror:
        return fallback
|
Try to resolve a host to an IP with a fallback.
Because DNS resolution is relatively slow and can't be easily performed
using asyncio, we cheat a little and use a regex for well-formed hostnames
to try to guess the IP without doing real resolution.
A fallback is needed because in some cases the nerve registration does not
match an actual hostname (e.g. "prod-db15" or "prod-splunk-master").
|
host_to_ip
|
python
|
Yelp/paasta
|
paasta_tools/contrib/check_orphans.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/check_orphans.py
|
Apache-2.0
|
def get_container_type(container_name: str, instance_name: str) -> str:
    """
    To differentiate between main service containers and sidecars
    """
    # A container is the "main" one when its name matches the sanitised
    # instance name; everything else is reported under its own name.
    is_main = bool(instance_name) and container_name == kubernetes_tools.sanitise_kubernetes_name(
        instance_name
    )
    return MAIN_CONTAINER_TYPE if is_main else container_name
|
To differentiate between main service containers and sidecars
|
get_container_type
|
python
|
Yelp/paasta
|
paasta_tools/contrib/get_running_task_allocation.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/get_running_task_allocation.py
|
Apache-2.0
|
def get_report_from_splunk(creds, app, filename, criteria_filter):
    """Expect a table containing at least the following fields:
    criteria (<service> kubernetes-<cluster_name> <instance>)
    service_owner (Optional)
    project (Required to create tickets)
    estimated_monthly_savings (Optional)
    search_time (Unix time)
    one of the following pairs:
    - current_cpus and suggested_cpus
    - current_mem and suggested_mem
    - current_disk and suggested_disk
    - suggested_hacheck_cpus
    - suggested_cpu_burst_add
    - suggested_min_instances
    - suggested_max_instances

    :param creds: "user:password" credentials for Splunk basic auth
    :param app: Splunk app namespace for the search/jobs/export endpoint
    :param filename: Splunk lookup file queried via `inputlookup`
    :param criteria_filter: filter expression for the `criteria` field
    :return: dict with the executed search string and merged per-criteria results
    :raises ValueError: when Splunk returns a row without a "result" key
    """
    url = f"https://splunk-api.yelpcorp.com/servicesNS/nobody/{app}/search/jobs/export"
    # BUGFIX: the query was missing the {filename} placeholder, so the
    # `filename` argument passed to .format() was silently ignored and a
    # bogus literal was sent to Splunk as the lookup name.
    search = (
        '| inputlookup {filename} | search criteria="{criteria_filter}"'
        '| eval _time = search_time | where _time > relative_time(now(),"-7d")'
    ).format(filename=filename, criteria_filter=criteria_filter)
    log.debug(f"Sending this query to Splunk: {search}\n")
    data = {"output_mode": "json", "search": search}
    # Split only on the first ":" so passwords containing colons survive.
    creds = creds.split(":", 1)
    resp = requests.post(url, data=data, auth=(creds[0], creds[1]))
    resp_text = resp.text.split("\n")
    log.info("Found {} services to rightsize".format(len(resp_text) - 1))
    resp_text = [x for x in resp_text if x]
    resp_text = [json.loads(x) for x in resp_text]
    services_to_update = {}
    for d in resp_text:
        if "result" not in d:
            raise ValueError(f"Splunk request didn't return any results: {resp_text}")
        criteria = d["result"]["criteria"]
        serv = {
            "cluster": criteria.split(" ")[1],
            "date": d["result"]["_time"].split(" ")[0],
            "instance": criteria.split(" ")[2],
            "money": d["result"].get("estimated_monthly_savings", 0),
            "owner": d["result"].get("service_owner", "Unavailable"),
            "project": d["result"].get("project", "Unavailable"),
            "service": criteria.split(" ")[0],
            # only mergeable fields below
            "cpu_burst_add": d["result"].get("suggested_cpu_burst_add"),
            "cpus": d["result"].get("suggested_cpus"),
            "disk": d["result"].get("suggested_disk"),
            "hacheck_cpus": d["result"].get("suggested_hacheck_cpus"),
            "max_instances": d["result"].get("suggested_max_instances"),
            "mem": d["result"].get("suggested_mem"),
            "min_instances": d["result"].get("suggested_min_instances"),
            "old_cpus": d["result"].get("current_cpus"),
            "old_disk": d["result"].get("current_disk"),
            "old_mem": d["result"].get("current_mem"),
        }
        # the report we get is all strings, so we need to convert them to the right types
        # BUGFIX: these converters were previously keyed by the raw Splunk
        # column names (e.g. "suggested_cpus"), which never match serv's keys
        # above, so the merge branch below silently never ran. They are now
        # keyed by serv's field names.
        field_conversions = {
            "cpu_burst_add": float,
            "cpus": float,
            "disk": int,
            "hacheck_cpus": float,
            "max_instances": int,
            "mem": int,
            "min_instances": int,
            "old_cpus": float,
            # not quite sure why these are floats...they're ints in soaconfigs
            "old_disk": _force_str_to_int,
            "old_mem": _force_str_to_int,
        }
        # merge results if we've already seen rows for this service
        # NOTE: this is necessary since the Splunk search can return multiple rows
        # for the same (service, cluster, instance) tuple as the autotune query
        # treats certain cpu allocation changes as if the tuple was entirely different.
        # this is ostensibly due to a theory that if you update resource allocation, existing
        # autotune data is potentially invalidated - but in practice this ends up hampering
        # autotune for services with highly variable resource allocation - e.g., we have some services
        # that have their cpu allocation tweaked by +/-.1 cpu pretty frequently, but then min/max autotune
        # is never updated.
        if criteria in services_to_update:
            for key in serv:
                # we probably don't want to merge any other fields since they're going to be strings :p
                if key not in field_conversions:
                    continue
                last_proposed_suggestion = services_to_update[criteria][key]
                proposed_suggestion = serv[key]
                # if both are non-null, take the max of the two
                if (
                    last_proposed_suggestion is not None
                    and proposed_suggestion is not None
                ):
                    services_to_update[criteria][key] = max(
                        last_proposed_suggestion,
                        proposed_suggestion,
                        key=field_conversions[key],
                    )
                # otherwise, if only one of these is non-null, use that one
                elif last_proposed_suggestion is not None:
                    services_to_update[criteria][key] = last_proposed_suggestion
                elif proposed_suggestion is not None:
                    services_to_update[criteria][key] = proposed_suggestion
                # otherwise, if we didn't enter any of the above branches, we're essentially leaving in place the
                # existing None
        # otherwise, simply add the service to the final report
        else:
            services_to_update[criteria] = serv
    return {
        "search": search,
        "results": services_to_update,
    }
|
Expect a table containing at least the following fields:
criteria (<service> kubernetes-<cluster_name> <instance>)
service_owner (Optional)
project (Required to create tickets)
estimated_monthly_savings (Optional)
search_time (Unix time)
one of the following pairs:
- current_cpus and suggested_cpus
- current_mem and suggested_mem
- current_disk and suggested_disk
- suggested_hacheck_cpus
- suggested_cpu_burst_add
- suggested_min_instances
- suggested_max_instances
|
get_report_from_splunk
|
python
|
Yelp/paasta
|
paasta_tools/contrib/paasta_update_soa_memcpu.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/paasta_update_soa_memcpu.py
|
Apache-2.0
|
def get_reviewers_in_group(group_name):
    """Using rbt's target-groups argument overrides our configured default review groups.
    So we'll expand the group into usernames and pass those users in the group individually.
    """
    cmd = (
        "rbt",
        "api-get",
        "--server",
        "https://reviewboard.yelpcorp.com",
        f"groups/{group_name}/users/",
    )
    raw_response = subprocess.check_output(cmd).decode("UTF-8")
    rightsizer_reviewers = json.loads(raw_response)
    members = rightsizer_reviewers.get("users", {})
    return [user.get("username", "") for user in members]
|
Using rbt's target-groups argument overrides our configured default review groups.
So we'll expand the group into usernames and pass those users in the group individually.
|
get_reviewers_in_group
|
python
|
Yelp/paasta
|
paasta_tools/contrib/paasta_update_soa_memcpu.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/contrib/paasta_update_soa_memcpu.py
|
Apache-2.0
|
def nested_inc(op, _, attr_val, attr_name, state, step=1):
    """Increments relevant counter by step from args array"""
    # Walk/create the two nesting levels (op -> attr_name) in one chain.
    counters = state.setdefault(op, {}).setdefault(attr_name, {})
    counters[attr_val] = counters.get(attr_val, 0) + step
    return state
|
Increments relevant counter by step from args array
|
nested_inc
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/constraints.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/constraints.py
|
Apache-2.0
|
def check_offer_constraints(offer, constraints, state):
    """Returns True if all constraints are satisfied by offer's attributes,
    returns False otherwise. Prints a error message and re-raises if an error
    was thrown."""
    for (attr, op, val) in constraints:
        try:
            # Find the offer attribute this constraint refers to.
            found = None
            for candidate in offer.attributes:
                if candidate.name == attr:
                    found = candidate
                    break
            if found is None:
                print("Attribute not found for a constraint: %s" % attr)
                return False
            if not CONS_OPS[op](val, found.text.value, found.name, state):
                print(
                    "Constraint not satisfied: [{} {} {}] for {} with {}".format(
                        attr, op, val, found.text.value, state
                    )
                )
                return False
        except Exception as err:
            print(
                "Error while matching constraint: [{} {} {}] {}".format(
                    attr, op, val, str(err)
                )
            )
            raise err
    return True
|
Returns True if all constraints are satisfied by offer's attributes,
returns False otherwise. Prints a error message and re-raises if an error
was thrown.
|
check_offer_constraints
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/constraints.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/constraints.py
|
Apache-2.0
|
def update_constraint_state(offer, constraints, state, step=1):
    """Mutates state for each offer attribute found in constraints by calling
    relevant UPDATE_OP lambda"""
    for (attr, op, val) in constraints:
        matching = (oa for oa in offer.attributes if oa.name == attr)
        for oa in matching:
            UPDATE_OPS[op](val, oa.text.value, attr, state, step)
|
Mutates state for each offer attribute found in constraints by calling
relevant UPDATE_OP lambda
|
update_constraint_state
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/constraints.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/constraints.py
|
Apache-2.0
|
def launch_tasks_for_offers(
    self, driver: MesosSchedulerDriver, offers
) -> List[TaskInfo]:
    """For each offer tries to launch all tasks that can fit in there.
    Declines offer if no fitting tasks found.

    :param driver: scheduler driver used to launch or decline offers
    :param offers: iterable of mesos offers to consider
    :return: list of TaskInfo dicts that were actually launched
    """
    launched_tasks: List[TaskInfo] = []
    for offer in offers:
        # Constraint state is shared across offers; serialize access to it.
        with self.constraint_state_lock:
            try:
                tasks, new_state = self.tasks_and_state_for_offer(
                    driver, offer, self.constraint_state
                )
                if tasks is not None and len(tasks) > 0:
                    driver.launchTasks([offer.id], tasks)
                    for task in tasks:
                        # Record each launched task as STAGING so later status
                        # updates have a baseline entry in the task store.
                        self.task_store.add_task_if_doesnt_exist(
                            task["task_id"]["value"],
                            health=None,
                            mesos_task_state=TASK_STAGING,
                            offer=offer,
                            resources=task["resources"],
                        )
                    launched_tasks.extend(tasks)
                    # Only commit the new constraint state once the launch was issued.
                    self.constraint_state = new_state
                else:
                    driver.declineOffer(offer.id)
            except ConstraintFailAllTasksError:
                # Nothing can run here; back off for 60s so mesos doesn't
                # immediately re-offer the same resources.
                self.log("Offer failed constraints for every task, rejecting 60s")
                filters = {"refuse_seconds": 60}
                driver.declineOffer(offer.id, filters)
    return launched_tasks
|
For each offer tries to launch all tasks that can fit in there.
Declines offer if no fitting tasks found.
|
launch_tasks_for_offers
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/native_scheduler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py
|
Apache-2.0
|
def task_fits(self, offer):
    """Checks whether the offer is big enough to fit the tasks"""
    config = self.service_config
    needed = {
        "cpus": config.get_cpus(),
        "mem": config.get_mem(),
        "disk": config.get_disk(),
    }
    for resource in offer.resources:
        value = resource.scalar.value
        try:
            if value < needed[resource.name]:
                return False
        except KeyError:
            # Resource kinds we don't track (e.g. ports) are ignored.
            pass
    return True
|
Checks whether the offer is big enough to fit the tasks
|
task_fits
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/native_scheduler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py
|
Apache-2.0
|
def need_more_tasks(self, name, existingTasks, scheduledTasks):
    """Returns whether we need to start more tasks."""
    # Count live, new tasks we already have...
    running = sum(
        1
        for task, parameters in existingTasks.items()
        if self.is_task_new(name, task)
        and parameters.mesos_task_state in LIVE_TASK_STATES
    )
    # ...plus tasks already scheduled (but not yet launched) for this name.
    scheduled = sum(1 for task in scheduledTasks if task["name"] == name)
    return running + scheduled < self.service_config.get_desired_instances()
|
Returns whether we need to start more tasks.
|
need_more_tasks
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/native_scheduler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py
|
Apache-2.0
|
def tasks_and_state_for_offer(
    self, driver: MesosSchedulerDriver, offer, state: ConstraintState
) -> Tuple[List[TaskInfo], ConstraintState]:
    """Returns collection of tasks that can fit inside an offer.

    :param driver: scheduler driver (part of the callback signature; unused here)
    :param offer: mesos offer whose resources are being packed
    :param state: current constraint state; never mutated in place
    :return: (tasks to launch, the would-be new constraint state)
    :raises ConstraintFailAllTasksError: if every candidate task failed constraints
    """
    tasks: List[TaskInfo] = []
    # Tally the offer's resources so tasks can be packed against them.
    offerCpus = 0.0
    offerMem = 0.0
    offerPorts: List[int] = []
    for resource in offer.resources:
        if resource.name == "cpus":
            offerCpus += resource.scalar.value
        elif resource.name == "mem":
            offerMem += resource.scalar.value
        elif resource.name == "ports":
            for rg in resource.ranges.range:
                # I believe mesos protobuf ranges are inclusive, but range() is exclusive
                offerPorts += range(rg.begin, rg.end + 1)
    remainingCpus = offerCpus
    remainingMem = offerMem
    remainingPorts = set(offerPorts)
    base_task = self.service_config.base_task(self.system_paasta_config)
    base_task["agent_id"]["value"] = offer["agent_id"]["value"]
    task_mem = self.service_config.get_mem()
    task_cpus = self.service_config.get_cpus()
    # don't mutate existing state
    new_constraint_state = copy.deepcopy(state)
    total = 0
    failed_constraints = 0
    # Keep carving tasks out of the offer until demand or capacity runs out.
    while self.need_more_tasks(
        base_task["name"], self.task_store.get_all_tasks(), tasks
    ):
        total += 1
        # Stop when the offer can't fit another task (cpu/mem/pool/ports).
        if not (
            remainingCpus >= task_cpus
            and remainingMem >= task_mem
            and self.offer_matches_pool(offer)
            and len(remainingPorts) >= 1
        ):
            break
        if not (
            check_offer_constraints(offer, self.constraints, new_constraint_state)
        ):
            failed_constraints += 1
            break
        task_port = random.choice(list(remainingPorts))
        task = copy.deepcopy(base_task)
        task["task_id"] = {"value": "{}.{}".format(task["name"], uuid.uuid4().hex)}
        # Fill in the port placeholders left by base_task().
        task["container"]["docker"]["port_mappings"][0]["host_port"] = task_port
        for resource in task["resources"]:
            if resource["name"] == "ports":
                resource["ranges"]["range"][0]["begin"] = task_port
                resource["ranges"]["range"][0]["end"] = task_port
        tasks.append(task)
        # Account for what this task consumed before packing the next one.
        remainingCpus -= task_cpus
        remainingMem -= task_mem
        remainingPorts -= {task_port}
        update_constraint_state(offer, self.constraints, new_constraint_state)
    # raise constraint error but only if no other tasks fit/fail the offer
    if total > 0 and failed_constraints == total:
        raise ConstraintFailAllTasksError
    return tasks, new_constraint_state
|
Returns collection of tasks that can fit inside an offer.
|
tasks_and_state_for_offer
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/native_scheduler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py
|
Apache-2.0
|
def healthiness_score(task_id):
    """Return a tuple that can be used as a key for sorting, that expresses our desire to keep this task around.
    Higher values (things that sort later) are more desirable.

    Key components, in priority order:
      1. is_healthy  — unhealthy tasks sort before healthy ones
      2. state_score — terminal states < staging < starting < running
      3. is_task_new — old tasks sort before new ones
    """
    # NOTE(review): this is a closure — `all_tasks_with_params`,
    # `base_task_name` and `self` come from the enclosing scope.
    params = all_tasks_with_params[task_id]
    # Terminal/dead states score 0 so they are the first candidates to drop.
    state_score = {
        TASK_KILLING: 0,
        TASK_FINISHED: 0,
        TASK_FAILED: 0,
        TASK_KILLED: 0,
        TASK_LOST: 0,
        TASK_ERROR: 0,
        TASK_STAGING: 1,
        TASK_STARTING: 2,
        TASK_RUNNING: 3,
    }[params.mesos_task_state]
    # unhealthy tasks < healthy
    # staging < starting < running
    # old < new
    return (
        params.is_healthy,
        state_score,
        self.is_task_new(base_task_name, task_id),
    )
|
Return a tuple that can be used as a key for sorting, that expresses our desire to keep this task around.
Higher values (things that sort later) are more desirable.
|
healthiness_score
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/native_scheduler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py
|
Apache-2.0
|
def get_happy_tasks(self, tasks_with_params: Dict[str, MesosTaskParameters]):
    """Filter a dictionary of tasks->params to those that are running and not draining."""
    return {
        task_id: params
        for task_id, params in tasks_with_params.items()
        if params.mesos_task_state == TASK_RUNNING and not params.is_draining
    }
|
Filter a dictionary of tasks->params to those that are running and not draining.
|
get_happy_tasks
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/native_scheduler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py
|
Apache-2.0
|
def make_drain_task(self, task_id: str):
    """Return a DrainTask object, which is suitable for passing to drain methods."""
    params = self.task_store.get_task(task_id)
    ports = []
    for resource in params.resources:
        if resource["name"] != "ports":
            continue
        for rg in resource["ranges"]["range"]:
            # Mesos port ranges are inclusive on both ends.
            ports.extend(range(rg["begin"], rg["end"] + 1))
    return DrainTask(
        id=task_id, host=params.offer["agent_id"]["value"], ports=ports
    )
|
Return a DrainTask object, which is suitable for passing to drain methods.
|
make_drain_task
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/native_scheduler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py
|
Apache-2.0
|
def recreate_drain_method(self) -> None:
    """Re-instantiate self.drain_method. Should be called after self.service_config changes."""
    config = self.service_config
    namespace_config = config.service_namespace_config
    method_name = config.get_drain_method(namespace_config)
    method_params = config.get_drain_method_params(namespace_config)
    self.drain_method = drain_lib.get_drain_method(
        name=method_name,
        service=self.service_name,
        instance=self.instance_name,
        registrations=config.get_registrations(),
        **method_params,
    )
|
Re-instantiate self.drain_method. Should be called after self.service_config changes.
|
recreate_drain_method
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/native_scheduler.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_scheduler.py
|
Apache-2.0
|
def base_task(
    self, system_paasta_config: SystemPaastaConfig, portMappings=True
) -> TaskInfo:
    """Return a TaskInfo Dict with all the fields corresponding to the
    configuration filled in.
    Does not include task.agent_id or a task.id; those need to be
    computed separately.

    :param system_paasta_config: cluster-wide config (volumes, dockercfg location)
    :param portMappings: when True, add a docker port mapping plus a "ports"
        resource; their real values are filled in later by
        tasks_and_state_for_offer()
    """
    docker_volumes = self.get_volumes(
        system_volumes=system_paasta_config.get_volumes(),
    )
    task: TaskInfo = {
        # name/task_id/agent_id are placeholders; see docstring.
        "name": "",
        "task_id": {"value": ""},
        "agent_id": {"value": ""},
        "container": {
            "type": "DOCKER",
            "docker": {
                "image": self.get_docker_url(),
                "parameters": [
                    {"key": param["key"], "value": param["value"]}
                    for param in self.format_docker_parameters()
                ],
                "network": self.get_mesos_network_mode(),
                "port_mappings": [],
            },
            "volumes": [
                {
                    "container_path": volume["containerPath"],
                    "host_path": volume["hostPath"],
                    "mode": volume["mode"].upper(),
                }
                for volume in docker_volumes
            ],
        },
        "command": {
            "value": str(self.get_cmd()),
            "uris": [
                {
                    # Registry credentials fetched alongside the sandbox.
                    "value": system_paasta_config.get_dockercfg_location(),
                    "extract": False,
                }
            ],
        },
        "resources": [
            {
                "name": "cpus",
                "type": "SCALAR",
                "scalar": {"value": self.get_cpus()},
            },
            {"name": "mem", "type": "SCALAR", "scalar": {"value": self.get_mem()}},
        ],
    }
    if portMappings:
        task["container"]["docker"]["port_mappings"] = [
            {
                "container_port": self.get_container_port(),
                # filled by tasks_and_state_for_offer()
                "host_port": 0,
                "protocol": "tcp",
            }
        ]
        task["resources"].append(
            {
                "name": "ports",
                "type": "RANGES",
                "ranges": {
                    # filled by tasks_and_state_for_offer
                    "range": [{"begin": 0, "end": 0}]
                },
            }
        )
    # The task name is derived from the fully-populated task dict.
    task["name"] = self.task_name(task)
    return task
|
Return a TaskInfo Dict with all the fields corresponding to the
configuration filled in.
Does not include task.agent_id or a task.id; those need to be
computed separately.
|
base_task
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/native_service_config.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/native_service_config.py
|
Apache-2.0
|
def merge(self: _SelfT, **kwargs) -> "MesosTaskParameters":
    """Return a merged MesosTaskParameters object, where attributes in other take precedence over self."""
    # Deep-copy our attributes first so the new object shares no mutable state,
    # then let kwargs override.
    merged = {**copy.deepcopy(self.__dict__), **kwargs}
    return MesosTaskParameters(**merged)
|
Return a merged MesosTaskParameters object, where attributes in other take precedence over self.
|
merge
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/task_store.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/task_store.py
|
Apache-2.0
|
def add_task_if_doesnt_exist(self, task_id: str, **kwargs) -> None:
    """Add a task if it does not already exist. If it already exists, do nothing."""
    if self.get_task(task_id) is None:
        self.overwrite_task(task_id, MesosTaskParameters(**kwargs))
|
Add a task if it does not already exist. If it already exists, do nothing.
|
add_task_if_doesnt_exist
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/task_store.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/task_store.py
|
Apache-2.0
|
def _get_task(self, task_id: str) -> Tuple[MesosTaskParameters, ZnodeStat]:
    """Like get_task, but also returns the ZnodeStat that self.zk_client.get() returns

    :param task_id: id of the task to look up in zookeeper
    :return: (params, stat), or (None, None) when the node is missing or
        holds data that can't be deserialized
    """
    try:
        data, stat = self.zk_client.get("/%s" % task_id)
        return MesosTaskParameters.deserialize(data), stat
    except NoNodeError:
        # The task's znode doesn't exist at all.
        return None, None
    except json.decoder.JSONDecodeError:
        # `data` is bound here: the zk get succeeded and only deserialize failed.
        _log(
            service=self.service_name,
            instance=self.instance_name,
            level="debug",
            component="deploy",
            line=f"Warning: found non-json-decodable value in zookeeper for task {task_id}: {data}",
        )
        return None, None
|
Like get_task, but also returns the ZnodeStat that self.zk_client.get() returns
|
_get_task
|
python
|
Yelp/paasta
|
paasta_tools/frameworks/task_store.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/frameworks/task_store.py
|
Apache-2.0
|
def _format_remote_run_job_name(
    job: V1Job,
    user: str,
) -> str:
    """Format name for remote run job
    :param V1Job job: job definition
    :param str user: the user requesting the remote-run
    :return: job name
    """
    # Hash-truncate so the name always fits Kubernetes' length limits.
    raw_name = f"remote-run-{user}-{job.metadata.name}"
    return limit_size_with_hash(raw_name)
|
Format name for remote run job
:param V1Job job: job definition
:param str user: the user requesting the remote-run
:return: job name
|
_format_remote_run_job_name
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def remote_run_start(
    service: str,
    instance: str,
    cluster: str,
    user: str,
    interactive: bool,
    recreate: bool,
    max_duration: int,
    is_toolbox: bool,
) -> RemoteRunOutcome:
    """Trigger remote-run job
    :param str service: service name
    :param str instance: service instance
    :param str cluster: paasta cluster
    :param str user: the user requesting the remote-run sandbox
    :param bool interactive: whether it is expected to access the remote-run job interactively
    :param bool recreate: whether to recreate remote-run job if existing
    :param int max_duration: maximum allowed duration for the remote-run job
    :param bool is_toolbox: requested job is for a toolbox container
    :return: outcome of the operation, and resulting Kubernetes pod information
    """
    kube_client = KubeClient()
    # Load the service deployment settings (toolbox jobs use a synthesized config)
    deployment_config = (
        generate_toolbox_deployment(service, cluster, user)
        if is_toolbox
        else load_eks_service_config(service, instance, cluster)
    )
    # Set to interactive mode: keep the container alive for the whole session
    if interactive and not is_toolbox:
        deployment_config.config_dict["cmd"] = f"sleep {max_duration}"
    # Create the app with a new name
    formatted_job = deployment_config.format_kubernetes_job(
        job_label=REMOTE_RUN_JOB_LABEL,
        deadline_seconds=max_duration,
        keep_routable_ip=is_toolbox,
    )
    job_name = _format_remote_run_job_name(formatted_job, user)
    formatted_job.metadata.name = job_name
    app_wrapper = get_application_wrapper(formatted_job)
    app_wrapper.soa_config = deployment_config
    # Launch pod
    logger.info(f"Starting {job_name}")
    try:
        app_wrapper.create(kube_client)
    except ApiException as e:
        # 409 means a job with this name already exists
        if e.status != 409:
            raise
        if recreate:
            # Tear down the existing job, then retry exactly once
            # (recreate=False on the retry prevents infinite recursion).
            remote_run_stop(
                service=service,
                instance=instance,
                cluster=cluster,
                user=user,
                is_toolbox=is_toolbox,
            )
            return remote_run_start(
                service=service,
                instance=instance,
                cluster=cluster,
                user=user,
                interactive=interactive,
                recreate=False,
                max_duration=max_duration,
                is_toolbox=is_toolbox,
            )
    return {
        "status": 200,
        "message": "Remote run sandbox started",
        "job_name": job_name,
    }
|
Trigger remote-run job
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str user: the user requesting the remote-run sandbox
:param bool interactive: whether it is expected to access the remote-run job interactively
:param bool recreate: whether to recreate remote-run job if existing
:param int max_duration: maximum allowed duration for the remote-run job
:param bool is_toolbox: requested job is for a toolbox container
:return: outcome of the operation, and resulting Kubernetes pod information
|
remote_run_start
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def remote_run_ready(
    service: str,
    instance: str,
    cluster: str,
    job_name: str,
    user: str,
    is_toolbox: bool,
) -> RemoteRunOutcome:
    """Check if remote-run pod is ready
    :param str service: service name
    :param str instance: service instance
    :param str cluster: paasta cluster
    :param str job_name: name of the remote-run job to check
    :param str user: the user requesting the remote-run sandbox
    :param bool is_toolbox: requested job is for a toolbox container
    :return: job status, with pod info
    """
    kube_client = KubeClient()
    # Load the service deployment settings (toolbox jobs use a synthesized config)
    deployment_config = (
        generate_toolbox_deployment(service, cluster, user)
        if is_toolbox
        else load_eks_service_config(service, instance, cluster)
    )
    namespace = deployment_config.get_namespace()
    pod = find_job_pod(kube_client, namespace, job_name)
    if not pod:
        return {"status": 404, "message": "No pod found"}
    if pod.status.phase == "Running":
        # A pod being torn down still reports phase Running; detect that case
        # via the deletion timestamp.
        if pod.metadata.deletion_timestamp:
            return {"status": 409, "message": "Pod is terminating"}
        result: RemoteRunOutcome = {
            "status": 200,
            "message": "Pod ready",
            "pod_name": pod.metadata.name,
            "namespace": namespace,
        }
        if is_toolbox:
            # Toolbox clients connect directly (e.g. SSH), so expose the pod IP.
            result["pod_address"] = pod.status.pod_ip
        return result
    return {
        "status": 204,
        "message": "Pod not ready",
    }
|
Check if remote-run pod is ready
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str job_name: name of the remote-run job to check
:param bool is_toolbox: requested job is for a toolbox container
:return: job status, with pod info
|
remote_run_ready
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def remote_run_stop(
    service: str,
    instance: str,
    cluster: str,
    user: str,
    is_toolbox: bool,
) -> RemoteRunOutcome:
    """Stop a running remote-run job.

    :param str service: service name
    :param str instance: service instance
    :param str cluster: paasta cluster
    :param str user: the user requesting the remote-run sandbox
    :param bool is_toolbox: requested job is for a toolbox container
    :return: outcome of the operation
    """
    client = KubeClient()
    # Toolbox jobs use a synthesized deployment; everything else loads EKS config.
    config = (
        generate_toolbox_deployment(service, cluster, user)
        if is_toolbox
        else load_eks_service_config(service, instance, cluster)
    )
    # Recompute the job definition so we derive the same name used at start time.
    job = config.format_kubernetes_job(job_label=REMOTE_RUN_JOB_LABEL)
    job.metadata.name = _format_remote_run_job_name(job, user)
    logger.info(f"Stopping {job.metadata.name}")
    wrapper = get_application_wrapper(job)
    wrapper.soa_config = config
    wrapper.deep_delete(client)
    return {"status": 200, "message": "Job successfully removed"}
|
Stop remote-run job
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str user: the user requesting the remote-run sandbox
:param bool is_toolbox: requested job is for a toolbox container
:return: outcome of the operation
|
remote_run_stop
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def remote_run_token(
    service: str,
    instance: str,
    cluster: str,
    user: str,
) -> str:
    """Creates a short lived token for execing into a pod
    :param str service: service name
    :param str instance: service instance
    :param str cluster: paasta cluster
    :param str user: the user requesting the remote-run sandbox
    :return: token value usable to exec into the remote-run pod
    :raises RemoteRunError: if no pod exists for the job
    """
    kube_client = KubeClient()
    # Load the service deployment settings
    deployment_config = load_eks_service_config(service, instance, cluster)
    namespace = deployment_config.get_namespace()
    # Rebuild the job metadata so we derive the same job name used at start time
    formatted_job = deployment_config.format_kubernetes_job(
        job_label=REMOTE_RUN_JOB_LABEL
    )
    job_name = _format_remote_run_job_name(formatted_job, user)
    # Find pod and create exec token for it
    pod = find_job_pod(kube_client, namespace, job_name)
    if not pod:
        raise RemoteRunError(f"Pod for {job_name} not found")
    pod_name = pod.metadata.name
    logger.info(f"Generating temporary service account token for {pod_name}")
    # Build a service account + pod-scoped role + binding, then mint a token
    service_account = create_remote_run_service_account(
        kube_client, namespace, pod_name, user
    )
    role = create_pod_scoped_role(kube_client, namespace, pod_name, user)
    bind_role_to_service_account(kube_client, namespace, service_account, role, user)
    return create_temp_exec_token(kube_client, namespace, service_account)
|
Creates a short lived token for execing into a pod
:param str service: service name
:param str instance: service instance
:param str cluster: paasta cluster
:param str user: the user requesting the remote-run sandbox
|
remote_run_token
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def generate_toolbox_deployment(
    service: str, cluster: str, user: str
) -> EksDeploymentConfig:
    """Build a virtual EKS deployment for a toolbox container from its adhoc config.

    :param str service: toolbox name
    :param str cluster: target deployment cluster
    :param str user: user requesting the toolbox
    :return: deployment configuration
    :raises RemoteRunError: if the username is not purely alphanumeric
    """
    if not user.isalnum():
        raise RemoteRunError(
            f"Provided username contains non-alphanumeric characters: {user}"
        )
    # NOTE: API authorization is enforced by service, and we want different rules
    # for each toolbox, so clients send a combined service-instance string, and
    # here we strip the mock-service prefix to load the correct instance settings.
    toolbox_instance = service[len(TOOLBOX_MOCK_SERVICE) + 1 :]
    toolbox_config = load_adhoc_job_config(
        TOOLBOX_MOCK_SERVICE,
        toolbox_instance,
        cluster,
        load_deployments=False,
    )
    config_dict = toolbox_config.config_dict
    # NOTE: mount only the requesting user's public keys so that the resulting
    # pod is usable by that user alone.
    config_dict.setdefault("extra_volumes", []).append(
        {
            "containerPath": f"/etc/authorized_keys.d/{user}.pub",
            "hostPath": f"/etc/authorized_keys.d/{user}.pub",
            "mode": "RO",
        },
    )
    config_dict.setdefault("env", {})["SANDBOX_USER"] = user
    config_dict["routable_ip"] = True
    return EksDeploymentConfig(
        service=service,
        cluster=cluster,
        instance="main",
        config_dict=config_dict,
        branch_dict=toolbox_config.branch_dict,
    )
|
Creates virtual EKS deployment for toolbox containers starting from adhoc configuration
:param str service: toolbox name
:param str cluster: target deployment cluster
:param str user: user requesting the toolbox
:return: deployment configuration
|
generate_toolbox_deployment
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def find_job_pod(
    kube_client: KubeClient,
    namespace: str,
    job_name: str,
    job_label: str = REMOTE_RUN_JOB_LABEL,
    retries: int = 3,
) -> Optional[V1Pod]:
    """Locate pod for remote-run job

    :param KubeClient kube_client: Kubernetes client
    :param str namespace: the pod namespace
    :param str job_name: remote-run job name
    :param str job_label: value of the paasta job-type label to match
    :param int retries: maximum number of attempts
    :return: pod object if found, None otherwise
    """
    # Build the label selector once; it is invariant across retries.
    label_selector = ",".join(
        (
            f"{paasta_prefixed(JOB_TYPE_LABEL_NAME)}={job_label}",
            f"job-name={job_name}",
        )
    )
    for attempt in range(retries):
        pod_list = kube_client.core.list_namespaced_pod(
            namespace,
            label_selector=label_selector,
        )
        if pod_list.items:
            return pod_list.items[0]
        # Don't sleep after the final attempt; it would only delay the caller.
        if attempt < retries - 1:
            sleep(0.5)
    return None
|
Locate pod for remote-run job
:param KubeClient kube_client: Kubernetes client
:param str namespace: the pod namespace
:param str job_name: remote-run job name
:param int retries: maximum number of attempts
:return: pod object if found
|
find_job_pod
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def create_temp_exec_token(
    kube_client: KubeClient,
    namespace: str,
    service_account: str,
) -> str:
    """Mint a short-lived (10 minute) token for the given service account.

    :param KubeClient kube_client: Kubernetes client
    :param str namespace: service account namespace
    :param str service_account: service account name
    :return: token value
    """
    token_request = AuthenticationV1TokenRequest(
        spec=V1TokenRequestSpec(
            expiration_seconds=600,  # minimum allowed by k8s
            audiences=[],
        ),
    )
    response = kube_client.core.create_namespaced_service_account_token(
        service_account, namespace, token_request
    )
    return response.status.token
|
Create a short lived token for service account
:param KubeClient kube_client: Kubernetes client
:param str namespace: service account namespace
:param str service_account: service account name
:return: token value
|
create_temp_exec_token
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def get_remote_run_service_accounts(
    kube_client: KubeClient, namespace: str, user: str = ""
) -> Sequence[V1ServiceAccount]:
    """List the temporary remote-run service accounts in a namespace.

    :param KubeClient kube_client: Kubernetes client
    :param str namespace: pod namespace
    :param str user: optionally filter by owning user
    :return: list of service accounts
    """
    if user:
        selector = f"{POD_OWNER_LABEL}={user}"
    else:
        # Bare label name matches any owner.
        selector = POD_OWNER_LABEL
    return get_all_service_accounts(
        kube_client,
        namespace=namespace,
        label_selector=selector,
    )
|
List all temporary service account related to remote-run
:param KubeClient kube_client: Kubernetes client
:param str namespace: pod namespace
:param str user: optionally filter by owning user
:return: list of service accounts
|
get_remote_run_service_accounts
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def create_remote_run_service_account(
    kube_client: KubeClient,
    namespace: str,
    pod_name: str,
    user: str,
) -> str:
    """Create (or reuse) the service account used to exec into a remote-run pod.

    :param KubeClient kube_client: Kubernetes client
    :param str namespace: pod namespace
    :param str pod_name: pod name
    :param str user: user requiring credentials
    :return: service account name
    """
    digest = hashlib.sha1(pod_name.encode("utf-8")).hexdigest()[:12]
    account_name = limit_size_with_hash(f"remote-run-{user}-{digest}")
    # Reuse an existing account with the same name if this user already has one.
    for account in get_remote_run_service_accounts(kube_client, namespace, user):
        if account.metadata.name == account_name:
            return account_name
    new_account = V1ServiceAccount(
        metadata=V1ObjectMeta(
            name=account_name,
            namespace=namespace,
            labels={POD_OWNER_LABEL: user},
        )
    )
    kube_client.core.create_namespaced_service_account(
        namespace=namespace, body=new_account
    )
    return account_name
|
Create service account to exec into remote-run pod
:param KubeClient kube_client: Kubernetes client
:param str namespace: pod namespace
:param str pod_name: pod name
:param str user: user requiring credentials
|
create_remote_run_service_account
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def create_pod_scoped_role(
    kube_client: KubeClient,
    namespace: str,
    pod_name: str,
    user: str,
) -> str:
    """Create a role granting exec access to one specific pod.

    :param KubeClient kube_client: Kubernetes client
    :param str namespace: pod namespace
    :param str pod_name: pod name
    :param str user: user requiring the role
    :return: name of the role
    """
    digest = hashlib.sha1(pod_name.encode("utf-8")).hexdigest()[:12]
    role_name = f"remote-run-role-{digest}"
    pod_role = V1Role(
        rules=[
            # Access is restricted to this single pod via resource_names.
            V1PolicyRule(
                verbs=["create", "get"],
                resources=["pods", "pods/exec"],
                resource_names=[pod_name],
                api_groups=[""],
            )
        ],
        metadata=V1ObjectMeta(
            name=role_name,
            labels={POD_OWNER_LABEL: user},
        ),
    )
    try:
        kube_client.rbac.create_namespaced_role(namespace=namespace, body=pod_role)
    except ApiException as e:
        # 409: role already exists, which is fine.
        if e.status != 409:
            raise
    return role_name
|
Create role with execution access to specific pod
:param KubeClient kube_client: Kubernetes client
:param str namespace: pod namespace
:param str pod_name: pod name
:param str user: user requiring the role
:return: name of the role
|
create_pod_scoped_role
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def bind_role_to_service_account(
    kube_client: KubeClient,
    namespace: str,
    service_account: str,
    role: str,
    user: str,
) -> None:
    """Bind the pod-scoped role to the user's temporary service account.

    :param KubeClient kube_client: Kubernetes client
    :param str namespace: service account namespace
    :param str service_account: service account name
    :param str role: role name
    :param str user: user requiring the role
    """
    binding_name = limit_size_with_hash(f"remote-run-binding-{role}")
    binding = V1RoleBinding(
        metadata=V1ObjectMeta(
            name=binding_name,
            namespace=namespace,
            labels={POD_OWNER_LABEL: user},
        ),
        role_ref=V1RoleRef(
            api_group="rbac.authorization.k8s.io",
            kind="Role",
            name=role,
        ),
        subjects=[
            V1Subject(
                kind="ServiceAccount",
                name=service_account,
            ),
        ],
    )
    try:
        kube_client.rbac.create_namespaced_role_binding(
            namespace=namespace,
            body=binding,
        )
    except ApiException as e:
        # 409: binding already exists, which is fine.
        if e.status != 409:
            raise
|
Bind service account to role
:param KubeClient kube_client: Kubernetes client
:param str namespace: service account namespace
:param str service_account: service account name
:param str role: role name
:param str user: user requiring the role
|
bind_role_to_service_account
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def get_remote_run_roles(kube_client: KubeClient, namespace: str) -> List[V1Role]:
    """List the temporary remote-run roles in a namespace.

    :param KubeClient kube_client: Kubernetes client
    :param str namespace: role namespace
    :return: list of roles
    """
    response = kube_client.rbac.list_namespaced_role(
        namespace,
        label_selector=POD_OWNER_LABEL,
    )
    return response.items
|
List all temporary roles related to remote-run
:param KubeClient kube_client: Kubernetes client
:param str namespace: role namespace
:return: list of roles
|
get_remote_run_roles
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def get_remote_run_role_bindings(
    kube_client: KubeClient, namespace: str
) -> List[V1RoleBinding]:
    """List all temporary role bindings related to remote-run
    :param KubeClient kube_client: Kubernetes client
    :param str namespace: role binding namespace
    :return: list of role bindings
    """
    return kube_client.rbac.list_namespaced_role_binding(
        namespace,
        label_selector=POD_OWNER_LABEL,
    ).items
|
List all temporary role bindings related to remote-run
:param KubeClient kube_client: Kubernetes client
:param str namespace: role namespace
:return: list of role bindings
|
get_remote_run_role_bindings
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def get_remote_run_jobs(kube_client: KubeClient, namespace: str) -> List[V1Job]:
    """List every remote-run job in the given namespace.

    :param KubeClient kube_client: Kubernetes client
    :param str namespace: job namespace
    :return: list of jobs
    """
    selector = f"{paasta_prefixed(JOB_TYPE_LABEL_NAME)}={REMOTE_RUN_JOB_LABEL}"
    return kube_client.batches.list_namespaced_job(
        namespace,
        label_selector=selector,
    ).items
|
List all remote-run jobs
:param KubeClient kube_client: Kubernetes client
:param str namespace: job namespace
|
get_remote_run_jobs
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/remote_run.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/remote_run.py
|
Apache-2.0
|
def __init__(
    self,
    item: Union[V1Deployment, V1StatefulSet],
    logging=logging.getLogger(__name__),
) -> None:
    """
    This Application wrapper is an interface for creating/deleting k8s deployments and statefulsets
    soa_config is KubernetesDeploymentConfig. It is not loaded in init because it is not always required.
    :param item: Kubernetes Object(V1Deployment/V1StatefulSet) that has already been filled up.
    :param logging: where logs go
    """
    # NOTE(review): the default logger is evaluated once at import time and the
    # parameter shadows the `logging` module within this method — appears
    # intentional, but confirm before reusing the pattern elsewhere.
    if not item.metadata.namespace:
        # Default namespace for paasta-managed objects.
        item.metadata.namespace = "paasta"
    # Pull the paasta-prefixed identity labels off the k8s object; missing
    # labels come back as None.
    attrs = {
        attr: item.metadata.labels.get(paasta_prefixed(attr))
        for attr in [
            "service",
            "instance",
            "git_sha",
            "image_version",
            "config_sha",
        ]
    }
    # Autoscaled apps don't own their replica count, so record None for them.
    replicas = (
        item.spec.replicas
        if item.metadata.labels.get(paasta_prefixed("autoscaled"), "false")
        == "false"
        else None
    )
    self.kube_deployment = KubeDeployment(
        replicas=replicas, namespace=item.metadata.namespace, **attrs
    )
    self.item = item
    # Loaded lazily by callers when needed (see class docstring above).
    self.soa_config = None  # type: KubernetesDeploymentConfig
    self.logging = logging
|
This Application wrapper is an interface for creating/deleting k8s deployments and statefulsets
soa_config is KubernetesDeploymentConfig. It is not loaded in init because it is not always required.
:param item: Kubernetes Object(V1Deployment/V1StatefulSet) that has already been filled up.
:param logging: where logs go
|
__init__
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/application/controller_wrappers.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/application/controller_wrappers.py
|
Apache-2.0
|
def ensure_service_account(self, kube_client: KubeClient) -> None:
    """
    Ensure that the service account for this application exists.

    Delegates to the module-level ensure_service_account() when the soa
    config declares an IAM role; does nothing otherwise.
    :param kube_client: Kubernetes client
    """
    # Hoist the lookup so get_iam_role() is called once instead of twice.
    iam_role = self.soa_config.get_iam_role()
    if iam_role:
        ensure_service_account(
            iam_role=iam_role,
            namespace=self.soa_config.get_namespace(),
            kube_client=kube_client,
        )
|
Ensure that the service account for this application exists
:param kube_client:
|
ensure_service_account
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/application/controller_wrappers.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/application/controller_wrappers.py
|
Apache-2.0
|
def deep_delete(
    self, kube_client: KubeClient, propagation_policy="Foreground"
) -> None:
    """
    Remove all controllers, pods, and pod disruption budgets related to this application
    :param kube_client: Kubernetes client
    :param propagation_policy: Kubernetes deletion propagation policy
    """
    delete_options = V1DeleteOptions(propagation_policy=propagation_policy)
    try:
        kube_client.deployments.delete_namespaced_deployment(
            self.item.metadata.name,
            self.item.metadata.namespace,
            body=delete_options,
        )
    except ApiException as e:
        if e.status == 404:
            # Deployment does not exist, nothing to delete but
            # we can consider this a success.
            self.logging.debug(
                "not deleting nonexistent deploy/{} from namespace/{}".format(
                    self.item.metadata.name, self.item.metadata.namespace
                )
            )
        else:
            raise
    else:
        # Only logged when the delete call itself succeeded.
        self.logging.info(
            "deleted deploy/{} from namespace/{}".format(
                self.item.metadata.name, self.item.metadata.namespace
            )
        )
    # PDB and HPA are cleaned up regardless of whether the deployment existed.
    self.delete_pod_disruption_budget(kube_client)
    self.delete_horizontal_pod_autoscaler(kube_client)
|
Remove all controllers, pods, and pod disruption budgets related to this application
:param kube_client:
|
deep_delete
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/application/controller_wrappers.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/application/controller_wrappers.py
|
Apache-2.0
|
def sync_horizontal_pod_autoscaler(self, kube_client: KubeClient) -> None:
    """
    Create, replace, or delete this application's HPA as needed.

    For autoscaling to work there need to be at least two configuration
    values, min_instances and max_instances, and `instances` cannot be set.
    """
    # NOTE(review): the "{...}/name" fragments in the log messages below look
    # like a formatting slip (probably meant name/namespace) — confirm before
    # relying on these messages; left unchanged here.
    desired_hpa_spec = self.soa_config.get_autoscaling_metric_spec(
        name=self.item.metadata.name,
        cluster=self.soa_config.cluster,
        kube_client=kube_client,
        namespace=self.item.metadata.namespace,
        min_instances_override=(
            self.hpa_override["min_instances"] if self.hpa_override else None
        ),
    )
    hpa_exists = self.exists_hpa(kube_client)
    # No HPA wanted if the config doesn't produce a spec or autoscaling is paused.
    should_have_hpa = desired_hpa_spec and not autoscaling_is_paused()
    if not should_have_hpa:
        self.logging.info(
            f"No HPA required for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
        )
        if hpa_exists:
            self.logging.info(
                f"Deleting HPA for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
            )
            self.delete_horizontal_pod_autoscaler(kube_client)
        return
    self.logging.info(
        f"Syncing HPA setting for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
    )
    self.logging.debug(desired_hpa_spec)
    if not hpa_exists:
        self.logging.info(
            f"Creating new HPA for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
        )
        kube_client.autoscaling.create_namespaced_horizontal_pod_autoscaler(
            namespace=self.item.metadata.namespace,
            body=desired_hpa_spec,
            pretty=True,
        )
    else:
        self.logging.info(
            f"Updating new HPA for {self.item.metadata.name}/name in {self.item.metadata.namespace}/namespace"
        )
        kube_client.autoscaling.replace_namespaced_horizontal_pod_autoscaler(
            name=self.item.metadata.name,
            namespace=self.item.metadata.namespace,
            body=desired_hpa_spec,
            pretty=True,
        )
|
In order for autoscaling to work, there need to be at least two configuration values,
min_instances and max_instances, and there cannot be instance.
|
sync_horizontal_pod_autoscaler
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/application/controller_wrappers.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/application/controller_wrappers.py
|
Apache-2.0
|
def deep_delete(self, kube_client: KubeClient) -> None:
    """Remove resources related to the job.

    :param kube_client: Kubernetes client
    """
    # Foreground propagation: the job object is removed only after its pods are.
    delete_options = V1DeleteOptions(propagation_policy="Foreground")
    try:
        kube_client.batches.delete_namespaced_job(
            self.item.metadata.name,
            self.item.metadata.namespace,
            body=delete_options,
        )
    except ApiException as e:
        if e.status == 404:
            # Job does not exist, nothing to delete but
            # we can consider this a success.
            self.logging.debug(
                "not deleting nonexistent job/{} from namespace/{}".format(
                    self.item.metadata.name,
                    self.item.metadata.namespace,
                )
            )
        else:
            raise
    else:
        # Only logged when the delete call itself succeeded.
        self.logging.info(
            "deleted job/{} from namespace/{}".format(
                self.item.metadata.name,
                self.item.metadata.namespace,
            )
        )
|
Remove resources related to the job
|
deep_delete
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/application/controller_wrappers.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/application/controller_wrappers.py
|
Apache-2.0
|
def list_all_applications(
    kube_client: KubeClient, application_types: Sequence[Any]
) -> Dict[Tuple[str, str], List[Application]]:
    """
    List all applications in the cluster of the types from application_types.
    Only applications with complete set of labels are included (See is_valid_application()).
    :param kube_client: Kubernetes client
    :param application_types: types of applications
    :return: A mapping from (service, instance) to application
    """
    apps: Dict[Tuple[str, str], List[Application]] = {}
    for application_type in application_types:
        # Use update() consistently for both branches; the previous deployment
        # branch rebuilt the whole dict ({**apps, **new}) for no benefit.
        if application_type == V1Deployment:
            apps.update(list_paasta_managed_deployments(kube_client))
        elif application_type == V1StatefulSet:
            apps.update(list_paasta_managed_stateful_sets(kube_client))
    return apps
|
List all applications in the cluster of the types from application_types.
Only applications with complete set of labels are included (See is_valid_application()).
:param kube_client:
:param application_types: types of applications
:return: A mapping from (service, instance) to application
|
list_all_applications
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/application/tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/application/tools.py
|
Apache-2.0
|
def set_temporary_environment_variables(
    environ: Mapping[str, str]
) -> Generator[None, None, None]:
    """
    Temporarily overlay *environ* onto os.environ, restoring the original
    environment afterwards (even on error).
    *Note the return value means "yields None, takes None, and when finished, returns None"*
    :param environ: Environment variables to set
    """
    saved_environ = os.environ.copy()  # snapshot before mutating
    os.environ.update(environ)
    try:
        yield
    finally:
        # Restore the exact pre-call environment, dropping any keys added since.
        os.environ.clear()
        os.environ.update(saved_environ)
|
*Note the return value means "yields None, takes None, and when finished, returns None"*
Modifies the os.environ variable then yields this temporary state. Resets it when finished.
:param environ: Environment variables to set
|
set_temporary_environment_variables
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
Apache-2.0
|
def get_services_to_k8s_namespaces_to_allowlist(
    service_list: List[str], cluster: str, soa_dir: str, kube_client: KubeClient
) -> Dict[
    str,  # service
    Dict[
        str,  # namespace
        Optional[Set[str]],  # allowlist of secret names, None means allow all.
    ],
]:
    """
    Generate a mapping of service -> namespace -> allowlist of secrets, e.g.
    {
        "yelp-main": {
            "paasta": {"secret1", "secret2"},
            "paastasvc-yelp-main": {"secret1", "secret3"},
            "paasta-flinks": None,
        },
        "_shared": {
            "paasta": {"sharedsecret1"},
            "paastasvc-yelp-main": {"sharedsecret1", "sharedsecret2"},
            "paasta-flinks": None,
        }
    }
    This mapping is used by sync_all_secrets / sync_secrets:
    sync_secrets will only sync secrets into a namespace if the allowlist is None or contains that secret's name.
    """
    services_to_k8s_namespaces_to_allowlist: Dict[
        str, Dict[str, Optional[Set[str]]]
    ] = defaultdict(dict)
    for service in service_list:
        if service == "_shared":
            # _shared is handled specially for each service.
            continue
        config_loader = PaastaServiceConfigLoader(service, soa_dir)
        # Kubernetes-style instance types: accumulate per-namespace allowlists.
        for instance_type_class in K8S_INSTANCE_TYPE_CLASSES:
            for service_instance_config in config_loader.instance_configs(
                cluster=cluster, instance_type_class=instance_type_class
            ):
                secrets_used, shared_secrets_used = get_secrets_used_by_instance(
                    service_instance_config
                )
                # setdefault keeps an existing None ("allow all") intact; a new
                # namespace starts with an empty allowlist set.
                allowlist = services_to_k8s_namespaces_to_allowlist[service].setdefault(
                    service_instance_config.get_namespace(),
                    set(),
                )
                if allowlist is not None:
                    allowlist.update(secrets_used)
                if "_shared" in service_list:
                    # Shared secrets referenced by this instance must also be
                    # synced into the same namespace under "_shared".
                    shared_allowlist = services_to_k8s_namespaces_to_allowlist[
                        "_shared"
                    ].setdefault(
                        service_instance_config.get_namespace(),
                        set(),
                    )
                    if shared_allowlist is not None:
                        shared_allowlist.update(shared_secrets_used)
        for instance_type in INSTANCE_TYPES:
            if instance_type in PAASTA_K8S_INSTANCE_TYPES:
                continue  # handled above.
            instances = get_service_instance_list(
                service=service,
                instance_type=instance_type,
                cluster=cluster,
                soa_dir=soa_dir,
            )
            if instances:
                # Currently, all instance types besides kubernetes use one big namespace, defined in
                # INSTANCE_TYPE_TO_K8S_NAMESPACE. Sync all shared secrets and all secrets belonging to any service
                # which uses that instance type.
                services_to_k8s_namespaces_to_allowlist[service][
                    INSTANCE_TYPE_TO_K8S_NAMESPACE[instance_type]
                ] = None
                if "_shared" in service_list:
                    services_to_k8s_namespaces_to_allowlist["_shared"][
                        INSTANCE_TYPE_TO_K8S_NAMESPACE[instance_type]
                    ] = None
    # Return a plain dict so missing keys raise instead of silently appearing.
    return dict(services_to_k8s_namespaces_to_allowlist)
|
Generate a mapping of service -> namespace -> allowlist of secrets, e.g.
{
"yelp-main": {
"paasta": {"secret1", "secret2"},
"paastasvc-yelp-main": {"secret1", "secret3"},
"paasta-flinks": None,
},
"_shared": {
"paasta": {"sharedsecret1"},
"paastasvc-yelp-main": {"sharedsecret1", "sharedsecret2"},
"paasta-flinks": None,
}
}
This mapping is used by sync_all_secrets / sync_secrets:
sync_secrets will only sync secrets into a namespace if the allowlist is None or contains that secret's name.
|
get_services_to_k8s_namespaces_to_allowlist
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
Apache-2.0
|
def sync_datastore_credentials(
    kube_client: KubeClient,
    cluster: str,
    service: str,
    secret_provider_name: str,
    vault_cluster_config: Dict[str, str],
    soa_dir: str,
    vault_token_file: str,
    overwrite_namespace: Optional[str] = None,
) -> bool:
    """
    Map all the passwords requested for this service-instance to a single Kubernetes Secret store.
    Volume mounts will then map the associated secrets to their associated mount paths.

    :param kube_client: client used to create/update the resulting K8s secret
    :param cluster: PaaSTA cluster whose instance configs are scanned
    :param service: service whose datastore credentials are synced
    :param secret_provider_name: secret provider implementation to instantiate
    :param vault_cluster_config: cluster -> vault shard mapping; used for spec
        validation only, since the effective vault endpoint comes from the
        override env vars set below
    :param soa_dir: root of the soa-configs checkout
    :param vault_token_file: path to the token used for vault authentication
    :param overwrite_namespace: if set, sync into this namespace instead of each
        instance's own namespace
    :returns: always True; failures surface as exceptions
    """
    config_loader = PaastaServiceConfigLoader(service=service, soa_dir=soa_dir)
    system_paasta_config = load_system_paasta_config()
    datastore_credentials_vault_overrides = (
        system_paasta_config.get_datastore_credentials_vault_overrides()
    )
    for instance_type_class in K8S_INSTANCE_TYPE_CLASSES:
        for instance_config in config_loader.instance_configs(
            cluster=cluster, instance_type_class=instance_type_class
        ):
            namespace = (
                overwrite_namespace
                if overwrite_namespace is not None
                else instance_config.get_namespace()
            )
            datastore_credentials = instance_config.get_datastore_credentials()
            with set_temporary_environment_variables(
                datastore_credentials_vault_overrides
            ):
                # expects VAULT_ADDR_OVERRIDE, VAULT_CA_OVERRIDE, and VAULT_TOKEN_OVERRIDE to be set
                # in order to use a custom vault shard. overriden temporarily in this context
                provider = get_secret_provider(
                    secret_provider_name=secret_provider_name,
                    soa_dir=soa_dir,
                    service_name=service,
                    cluster_names=[cluster],
                    # overridden by env variables but still needed here for spec validation
                    secret_provider_kwargs={
                        "vault_cluster_config": vault_cluster_config,
                        "vault_auth_method": "token",
                        "vault_token_file": vault_token_file,
                    },
                )
                secret_data = {}
                for datastore, credentials in datastore_credentials.items():
                    # mypy loses type hints on '.items' and throws false positives. unfortunately have to type: ignore
                    # https://github.com/python/mypy/issues/7178
                    for credential in credentials:  # type: ignore
                        vault_path = f"secrets/datastore/{datastore}/{credential}"
                        secrets = provider.get_data_from_vault_path(vault_path)
                        if not secrets:
                            # no secrets found at this path. skip syncing
                            log.debug(
                                f"Warning: no secrets found at requested path {vault_path}."
                            )
                            continue
                        # decrypt and save in secret_data
                        vault_key_path = get_vault_key_secret_name(vault_path)
                        # kubernetes expects data to be base64 encoded binary in utf-8 when put into secret maps
                        # may look like:
                        # {'master': {'passwd': '****', 'user': 'v-approle-mysql-serv-nVcYexH95A2'}, 'reporting': {'passwd': '****', 'user': 'v-approle-mysql-serv-GgCpRIh9Ut7'}, 'slave': {'passwd': '****', 'user': 'v-approle-mysql-serv-PzjPwqNMbqu'}
                        secret_data[vault_key_path] = base64.b64encode(
                            json.dumps(secrets).encode("utf-8")
                        ).decode("utf-8")
            # one secret per instance, bundling every datastore credential it requested
            create_or_update_k8s_secret(
                service=service,
                signature_name=instance_config.get_datastore_credentials_signature_name(),
                secret_name=instance_config.get_datastore_credentials_secret_name(),
                get_secret_data=(lambda: secret_data),
                secret_signature=_get_dict_signature(secret_data),
                kube_client=kube_client,
                namespace=namespace,
            )
    return True
|
Map all the passwords requested for this service-instance to a single Kubernetes Secret store.
Volume mounts will then map the associated secrets to their associated mount paths.
|
sync_datastore_credentials
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
Apache-2.0
|
def sync_crypto_secrets(
    kube_client: KubeClient,
    cluster: str,
    service: str,
    secret_provider_name: str,
    vault_cluster_config: Dict[str, str],
    soa_dir: str,
    vault_token_file: str,
) -> bool:
    """
    For each key-name in `crypto_key`,
    1. Fetch all versions of the key-name from Vault superregion mapped from cluster, e.g. `kubestage` maps to `devc` Vault server.
    2. Create K8s secret from JSON blob containing all key versions.
    3. Create signatures as K8s configmap based on JSON blob hash.
    So each replica of a service instance gets the same key, thereby reducing requests to Vault API as we only talk to vault during secret syncing

    :param kube_client: client used to create/update the resulting K8s secret
    :param cluster: PaaSTA cluster whose instance configs are scanned
    :param service: service whose crypto keys are synced
    :param secret_provider_name: secret provider implementation to instantiate
    :param vault_cluster_config: cluster -> vault shard mapping for the provider
    :param soa_dir: root of the soa-configs checkout
    :param vault_token_file: path to the token used for vault authentication
    :returns: always True; failures surface as exceptions
    """
    config_loader = PaastaServiceConfigLoader(service=service, soa_dir=soa_dir)
    for instance_type_class in K8S_INSTANCE_TYPE_CLASSES:
        for instance_config in config_loader.instance_configs(
            cluster=cluster, instance_type_class=instance_type_class
        ):
            crypto_keys = instance_config.get_crypto_keys_from_config()
            if not crypto_keys:
                # this instance requested no crypto keys; nothing to sync
                continue
            secret_data = {}
            provider = get_secret_provider(
                secret_provider_name=secret_provider_name,
                soa_dir=soa_dir,
                service_name=service,
                cluster_names=[cluster],
                secret_provider_kwargs={
                    "vault_cluster_config": vault_cluster_config,
                    "vault_auth_method": "token",
                    "vault_token_file": vault_token_file,
                },
            )
            for key in crypto_keys:
                key_versions = provider.get_key_versions(key)
                if not key_versions:
                    log.error(
                        f"No key versions found for {key} on {instance_config.get_sanitised_deployment_name()}"
                    )
                    continue
                # K8s secret values must be base64-encoded utf-8
                secret_data[get_vault_key_secret_name(key)] = base64.b64encode(
                    json.dumps(key_versions).encode("utf-8")
                ).decode("utf-8")
            if not secret_data:
                # every requested key was missing; skip creating an empty secret
                continue
            create_or_update_k8s_secret(
                service=service,
                signature_name=instance_config.get_crypto_secret_signature_name(),
                # the secret name here must match the secret name given in the secret volume config,
                # i.e. `kubernetes.client.V1SecretVolumeSource`'s `secret_name` must match below
                secret_name=instance_config.get_crypto_secret_name(),
                get_secret_data=(lambda: secret_data),
                secret_signature=_get_dict_signature(secret_data),
                kube_client=kube_client,
                namespace=instance_config.get_namespace(),
            )
    return True
|
For each key-name in `crypto_key`,
1. Fetch all versions of the key-name from Vault superregion mapped from cluster, e.g. `kubestage` maps to `devc` Vault server.
2. Create K8s secret from JSON blob containing all key versions.
3. Create signatures as K8s configmap based on JSON blob hash.
So each replica of a service instance gets the same key, thereby reducing requests to Vault API as we only talk to vault during secret syncing
|
sync_crypto_secrets
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
Apache-2.0
|
def create_or_update_k8s_secret(
    service: str,
    secret_name: str,
    signature_name: str,
    get_secret_data: Callable[[], Dict[str, str]],
    secret_signature: str,
    kube_client: KubeClient,
    namespace: str,
) -> None:
    """
    Create or refresh a K8s secret and its companion signature, writing only
    when the stored signature differs from the desired one.

    :param service: service that owns the secret
    :param secret_name: name of the K8s secret to create/update
    :param signature_name: name under which the signature is stored/looked up
    :param get_secret_data: is a function to postpone fetching data in order to reduce service load, e.g. Vault API
    :param secret_signature: desired signature for the secret payload
    :param kube_client: client used for all K8s API calls
    :param namespace: namespace the secret and signature live in
    """
    # In order to prevent slamming the k8s API, add some artificial delay here
    delay = load_system_paasta_config().get_secret_sync_delay_seconds()
    if delay:
        time.sleep(delay)
    kubernetes_signature = get_secret_signature(
        kube_client=kube_client,
        signature_name=signature_name,
        namespace=namespace,
    )
    if not kubernetes_signature:
        log.info(f"{secret_name} for {service} in {namespace} not found, creating")
        try:
            create_secret(
                kube_client=kube_client,
                service_name=service,
                secret_name=secret_name,
                secret_data=get_secret_data(),
                namespace=namespace,
            )
        except ApiException as e:
            # 409 Conflict: the secret exists even though its signature is
            # missing (e.g. a partially completed earlier sync) -- repair by
            # updating in place instead of failing
            if e.status == 409:
                log.warning(
                    f"Secret {secret_name} for {service} already exists in {namespace} but no signature found. Updating secret and signature."
                )
                update_secret(
                    kube_client=kube_client,
                    secret_name=secret_name,
                    secret_data=get_secret_data(),
                    service_name=service,
                    namespace=namespace,
                )
            else:
                raise
        create_secret_signature(
            kube_client=kube_client,
            service_name=service,
            signature_name=signature_name,
            secret_signature=secret_signature,
            namespace=namespace,
        )
    elif secret_signature != kubernetes_signature:
        log.info(
            f"{secret_name} for {service} in {namespace} needs updating as signature changed"
        )
        update_secret(
            kube_client=kube_client,
            secret_name=secret_name,
            secret_data=get_secret_data(),
            service_name=service,
            namespace=namespace,
        )
        update_secret_signature(
            kube_client=kube_client,
            service_name=service,
            signature_name=signature_name,
            secret_signature=secret_signature,
            namespace=namespace,
        )
    else:
        # signatures match: secret payload unchanged, skip the write
        log.info(f"{secret_name} for {service} in {namespace} up to date")
|
:param get_secret_data: is a function to postpone fetching data in order to reduce service load, e.g. Vault API
|
create_or_update_k8s_secret
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes/bin/paasta_secrets_sync.py
|
Apache-2.0
|
def resolve(self, cfg):
    """Resolve the URL to the mesos master.
    The value of cfg should be one of:
    - host:port
    - zk://host1:port1,host2:port2/path
    - zk://username:password@host1:port1/path
    - file:///path/to/file (where file contains one of the above)
    """
    # Guard clauses: dispatch on the URL scheme, falling through to a
    # plain host:port string.
    if cfg.startswith("zk:"):
        return self._zookeeper_resolver(cfg)
    if cfg.startswith("file:"):
        return self._file_resolver(cfg)
    return cfg
|
Resolve the URL to the mesos master.
The value of cfg should be one of:
- host:port
- zk://host1:port1,host2:port2/path
- zk://username:password@host1:port1/path
- file:///path/to/file (where file contains one of the above)
|
resolve
|
python
|
Yelp/paasta
|
paasta_tools/mesos/master.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos/master.py
|
Apache-2.0
|
def stream(fn, elements, workers):
    """Yield fn(elem) results in completion order, skipping SkipResult."""
    with execute(workers) as executor:
        pending = [executor.submit(fn, elem) for elem in elements]
        for future in concurrent.futures.as_completed(pending):
            try:
                yield future.result()
            except exceptions.SkipResult:
                # the worker asked for this result to be dropped
                pass
|
Yield the results of fn as jobs complete.
|
stream
|
python
|
Yelp/paasta
|
paasta_tools/mesos/parallel.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos/parallel.py
|
Apache-2.0
|
def get_num_masters() -> int:
    """Count the mesos masters registered in ZooKeeper."""
    zk = get_zookeeper_host_path()
    return get_number_of_mesos_masters(zk.host, zk.path)
|
Gets the number of masters from mesos state
|
get_num_masters
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_mesos_cpu_status(
    metrics: MesosMetrics, mesos_state: MesosState
) -> Tuple[int, int, int]:
    """Derive cluster CPU accounting from mesos metrics and state.

    :param metrics: mesos metrics dictionary.
    :param mesos_state: mesos state dictionary.
    :returns: Tuple of total, used, and available CPUs.
    """
    total = metrics["master/cpus_total"]
    # CPUs parked for maintenance count as used on top of the master metric
    maintenance = sum(
        reserved_maintenence_resources(slave["reserved_resources"])["cpus"]
        for slave in mesos_state["slaves"]
    )
    used = metrics["master/cpus_used"] + maintenance
    return total, used, total - used
|
Takes in the mesos metrics and analyzes them, returning the status.
:param metrics: mesos metrics dictionary.
:param mesos_state: mesos state dictionary.
:returns: Tuple of total, used, and available CPUs.
|
get_mesos_cpu_status
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_kube_cpu_status(
    nodes: Sequence[V1Node],
) -> Tuple[float, float, float]:
    """Derive cluster CPU accounting from a list of Kubernetes nodes.

    :param nodes: list of Kubernetes nodes.
    :returns: Tuple of total, used, and available CPUs.
    """
    # float start values keep the return type stable even for an empty node list
    total = sum(
        (suffixed_number_value(node.status.capacity["cpu"]) for node in nodes), 0.0
    )
    available = sum(
        (suffixed_number_value(node.status.allocatable["cpu"]) for node in nodes), 0.0
    )
    return total, total - available, available
|
Takes in the list of Kubernetes nodes and analyzes them, returning the status.
:param nodes: list of Kubernetes nodes.
:returns: Tuple of total, used, and available CPUs.
|
get_kube_cpu_status
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_mesos_memory_status(
    metrics: MesosMetrics, mesos_state: MesosState
) -> Tuple[int, int, int]:
    """Derive cluster memory accounting from mesos metrics and state.

    :param metrics: mesos metrics dictionary.
    :param mesos_state: mesos state dictionary.
    :returns: Tuple of total, used, and available memory in Mi.
    """
    total = metrics["master/mem_total"]
    # memory parked for maintenance counts as used on top of the master metric
    maintenance = sum(
        reserved_maintenence_resources(slave["reserved_resources"])["mem"]
        for slave in mesos_state["slaves"]
    )
    used = metrics["master/mem_used"] + maintenance
    return total, used, total - used
|
Takes in the mesos metrics and analyzes them, returning the status.
:param metrics: mesos metrics dictionary.
:param mesos_state: mesos state dictionary.
:returns: Tuple of total, used, and available memory in Mi.
|
get_mesos_memory_status
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_kube_memory_status(
    nodes: Sequence[V1Node],
) -> Tuple[float, float, float]:
    """Derive cluster memory accounting from a list of Kubernetes nodes.

    :param nodes: list of Kubernetes nodes.
    :returns: Tuple of total, used, and available memory in Mi.
    """
    bytes_per_mi = 1024 * 1024
    total = sum(
        (suffixed_number_value(node.status.capacity["memory"]) for node in nodes), 0.0
    )
    available = sum(
        (suffixed_number_value(node.status.allocatable["memory"]) for node in nodes),
        0.0,
    )
    # floor-divide to whole Mi, matching the original //= semantics
    total = total // bytes_per_mi
    available = available // bytes_per_mi
    return total, total - available, available
|
Takes in the list of Kubernetes nodes and analyzes them, returning the status.
:param nodes: list of Kubernetes nodes.
:returns: Tuple of total, used, and available memory in Mi.
|
get_kube_memory_status
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_mesos_disk_status(
    metrics: MesosMetrics, mesos_state: MesosState
) -> Tuple[int, int, int]:
    """Derive cluster disk accounting from mesos metrics and state.

    :param metrics: mesos metrics dictionary.
    :param mesos_state: mesos state dictionary.
    :returns: Tuple of total, used, and available disk space in Mi.
    """
    total = metrics["master/disk_total"]
    # disk parked for maintenance counts as used on top of the master metric
    maintenance = sum(
        reserved_maintenence_resources(slave["reserved_resources"])["disk"]
        for slave in mesos_state["slaves"]
    )
    used = metrics["master/disk_used"] + maintenance
    return total, used, total - used
|
Takes in the mesos metrics and analyzes them, returning the status.
:param metrics: mesos metrics dictionary.
:param mesos_state: mesos state dictionary.
:returns: Tuple of total, used, and available disk space in Mi.
|
get_mesos_disk_status
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_kube_disk_status(
    nodes: Sequence[V1Node],
) -> Tuple[float, float, float]:
    """Derive cluster disk accounting from a list of Kubernetes nodes.

    :param nodes: list of Kubernetes nodes.
    :returns: Tuple of total, used, and available disk space in Mi.
    """
    bytes_per_mi = 1024 * 1024
    total = sum(
        (
            suffixed_number_value(node.status.capacity["ephemeral-storage"])
            for node in nodes
        ),
        0.0,
    )
    available = sum(
        (
            suffixed_number_value(node.status.allocatable["ephemeral-storage"])
            for node in nodes
        ),
        0.0,
    )
    # floor-divide to whole Mi, matching the original //= semantics
    total = total // bytes_per_mi
    available = available // bytes_per_mi
    return total, total - available, available
|
Takes in the list of Kubernetes nodes and analyzes them, returning the status.
:param nodes: list of Kubernetes nodes.
:returns: Tuple of total, used, and available disk space in Mi.
|
get_kube_disk_status
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_mesos_gpu_status(
    metrics: MesosMetrics, mesos_state: MesosState
) -> Tuple[int, int, int]:
    """Derive cluster GPU accounting from mesos metrics and state.

    :param metrics: mesos metrics dictionary.
    :param mesos_state: mesos state dictionary.
    :returns: Tuple of total, used, and available GPUs.
    """
    total = metrics["master/gpus_total"]
    # GPUs parked for maintenance count as used on top of the master metric
    maintenance = sum(
        reserved_maintenence_resources(slave["reserved_resources"])["gpus"]
        for slave in mesos_state["slaves"]
    )
    used = metrics["master/gpus_used"] + maintenance
    return total, used, total - used
|
Takes in the mesos metrics and analyzes them, returning gpus status.
:param metrics: mesos metrics dictionary.
:param mesos_state: mesos state dictionary.
:returns: Tuple of total, used, and available GPUs.
|
get_mesos_gpu_status
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_kube_gpu_status(
    nodes: Sequence[V1Node],
) -> Tuple[float, float, float]:
    """Derive cluster GPU accounting from a list of Kubernetes nodes.

    :param nodes: list of Kubernetes nodes.
    :returns: Tuple of total, used, and available GPUs.
    """
    # nodes without GPUs report no "nvidia.com/gpu" key; treat that as zero
    total = sum(
        (
            suffixed_number_value(node.status.capacity.get("nvidia.com/gpu", "0"))
            for node in nodes
        ),
        0.0,
    )
    available = sum(
        (
            suffixed_number_value(node.status.allocatable.get("nvidia.com/gpu", "0"))
            for node in nodes
        ),
        0.0,
    )
    return total, total - available, available
|
Takes in the list of Kubernetes nodes and analyzes them, returning the status.
:param nodes: list of Kubernetes nodes.
:returns: Tuple of total, used, and available GPUs.
|
get_kube_gpu_status
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def healthcheck_result_for_resource_utilization(
    resource_utilization: ResourceUtilization, threshold: int
) -> HealthCheckResult:
    """Turn one resource utilization tuple into a pass/fail health check.

    :param resource_utilization: the resource_utilization tuple to check
    :param threshold: utilization percentage above which the check fails
    :returns: a HealthCheckResult
    """
    used = resource_utilization.total - resource_utilization.free
    try:
        utilization = percent_used(resource_utilization.total, used)
    except ZeroDivisionError:
        # an empty pool is trivially healthy
        utilization = 0
    message = (
        f"{resource_utilization.metric}: "
        f"{float(used):.2f}/{resource_utilization.total:.2f}({utilization:.2f}%) used. "
        f"Threshold ({threshold:.2f}%)"
    )
    return HealthCheckResult(message=message, healthy=utilization <= threshold)
|
Given a resource data dict, assert that cpu
data is ok.
:param resource_utilization: the resource_utilization tuple to check
:returns: a HealthCheckResult
|
healthcheck_result_for_resource_utilization
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def key_func_for_attribute(
    attribute: str,
) -> Callable[[_SlaveT], str]:
    """Build a grouping key function that reads one slave attribute.

    :param attribute: the attribute to inspect in the slave
    :returns: a closure, which takes a slave and returns the value of that
        attribute, or "unknown" if the slave does not define it
    """
    return lambda slave: slave["attributes"].get(attribute, "unknown")
|
Return a closure that given a slave, will return the value of a specific
attribute.
:param attribute: the attribute to inspect in the slave
:returns: a closure, which takes a slave and returns the value of an attribute
|
key_func_for_attribute
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def key_func_for_attribute_multi(
    attributes: Sequence[str],
) -> _GenericNodeGroupingFunctionT:
    """Build a grouping key function over several slave attributes.

    :param attributes: the attributes to inspect in the slave
    :returns: a closure, which takes a slave and returns a hashable tuple of
        (attribute, value) pairs; "hostname" is read from the top level,
        everything else from the slave's attributes (defaulting to "unknown")
    """
    def _value(slave, attribute):
        if attribute == "hostname":
            return slave["hostname"]
        return slave["attributes"].get(attribute, "unknown")

    return lambda slave: tuple((a, _value(slave, a)) for a in attributes)
|
Return a closure that given a slave, will return the value of a list of
attributes, compiled into a hashable tuple
:param attributes: the attributes to inspect in the slave
:returns: a closure, which takes a slave and returns the value of those attributes
|
key_func_for_attribute_multi
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def key_func_for_attribute_multi_kube(
    attributes: Sequence[str],
) -> Callable[[V1Node], _KeyFuncRetT]:
    """Build a grouping key function over several Kubernetes node labels.

    :param attributes: the attributes to inspect on the node
    :returns: a closure, which takes a node and returns a hashable tuple of
        (attribute, label value) pairs; missing labels map to "unknown"
    """
    def _label_value(node, attribute):
        return node.metadata.labels.get(paasta_prefixed(attribute), "unknown")

    return lambda node: tuple((a, _label_value(node, a)) for a in attributes)
|
Return a closure that given a node, will return the value of a list of
attributes, compiled into a hashable tuple
:param attributes: the attributes to inspect in the slave
:returns: a closure, which takes a node and returns the value of those attributes
|
key_func_for_attribute_multi_kube
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def group_slaves_by_key_func(
    key_func: _GenericNodeGroupingFunctionT,
    slaves: Sequence[_GenericNodeT],
    sort_func: _GenericNodeSortFunctionT = None,
) -> Mapping[_KeyFuncRetT, Sequence[_GenericNodeT]]:
    """Bucket slaves by the value key_func returns for them.

    :param key_func: a function which consumes a slave and returns a value
    :param slaves: a list of slaves
    :param sort_func: optional custom ordering; by default slaves are sorted
        by key_func (groupby requires runs of equal keys to be adjacent)
    :returns: a dict of key: [slaves]
    """
    ordered: Sequence[_GenericNodeT] = (
        sort_func(slaves) if sort_func is not None else sorted(slaves, key=key_func)
    )
    return {
        key: list(group) for key, group in itertools.groupby(ordered, key=key_func)
    }
|
Given a function for grouping slaves, return a
dict where keys are the unique values returned by
the key_func and the values are all those slaves which
have that specific value.
:param key_func: a function which consumes a slave and returns a value
:param slaves: a list of slaves
:returns: a dict of key: [slaves]
|
group_slaves_by_key_func
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def calculate_resource_utilization_for_slaves(
    slaves: Sequence[_SlaveT], tasks: Sequence[MesosTask]
) -> ResourceUtilizationDict:
    """Total up the resources offered by a set of slaves and subtract what
    running tasks and maintenance reservations consume.

    :param slaves: a list of slaves to calculate resource usage for
    :param tasks: the list of tasks running in the mesos cluster
    :returns: a dict, containing keys for "free" and "total" resources. Each of these keys
    is a ResourceInfo tuple, exposing a number for cpu, disk and mem.
    """
    totals: _Counter[str] = Counter()
    for node in slaves:
        totals.update(Counter(filter_mesos_state_metrics(node["resources"])))

    # start from the totals and subtract everything consumed
    free = copy.deepcopy(totals)
    for task in tasks:
        free.subtract(Counter(filter_mesos_state_metrics(task["resources"])))
    for node in slaves:
        free.subtract(
            Counter(
                filter_mesos_state_metrics(
                    reserved_maintenence_resources(node["reserved_resources"])
                )
            )
        )

    def _info(counter):
        # Package one Counter into the ResourceInfo tuple shape.
        return ResourceInfo(
            cpus=counter["cpus"],
            disk=counter["disk"],
            mem=counter["mem"],
            gpus=counter.get("gpus", 0),
        )

    return {
        "free": _info(free),
        "total": _info(totals),
        "slave_count": len(slaves),
    }
|
Given a list of slaves and a list of tasks, calculate the total available
resource available in that list of slaves, and the resources consumed by tasks
running on those slaves.
:param slaves: a list of slaves to calculate resource usage for
:param tasks: the list of tasks running in the mesos cluster
:returns: a dict, containing keys for "free" and "total" resources. Each of these keys
is a ResourceInfo tuple, exposing a number for cpu, disk and mem.
|
calculate_resource_utilization_for_slaves
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def calculate_resource_utilization_for_kube_nodes(
    nodes: Sequence[V1Node],
    pods_by_node: Mapping[str, Sequence[V1Pod]],
) -> ResourceUtilizationDict:
    """Given a list of Kubernetes nodes, calculate the total available
    resource available and the resources consumed in that list of nodes.

    :param nodes: a list of Kubernetes nodes to calculate resource usage for
    :param pods_by_node: node name -> pods scheduled on that node; used to
        compute per-node allocated resources
    :returns: a dict, containing keys for "free" and "total" resources. Each of these keys
    is a ResourceInfo tuple, exposing a number for cpu, disk and mem.
    """
    resource_total_dict: _Counter[str] = Counter()
    # despite the name, this accumulates per-node free amounts
    # (allocatable minus allocated), not a running subtraction
    resource_free_dict: _Counter[str] = Counter()
    for node in nodes:
        allocatable_resources = suffixed_number_dict_values(
            filter_kube_resources(node.status.allocatable)
        )
        resource_total_dict.update(Counter(allocatable_resources))
        allocated_resources = allocated_node_resources(pods_by_node[node.metadata.name])
        resource_free_dict.update(
            Counter(
                {
                    "cpu": allocatable_resources["cpu"] - allocated_resources["cpu"],
                    "ephemeral-storage": allocatable_resources["ephemeral-storage"]
                    - allocated_resources["ephemeral-storage"],
                    "memory": allocatable_resources["memory"]
                    - allocated_resources["memory"],
                }
            )
        )
    # disk/memory converted from bytes to Mi for the returned tuples
    return {
        "free": ResourceInfo(
            cpus=resource_free_dict["cpu"],
            disk=resource_free_dict["ephemeral-storage"] / (1024**2),
            mem=resource_free_dict["memory"] / (1024**2),
            gpus=resource_free_dict.get("nvidia.com/gpu", 0),
        ),
        "total": ResourceInfo(
            cpus=resource_total_dict["cpu"],
            disk=resource_total_dict["ephemeral-storage"] / (1024**2),
            mem=resource_total_dict["memory"] / (1024**2),
            gpus=resource_total_dict.get("nvidia.com/gpu", 0),
        ),
        "slave_count": len(nodes),
    }
|
Given a list of Kubernetes nodes, calculate the total available
resource available and the resources consumed in that list of nodes.
:param nodes: a list of Kubernetes nodes to calculate resource usage for
:returns: a dict, containing keys for "free" and "total" resources. Each of these keys
is a ResourceInfo tuple, exposing a number for cpu, disk and mem.
|
calculate_resource_utilization_for_kube_nodes
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def filter_tasks_for_slaves(
    slaves: Sequence[_SlaveT], tasks: Sequence[MesosTask]
) -> Sequence[MesosTask]:
    """Given a list of slaves and a list of tasks, return a filtered
    list of tasks, where those returned belong to slaves in the list of
    slaves

    :param slaves: the list of slaves which the tasks provided should be
    running on.
    :param tasks: the tasks to filter :returns: a list of tasks,
    identical to that provided by the tasks param, but with only those where
    the task is running on one of the provided slaves included.
    """
    # Use a set for O(1) membership checks; the original list lookup made
    # this O(len(slaves) * len(tasks)).
    slave_ids = {slave["id"] for slave in slaves}
    return [task for task in tasks if task["slave_id"] in slave_ids]
|
Given a list of slaves and a list of tasks, return a filtered
list of tasks, where those returned belong to slaves in the list of
slaves
:param slaves: the list of slaves which the tasks provided should be
running on.
:param tasks: the tasks to filter :returns: a list of tasks,
identical to that provided by the tasks param, but with only those where
the task is running on one of the provided slaves included.
|
filter_tasks_for_slaves
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def filter_slaves(
    slaves: Sequence[_GenericNodeT], filters: Sequence[_GenericNodeFilterFunctionT]
) -> Sequence[_GenericNodeT]:
    """Keep only the slaves that satisfy every filter predicate.

    :param slaves: list of slaves to filter
    :param filters: list of functions that take a slave and return whether the
    slave should be included; None means "no filtering"
    :returns: list of slaves that return true for all the filters
    """
    if filters is None:
        return slaves
    return [node for node in slaves if all(check(node) for check in filters)]
|
Filter slaves by attributes
:param slaves: list of slaves to filter
:param filters: list of functions that take a slave and return whether the
slave should be included
:returns: list of slaves that return true for all the filters
|
filter_slaves
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_resource_utilization_by_grouping(
    grouping_func: _GenericNodeGroupingFunctionT,
    mesos_state: MesosState,
    filters: Sequence[_GenericNodeFilterFunctionT] = [],
    sort_func: _GenericNodeSortFunctionT = None,
) -> Mapping[_KeyFuncRetT, ResourceUtilizationDict]:
    """Compute per-group resource utilization across the mesos cluster.

    :grouping_func: a function that given a slave, will return the value of an
    attribute to group by.
    :param mesos_state: the mesos state
    :param filters: filters to apply to the slaves in the calculation, with
    filtering preformed by filter_slaves
    :param sort_func: a function that given a list of slaves, will return the
    sorted list of slaves.
    :returns: a dict of {attribute_value: resource_usage}, where resource usage
    is the dict returned by ``calculate_resource_utilization_for_slaves`` for
    slaves grouped by attribute value.
    :raises ValueError: if the mesos state contains no registered slaves
    """
    candidate_slaves = filter_slaves(mesos_state.get("slaves", []), filters)
    if not has_registered_slaves(mesos_state):
        raise ValueError("There are no slaves registered in the mesos state.")
    # only non-terminal tasks consume resources
    active_tasks = [
        task
        for task in get_all_tasks_from_state(mesos_state, include_orphans=True)
        if not is_task_terminal(task)
    ]
    grouped = group_slaves_by_key_func(grouping_func, candidate_slaves, sort_func)
    return {
        group_key: calculate_resource_utilization_for_slaves(
            slaves=group_slaves,
            tasks=filter_tasks_for_slaves(group_slaves, active_tasks),
        )
        for group_key, group_slaves in grouped.items()
    }
|
Given a function used to group slaves and mesos state, calculate
resource utilization for each value of a given attribute.
:grouping_func: a function that given a slave, will return the value of an
attribute to group by.
:param mesos_state: the mesos state
:param filters: filters to apply to the slaves in the calculation, with
filtering preformed by filter_slaves
:param sort_func: a function that given a list of slaves, will return the
sorted list of slaves.
:returns: a dict of {attribute_value: resource_usage}, where resource usage
is the dict returned by ``calculate_resource_utilization_for_slaves`` for
slaves grouped by attribute value.
|
get_resource_utilization_by_grouping
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_resource_utilization_by_grouping_kube(
    grouping_func: _GenericNodeGroupingFunctionT,
    kube_client: KubeClient,
    *,
    namespace: str,
    filters: Sequence[_GenericNodeFilterFunctionT] = [],
    sort_func: _GenericNodeSortFunctionT = None,
) -> Mapping[_KeyFuncRetT, ResourceUtilizationDict]:
    """Calculate per-group resource utilization for Kubernetes nodes.

    :param grouping_func: a function that, given a node, returns the value
        of the attribute to group by.
    :param kube_client: the Kubernetes client
    :param namespace: the namespace to fetch pods from
    :param filters: filters applied to the nodes before the calculation,
        with filtering performed by filter_slaves
    :param sort_func: a function that, given a list of nodes, returns the
        sorted list of nodes.
    :returns: a dict of {attribute_value: resource_usage}, where resource
        usage is the dict returned by
        ``calculate_resource_utilization_for_kube_nodes`` for the nodes
        grouped by attribute value.
    :raises ValueError: if no nodes are registered.
    """
    all_nodes = filter_slaves(get_all_nodes_cached(kube_client), filters)
    if len(all_nodes) == 0:
        raise ValueError("There are no nodes registered in the Kubernetes.")
    grouped_nodes = group_slaves_by_key_func(grouping_func, all_nodes, sort_func)
    all_pods = get_all_pods_cached(kube_client, namespace)
    # Index pods by the node they are scheduled on, for every node we know of.
    pods_by_node = {
        node.metadata.name: [
            pod for pod in all_pods if pod.spec.node_name == node.metadata.name
        ]
        for node in all_nodes
    }
    return {
        attribute_value: calculate_resource_utilization_for_kube_nodes(
            group, pods_by_node
        )
        for attribute_value, group in grouped_nodes.items()
    }
|
Given a function used to group nodes, calculate resource utilization
for each value of a given attribute.
:grouping_func: a function that given a node, will return the value of an
attribute to group by.
:param kube_client: the Kubernetes client
:param filters: filters to apply to the nodes in the calculation, with
filtering performed by filter_slaves
:param sort_func: a function that given a list of nodes, will return the
sorted list of nodes.
:returns: a dict of {attribute_value: resource_usage}, where resource usage
is the dict returned by ``calculate_resource_utilization_for_kube_nodes`` for
nodes grouped by attribute value.
|
get_resource_utilization_by_grouping_kube
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def resource_utillizations_from_resource_info(
    total: ResourceInfo, free: ResourceInfo
) -> Sequence[ResourceUtilization]:
    """Build one ResourceUtilization per metric from total/free ResourceInfo.

    :param total: ResourceInfo holding the total amount of each metric
    :param free: ResourceInfo holding the free amount of each metric
    :returns: a list of ResourceUtilization tuples, one per ResourceInfo field
    """
    # ResourceInfo is a namedtuple, so zipping its fields with both tuples
    # pairs every metric name with its total and free values in order.
    return [
        ResourceUtilization(metric=name, total=total_value, free=free_value)
        for name, total_value, free_value in zip(ResourceInfo._fields, total, free)
    ]
|
Given two ResourceInfo tuples, one for total and one for free,
create a ResourceUtilization tuple for each metric in the ResourceInfo.
:param total:
:param free:
:returns: ResourceInfo for a metric
|
resource_utillizations_from_resource_info
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def has_registered_slaves(
    mesos_state: MesosState,
) -> bool:
    """Report whether any slaves are registered with the master.

    :param mesos_state: the mesos state from the master
    :returns: True if at least one slave is registered, else False
    """
    slaves = mesos_state.get("slaves", [])
    return len(slaves) > 0
|
Return a boolean indicating if there are any slaves registered
to the master according to the mesos state.
:param mesos_state: the mesos state from the master
:returns: a boolean, indicating if there are > 0 slaves
|
has_registered_slaves
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_kube_resource_utilization_health(
    kube_client: KubeClient,
) -> Sequence[HealthCheckResult]:
    """Perform resource-utilization healthchecks against Kubernetes.

    :param kube_client: the Kubernetes client
    :returns: a list of HealthCheckResult tuples, one per resource check
    """
    nodes = get_all_nodes_cached(kube_client)
    # Pair each health assertion with the status getter that feeds it,
    # preserving the original check order.
    checks = (
        (assert_cpu_health, get_kube_cpu_status),
        (assert_memory_health, get_kube_memory_status),
        (assert_disk_health, get_kube_disk_status),
        (assert_gpu_health, get_kube_gpu_status),
        (assert_nodes_health, get_kube_nodes_health_status),
    )
    return [assert_fn(status_fn(nodes)) for assert_fn, status_fn in checks]
|
Perform healthchecks against Kubernetes.
:param kube_client: the Kubernetes client
:returns: a list of HealthCheckResult tuples
|
get_kube_resource_utilization_health
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_kube_status(
    kube_client: KubeClient, namespace: str
) -> Sequence[HealthCheckResult]:
    """Gather deployment and pod health information about Kubernetes.

    :param kube_client: the Kubernetes client
    :param namespace: the namespace whose pods are checked
    :returns: a list of HealthCheckResult tuples, one per check run
    """
    return run_healthchecks_with_param(
        [kube_client, namespace], [assert_kube_deployments, assert_kube_pods_running]
    )
|
Gather information about Kubernetes.
:param kube_client: the Kubernetes client
:return: string containing the status
|
get_kube_status
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def critical_events_in_outputs(healthcheck_outputs):
    """Return the subset of HealthCheckResults that are unhealthy.

    Only results whose ``healthy`` attribute is exactly False are included.
    """
    unhealthy = []
    for result in healthcheck_outputs:
        if result.healthy is False:
            unhealthy.append(result)
    return unhealthy
|
Given a list of HealthCheckResults return those which are unhealthy.
|
critical_events_in_outputs
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def generate_summary_for_check(name, ok):
    """Format a one-line, colorized status summary for a named check.

    :param name: the check's display name
    :param ok: True when the check passed
    :returns: a "<name> Status: <status>" string, green OK or red CRITICAL
    """
    if ok is True:
        status = PaastaColors.green("OK")
    else:
        status = PaastaColors.red("CRITICAL")
    return f"{name} Status: {status}"
|
Given a check name and a boolean indicating if the service is OK, return
a formatted message.
|
generate_summary_for_check
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def healthcheck_result_resource_utilization_pair_for_resource_utilization(
    utilization, threshold
):
    """Pair a ResourceUtilization with the HealthCheckResult describing it.

    :param utilization: a ResourceUtilization tuple
    :param threshold: the threshold that decides the health of the utilization
    :returns: a tuple of (HealthCheckResult, ResourceUtilization)
    """
    health = healthcheck_result_for_resource_utilization(utilization, threshold)
    return (health, utilization)
|
Given a ResourceUtilization, produce a tuple of (HealthCheckResult, ResourceUtilization),
where that HealthCheckResult describes the 'health' of a given utilization.
:param utilization: a ResourceUtilization tuple
:param threshold: a threshold which decides the health of the given ResourceUtilization
:returns: a tuple of (HealthCheckResult, ResourceUtilization)
|
healthcheck_result_resource_utilization_pair_for_resource_utilization
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def format_table_column_for_healthcheck_resource_utilization_pair(
    healthcheck_utilization_pair,
):
    """Render a (HealthCheckResult, ResourceUtilization) pair as a string.

    The utilization is formatted as "used/total (percent)" and colored
    green or red according to HealthCheckResult.healthy.

    :param healthcheck_utilization_pair: a tuple of (HealthCheckResult, ResourceUtilization)
    :returns: a colorized string representing the ResourceUtilization.
    """
    health, usage = healthcheck_utilization_pair
    color_func = PaastaColors.green if health.healthy else PaastaColors.red
    used = usage.total - usage.free
    if int(usage.total) == 0:
        # Avoid dividing by zero; report an empty pool as fully used.
        used_pct = 100
    else:
        used_pct = used / float(usage.total) * 100
    if usage.metric in ("cpus", "gpus"):
        return color_func(
            "{:.2f}/{:.0f} ({:.2f}%)".format(used, usage.total, used_pct)
        )
    # Other metrics are counted in MiB (scaled to bytes for naturalsize),
    # so render them with human-readable binary suffixes.
    return color_func(
        "{}/{} ({:.2f}%)".format(
            naturalsize(used * 1024 * 1024, gnu=True),
            naturalsize(usage.total * 1024 * 1024, gnu=True),
            used_pct,
        )
    )
|
Given a tuple of (HealthCheckResult, ResourceUtilization), return a
string representation of the ResourceUtilization such that it is formatted
according to the value of HealthCheckResult.healthy.
:param healthcheck_utilization_pair: a tuple of (HealthCheckResult, ResourceUtilization)
:returns: a string representing the ResourceUtilization.
|
format_table_column_for_healthcheck_resource_utilization_pair
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def format_row_for_resource_utilization_healthchecks(healthcheck_utilization_pairs):
    """Format each (HealthCheckResult, ResourceUtilization) pair in a list.

    :param healthcheck_utilization_pairs: a list of (HealthCheckResult, ResourceUtilization) tuples.
    :returns: a list with the string representation of each pair.
    """
    return list(
        map(
            format_table_column_for_healthcheck_resource_utilization_pair,
            healthcheck_utilization_pairs,
        )
    )
|
Given a list of (HealthCheckResult, ResourceUtilization) tuples, return a list with each of those
tuples represented by a formatted string.
:param healthcheck_utilization_pairs: a list of (HealthCheckResult, ResourceUtilization) tuples.
:returns: a list containing a string representation of each (HealthCheckResult, ResourceUtilization) tuple.
|
format_row_for_resource_utilization_healthchecks
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def get_table_rows_for_resource_info_dict(
    attribute_values, healthcheck_utilization_pairs
):
    """Build one table row: attribute columns followed by utilization columns.

    :param attribute_values: the attribute-value columns for the row; these
        become the leading entries of the returned list.
    :param healthcheck_utilization_pairs: a list of 2-tuples, where each
        tuple has the elements (HealthCheckResult, ResourceUtilization)
    :returns: a list of strings, representing a row in a table to be formatted.
    """
    formatted_columns = format_row_for_resource_utilization_healthchecks(
        healthcheck_utilization_pairs
    )
    return attribute_values + formatted_columns
|
A wrapper method to join together
:param attribute: The attribute value and formatted columns to be shown in
a single row. :param attribute_value: The value of the attribute
associated with the row. This becomes index 0 in the array returned.
:param healthcheck_utilization_pairs: a list of 2-tuples, where each tuple has the elements
(HealthCheckResult, ResourceUtilization)
:returns: a list of strings, representing a row in a table to be formatted.
|
get_table_rows_for_resource_info_dict
|
python
|
Yelp/paasta
|
paasta_tools/metrics/metastatus_lib.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/metrics/metastatus_lib.py
|
Apache-2.0
|
def pool(self):
    """Create the thread pool lazily, on the first request.

    Avoids instantiating an unused threadpool for blocking clients.
    """
    if self._pool is None:
        # Register cleanup before the pool exists so it is always closed
        # at interpreter exit once created.
        atexit.register(self.close)
        self._pool = ThreadPool(self.pool_threads)
    return self._pool
|
Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
|
pool
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def sanitize_for_serialization(cls, obj):
    """Builds a JSON POST object.

    Serialization rules, checked in order:
    - OpenAPI model (ModelNormal/ModelComposed): its properties dict,
      sanitized recursively.
    - None, str, int, float, bool: returned as-is.
    - datetime.datetime / datetime.date: ISO-8601 string.
    - ModelSimple: its wrapped value, sanitized recursively.
    - list / tuple: each element sanitized recursively.
    - dict: each value sanitized recursively.

    :param obj: The data to serialize.
    :return: The serialized form of data.
    :raises ApiValueError: if obj's type has no serialization rule.
    """
    if isinstance(obj, (ModelNormal, ModelComposed)):
        properties = model_to_dict(obj, serialize=True)
        return {
            name: cls.sanitize_for_serialization(value)
            for name, value in properties.items()
        }
    if isinstance(obj, (str, int, float, none_type, bool)):
        return obj
    if isinstance(obj, (datetime, date)):
        return obj.isoformat()
    if isinstance(obj, ModelSimple):
        return cls.sanitize_for_serialization(obj.value)
    if isinstance(obj, (list, tuple)):
        return [cls.sanitize_for_serialization(element) for element in obj]
    if isinstance(obj, dict):
        return {
            name: cls.sanitize_for_serialization(value)
            for name, value in obj.items()
        }
    raise ApiValueError('Unable to prepare type {} for serialization'.format(obj.__class__.__name__))
|
Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
|
sanitize_for_serialization
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def deserialize(self, response, response_type, _check_type):
    """Deserializes response into an object.

    :param response: RESTResponse object to be deserialized.
    :param response_type: For the response, a tuple containing:
        valid classes
        a list containing valid classes (for list schemas)
        a dict containing a tuple of valid classes as the value
        Example values:
        (str,)
        (Pet,)
        (float, none_type)
        ([int, none_type],)
        ({str: (bool, str, int, float, date, datetime, str, none_type)},)
    :param _check_type: boolean, whether to check the types of the data
        received from the server
    :type _check_type: bool
    :return: deserialized object.
    """
    # File downloads: persist the body to a tmp file and return the instance.
    if response_type == (file_type,):
        content_disposition = response.getheader("Content-Disposition")
        return deserialize_file(
            response.data,
            self.configuration,
            content_disposition=content_disposition,
        )
    # Prefer parsed JSON; fall back to the raw body for non-JSON payloads.
    try:
        payload = json.loads(response.data)
    except ValueError:
        payload = response.data
    # The 'received_data' key gives users context if they deserialize a
    # string and the data type is wrong.
    return validate_and_convert_types(
        payload,
        response_type,
        ['received_data'],
        True,
        _check_type,
        configuration=self.configuration,
    )
|
Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: For the response, a tuple containing:
valid classes
a list containing valid classes (for list schemas)
a dict containing a tuple of valid classes as the value
Example values:
(str,)
(Pet,)
(float, none_type)
([int, none_type],)
({str: (bool, str, int, float, date, datetime, str, none_type)},)
:param _check_type: boolean, whether to check the types of the data
received from the server
:type _check_type: bool
:return: deserialized object.
|
deserialize
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def call_api(
    self,
    resource_path: str,
    method: str,
    path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
    query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
    header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
    body: typing.Optional[typing.Any] = None,
    post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
    files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
    response_type: typing.Optional[typing.Tuple[typing.Any]] = None,
    auth_settings: typing.Optional[typing.List[str]] = None,
    async_req: typing.Optional[bool] = None,
    _return_http_data_only: typing.Optional[bool] = None,
    collection_formats: typing.Optional[typing.Dict[str, str]] = None,
    _preload_content: bool = True,
    _request_timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    _host: typing.Optional[str] = None,
    _check_type: typing.Optional[bool] = None
):
    """Makes the HTTP request and returns deserialized data.

    If async_req is True, the request is dispatched to the thread pool and
    the request thread is returned; otherwise the call runs synchronously
    and the response is returned directly.

    :param resource_path: Path to method endpoint.
    :param method: Method to call.
    :param path_params: Path parameters in the url.
    :param query_params: Query parameters in the url.
    :param header_params: Header parameters to be placed in the request
        header.
    :param body: Request body.
    :param post_params: Request post form parameters, for
        `application/x-www-form-urlencoded`, `multipart/form-data`.
    :param files: key -> field name, value -> a list of open file objects
        for `multipart/form-data`.
    :param response_type: For the response, a tuple containing:
        valid classes
        a list containing valid classes (for list schemas)
        a dict containing a tuple of valid classes as the value
        Example values:
        (str,)
        (Pet,)
        (float, none_type)
        ([int, none_type],)
        ({str: (bool, str, int, float, date, datetime, str, none_type)},)
    :param auth_settings: Auth Settings names for the request.
    :param async_req: execute request asynchronously
    :param _return_http_data_only: response data without head status code
        and headers
    :param collection_formats: dict of collection formats for path, query,
        header, and post parameters.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request timeout. It can also be
        a pair (tuple) of (connection, read) timeouts.
    :param _host: host to use for this request instead of the default.
    :param _check_type: boolean describing if the data back from the server
        should have its type checked.
    :return: the request thread when async_req is True, otherwise the
        response directly.
    """
    request_args = (
        resource_path,
        method,
        path_params,
        query_params,
        header_params,
        body,
        post_params,
        files,
        response_type,
        auth_settings,
        _return_http_data_only,
        collection_formats,
        _preload_content,
        _request_timeout,
        _host,
        _check_type,
    )
    if async_req:
        # Asynchronous: run on the shared thread pool, return the thread.
        return self.pool.apply_async(self.__call_api, request_args)
    # Synchronous: perform the call inline and return its result.
    return self.__call_api(*request_args)
|
Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: For the response, a tuple containing:
valid classes
a list containing valid classes (for list schemas)
a dict containing a tuple of valid classes as the value
Example values:
(str,)
(Pet,)
(float, none_type)
([int, none_type],)
({str: (bool, str, int, float, date, datetime, str, none_type)},)
:param files: key -> field name, value -> a list of open file
objects for `multipart/form-data`.
:type files: dict
:param async_req bool: execute request asynchronously
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:type collection_formats: dict, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _check_type: boolean describing if the data back from the server
should have its type checked.
:type _check_type: bool, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
|
call_api
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def request(self, method, url, query_params=None, headers=None,
            post_params=None, body=None, _preload_content=True,
            _request_timeout=None):
    """Dispatch the HTTP request to the matching RESTClient verb method.

    :raises ApiValueError: if method is not a supported HTTP verb.
    """
    # Arguments common to every verb.
    common = dict(
        query_params=query_params,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
    )
    if method in ("GET", "HEAD"):
        # Body-less verbs.
        handler = getattr(self.rest_client, method)
        return handler(url, headers=headers, **common)
    if method in ("OPTIONS", "POST", "PUT", "PATCH"):
        # Verbs that carry both form parameters and a body.
        handler = getattr(self.rest_client, method)
        return handler(url,
                       headers=headers,
                       post_params=post_params,
                       body=body,
                       **common)
    if method == "DELETE":
        # DELETE carries a body but no form parameters.
        return self.rest_client.DELETE(url,
                                       headers=headers,
                                       body=body,
                                       **common)
    raise ApiValueError(
        "http method must be `GET`, `HEAD`, `OPTIONS`,"
        " `POST`, `PATCH`, `PUT` or `DELETE`."
    )
|
Makes the HTTP request using RESTClient.
|
request
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def parameters_to_tuples(self, params, collection_formats):
    """Get parameters as list of tuples, formatting collections.

    :param params: Parameters as dict or list of two-tuples
    :param dict collection_formats: Parameter collection formats
    :return: Parameters as list of tuples, collections formatted
    """
    if collection_formats is None:
        collection_formats = {}
    pairs = params.items() if isinstance(params, dict) else params
    formatted = []
    for name, value in pairs:
        if name not in collection_formats:
            formatted.append((name, value))
            continue
        style = collection_formats[name]
        if style == 'multi':
            # One tuple per element, e.g. ?name=a&name=b
            formatted.extend((name, element) for element in value)
        else:
            # Join elements with the style's delimiter; csv is the default.
            delimiter = {'ssv': ' ', 'tsv': '\t', 'pipes': '|'}.get(style, ',')
            formatted.append(
                (name, delimiter.join(str(element) for element in value))
            )
    return formatted
|
Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
|
parameters_to_tuples
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def files_parameters(self, files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None):
    """Build multipart/form-data parameters from open file objects.

    :param files: None, or a dict mapping field name to a list of open
        file objects (None entries are skipped as nullable).
    :return: list of (field_name, (filename, data, mimetype)) tuples.
    :raises ApiValueError: if any supplied file object is already closed.
    """
    if files is None:
        return []
    form_params = []
    for field_name, handles in files.items():
        if handles is None:
            # nullable file field: skip None values
            continue
        for handle in handles:
            if handle is None:
                # nullable file entry: skip None values
                continue
            if handle.closed is True:
                raise ApiValueError(
                    "Cannot read a closed file. The passed in file_type "
                    "for %s must be open." % field_name
                )
            filename = os.path.basename(handle.name)
            payload = handle.read()
            mimetype = (mimetypes.guess_type(filename)[0] or
                        'application/octet-stream')
            form_params.append((field_name, (filename, payload, mimetype)))
            handle.close()
    return form_params
|
Builds form parameters.
:param files: None or a dict with key=param_name and
value is a list of open file objects
:return: List of tuples of form parameters with file data
|
files_parameters
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def select_header_accept(self, accepts):
    """Returns `Accept` based on an array of accepts provided.

    :param accepts: List of headers.
    :return: Accept (e.g. application/json), or None when none are given.
    """
    if not accepts:
        return None
    normalized = [accept.lower() for accept in accepts]
    # Prefer JSON whenever the server offers it.
    if 'application/json' in normalized:
        return 'application/json'
    return ', '.join(normalized)
|
Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
|
select_header_accept
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def select_header_content_type(self, content_types):
    """Returns `Content-Type` based on an array of content_types provided.

    :param content_types: List of content-types.
    :return: Content-Type (e.g. application/json).
    """
    # Default to JSON when the endpoint declares nothing.
    if not content_types:
        return 'application/json'
    normalized = [content_type.lower() for content_type in content_types]
    if 'application/json' in normalized or '*/*' in normalized:
        return 'application/json'
    return normalized[0]
|
Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
|
select_header_content_type
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def update_params_for_auth(self, headers, querys, auth_settings,
                           resource_path, method, body):
    """Updates header and query params based on authentication setting.

    :param headers: Header parameters dict to be updated.
    :param querys: Query parameters tuple list to be updated.
    :param auth_settings: Authentication setting identifiers list.
    :param resource_path: A string representation of the HTTP request resource path.
    :param method: A string representation of the HTTP request method.
    :param body: A object representing the body of the HTTP request.
        The object type is the return value of _encoder.default().
    :raises ApiValueError: if an auth setting targets an unknown location.
    """
    if not auth_settings:
        return
    for auth in auth_settings:
        auth_setting = self.configuration.auth_settings().get(auth)
        if not auth_setting:
            # Unknown identifier: nothing to apply.
            continue
        location = auth_setting['in']
        if location == 'cookie':
            headers['Cookie'] = auth_setting['value']
        elif location == 'header':
            # http-signature auth is applied elsewhere, not as a plain header.
            if auth_setting['type'] != 'http-signature':
                headers[auth_setting['key']] = auth_setting['value']
        elif location == 'query':
            querys.append((auth_setting['key'], auth_setting['value']))
        else:
            raise ApiValueError(
                'Authentication token must be in `query` or `header`'
            )
|
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param resource_path: A string representation of the HTTP request resource path.
:param method: A string representation of the HTTP request method.
:param body: A object representing the body of the HTTP request.
The object type is the return value of _encoder.default().
|
update_params_for_auth
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def __init__(self, settings=None, params_map=None, root_map=None,
headers_map=None, api_client=None, callable=None):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (tuple/None): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
collection_format_map (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient) api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
self.params_map['all'].extend([
'async_req',
'_host_index',
'_preload_content',
'_request_timeout',
'_return_http_data_only',
'_check_input_type',
'_check_return_type'
])
self.params_map['nullable'].extend(['_request_timeout'])
self.validations = root_map['validations']
self.allowed_values = root_map['allowed_values']
self.openapi_types = root_map['openapi_types']
extra_types = {
'async_req': (bool,),
'_host_index': (none_type, int),
'_preload_content': (bool,),
'_request_timeout': (none_type, int, (int,), [int]),
'_return_http_data_only': (bool,),
'_check_input_type': (bool,),
'_check_return_type': (bool,)
}
self.openapi_types.update(extra_types)
self.attribute_map = root_map['attribute_map']
self.location_map = root_map['location_map']
self.collection_format_map = root_map['collection_format_map']
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
|
Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (tuple/None): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
collection_format_map (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient): api client instance
callable (function): the function which is invoked when the
Endpoint is called
|
__init__
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/api_client.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/api_client.py
|
Apache-2.0
|
def get_default_copy(cls):
    """Return a new configuration instance to use as the default.

    Yields a deep copy of the default set via ``set_default`` when one
    exists; otherwise falls back to a freshly constructed
    ``Configuration``.

    :return: The configuration object.
    """
    default = cls._default
    if default is None:
        return Configuration()
    return copy.deepcopy(default)
|
Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
|
get_default_copy
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/configuration.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/configuration.py
|
Apache-2.0
|
def logger_file(self, value):
    """The logger file.
    If the logger_file is None, then add stream handler and remove file
    handler. Otherwise, add file handler and remove stream handler.
    :param value: The logger_file path.
    :type: str
    """
    self.__logger_file = value
    if not self.__logger_file:
        # No path configured: leave the existing handlers alone.
        return
    # A path was supplied: route every known logger through a file handler.
    handler = logging.FileHandler(self.__logger_file)
    handler.setFormatter(self.logger_formatter)
    self.logger_file_handler = handler
    for logger in self.logger.values():
        logger.addHandler(handler)
|
The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
|
logger_file
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/configuration.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/configuration.py
|
Apache-2.0
|
def debug(self, value):
    """Debug status
    :param value: The debug status, True or False.
    :type: bool
    """
    self.__debug = value
    # Debug on -> verbose logging everywhere; off -> back to the default
    # `logging.WARNING` threshold.
    level = logging.DEBUG if self.__debug else logging.WARNING
    for logger in self.logger.values():
        logger.setLevel(level)
    # Mirror the choice onto http.client so wire-level traffic is
    # printed only while debugging.
    http_client.HTTPConnection.debuglevel = 1 if self.__debug else 0
|
Debug status
:param value: The debug status, True or False.
:type: bool
|
debug
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/configuration.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/configuration.py
|
Apache-2.0
|
def logger_format(self, value):
    """The logger format.
    The logger_formatter will be updated when sets logger_format.
    :param value: The format string.
    :type: str
    """
    # Store the raw format string and rebuild the formatter from it so
    # the two never drift apart.
    self.__logger_format = value
    self.logger_formatter = logging.Formatter(self.__logger_format)
|
The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
|
logger_format
|
python
|
Yelp/paasta
|
paasta_tools/paastaapi/configuration.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paastaapi/configuration.py
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.